file_path
stringlengths
21
224
content
stringlengths
0
80.8M
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/cfg/train/FrankaBallPushingPPO.yaml
params: seed: ${...seed} algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [256, 128, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint load_path: ${...checkpoint} # path to the checkpoint to load config: name: ${resolve_default:FrankaBallPushing,${....experiment}} full_experiment_name: ${.name} env_name: rlgpu device: ${....rl_device} device_name: ${....rl_device} ppo: True mixed_precision: True normalize_input: True normalize_value: True num_actors: ${....task.env.numEnvs} reward_shaper: scale_value: 0.01 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 5e-4 lr_schedule: adaptive kl_threshold: 0.008 score_to_win: 10000000 max_epochs: ${resolve_default:10000,${....max_iterations}} save_best_after: 50 save_frequency: 100 print_stats: True grad_norm: 1.0 entropy_coef: 0.0 truncate_grads: True e_clip: 0.2 horizon_length: 32 minibatch_size: 32768 mini_epochs: 8 critic_coef: 4 clip_value: True seq_len: 4 bounds_loss_coef: 0.0001
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/cfg/train/FrankaBallCatchingPPO.yaml
params: seed: ${...seed} algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [256, 128, 64] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint load_path: ${...checkpoint} # path to the checkpoint to load config: name: ${resolve_default:FrankaBallCatching,${....experiment}} full_experiment_name: ${.name} env_name: rlgpu device: ${....rl_device} device_name: ${....rl_device} ppo: True mixed_precision: True normalize_input: True normalize_value: True num_actors: ${....task.env.numEnvs} reward_shaper: scale_value: 0.01 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 5e-4 lr_schedule: adaptive kl_threshold: 0.008 score_to_win: 10000000 max_epochs: ${resolve_default:100000,${....max_iterations}} save_best_after: 50 save_frequency: 200 print_stats: True grad_norm: 1.0 entropy_coef: 0.0 truncate_grads: True e_clip: 0.2 horizon_length: 16 minibatch_size: 16384 mini_epochs: 8 critic_coef: 4 clip_value: True seq_len: 4 bounds_loss_coef: 0.0001
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/cfg/train/FrankaBallBalancingPPO.yaml
params: seed: ${...seed} algo: name: a2c_continuous model: name: continuous_a2c_logstd network: name: actor_critic separate: False space: continuous: mu_activation: None sigma_activation: None mu_init: name: default sigma_init: name: const_initializer val: 0 fixed_sigma: True mlp: units: [128, 64, 32] activation: elu d2rl: False initializer: name: default regularizer: name: None load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint load_path: ${...checkpoint} # path to the checkpoint to load config: name: ${resolve_default:FrankaBallBalancing,${....experiment}} full_experiment_name: ${.name} env_name: rlgpu device: ${....rl_device} device_name: ${....rl_device} ppo: True mixed_precision: True normalize_input: True normalize_value: True num_actors: ${....task.env.numEnvs} reward_shaper: scale_value: 0.01 normalize_advantage: True gamma: 0.99 tau: 0.95 learning_rate: 5e-4 lr_schedule: adaptive kl_threshold: 0.008 score_to_win: 10000000 max_epochs: ${resolve_default:100000,${....max_iterations}} save_best_after: 50 save_frequency: 200 print_stats: True grad_norm: 1.0 entropy_coef: 0.0 truncate_grads: True e_clip: 0.2 horizon_length: 32 minibatch_size: 16384 mini_epochs: 8 critic_coef: 4 clip_value: True seq_len: 4 bounds_loss_coef: 0.0001
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Ball_Catching.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask from Models.Franka.Franka import Franka from Models.Franka.Franka_view import FrankaView from Models.ball_catching.tool import Tool from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage from omni.isaac.core.utils.torch.transformations import * from omni.isaac.core.utils.torch.rotations import * from omni.isaac.cloner import Cloner import numpy as np import torch import math from pxr import Gf, Usd, UsdGeom class FrankaBallCatchingTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] self.distX_offset = 0.04 self.dt = 1/60. self._num_observations = 27 self._num_actions = 9 # Flag for testing self.is_test = False self.initial_test_value = None self.is_action_noise = False RLTask.__init__(self, name, env) # Extra info for TensorBoard self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"ball_vel": torch_zeros(), "ball_tool_dist": torch_zeros()} return def set_up_scene(self, scene) -> None: # Franka franka_translation = torch.tensor([0.3, 0.0, 0.0]) self.get_franka(franka_translation) self.get_tool() self.get_ball() # Here the env is cloned (cannot clone particle systems right now) super().set_up_scene(scene) # Add Franka self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") # Add tool self._tool = RigidPrimView(prim_paths_expr="/World/envs/.*/tool/tool/tool_mesh", name="tool_view", reset_xform_properties=False) self._tool_center = RigidPrimView(prim_paths_expr="/World/envs/.*/tool/tool/center_cube", name="tool_center_view", reset_xform_properties=False) # Add ball self._ball = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view", reset_xform_properties=False) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._tool) scene.add(self._tool_center) scene.add(self._ball) self.init_data() return def get_franka(self, translation): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation) self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")) def get_tool(self): tool = Tool(prim_path=self.default_zero_env_path + "/tool", name="tool") self._sim_config.apply_articulation_settings("tool", get_prim_at_path(tool.prim_path), self._sim_config.parse_actor_config("tool")) def get_ball(self): ball = DynamicSphere( name = 'ball', position=[-0.8,0,1.5], orientation=[1,0,0,0], prim_path=self.default_zero_env_path + "/ball", radius=0.01, color=np.array([1, 0, 0]), density = 100, mass = 0.001 ) # Set as testing mode def set_as_test(self): self.is_test = True # Set action noise def set_action_noise(self): self.is_action_noise = True # Set initial test values for testing mode def set_initial_test_value(self, value): # for ball pushing: initial x,y positions of the ball self.initial_test_value = value def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device) lfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device ) rfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device ) # finger pos finger_pose = torch.zeros(7, device=self._device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3])) # franka grasp local pose grasp_pose_axis = 1 franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]) franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device) self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1)) self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1)) self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.franka_default_dof_pos = torch.tensor( [0.0, -0.872, 0.0, -2.0, 0.0, 2.618, 0.785, 0.04, 0.04], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) def get_observations(self) -> dict: # Franka hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False) franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) # ball self.ball_pos, self.ball_rot = self._ball.get_world_poses(clone=False) ball_vel = self._ball.get_velocities() # ball velocity ball_linvels = ball_vel[:, 0:3] # ball linear velocity # tool tool_pos, tool_rot = self._tool_center.get_world_poses(clone=False) # tool position to_target = tool_pos - self.ball_pos # ball to tool dist self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, self.ball_pos, to_target, ball_linvels, ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) # if action noise if self.is_action_noise is True: # Gaussian white noise with 0.01 variance self.actions = self.actions + (0.5)*torch.randn_like(self.actions) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) # fix the finger movement so that the tool will always be grasped in hand self.franka_dof_targets[:,7] = self.franka_default_dof_pos[7] self.franka_dof_targets[:,8] = self.franka_default_dof_pos[8] self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka pos = torch.clamp( self.franka_default_dof_pos.unsqueeze(0), #+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) # reset tool self._tool.set_world_poses(self.default_tool_pos[env_ids], self.default_tool_rot[env_ids], indices = indices) self._tool.set_velocities(self.default_tool_velocity[env_ids], indices = indices) if not self.is_test: # reset ball position within an area: x [-0.1, 0.1], y [-0.1,0.1] self.new_ball_pos = self.default_ball_pos.clone().detach() self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + (0.05 + 0.05) * torch.rand(self._num_envs, device=self._device) -0.05 self.new_ball_pos[:,1] = self.default_ball_pos[:,1] + (0.05 + 0.05) * torch.rand(self._num_envs, device=self._device) -0.05 # reset ball velocity: default_ball_vel = [2.2 0.1, 0.0] # x-axis vel: [1.0, 1.5] # y-axis vel: [0.0, 0.2] self.new_ball_vel = self.default_ball_velocity.clone().detach() self.new_ball_vel[:,0] = (1.0 - 1.0) * torch.rand(self._num_envs, device=self._device) + 1.0 self.new_ball_vel[:,1] = (0.0 - 0.0) * torch.rand(self._num_envs, device=self._device) + 0.0 # reset ball self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices) self._ball.set_velocities(self.new_ball_vel[env_ids], indices = indices) else: self.new_ball_pos = self.default_ball_pos.clone().detach() self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + self.initial_test_value[0] self.new_ball_pos[:,1] = self.default_ball_pos[:,1] + self.initial_test_value[1] self.new_ball_vel = self.default_ball_velocity.clone().detach() self.new_ball_vel[:,0] = self.initial_test_value[2] self.new_ball_vel[:,1] = self.initial_test_value[3] # reset ball self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices) self._ball.set_velocities(self.new_ball_vel[env_ids], indices = indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): # Franka self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) # tool self.default_tool_pos, self.default_tool_rot = self._tool.get_world_poses() self.default_tool_velocity = self._tool.get_velocities() # ball self.default_ball_pos, self.default_ball_rot = self._ball.get_world_poses() self.default_ball_velocity = self._ball.get_velocities() # change default velocities self.default_ball_velocity[:,0] = 2.2 self.default_ball_velocity[:,1] = 0.1 self.default_ball_velocity[:,2] = 0.0 self._ball.set_velocities(self.default_ball_velocity) # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: # variables for reward ball_pos = self.ball_pos # ball pos ball_vel = self._ball.get_velocities() # ball velocity tool_pos, tool_rot = self._tool_center.get_world_poses() # tool center pos and rot ball_linvels = ball_vel[:, 0:3] # ball linear velocity # 1st reward ball to tool center distance ball_center_dist = torch.norm(tool_pos - ball_pos, p=2, dim=-1) ball_center_XY_dist = torch.norm(tool_pos[:,0:3] - ball_pos[:,0:3], p=2, dim=-1) center_dist_reward = 1.0/(1.0+ball_center_dist*100) # 2nd reward: ball is unmoved norm_ball_linvel = torch.norm(ball_linvels, p=2, dim=-1) ball_vel_reward = 1.0/(1.0+norm_ball_linvel*100) # 3rd reward: rotation not too much rot_diff = torch.norm(tool_rot - self.default_tool_rot, p=2, dim=-1) tool_rot_reward = 1.0/(1.0+rot_diff) # action penalty action_penalty = torch.sum(self.actions[:,0:7] ** 2, dim=-1) action_penalty = 1 - 1.0 / (1.0 + action_penalty) # liveness_reward liveness_reward = torch.where(ball_center_XY_dist<0.03, torch.ones_like(center_dist_reward), torch.zeros_like(center_dist_reward)) # final cumulative reward final_reward = 1.0*center_dist_reward + 1.0*ball_vel_reward + 0.0*tool_rot_reward + 0.5*liveness_reward - 0.01*action_penalty self.rew_buf[:] = final_reward # log additional info self.episode_sums["ball_vel"] += norm_ball_linvel self.episode_sums["ball_tool_dist"] += ball_center_dist def is_done(self) -> None: if not self.is_test: ball_pos = self.ball_pos # ball pos tool_pos, tool_rot = self._tool_center.get_world_poses() # tool center pos and rot ball_center_dist = torch.norm(tool_pos - ball_pos, p=2, dim=-1) # 1st reset: if ball falls from tool # self.reset_buf = torch.where(ball_center_dist > 5.0, torch.ones_like(self.reset_buf), self.reset_buf) # 2nd reset: if ball falls to the ground self.reset_buf = torch.where(self.ball_pos[:,2] < 0.02, torch.ones_like(self.reset_buf), self.reset_buf) # 3rd reset if max length reached self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) else: self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Ball_Balancing.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask from Models.Franka.Franka import Franka from Models.Franka.Franka_view import FrankaView from Models.ball_balancing.tool import Tool from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.transformations import * from omni.isaac.core.utils.torch.rotations import * from omni.isaac.cloner import Cloner import numpy as np import torch import math from pxr import Usd, UsdGeom class FrankaBallBalancingTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] self.ball_radius = self._task_cfg["env"]["ballRadius"] self.ball_initial_position = self._task_cfg["env"]["ballInitialPosition"] self.ball_initial_orientation = self._task_cfg["env"]["ballInitialOrientation"] self.distX_offset = 0.04 self.dt = 1/60. self._num_observations = 27 self._num_actions = 9 # Flag for testing self.is_test = False self.initial_test_value = None self.is_action_noise = False RLTask.__init__(self, name, env) # Extra info for TensorBoard self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"final_reward": torch_zeros(),} return def set_up_scene(self, scene) -> None: franka_translation = torch.tensor([0.35, 0.0, 0.0]) self.get_franka(franka_translation) self.get_tool() self.get_ball() super().set_up_scene(scene) # Add Franka self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") # Add Tool self._tool = RigidPrimView(prim_paths_expr="/World/envs/.*/tool/tool/tool", name="tool_view", reset_xform_properties=False) # Add ball self._ball = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view", reset_xform_properties=False) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._ball) scene.add(self._tool) self.init_data() return def get_franka(self, translation): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation) self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")) def get_tool(self): tool = Tool(prim_path=self.default_zero_env_path + "/tool", name="tool") self._sim_config.apply_articulation_settings("tool", get_prim_at_path(tool.prim_path), self._sim_config.parse_actor_config("tool")) def get_ball(self): ball = DynamicSphere( name = 'ball', position=self.ball_initial_position, orientation=self.ball_initial_orientation, prim_path=self.default_zero_env_path + "/ball", radius=self.ball_radius, color=np.array([1, 0, 0]), density = 100, mass = 0.15 ) self._sim_config.apply_articulation_settings("ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")) # Set as testing mode def set_as_test(self): self.is_test = True # Set action noise def set_action_noise(self): self.is_action_noise = True # Set initial test values for testing mode def set_initial_test_value(self, value): # for ball pushing: initial x,y positions of the ball self.initial_test_value = value def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device) lfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device ) rfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device ) finger_pose = torch.zeros(7, device=self._device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3])) grasp_pose_axis = 1 franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]) franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device) self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1)) self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1)) self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) # default franka pos: for initially grap the tool self.franka_default_dof_pos = torch.tensor( [0.0, -0.872, 0.0, -2.0, 0.0, 2.618, 0.785, 0.004, 0.004], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) def get_observations(self) -> dict: # Franka hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False) franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) # Ball self.ball_pos, self.ball_rot = self._ball.get_world_poses(clone=False) ball_vel = self._ball.get_velocities() # ball velocity ball_linvels = ball_vel[:, 0:3] # ball linear velocity tool_pos, tool_rot = self._tool.get_world_poses(clone=False) to_target = tool_pos - self.ball_pos dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, self.ball_pos, to_target, ball_linvels, # self.location_ball_pos # self.cabinet_dof_pos[:, 3].unsqueeze(-1), # self.cabinet_dof_vel[:, 3].unsqueeze(-1), ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) # if action noise if self.is_action_noise is True: # Gaussian white noise with 0.01 variance self.actions = self.actions + (0.5)*torch.randn_like(self.actions) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) # NOTE HERE: right now I fix the finger movement so that the tool will always be grasped in hand self.franka_dof_targets[:,7] = self.franka_default_dof_pos[7] self.franka_dof_targets[:,8] = self.franka_default_dof_pos[8] self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka (due to initial grasping, cannot randomize) pos = torch.clamp( self.franka_default_dof_pos.unsqueeze(0), # + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) # reset tool self._tool.set_world_poses(self.default_tool_pos[env_ids], self.default_tool_rot[env_ids], indices = indices) self._tool.set_velocities(self.default_tool_velocity[env_ids], indices = indices) # reset ball position within an area: x [-0.15, 0.15], y [-0.15,0.15] # if not test, randomize ball initial positions for training if not self.is_test: self.new_ball_pos = self.default_ball_pos.clone().detach() self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + (0.15 + 0.15) * torch.rand(self._num_envs, device=self._device) -0.15 self.new_ball_pos[:,1] = self.default_ball_pos[:,1] + (0.15 + 0.15) * torch.rand(self._num_envs, device=self._device) -0.15 self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices) self._ball.set_velocities(self.default_ball_velocity[env_ids], indices = indices) # if is test mode, set the ball to given position (1 environment) else: self.new_ball_pos = self.default_ball_pos.clone().detach() self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + self.initial_test_value[0] self.new_ball_pos[:,1] = self.default_ball_pos[:,1] + self.initial_test_value[1] self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices) self._ball.set_velocities(self.default_ball_velocity[env_ids], indices = indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): # Franka self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) # tool self.default_tool_pos, self.default_tool_rot = self._tool.get_world_poses() self.default_tool_velocity = self._tool.get_velocities() # ball self.default_ball_pos, self.default_ball_rot = self._ball.get_world_poses() self.default_ball_velocity = self._ball.get_velocities() # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: # variables for reward ball_pos = self.ball_pos # ball pos ball_vel = self._ball.get_velocities() # ball velocity tool_pos, tool_rot = self._tool.get_world_poses() # tool center pos and rot ball_linvels = ball_vel[:, 0:3] # ball linear velocity # XXX REWARD # 1st reward: ball keeps in the center (not with z-axis?) (with z-axis is good) # ball_center_dist = torch.norm(tool_pos[:,0:2] - ball_pos[:,0:2], p=2, dim=-1) ball_center_dist_3d = torch.norm(tool_pos - ball_pos, p=2, dim=-1) # center_dist_reward = 1-torch.tanh(4*ball_center_dist) # to cubic? center_dist_reward = 1.0/(1.0+ball_center_dist_3d) # 2nd reward: ball unmove norm_ball_linvel = torch.norm(ball_linvels, p=2, dim=-1) ball_vel_reward = 1.0/(1.0+norm_ball_linvel) # 3rd reward: rotation not too much rot_diff = torch.norm(tool_rot - self.default_tool_rot, p=2, dim=-1) tool_rot_reward = 1.0/(1.0+rot_diff) # stay alive liveness = torch.where(ball_pos[:,2]>0.4, torch.ones_like(ball_pos[:,2]), torch.zeros_like(ball_pos[:,2])) # the weight of center_dist_reward and ball_vel_reward should be similar # how about tool rotation reward? final_reward = 10.0*center_dist_reward + 5.0*ball_vel_reward + 1.0*tool_rot_reward + 1.0*liveness self.rew_buf[:] = final_reward # for record self.episode_sums["final_reward"] += final_reward def is_done(self) -> None: if not self.is_test: # 1st reset: if max length reached self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) ball_pos = self.ball_pos # ball pos tool_pos, tool_rot = self._tool.get_world_poses() # tool center pos and rot ball_center_dist = torch.norm(tool_pos - ball_pos, p=2, dim=-1) # 2nd reset: if ball falls from tool self.reset_buf = torch.where(ball_center_dist > 0.54, torch.ones_like(self.reset_buf), self.reset_buf) # 3rd reset: if ball falls too low self.reset_buf = torch.where(self.ball_pos[:,2] < 0.5, torch.ones_like(self.reset_buf), self.reset_buf) else: self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Cube_Stacking.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask from Models.Franka.Franka import Franka from Models.Franka.Franka_view import FrankaView from omni.isaac.core.objects import DynamicCuboid from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage from omni.isaac.core.utils.torch.transformations import * from omni.isaac.core.utils.torch.rotations import * from omni.isaac.cloner import Cloner import numpy as np import torch import math from pxr import Gf, Usd, UsdGeom class FrankaCubeStackingTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] self.distX_offset = 0.04 self.dt = 1/60. self._num_observations = 28 self._num_actions = 9 # Flag for testing self.is_test = False self.initial_test_value = None self.is_action_noise = False RLTask.__init__(self, name, env) # Extra info for TensorBoard self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"cube_cube_dist": torch_zeros(), "finger_to_cube_dist": torch_zeros(), "is_stacked": torch_zeros(), "success_rate": torch_zeros()} return def set_up_scene(self, scene) -> None: # Franka franka_translation = torch.tensor([0.3, 0.0, 0.0]) self.get_franka(franka_translation) self.get_cube() self.get_target_cube() # Here the env is cloned super().set_up_scene(scene) # Add Franka self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") # Add cube self._cube = RigidPrimView(prim_paths_expr="/World/envs/.*/cube", name="cube_view", reset_xform_properties=False) # Add location_ball self._target_cube = RigidPrimView(prim_paths_expr="/World/envs/.*/target_cube", name="target_cube_view", reset_xform_properties=False) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._cube) scene.add(self._target_cube) self.init_data() return def get_franka(self, translation): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation) self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")) def get_cube(self): cube = DynamicCuboid( name = 'cube', position=[-0.04, 0.0, 0.91], orientation=[1,0,0,0], size=0.05, prim_path=self.default_zero_env_path + "/cube", color=np.array([1, 0, 0]), density = 100 ) def get_target_cube(self): target_cube = DynamicCuboid( name = 'target_cube', position=[-0.3, 0.1, 0.025], orientation=[1, 0, 0, 0], prim_path=self.default_zero_env_path + "/target_cube", size=0.05, color=np.array([0, 1, 0]), density = 100 ) # Set as testing mode def set_as_test(self): self.is_test = True # Set action noise def set_action_noise(self): self.is_action_noise = True # Set initial test values for testing mode def set_initial_test_value(self, value): # for ball pushing: initial x,y positions of the ball self.initial_test_value = value def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device) lfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device ) rfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device ) # finger pos finger_pose = torch.zeros(7, device=self._device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3])) # franka grasp local pose grasp_pose_axis = 1 franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]) franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device) self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1)) self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1)) self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.franka_default_dof_pos = torch.tensor( [0.0, -0.872, 0.0, -2.0, 0.0, 2.618, 0.785, 0.025, 0.025], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) def get_observations(self) -> dict: # Franka hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False) franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) # cube cube_pos, cube_rot = self._cube.get_world_poses(clone=False) # target cube tar_cube_pos, tar_cube_rot = self._target_cube.get_world_poses(clone=False) # tool position to_target = cube_pos - tar_cube_pos self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, cube_pos, cube_rot, to_target, ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) # if action noise if self.is_action_noise is True: # Gaussian white noise with 0.01 variance self.actions = self.actions + (0.5)*torch.randn_like(self.actions) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) # self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) # # release cube cube_pos, cube_rot = self._cube.get_world_poses(clone=False) # cube tar_cube_pos, tar_cube_rot = self._target_cube.get_world_poses(clone=False) # target pos target_pos = tar_cube_pos.clone().detach() target_pos[:,2] = target_pos[:,2] + 0.025 target_dist = torch.norm(cube_pos - tar_cube_pos, p=2, dim=-1) self.release_condition = torch.logical_and(target_dist<0.08, cube_pos[:,2] >= target_pos[:,2]) # self.franka_dof_targets[:,7] = torch.where(self.release_condition, 0.08, self.franka_dof_targets[:,7]) # self.franka_dof_targets[:,8] = torch.where(self.release_condition, 0.08, self.franka_dof_targets[:,8]) self.franka_dof_targets[:,7] = torch.where(self.release_condition, 0.08, 0.005) self.franka_dof_targets[:,8] = torch.where(self.release_condition, 0.08, 0.005) self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka pos = torch.clamp( self.franka_default_dof_pos.unsqueeze(0), #+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) # reset cube self._cube.set_world_poses(self.default_cube_pos[env_ids], self.default_cube_rot[env_ids], indices = indices) self._cube.set_velocities(self.default_cube_velocity[env_ids], indices = indices) if not self.is_test: # reset target cube # reset target cube position within an area: x [-0.2, 0.2], y [-0.2,0.2] self.new_cube_pos = self.default_target_cube_pos.clone().detach() self.new_cube_pos[:,0] = self.default_target_cube_pos[:,0] + (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2 self.new_cube_pos[:,1] = self.default_target_cube_pos[:,1] + (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2 self._target_cube.set_world_poses(self.new_cube_pos[env_ids], self.default_target_cube_rot[env_ids], indices = indices) self._target_cube.set_velocities(self.default_target_cube_velocity[env_ids], indices = indices) # if is test mode else: self.new_cube_pos = self.default_target_cube_pos.clone().detach() self.new_cube_pos[:,0] = self.default_target_cube_pos[:,0] + self.initial_test_value[0] self.new_cube_pos[:,1] = self.default_target_cube_pos[:,1] + self.initial_test_value[1] self._target_cube.set_world_poses(self.new_cube_pos[env_ids], self.default_target_cube_rot[env_ids], indices = indices) self._target_cube.set_velocities(self.default_target_cube_velocity[env_ids], indices = indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): if key == "success_rate": self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) else: self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length self.episode_sums[key][env_ids] = 0 def post_reset(self): # Franka self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) # Cube self.default_cube_pos, self.default_cube_rot = self._cube.get_world_poses() self.default_cube_velocity = self._cube.get_velocities() # Target cube self.default_target_cube_pos, self.default_target_cube_rot = self._target_cube.get_world_poses() self.default_target_cube_velocity = self._target_cube.get_velocities() # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: # reward info joint_positions = self.franka_dof_pos cube_pos, cube_rot = self._cube.get_world_poses(clone=False) # cube cube_vel = self._cube.get_velocities() cube_vel = cube_vel[:,0:3] cube_vel_norm = torch.norm(cube_vel, p=2, dim=-1) tar_cube_pos, tar_cube_rot = self._target_cube.get_world_poses(clone=False) # target pos target_pos = tar_cube_pos.clone().detach() target_pos[:,2] = target_pos[:,2] + 0.02 # target_pos[:,0] = target_pos[:,0] -0.015 lfinger_pos, lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) # franka finger rfinger_pos, rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False) finger_pos = (lfinger_pos + rfinger_pos)/2 # 1st reward: cube to target distance cube_targe_dist = torch.norm(target_pos - cube_pos, p=2, dim=-1) cube_tar_dist_reward = 1.0/(1.0+cube_targe_dist) cube_targe_XY_dist = torch.norm(target_pos[:,0:2] - cube_pos[:,0:2] , p=2, dim=-1) cube_tar_XY_dist_reward = 1.0/(1.0+cube_targe_XY_dist**2) # 2nd reward: if cube is stacked, task complete finger_to_cube_dist = torch.norm(finger_pos - cube_pos, p=2, dim=-1) is_stacked = torch.where(torch.logical_and(cube_targe_dist<0.05, cube_pos[:,2]>=target_pos[:,2]), torch.ones_like(cube_tar_dist_reward), torch.zeros_like(cube_tar_dist_reward)) self.is_complete = torch.where(torch.logical_and(finger_to_cube_dist>0.05, torch.logical_and(cube_vel_norm<0.05, is_stacked==1)), torch.ones_like(cube_tar_dist_reward), torch.zeros_like(cube_tar_dist_reward)) # self.is_complete = torch.where(torch.logical_and(finger_to_cube_dist>0.05, torch.logical_and(cube_vel_norm<0.05, is_stacked==1)), # torch.ones_like(cube_tar_dist_reward), torch.zeros_like(cube_tar_dist_reward)) # 3rd reward: finger to cube distanfce finger_cube_dist_reward = 1.0/(1.0+finger_to_cube_dist) finger_cube_dist_reward = torch.where(is_stacked==1, 1-finger_cube_dist_reward, finger_cube_dist_reward) # 4th reward: finger closeness reward # finger_close_reward = torch.zeros_like(cube_tar_dist_reward) finger_close_reward = (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]) finger_close_reward = torch.where(is_stacked !=1, finger_close_reward, -finger_close_reward) # 5th reward: cube velocity reward cube_vel_reward = 1.0/(1.0+cube_vel_norm) # if cube falls on the ground self.is_fall = torch.where(cube_pos[:,2]<0.05, torch.ones_like(cube_tar_dist_reward), cube_tar_dist_reward) # final reward final_reward = 2*cube_tar_dist_reward + 0.0*finger_cube_dist_reward + 0.0*finger_close_reward + 0.0*cube_vel_reward \ + 10*self.is_complete - 0.5*self.is_fall + 0.0*is_stacked + 0.0*cube_tar_XY_dist_reward final_reward = torch.where(cube_targe_dist<0.2, final_reward+2.0*cube_tar_XY_dist_reward, final_reward) self.rew_buf[:] = final_reward self.episode_sums["success_rate"] += self.is_complete self.episode_sums["cube_cube_dist"] += cube_targe_dist self.episode_sums["finger_to_cube_dist"] += finger_to_cube_dist self.episode_sums["is_stacked"] += is_stacked def is_done(self) -> None: if not self.is_test: # reset: if task is complete # self.reset_buf = torch.where(self.is_complete==1, torch.ones_like(self.reset_buf), self.reset_buf) # reset if cube falls on the ground cube_pos, cube_rot = self._cube.get_world_poses(clone=False) # self.reset_buf = torch.where(self.is_fall==1, torch.ones_like(self.reset_buf), self.reset_buf) # rest if cube is too far away from the target cube tar_cube_pos, tar_cube_rot = self._target_cube.get_world_poses(clone=False) cube_target_XY_dist = torch.norm(tar_cube_pos[:,0:2] - cube_pos[:,0:2] , p=2, dim=-1) self.reset_buf = torch.where(cube_target_XY_dist > 0.8, torch.ones_like(self.reset_buf), self.reset_buf) # reset if max length reached self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) else: self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Peg_In_Hole.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask from Models.Franka.Franka import Franka from Models.Franka.Franka_view import FrankaView from Models.peg_in_hole.table import Table from Models.peg_in_hole.tool import Tool from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.transformations import * from omni.isaac.core.utils.torch.rotations import * from omni.isaac.cloner import Cloner import numpy as np import torch import math from pxr import Usd, UsdGeom class FrankaPegInHoleTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] self.location_ball_radius = self._task_cfg["env"]["locationBallRadius"] self.location_ball_initial_position = self._task_cfg["env"]["locationBallPosition"] self.location_ball_initial_orientation = self._task_cfg["env"]["locationBallInitialOrientation"] self.distX_offset = 0.04 self.dt = 1/60. self._num_observations = 28 self._num_actions = 9 # Flag for testing self.is_test = False self.initial_test_value = None self.is_action_noise = False RLTask.__init__(self, name, env) # Extra info for TensorBoard self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"tool_hole_XY_dist": torch_zeros(), "tool_hole_Z_dist": torch_zeros(), "tool_hole_dist": torch_zeros(), "tool_rot_error": torch_zeros(), "peg_rate": torch_zeros(), "norm_finger_vel": torch_zeros(), "rewards": torch_zeros()} return def set_up_scene(self, scene) -> None: franka_translation = torch.tensor([0.5, 0.0, 0.0]) self.get_franka(franka_translation) self.get_table() self.get_tool() super().set_up_scene(scene) # Add Franka self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") # Add Table self._table = RigidPrimView(prim_paths_expr="/World/envs/.*/table/table/table_mesh", name="table_view", reset_xform_properties=False) # Add Tool self._tool = RigidPrimView(prim_paths_expr="/World/envs/.*/tool/tool/tool", name="tool_view", reset_xform_properties=False) # Add location_ball self._location_ball = RigidPrimView(prim_paths_expr="/World/envs/.*/table/table/location_ball", name="location_ball_view", reset_xform_properties=False) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._table) scene.add(self._tool) scene.add(self._location_ball) self.init_data() return def get_franka(self, translation): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation) self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")) def get_table(self): table = Table(prim_path=self.default_zero_env_path + "/table", name="table") self._sim_config.apply_articulation_settings("table", get_prim_at_path(table.prim_path), self._sim_config.parse_actor_config("table")) def get_tool(self): tool = Tool(prim_path=self.default_zero_env_path + "/tool", name="tool") self._sim_config.apply_articulation_settings("tool", get_prim_at_path(tool.prim_path), self._sim_config.parse_actor_config("tool")) # Set as testing mode def set_as_test(self): self.is_test = True # Set action noise def set_action_noise(self): self.is_action_noise = True # Set initial test values for testing mode def set_initial_test_value(self, value): # for ball pushing: initial x,y positions of the ball self.initial_test_value = value def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device) lfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device ) rfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device ) finger_pose = torch.zeros(7, device=self._device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3])) grasp_pose_axis = 1 franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]) franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device) self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1)) self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1)) # tool reference rotation self.tool_ref_rot = torch.tensor([0.5, 0.5, 0.5, 0.5], device=self._device) # self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) # self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) # default franka pos: for initially grap the tool self.franka_default_dof_pos = torch.tensor( [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.015], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) def get_observations(self) -> dict: hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False) franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) # Tool self.tool_pos, self.tool_rot = self._tool.get_world_poses(clone=False) hole_pos, hole_rot = self._location_ball.get_world_poses() to_target = self.tool_pos - hole_pos dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) # print(torch.norm(to_target, p=2, dim=-1)) self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, self.tool_pos, self.tool_rot, to_target # self.location_ball_pos # self.cabinet_dof_pos[:, 3].unsqueeze(-1), # self.cabinet_dof_vel[:, 3].unsqueeze(-1), ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) # if action noise if self.is_action_noise is True: # Gaussian white noise with 0.01 variance self.actions = self.actions + (0.5)*torch.randn_like(self.actions) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) # NOTE HERE: right now I fix the finger movement so that the object will always be grasped in hand # Later: if the reward is good enough, the hand should be released once the object is in the hole, # this means the last two dofs are also in the action # self.franka_dof_targets[:,7] = self.franka_default_dof_pos[7] # self.franka_dof_targets[:,8] = self.franka_default_dof_pos[8] # release the finger if tool is right above the hole hole_pos, hole_rot = self._location_ball.get_world_poses() tool_pos, tool_rot = self._tool.get_world_poses() hole_pos[:,2] = 0.39 tool_hole_dist = torch.norm(tool_pos - hole_pos, p=2, dim=-1) tool_hole_XY_dist = torch.norm(tool_pos[:,0:2] - hole_pos[:,0:2], p=2, dim=-1) tool_hole_Z_dist = torch.norm(tool_pos[:,2] - hole_pos[:,2], p=2, dim=-1) tool_rot_error = torch.norm(tool_rot - self.tool_ref_rot, p=2, dim=-1) # self.release_condition = torch.logical_and(tool_hole_XY_dist <= 0.1, tool_rot_error<=1) # self.release_condition = torch.logical_and(self.release_condition, tool_hole_Z_dist<=0.1) # self.release_condition = torch.logical_and(tool_hole_dist<0.08, self.is_released) self.release_condition = tool_hole_dist<=0.024 # self.release_condition = torch.logical_and(tool_hole_XY_dist<=0.04, tool_hole_Z_dist<=0.07) # self.release_condition = torch.logical_and(self.release_condition, tool_rot_error<=1) # self.is_released = self.release_condition.clone().detach() self.franka_dof_targets[:,7] = torch.where(self.release_condition, 0.1, 0.015) self.franka_dof_targets[:,8] = torch.where(self.release_condition, 0.1, 0.015) # set franka target joint position self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka (due to initial grasping, cannot randomize) pos = torch.clamp( self.franka_default_dof_pos.unsqueeze(0), # + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) # reset tool self._tool.set_world_poses(self.default_tool_pos[env_ids], self.default_tool_rot[env_ids], indices = indices) self._tool.set_velocities(self.default_tool_velocity[env_ids], indices = indices) if not self.is_test: # reset table # reset positions: x: [-0.2,0.2], y:[-0.2,0.2] random_x = (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2 random_y = (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2 self.new_table_pos = self.default_table_pos.clone().detach() self.new_table_pos[:,0] = self.default_table_pos[:,0] + random_x self.new_table_pos[:,1] = self.default_table_pos[:,1] + random_y self._table.set_world_poses(self.new_table_pos[env_ids], self.default_table_rot[env_ids], indices = indices) self._table.set_velocities(self.default_table_velocity[env_ids], indices = indices) else: self.new_table_pos = self.default_table_pos.clone().detach() self.new_table_pos[:,0] = self.default_table_pos[:,0] + self.initial_test_value[0] self.new_table_pos[:,1] = self.default_table_pos[:,1] + self.initial_test_value[1] self._table.set_world_poses(self.new_table_pos[env_ids], self.default_table_rot[env_ids], indices = indices) self._table.set_velocities(self.default_table_velocity[env_ids], indices = indices) self.is_released = torch.zeros((1,self._num_envs), device=self._device) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): if key == "peg_rate": self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) else: self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length self.episode_sums[key][env_ids] = 0 def post_reset(self): self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) # tool self.default_tool_pos, self.default_tool_rot = self._tool.get_world_poses() self.default_tool_velocity = self._tool.get_velocities() # table self.default_table_pos, self.default_table_rot = self._table.get_world_poses() self.default_table_velocity = self._table.get_velocities() # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: # Envoroonment parameters: # table height: 0.4 # hole depth: 0.05 # hole radius: 0.01 # tool at surface: Z = 0.43 # tool pegged in hole: Z = 0.38 # tool_pos to tool bottom: Z = 0.03 # tool body length: 0.06 # tool cap length: 0.01 # tool vertical orient: [0.5, 0.5, 0.5, 0.5] # tool_ref_rot = self.tool_ref_rot # tool reference vertical rotation num_envs = self._num_envs tool_pos, tool_rot = self._tool.get_world_poses(clone=False) hole_pos, hole_rot = self._location_ball.get_world_poses(clone=False) hole_pos[:,2] = 0.38 # fix hole pos hole_surf_pos = hole_pos.clone().detach() hole_surf_pos[:,2] = hole_surf_pos[:,2] hole_target_pos = hole_pos.clone().detach() hole_target_pos[:,2] = 0.39 # tool_ref_rot = torch.zeros_like(tool_rot) # tool_ref_rot[:,:] = self.tool_ref_rot # tool reference vertical rotation tool_ref_rot= self.tool_ref_rot lfinger_pos, lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) rfinger_pos, rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False) finger_rot = (lfinger_rot + rfinger_rot)/2 finger_pos = (lfinger_pos + rfinger_pos)/2 finger_rot_ref = torch.tensor([0.0325, -0.3824, 0.9233, -0.0135], device=self._device) # finger velocity lfinger_vel = self._frankas._lfingers.get_velocities() rfinger_vel = self._frankas._rfingers.get_velocities() finger_vel = (lfinger_vel[:,0:3]+rfinger_vel[:,0:3])/2 norm_finger_vel = torch.norm(finger_vel, p=2, dim=-1) # direction vector ref_vector = torch.zeros([num_envs,3], device=self._device) ref_vector[:,0] = 2*(tool_ref_rot[0]*tool_ref_rot[2] - tool_ref_rot[3]*tool_ref_rot[1]) ref_vector[:,1] = 2*(tool_ref_rot[1]*tool_ref_rot[2] + tool_ref_rot[3]*tool_ref_rot[0]) ref_vector[:,2] = 1 - 2*(tool_ref_rot[0]*tool_ref_rot[0] + tool_ref_rot[1]*tool_ref_rot[1]) tool_vector = torch.zeros([num_envs,3], device=self._device) tool_vector[:,0] = 2*(tool_rot[:,0]*tool_rot[:,2] - tool_rot[:,3]*tool_rot[:,1]) tool_vector[:,1] = 2*(tool_rot[:,1]*tool_rot[:,2] + tool_rot[:,3]*tool_rot[:,0]) tool_vector[:,2] = 1 - 2*(tool_rot[:,0]*tool_rot[:,0] + tool_rot[:,1]*tool_rot[:,1]) # roll = atan2(2.0 * (q.q3 * q.q2 + q.q0 * q.q1) , 1.0 - 2.0 * (q.q1 * q.q1 + q.q2 * q.q2)); # pitch = asin(2.0 * (q.q2 * q.q0 - q.q3 * q.q1)); # yaw = atan2(2.0 * (q.q3 * q.q0 + q.q1 * q.q2) , - 1.0 + 2.0 * (q.q0 * q.q0 + q.q1 * q.q1)); tool_roll = torch.atan2(2.0*(tool_rot[:,0]*tool_rot[:,1] + tool_rot[:,2]*tool_rot[:,3]), 1.0-2.0*(tool_rot[:,2]*tool_rot[:,2]+tool_rot[:,1]*tool_rot[:,1])) tool_yaw= torch.atan2(2.0*(tool_rot[:,3]*tool_rot[:,2] + tool_rot[:,0]*tool_rot[:,1]), 1.0-2.0*(tool_rot[:,1]*tool_rot[:,1]+tool_rot[:,2]*tool_rot[:,2])) tool_pitch = torch.asin(2.0*(tool_rot[:,0]*tool_rot[:,2] - tool_rot[:,1]*tool_rot[:,3])) tool_ref_roll = torch.atan2(2.0*(tool_ref_rot[0]*tool_ref_rot[1] + tool_ref_rot[2]*tool_ref_rot[3]), 1.0-2.0*(tool_ref_rot[2]*tool_ref_rot[2]+tool_ref_rot[1]*tool_ref_rot[1])) tool_ref_yaw = torch.atan2(2.0*(tool_ref_rot[3]*tool_ref_rot[2] + tool_ref_rot[0]*tool_ref_rot[1]), 1.0-2.0*(tool_ref_rot[1]*tool_ref_rot[1]+tool_ref_rot[2]*tool_ref_rot[2])) tool_ref_pitch = torch.asin(2.0*(tool_ref_rot[0]*tool_ref_rot[2] - tool_ref_rot[1]*tool_ref_rot[3])) tool_roll_error = torch.abs(tool_roll - tool_ref_roll) tool_pitch_error = torch.abs(tool_pitch - tool_ref_pitch) tool_roll_pitch_reward = 1 - torch.tanh(2*tool_roll_error) + 1 - torch.tanh(2*tool_pitch_error) # tool_roll_yaw_reward = 1 - torch.tanh(2*tool_roll_error) + 1 - torch.tanh(2*tool_yaw_error) # Handle Nan exception # tool_roll_pitch_reward = torch.where(torch.isnan(tool_roll_error+tool_pitch_error), torch.ones_like(tool_roll_pitch_reward), tool_roll_pitch_reward) # 1st reward: tool XY position tool_hole_dist = torch.norm(tool_pos - hole_pos, p=2, dim=-1) tool_hole_XY_dist = torch.norm(tool_pos[:,0:2] - hole_pos[:,0:2], p=2, dim=-1) # tool_XY_pos_reward = 1.0 / (1.0 + (tool_hole_XY_dist) ** 2) tool_XY_pos_reward = 1 - torch.tanh(5*tool_hole_XY_dist) tool_hole_surf_dist = torch.norm(tool_pos - hole_surf_pos, p=2, dim=-1) # tool_surf_pos_reward = 1.0 / (1.0 + (tool_hole_surf_dist) ** 2) tool_surf_pos_reward = 1 - torch.tanh(8*tool_hole_surf_dist) # 2nd reward: tool rotation # tool_rot_error = torch.norm(tool_rot - tool_ref_rot, p=2, dim=-1) tool_rot_error = torch.norm(tool_vector - ref_vector, p=2, dim=-1) # tool_rot_reward = 1.0 / (1.0 + (tool_rot_error) ** 2) tool_rot_reward = 1 - torch.tanh(3*tool_rot_error) self.rot_error = tool_roll_error + tool_pitch_error # 3rd reward: pegging in when tool is above the hole tool_hole_Z_dist = torch.abs(tool_pos[:,2] - hole_pos[:,2]) # tool_pegging_reward = 1.0 / (1.0 + (tool_hole_Z_dist) ** 2) tool_pegging_reward = 1 - torch.tanh(6*tool_hole_Z_dist) # 4th reward: tool hole XYZ position tool_hole_dist = torch.norm(tool_pos - hole_pos, p=2, dim=-1) tool_target_dist = torch.norm(tool_pos - hole_target_pos, p=2, dim=-1) # tool_pos_reward = 1.0 / (1.0 + (tool_hole_dist) ** 2) tool_pos_reward = 1 - torch.tanh(5*tool_hole_dist) finger_rot_error = torch.norm(finger_rot - finger_rot_ref, p=2, dim=-1) finger_rot_reward = 1.0 / (1.0 + (finger_rot_error) ** 2) finger_XY_pos_dist = torch.norm(finger_pos[:,0:2] - hole_pos[:,0:2], p=2, dim=-1) finger_pos_reward = 1 - torch.tanh(5*finger_XY_pos_dist) # 1st penalty: action action_penalty = torch.sum(self.actions[:,0:7] ** 2, dim=-1) action_penalty = 1 - 1.0 / (1.0 + action_penalty) finger_vel_penalty = torch.tanh(20*torch.abs(norm_finger_vel-0.1)) # tool_rot_penalty = 1 - 1.0 / (1.0 + (tool_rot_error) ** 2) # tool_pos_penalty = 1 - 1.0 / (1.0 + (tool_hole_dist) ** 2) # final cumulative reward # final_reward = 5*tool_XY_pos_reward + 5*tool_rot_reward + 2*tool_pegging_reward- 0.001*action_penalty # final_reward = 10*tool_surf_pos_reward + 5*tool_rot_reward + 0*tool_hole_XY_dist- 0.001*action_penalty - 1.0*tool_rot_penalty - 1.0*tool_pos_penalty # final_reward = torch.where(tool_hole_surf_dist<0.05, 10*tool_pos_reward + 5*tool_rot_reward- 0.001*action_penalty, final_reward) # final_reward = torch.where(tool_hole_dist<0.1, 1*tool_pos_reward + 3*tool_rot_reward , 3*tool_pos_reward + 1*tool_rot_reward) # final_reward = 2*tool_surf_pos_reward + 2*tool_rot_reward + 0*finger_rot_reward - 0.001*action_penalty # final_reward = torch.where(tool_surf_pos_reward<0.1, 2*tool_pos_reward + 2*tool_rot_reward + 0*finger_rot_reward + 2*tool_pegging_reward-0.001*action_penalty, final_reward) final_reward = 3.5*tool_XY_pos_reward + 1.48*tool_roll_pitch_reward- 0.001*action_penalty + 2.0*tool_pegging_reward final_reward = torch.where((self.rot_error)<0.08, final_reward+0.5, final_reward) final_reward = torch.where((self.rot_error)>0.2, final_reward-1, final_reward) final_reward = torch.where(tool_hole_Z_dist>0.15, final_reward-1, final_reward) final_reward = torch.where(tool_hole_Z_dist<0.05, final_reward+0.1, final_reward) final_reward = torch.where(tool_hole_XY_dist<0.05, final_reward+0.5, final_reward) final_reward = torch.where(tool_hole_XY_dist>0.1, final_reward-10, final_reward) final_reward = torch.where(norm_finger_vel>0.15, final_reward-1, final_reward) # amplify different sub-rewards w.r.t. conditions # final_reward = torch.where(tool_hole_XY_dist>=0.005, final_reward + 2*tool_XY_pos_reward, final_reward) # tool-hole XY position # final_reward = torch.where(tool_rot_error > 0.05, final_reward + 2*tool_rot_reward, final_reward) # tool rotation position # final_reward = torch.where(torch.logical_and(tool_hole_XY_dist<0.05, tool_rot_error<0.05), final_reward + 10*tool_pegging_reward+2*tool_rot_reward, final_reward) # tool-hole Z position # final_reward = torch.where(torch.logical_and(tool_hole_surf_dist<0.05, tool_rot_error<0.06), # 10*tool_pos_reward + 5*tool_rot_reward + 2*tool_pegging_reward- 0.001*action_penalty, # final_reward) # tool-hole Z position # extra bonus/penalty cases: # final_reward = torch.where(tool_hole_XY_dist<=0.01, final_reward+0.1, final_reward) # tool-hole XY position bonus # final_reward = torch.where(tool_rot_error <0.1, final_reward+0.01, final_reward) # final_reward = torch.where(tool_hole_XY_dist <0.005, final_reward+0.01, final_reward) # final_reward = torch.where(tool_hole_Z_dist <0.1, final_reward+0.02, final_reward) # final_reward = 10*tool_pos_reward + 4*tool_rot_reward # final_reward = torch.where(tool_hole_XY_dist>0.1, 5.0*tool_pos_reward + 1.0*tool_rot_reward, 1.0*tool_pos_reward + 5.0*tool_rot_reward) # final_reward = torch.where(tool_rot_error<0.1, final_reward+2*tool_pos_reward, final_reward) # final_reward = torch.where(tool_hole_XY_dist<0.05, final_reward+5*tool_rot_reward, final_reward) # final_reward = torch.where(tool_rot_error <0.1, final_reward+0.2, final_reward) # final_reward = torch.where(tool_hole_XY_dist <0.1, final_reward+0.5, final_reward) # final_reward = torch.where(torch.logical_and(tool_hole_Z_dist <0.15, tool_hole_XY_dist <0.1), final_reward+1, final_reward) # final_reward = torch.where(torch.logical_and(tool_hole_XY_dist<=0.005, tool_hole_Z_dist<=0.005), final_reward+10000, final_reward) # task complete final_reward = torch.where(tool_target_dist<0.01, final_reward+100, final_reward) # task complete final_reward = torch.where(torch.isnan(final_reward), torch.zeros_like(final_reward), final_reward) # task complete # trigger to determine if job is done self.is_pegged = torch.where(tool_target_dist<0.01, torch.ones_like(final_reward), torch.zeros_like(final_reward)) # task complete self.rew_buf[:] = final_reward # print("hole_Z_pos", hole_pos[:2]) # print("tool_Z_pos", tool_pos[:2]) # print("tool_hole_XY_dist", tool_hole_XY_dist) # print("tool_hole_Z_dist", tool_hole_Z_dist) # print("tool_target_dist", tool_target_dist) # print("hole_surf_pos", hole_surf_pos) # print("norm_finger_vel", norm_finger_vel) # print("tool_rot", tool_rot) # print("tool_rot_error", self.rot_error ) # print("tool_ref_rot", tool_ref_rot) # print("hole_rot", hole_rot) # print("finger_rot", finger_rot) # finger_rot_ref: 0.0325, -0.3824, 0.9233, -0.0135 # 0.0 0.92388 0.3826 0 # hole_pos tensor([[ 1.5000, 0.0000, 0.3800], [-1.5000, 0.0000, 0.3800]], device='cuda:0') # tool_hole_Z_dist tensor([0.0820, 0.0789], device='cuda:0') # tool_rot_error tensor([0.0629, 0.0621], device='cuda:0') # tool_hole_XY_dist tensor([0.0012, 0.0037], device='cuda:0') # tool_rot_error tensor([0.7979, 0.7810, 0.7889, 0.7811], device='cuda:0') # tool_hole_XY_dist tensor([0.0536, 0.0585, 0.0378, 0.0451], device='cuda:0') # tool_hole_Z_dist tensor([0.0343, 0.0353, 0.0368, 0.0350], device='cuda:0') # tool_hole_dist tensor([0.0636, 0.0683, 0.0528, 0.0571], device='cuda:0') self.episode_sums["tool_hole_XY_dist"] += tool_hole_XY_dist self.episode_sums["tool_hole_Z_dist"] += tool_hole_Z_dist self.episode_sums["tool_hole_dist"] += tool_hole_dist self.episode_sums["tool_rot_error"] += tool_roll_error+tool_pitch_error # self.episode_sums["tool_X_pos"] += tool_pos[:,0] # self.episode_sums["tool_Y_pos"] += tool_pos[:,1] # self.episode_sums["tool_Z_pos"] += tool_pos[:,2] # self.episode_sums["tool_rot"] += tool_rot self.episode_sums["peg_rate"] += self.is_pegged self.episode_sums["norm_finger_vel"] += norm_finger_vel self.episode_sums["rewards"] += final_reward def is_done(self) -> None: if not self.is_test: # reset if tool is pegged in hole # self.reset_buf = torch.where(self.is_pegged==1, torch.ones_like(self.reset_buf), self.reset_buf) # reset if tool is below the table and not pegged in hole # self.reset_buf = torch.where(self.tool_pos[:,2] < 0.3, torch.ones_like(self.reset_buf), self.reset_buf) # # self.reset_buf = torch.where(torch.logical_and(self.tool_pos[:,2] < 0.43, self.rot_error>1.5), torch.ones_like(self.reset_buf), self.reset_buf) # reset if max length reached self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) else: self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) # self.reset_buf = torch.where(self.is_pegged==1, torch.ones_like(self.reset_buf), self.reset_buf)
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Door_Open.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask from Models.Franka.Franka import Franka from Models.Franka.Franka_view import FrankaView from Models.door_open.door import Door from Models.door_open.door_view import DoorView from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.transformations import * from omni.isaac.core.utils.torch.rotations import * from omni.isaac.cloner import Cloner import numpy as np import torch import math from pxr import Usd, UsdGeom class FrankaDoorOpenTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] self.distX_offset = 0.04 self.dt = 1/60. self._num_observations = 28 self._num_actions = 9 # Flag for testing self.is_test = False self.initial_test_value = None self.is_action_noise = False RLTask.__init__(self, name, env) # Extra info for TensorBoard self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"door_yaw_deg": torch_zeros(), "grasp_handle_dist": torch_zeros(), "handle_yaw_deg": torch_zeros(), "handle_pos_error": torch_zeros(), "open_rate": torch_zeros(), "rewards": torch_zeros(), "handle_yaw_error": torch_zeros()} return def set_up_scene(self, scene) -> None: franka_translation = torch.tensor([0.5, 0.0, 0.0]) self.get_franka(franka_translation) self.get_door() super().set_up_scene(scene) # Add Franka self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") # Add door self._door = DoorView(prim_paths_expr="/World/envs/.*/door/door", name="door_view") scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._door) scene.add(self._door._handle) self.init_data() return def get_franka(self, translation): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation) self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")) def get_door(self): door = Door(prim_path=self.default_zero_env_path + "/door", name="door") self._sim_config.apply_articulation_settings("door", get_prim_at_path(door.prim_path), self._sim_config.parse_actor_config("door")) # Set as testing mode def set_as_test(self): self.is_test = True # Set action noise def set_action_noise(self): self.is_action_noise = True # Set initial test values for testing mode def set_initial_test_value(self, value): # for ball pushing: initial x,y positions of the ball self.initial_test_value = value def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device) lfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device ) rfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device ) # finger pos finger_pose = torch.zeros(7, device=self._device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3])) # franka grasp local pose grasp_pose_axis = 1 franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]) franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device) self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1)) self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1)) # XXX assume to be the local pos of the handle door_local_handle_pose = torch.tensor([-0.1, -0.23, 0.81, 1.0, 0.0, 0.0, 0.0], device=self._device) self.door_local_handle_pos = door_local_handle_pose[0:3].repeat((self._num_envs, 1)) self.door_local_handle_rot = door_local_handle_pose[3:7].repeat((self._num_envs, 1)) self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.door_inward_axis = torch.tensor([-1, 0, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.door_up_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.franka_default_dof_pos = torch.tensor( [1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) def get_observations(self) -> dict: # Franka hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False) self.door_pos, self.door_rot = self._door.get_world_poses(clone=False) franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos self.door_dof_pos = self._door.get_joint_positions(clone=False) self.door_dor_vel = self._door.get_joint_velocities(clone=False) self.franka_grasp_rot, self.franka_grasp_pos, self.door_handle_rot, self.door_handle_pos = self.compute_grasp_transforms( hand_rot, hand_pos, self.franka_local_grasp_rot, self.franka_local_grasp_pos, self.door_rot, self.door_pos, self.door_local_handle_rot, self.door_local_handle_pos, ) self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) # handle self.handle_pos, self.handle_rot = self._door._handle.get_world_poses(clone=False) self.handle_pos[:,1] = self.handle_pos[:,1] - 0.3 # fix hand-point y-axis error # position error: from franka grasp to door handle grasp_handle_pos_error = self.handle_pos - self.franka_grasp_pos # grasp_handle_pos_error = self.handle_pos - (self.franka_lfinger_pos + self.franka_rfinger_pos)/2 dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, self.handle_pos, self.handle_rot, grasp_handle_pos_error, # self.handle_pos, # self.handle_rot, # self.location_ball_pos # self.cabinet_dof_pos[:, 3].unsqueeze(-1), # self.cabinet_dof_vel[:, 3].unsqueeze(-1), ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def compute_grasp_transforms( self, hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos, door_rot, door_pos, door_local_handle_rot, door_local_handle_pos, ): global_franka_rot, global_franka_pos = tf_combine( hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos ) global_door_rot, global_door_pos = tf_combine( door_rot, door_pos, door_local_handle_rot, door_local_handle_pos ) return global_franka_rot, global_franka_pos, global_door_rot, global_door_pos def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) # if action noise if self.is_action_noise is True: # Gaussian white noise with 0.01 variance self.actions = self.actions + (0.5)*torch.randn_like(self.actions) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka pos = torch.clamp( self.franka_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) if not self.is_test: # reset door: only 1 joint # reset door positions: x: [-0.1,0.1], y:[-0.4,0.4] self.new_door_pos = self.default_door_pos.clone().detach() self.new_door_pos[:,0] = self.default_door_pos[:,0] + (0.05 + 0.05) * torch.rand(self._num_envs, device=self._device) -0.05 self.new_door_pos[:,1] = self.default_door_pos[:,1] + (0.1 + 0.1) * torch.rand(self._num_envs, device=self._device) -0.1 self._door.set_world_poses(self.new_door_pos[env_ids], self.default_door_rot[env_ids], indices = indices) else: self.new_door_pos = self.default_door_pos.clone().detach() self.new_door_pos[:,0] = self.default_door_pos[:,0] + self.initial_test_value[0] self.new_door_pos[:,1] = self.default_door_pos[:,1] + self.initial_test_value[1] self._door.set_world_poses(self.new_door_pos[env_ids], self.default_door_rot[env_ids], indices = indices) # reset door joints door_pos = torch.zeros((num_indices, 1), device=self._device) door_vel = torch.zeros((num_indices, 1), device=self._device) self._door.set_joint_positions(door_pos, indices=indices) self._door.set_joint_velocities(door_vel, indices=indices) self._door.set_joint_position_targets(self.door_dof_targets[env_ids], indices=indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): if key == "open_rate": self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) else: self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length self.episode_sums[key][env_ids] = 0.0 def post_reset(self): # Franka self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) # Door self.door_dof_targets = torch.zeros( (self._num_envs, 1), dtype=torch.float, device=self._device ) self.default_door_pos, self.default_door_rot = self._door.get_world_poses() # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: # info extraction # env num_envs = self._num_envs # Franka joint_positions = self.franka_dof_pos gripper_forward_axis = self.gripper_forward_axis gripper_up_axis = self.gripper_up_axis franka_grasp_pos, franka_grasp_rot = self.franka_grasp_pos, self.franka_grasp_rot franka_lfinger_pos, franka_lfinger_rot = self.franka_lfinger_pos, self.franka_lfinger_rot franka_rfinger_pos, franka_rfinger_rot = self.franka_rfinger_pos, self.franka_rfinger_rot actions = self.actions finger_pos = (franka_lfinger_pos + franka_rfinger_pos)/2 finger_rot = (franka_lfinger_pos + franka_rfinger_pos)/2 # door door_inward_axis = self.door_inward_axis door_up_axis = self.door_up_axis door_dof_pos = self.door_dof_pos door_pos, door_rot = self.door_pos, self.door_rot # handle handle_pos, handle_rot = self.handle_pos, self.handle_rot # handle_pos[:,1] = handle_pos[:,1] - 0.3 # fix hand-point y-axis error handle_local_pos, handle_local_rot = self._door._handle.get_local_poses() # preprocessing # distance from grasp to handle grasp_handle_dist = torch.norm(finger_pos - handle_pos, p=2, dim=-1) # distance of each finger to the handle along Z-axis lfinger_Z_dist = torch.abs(franka_lfinger_pos[:, 2] - handle_pos[:, 2]) rfinger_Z_dist = torch.abs(franka_rfinger_pos[:, 2] - handle_pos[:, 2]) # how far the door has been opened out # quaternions to euler angles door_yaw = torch.atan2(2.0*(door_rot[:,0]*door_rot[:,3] + door_rot[:,1]*door_rot[:,2]), 1.0-2.0*(door_rot[:,2]*door_rot[:,2]+door_rot[:,3]*door_rot[:,3])) handle_yaw = torch.atan2(2.0*(handle_rot[:,0]*handle_rot[:,3] + handle_rot[:,1]*handle_rot[:,2]), 1.0-2.0*(handle_rot[:,2]*handle_rot[:,2]+handle_rot[:,3]*handle_rot[:,3])) door_ref_yaw = torch.deg2rad(torch.tensor([60], device=self._device)) door_yaw_error = torch.abs(door_ref_yaw - handle_yaw) self.door_yaw_error = door_yaw_error.clone().detach() # handle destination if opened handle_ref_pos = handle_pos.clone().detach() # target_open_deg = door_ref_yaw*torch.ones((num_envs,1), device=self._device) # open the door by 60 degrees # target_open_rad = math.radians(60) handle_ref_pos[:,0] = handle_ref_pos[:,0]*torch.cos(door_ref_yaw) + handle_ref_pos[:,1]*torch.sin(door_ref_yaw) handle_ref_pos[:,1] = -handle_ref_pos[:,0]*torch.sin(door_ref_yaw) + handle_ref_pos[:,1]*torch.cos(door_ref_yaw) self.handle_pos_error = torch.norm(handle_ref_pos - handle_pos, p=2, dim=-1) # gripper direction alignment axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis) axis2 = tf_vector(handle_rot, door_inward_axis) axis3 = tf_vector(franka_grasp_rot, gripper_up_axis) axis4 = tf_vector(handle_rot, door_up_axis) dot1 = torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of forward axis for gripper dot2 = torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of up axis for gripper # reward functions # 1st rewards: distance from hand to the drawer grasp_dist_reward = 1.0 / (1.0 + grasp_handle_dist ** 2) grasp_dist_reward *= grasp_dist_reward grasp_dist_reward = torch.where(grasp_handle_dist <= 0.02, grasp_dist_reward * 2, grasp_dist_reward) # 2nd reward for matching the orientation of the hand to the drawer (fingers wrapped) rot_reward = 0.5 * (torch.sign(dot1) * dot1 ** 2 + torch.sign(dot2) * dot2 ** 2) # 3rd reward: bonus if left finger is above the drawer handle and right below around_handle_reward = torch.zeros_like(rot_reward) around_handle_reward = torch.where(self.franka_lfinger_pos[:, 2] > handle_pos[:, 2], torch.where(self.franka_rfinger_pos[:, 2] < handle_pos[:, 2], around_handle_reward + 0.5, around_handle_reward), around_handle_reward) # 4th reward: distance of each finger from the handle finger_dist_reward = torch.zeros_like(rot_reward) finger_dist_reward = torch.where(franka_lfinger_pos[:, 2] > handle_pos[:, 2], torch.where(franka_rfinger_pos[:, 2] < handle_pos[:, 2], (0.04 - lfinger_Z_dist) + (0.04 - rfinger_Z_dist), finger_dist_reward), finger_dist_reward) # 5th reward: finger closeness finger_close_reward = torch.zeros_like(rot_reward) finger_close_reward = torch.where(grasp_handle_dist <=0.03, (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]), finger_close_reward) # 6th reward: how far the door has been opened out # instead of using rotation, may use pos as reference open_reward = (1.0 / (1.0 + door_yaw_error ** 2)) * around_handle_reward + handle_yaw # open_reward = (1.0 / (1.0 + self.handle_pos_error)) * around_handle_reward # 1st penalty action_penalty = torch.sum(actions ** 2, dim=-1) final_reward = 2.0 * grasp_dist_reward + 0.5 * rot_reward + 10.0 * around_handle_reward + 70.0 * open_reward + \ 100.0 * finger_dist_reward+ 10.0 * finger_close_reward - 0.01 * action_penalty # bonus for opening door properly final_reward = torch.where(door_yaw_error < 0.7, final_reward + 0.5, final_reward) final_reward = torch.where(door_yaw_error < 0.5, final_reward + around_handle_reward, final_reward) final_reward = torch.where(door_yaw_error < 0.2, final_reward + (2.0 * around_handle_reward), final_reward) # in case: Nan value occur final_reward = torch.where(torch.isnan(final_reward), torch.zeros_like(final_reward), final_reward) self.rew_buf[:] = final_reward # self.rew_buf[:] = torch.rand(self._num_envs) # if the door is opened to ref position -> task complete self.is_opened = torch.where(torch.rad2deg(handle_yaw)>=70, torch.ones_like(final_reward), torch.zeros_like(final_reward)) self.episode_sums["door_yaw_deg"] += torch.rad2deg(door_yaw) self.episode_sums["handle_yaw_deg"] += torch.rad2deg(handle_yaw) self.episode_sums["handle_pos_error"] += self.handle_pos_error self.episode_sums["handle_yaw_error"] += door_yaw_error self.episode_sums["grasp_handle_dist"] += grasp_handle_dist self.episode_sums["open_rate"] += self.is_opened self.episode_sums["rewards"] += final_reward # print("handle_pos", handle_pos) # print("handle_rot", handle_rot) # print("door_pos", door_pos) # print("door_rot", door_rot) # print("handle_local_pos", handle_local_pos) # print("handle_local_rot", handle_local_rot) # print("grasp_handle_dist", grasp_handle_dist) # print("door_yaw", door_yaw) def is_done(self) -> None: if not self.is_test: # reset if door is fully opened # self.reset_buf = torch.where(self.is_opened==1, torch.ones_like(self.reset_buf), self.reset_buf) # reset if max length reached self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) else: self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) # self.reset_buf = torch.where(self.is_opened==1, torch.ones_like(self.reset_buf), self.reset_buf)
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Ball_Pushing.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask from Models.Franka.Franka import Franka from Models.Franka.Franka_view import FrankaView from Models.ball_pushing.table import Table from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage from omni.isaac.core.utils.torch.transformations import * from omni.isaac.core.utils.torch.rotations import * from omni.isaac.cloner import Cloner import numpy as np import torch import math from pxr import Usd, UsdGeom class FrankaBallPushingTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] self.ball_radius = self._task_cfg["env"]["ballRadius"] self.ball_initial_position = self._task_cfg["env"]["ballInitialPosition"] self.ball_initial_orientation = self._task_cfg["env"]["ballInitialOrientation"] # self.ball_initial_position[0] = (0.1 + 0.1) * np.random.rand(1) -0.1 # self.ball_initial_position[1] = (0.2 + 0.2) * np.random.rand(1) -0.2 # initial_x = (0.1 + 0.1) * torch.rand(self._num_envs) -0.1 # initial_y = (0.2 + 0.2) * torch.rand(self._num_envs) -0.2 self.distX_offset = 0.04 self.dt = 1/60. self._num_observations = 30 self._num_actions = 9 # Flag for testing self.is_test = False self.initial_test_value = None self.is_action_noise = False RLTask.__init__(self, name, env) # Extra info for TensorBoard self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"success_rate": torch_zeros(), "ball_hole_XY_dist": torch_zeros()} return def set_up_scene(self, scene) -> None: franka_translation = torch.tensor([0.6, 0.0, 0.0]) self.get_franka(franka_translation) self.get_table() self.get_ball() super().set_up_scene(scene) self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") # Add ball self._ball = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view", reset_xform_properties=False) # Add location_ball self._location_ball = RigidPrimView(prim_paths_expr="/World/envs/.*/table/table/location_ball", name="location_ball_view", reset_xform_properties=False) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._ball) scene.add(self._location_ball) self.init_data() return def get_franka(self, translation): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation) self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")) def get_table(self): table = Table(prim_path=self.default_zero_env_path + "/table", name="table") self._sim_config.apply_articulation_settings("table", get_prim_at_path(table.prim_path), self._sim_config.parse_actor_config("table")) def get_ball(self): ball = DynamicSphere( name = 'ball', position=self.ball_initial_position, orientation=self.ball_initial_orientation, prim_path=self.default_zero_env_path + "/ball", radius=self.ball_radius, color=np.array([1, 0, 0]), density = 100 ) self._sim_config.apply_articulation_settings("ball", get_prim_at_path(ball.prim_path), self._sim_config.parse_actor_config("ball")) # Set as testing mode def set_as_test(self): self.is_test = True # Set action noise def set_action_noise(self): self.is_action_noise = True # Set initial test values for testing mode def set_initial_test_value(self, value): # for ball pushing: initial x,y positions of the ball self.initial_test_value = value def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device) lfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device ) rfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device ) finger_pose = torch.zeros(7, device=self._device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3])) grasp_pose_axis = 1 franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]) franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device) self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1)) self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1)) self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.franka_default_dof_pos = torch.tensor( [1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) def get_observations(self) -> dict: hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False) franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos # self.franka_grasp_rot, self.franka_grasp_pos, self.drawer_grasp_rot, self.drawer_grasp_pos = self.compute_grasp_transforms( # hand_rot, # hand_pos, # self.franka_local_grasp_rot, # self.franka_local_grasp_pos, # drawer_rot, # drawer_pos, # self.drawer_local_grasp_rot, # self.drawer_local_grasp_pos, # ) self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) # Ball self.ball_pos, self.ball_rot = self._ball.get_world_poses(clone=False) self.ball_vel = self._ball.get_velocities() # hole-location ball self.location_ball_pos, self.location_ball_rot = self._location_ball.get_world_poses(clone=False) to_target = self.location_ball_pos - self.ball_pos dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, self.ball_vel, to_target, self.ball_pos, # self.location_ball_pos # self.cabinet_dof_pos[:, 3].unsqueeze(-1), # self.cabinet_dof_vel[:, 3].unsqueeze(-1), ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) # if action noise if self.is_action_noise is True: # Gaussian white noise with 0.01 variance self.actions = self.actions + (0.5)*torch.randn_like(self.actions) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) self.franka_dof_targets[:,7] = 0.015 self.franka_dof_targets[:,8] = 0.015 self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka pos = tensor_clamp( self.franka_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) # reset ball # if not test, randomize ball initial positions for training if not self.is_test: # reset ball position: x [-0.1, 0.1], y [-0.1,0.1] self.new_ball_pos = self.default_ball_pos.clone().detach() self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + (0.1 + 0.1) * torch.rand(self._num_envs, device=self._device) -0.1 self.new_ball_pos[:,1] = self.default_ball_pos[:,1] + (0.1 + 0.1) * torch.rand(self._num_envs, device=self._device) -0.1 self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices) self._ball.set_velocities(self.default_ball_velocity[env_ids], indices = indices) # if is test mode, set the ball to given position (1 environment) else: self.new_ball_pos = self.default_ball_pos.clone().detach() self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + self.initial_test_value[0] self.new_ball_pos[:,1] = self.default_ball_pos[:,1] +self.initial_test_value[1] self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices) self._ball.set_velocities(self.default_ball_velocity[env_ids], indices = indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): if key == "success_rate": self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) else: self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids])/self._max_episode_length self.episode_sums[key][env_ids] = 0. def post_reset(self): self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) # Ball self.default_ball_pos, self.default_ball_rot = self._ball.get_world_poses() self.default_ball_velocity = self._ball.get_velocities() # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: # get objects positions and orientations joint_positions = self.franka_dof_pos # franka dofs pos num_envs = self._num_envs # num of sim env finger_pos = (self.franka_lfinger_pos + self.franka_lfinger_pos)/2 # franka finger pos (lfinger+rfinger)/2 self.finger_pos = finger_pos gripper_forward_axis = self.gripper_forward_axis gripper_up_axis = self.gripper_up_axis # franka_grasp_pos = self.franka_grasp_pos # franka_grasp_rot = self.franka_grasp_rot # ball_grasp_pos = self.ball_grasp_pos # ball_grasp_rot = self.ball_grasp_rot # ball_inward_axis = self.ball_inward_axis # ball_up_axis = self.ball_up_axis # franka_dof_pos = self.franka_dof_pos ball_init_pos = self.default_ball_pos ball_pos = self.ball_pos # ball pos # ball_rot = self.ball_rot # ball rot # ball_vel = self._ball.get_velocities() # ball velocity # table_pos = self.table_pos # table pos # table_rot = self.table_rot # table rot hole_pos = self.location_ball_pos # locate hole pos # hole_pos[:,1] = hole_pos[:,1] - 0.8 # Y-axis # hole_pos[:,2] = hole_pos[:,2] + 0.44 # Z-axis # 1st reward: distance from ball to hole ball_hole_dist = torch.norm(hole_pos - ball_pos, p=2, dim=-1) ball_hole_XY_dist = torch.norm(hole_pos[:,0:2] - ball_pos[:,0:2], p=2, dim=-1) # dist_reward = 1.0 / (1.0 + ball_hole_dist ** 2) # dist_reward *= 2*dist_reward # dist_reward = torch.where(ball_hole_dist <= 0.05, dist_reward+10, dist_reward) # ball_hole_dist = torch.norm(hole_pos - ball_pos, p=2, dim=-1) # dist_reward = 1.0/(1.0+ball_hole_dist**2) dist_reward = 1-torch.tanh(3*ball_hole_XY_dist) # regulize the dist_reward in [0,1] # dist_reward = -(ball_hole_XY_dist)**2 # 2nd reward: distance from finger to ball # finger_ball_dist = torch.norm(finger_pos - ball_pos, p=2, dim=-1) ball_to_init_dist = torch.norm(ball_pos[:,0:2] - ball_init_pos[:,0:2], p=2, dim=-1) self.ball_to_init_dist = ball_to_init_dist finger_ball_dist = torch.norm(finger_pos - ball_pos, p=2, dim=-1) finger_ball_reward = 1.0/(1.0+finger_ball_dist**2) # 1st penalty: regularization on the actions (summed for each environment) action_penalty = torch.sum(self.actions ** 2, dim=-1) action_penalty = 1-torch.tanh(action_penalty/2.5) # 5th penalty if ball is not moved ball_unmove_penalty = torch.zeros_like(dist_reward) ball_unmove_penalty = torch.where(ball_to_init_dist<0.3, torch.tanh(15*(0.3-ball_to_init_dist)), ball_unmove_penalty) falling_bonus = torch.where(torch.logical_and(ball_hole_XY_dist < 0.1 , ball_pos[:,2]<0.38), torch.ones_like(dist_reward), torch.zeros_like(dist_reward)) falling_penalty = torch.zeros_like(dist_reward) falling_penalty = torch.where(torch.logical_and(ball_hole_XY_dist > 0.001 , ball_pos[:,2]<0.38), falling_penalty+10, falling_penalty) # falling_penalty = torch.where(ball_hole_XY_dist<0.2, falling_penalty-100, falling_penalty) # dist_reward = torch.where(ball_hole_XY_dist<0.3, 1-torch.tanh(10*ball_hole_XY_dist), dist_reward) # dist_reward = torch.where(ball_to_init_dist>0.01, dist_reward, dist_reward*0) dist_reward = torch.where(ball_pos[:,0]<hole_pos[:,0], torch.zeros_like(dist_reward), dist_reward) dist_penalty = torch.tanh(3*ball_hole_XY_dist) final_reward = 10.0*dist_reward - 0.0*ball_unmove_penalty + 100.0*falling_bonus - 0.0*action_penalty \ - 0.0*falling_penalty + 0.0*finger_ball_reward - 0.0*dist_penalty # final_reward = torch.where(finger_pos[:,2] < (ball_pos[:,2]), final_reward-0.5, final_reward) # final_reward = torch.where(torch.logical_and(finger_ball_dist > 0, ball_to_init_dist<0.05), final_reward-0.5, final_reward) # final_reward = torch.where(ball_hole_XY_dist>0.2, final_reward-1, final_reward) self.is_complete = torch.where(torch.logical_and(ball_hole_XY_dist < 0.01 , ball_pos[:,2]<0.38), torch.ones_like(final_reward), torch.zeros_like(final_reward)) # task complete # final_reward = torch.where(ball_hole_XY_dist < 0.6, final_reward+3.0*dist_reward, final_reward) self.rew_buf[:] = final_reward self.episode_sums["success_rate"] += self.is_complete self.episode_sums["ball_hole_XY_dist"] += ball_hole_XY_dist def is_done(self) -> None: if not self.is_test: # reset if ball falls from the edge or in hole self.reset_buf = torch.where(self.ball_pos[:, 2] < 0.1, torch.ones_like(self.reset_buf), self.reset_buf) # self.reset_buf = torch.where(self.is_complete==1, torch.ones_like(self.reset_buf), self.reset_buf) # reset if franka grasp is below the ball and ball is not moved # self.reset_buf = torch.where(self.finger_pos[:, 2] < 0.2, torch.ones_like(self.reset_buf), self.reset_buf) # self.reset_buf = torch.where(torch.logical_and(self.finger_pos[:, 2] < 0.3, self.ball_to_init_dist < 0.1), torch.ones_like(self.reset_buf), self.reset_buf) # reset if max length reached self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) else: self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Point_Reaching.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask from Models.Franka.Franka import Franka from Models.Franka.Franka_view import FrankaView from Models.point_reaching.target_ball import TargetBall from omni.isaac.core.objects import DynamicCuboid from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage from omni.isaac.core.utils.torch.transformations import * from omni.isaac.core.utils.torch.rotations import * from omni.isaac.cloner import Cloner import numpy as np import torch import math from pxr import Gf, Usd, UsdGeom class FrankaPointReachingTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] self.distX_offset = 0.04 self.dt = 1/60. self._num_observations = 27 self._num_actions = 9 # Flag for testing self.is_test = False self.initial_test_value = None self.is_action_noise = False RLTask.__init__(self, name, env) # Extra info for TensorBoard self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"success_rate": torch_zeros()} return def set_up_scene(self, scene) -> None: # Franka franka_translation = torch.tensor([0.3, 0.0, 0.0]) self.get_franka(franka_translation) self.get_target_ball() # Here the env is cloned super().set_up_scene(scene) # Add Franka self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") # Add location_ball self._target_ball = RigidPrimView(prim_paths_expr="/World/envs/.*/target_ball/target_ball/ball_mesh", name="target_ball_view", reset_xform_properties=False) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._target_ball) self.init_data() return def get_franka(self, translation): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation) self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")) def get_target_ball(self): target_ball = TargetBall(prim_path=self.default_zero_env_path + "/target_ball", name="target_ball") self._sim_config.apply_articulation_settings("target_ball", get_prim_at_path(target_ball.prim_path), self._sim_config.parse_actor_config("target_ball")) # Set as testing mode def set_as_test(self): self.is_test = True # Set action noise def set_action_noise(self): self.is_action_noise = True # Set initial test values for testing mode def set_initial_test_value(self, value): # for ball pushing: initial x,y positions of the ball self.initial_test_value = value def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device) lfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device ) rfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device ) # finger pos finger_pose = torch.zeros(7, device=self._device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3])) # franka grasp local pose grasp_pose_axis = 1 franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]) franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device) self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1)) self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1)) self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.franka_default_dof_pos = torch.tensor( [0.0, -0.872, 0.0, -2.0, 0.0, 2.618, 0.785, 0.01, 0.01], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) def get_observations(self) -> dict: # Franka hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False) franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) self.franka_dof_pos = franka_dof_pos self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) finger_center = (self.franka_lfinger_pos + self.franka_rfinger_pos)/2 dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) # target ball target_ball_pos, target_ball_rot = self._target_ball.get_world_poses(clone=False) # tool position to_target = finger_center - target_ball_pos self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, target_ball_pos, finger_center, to_target, ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) # if action noise if self.is_action_noise is True: # Gaussian white noise with 0.01 variance self.actions = self.actions + (0.5)*torch.randn_like(self.actions) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) self.franka_dof_targets[:,7] = self.franka_default_dof_pos[7] self.franka_dof_targets[:,8] = self.franka_default_dof_pos[8] self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka pos = torch.clamp( self.franka_default_dof_pos.unsqueeze(0), #+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) if not self.is_test: # reset target cube # reset target cube position within an area: x [-0.2, 0.2], y [-0.4,0.4], z [-0.2,0.2] self.new_cube_pos = self.default_target_ball_pos.clone().detach() self.new_cube_pos[:,0] = self.default_target_ball_pos[:,0] + (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2 self.new_cube_pos[:,1] = self.default_target_ball_pos[:,1] + (0.4 + 0.4) * torch.rand(self._num_envs, device=self._device) -0.4 self.new_cube_pos[:,2] = self.default_target_ball_pos[:,2] + (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2 self._target_ball.set_world_poses(self.new_cube_pos[env_ids], self.default_target_ball_rot[env_ids], indices = indices) self._target_ball.set_velocities(self.default_target_ball_velocity[env_ids], indices = indices) # if is test mode else: self.new_cube_pos = self.default_target_ball_pos.clone().detach() self.new_cube_pos[:,0] = self.default_target_ball_pos[:,0] + self.initial_test_value[0] self.new_cube_pos[:,1] = self.default_target_ball_pos[:,1] + self.initial_test_value[1] self.new_cube_pos[:,2] = self.default_target_ball_pos[:,2] + self.initial_test_value[2] self._target_ball.set_world_poses(self.new_cube_pos[env_ids], self.default_target_ball_rot[env_ids], indices = indices) self._target_ball.set_velocities(self.default_target_ball_velocity[env_ids], indices = indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 # fill extras self.extras["episode"] = {} for key in self.episode_sums.keys(): if key == "success_rate": self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) else: self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length self.episode_sums[key][env_ids] = 0 def post_reset(self): # Franka self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) # Target cube self.default_target_ball_pos, self.default_target_ball_rot = self._target_ball.get_world_poses() self.default_target_ball_velocity = self._target_ball.get_velocities() # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: # Reward info self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses() self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses() finger_center = (self.franka_lfinger_pos + self.franka_rfinger_pos)/2 lfinger_vel = self._frankas._lfingers.get_velocities() rfinger_vel = self._frankas._lfingers.get_velocities() finger_vel = (lfinger_vel + rfinger_vel)/2 finger_vel_norm = torch.norm(finger_vel, p=2, dim=-1) target_ball_pos, target_ball_rot = self._target_ball.get_world_poses() # distance ball_center_dist = torch.norm(target_ball_pos - finger_center, p=2, dim=-1) center_dist_reward = 1.0/(1.0+ball_center_dist) # velocity finger_vel_reward = 1.0/(1.0+finger_vel_norm) # is complete is_complete = torch.where( torch.logical_and(ball_center_dist<0.03, finger_vel_norm<0.02), torch.ones_like(finger_vel_norm), torch.zeros_like(finger_vel_norm)) final_reward = 1.0*center_dist_reward + 10.0*is_complete + 0.1*finger_vel_reward self.rew_buf[:] = final_reward self.episode_sums["success_rate"] += is_complete def is_done(self) -> None: if not self.is_test: # reset if max length reached self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) else: self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Cloth_Placing.py
from omniisaacgymenvs.tasks.base.rl_task import RLTask from Models.Franka.Franka import Franka from Models.Franka.Franka_view import FrankaView from Models.cloth_placing.target_table import TargetTable from omni.isaac.core.prims import ParticleSystem, ClothPrim, ClothPrimView from omni.isaac.core.materials import ParticleMaterial from omni.physx.scripts import physicsUtils, particleUtils, deformableUtils from omni.isaac.core.objects import DynamicCuboid from omni.isaac.core.prims import RigidPrim, RigidPrimView from omni.isaac.core.utils.prims import get_prim_at_path from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage from omni.isaac.core.utils.torch.transformations import * from omni.isaac.core.utils.torch.rotations import * from omni.isaac.cloner import Cloner import numpy as np import torch import math from pxr import Gf, Usd, UsdGeom class FrankaClothPlacingTask(RLTask): def __init__( self, name, sim_config, env, offset=None ) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] self.action_scale = self._task_cfg["env"]["actionScale"] self.start_position_noise = self._task_cfg["env"]["startPositionNoise"] self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"] self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"] self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"] self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"] self.open_reward_scale = self._task_cfg["env"]["openRewardScale"] self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"] self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"] self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"] self.distX_offset = 0.04 self.dt = 1/60. self._num_observations = 27 self._num_actions = 9 # Flag for testing self.is_test = False self.initial_test_value = None self.is_action_noise = False RLTask.__init__(self, name, env) # Extra info for TensorBoard self.extras = {} torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False) self.episode_sums = {"center_dist": torch_zeros()} return def set_up_scene(self, scene) -> None: # Franka franka_translation = torch.tensor([0.3, 0.0, 0.0]) self.get_franka(franka_translation) self.get_table() # Here the env is cloned (cannot clone particle systems right now) super().set_up_scene(scene) # Add Franka self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view") # Add bin self._target_table = RigidPrimView(prim_paths_expr="/World/envs/.*/target_table/target_table/mesh", name="target_table_view", reset_xform_properties=False) # Add location_ball self._location_cube = RigidPrimView(prim_paths_expr="/World/envs/.*/target_table/target_table/location_cube", name="location_cube_view", reset_xform_properties=False) scene.add(self._frankas) scene.add(self._frankas._hands) scene.add(self._frankas._lfingers) scene.add(self._frankas._rfingers) scene.add(self._location_cube) scene.add(self._target_table) # generate cloth near franka franka_positions = self._frankas.get_world_poses()[0] self.initialize_cloth(franka_positions) # Create a view to deal with all the cloths self._cloths = ClothPrimView(prim_paths_expr="/World/Env*/cloth", name="cloth_view") self._scene.add(self._cloths) self.init_data() return def get_franka(self, translation): franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation, use_modified_collision = True) self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka")) def get_table(self): target_table = TargetTable(prim_path=self.default_zero_env_path + "/target_table", name="target_table") self._sim_config.apply_articulation_settings("target_table", get_prim_at_path(target_table.prim_path), self._sim_config.parse_actor_config("target_table")) # Set as testing mode def set_as_test(self): self.is_test = True # Set action noise def set_action_noise(self): self.is_action_noise = True # Set initial test values for testing mode def set_initial_test_value(self, value): # for ball pushing: initial x,y positions of the ball self.initial_test_value = value def init_data(self) -> None: def get_env_local_pose(env_pos, xformable, device): """Compute pose in env-local coordinates""" world_transform = xformable.ComputeLocalToWorldTransform(0) world_pos = world_transform.ExtractTranslation() world_quat = world_transform.ExtractRotationQuat() px = world_pos[0] - env_pos[0] py = world_pos[1] - env_pos[1] pz = world_pos[2] - env_pos[2] qx = world_quat.imaginary[0] qy = world_quat.imaginary[1] qz = world_quat.imaginary[2] qw = world_quat.real return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float) stage = get_current_stage() hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device) lfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device ) rfinger_pose = get_env_local_pose( self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device ) # finger pos finger_pose = torch.zeros(7, device=self._device) finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0 finger_pose[3:7] = lfinger_pose[3:7] hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3])) # franka grasp local pose grasp_pose_axis = 1 franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3]) franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device) self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1)) self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1)) self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1)) self.franka_default_dof_pos = torch.tensor( [0.0, 0.0, 0.0, -1.0, 0.0, 1.0, 0.5, 0.0001, 0.0001], device=self._device ) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) def get_observations(self) -> dict: # Franka hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False) franka_dof_pos = self._frankas.get_joint_positions(clone=False) franka_dof_pos = torch.nan_to_num(franka_dof_pos) franka_dof_vel = self._frankas.get_joint_velocities(clone=False) franka_dof_vel = torch.nan_to_num(franka_dof_vel) self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) dof_pos_scaled = ( 2.0 * (franka_dof_pos - self.franka_dof_lower_limits) / (self.franka_dof_upper_limits - self.franka_dof_lower_limits) - 1.0 ) # Cloth self.cloths_pos = self._cloths.get_world_positions(clone=False) self.cloths_pos = torch.nan_to_num(self.cloths_pos) # shape (M,121,3) # cloths_pos_flat = torch.flatten(self.cloths_pos, start_dim=1) # shape (M,121,3) cloth_mean_x = torch.mean(self.cloths_pos[:,:,0], dim=1).reshape(self.num_envs, 1) cloth_mean_y = torch.mean(self.cloths_pos[:,:,1], dim=1).reshape(self.num_envs, 1) cloth_mean_z = torch.mean(self.cloths_pos[:,:,2], dim=1).reshape(self.num_envs, 1) self.cloths_pos_mean = torch.cat((cloth_mean_x, cloth_mean_y, cloth_mean_z),1) # location cube self.location_cube_pos, self.location_cube_rot = self._location_cube.get_world_poses(clone=False) self.location_cube_pos = torch.nan_to_num(self.location_cube_pos) to_target = self.cloths_pos_mean - self.location_cube_pos self.obs_buf = torch.cat( ( dof_pos_scaled, franka_dof_vel * self.dof_vel_scale, # cloths_pos_flat, self.cloths_pos_mean, to_target, self.location_cube_pos, # self.handle_rot, # self.location_ball_pos # self.cabinet_dof_pos[:, 3].unsqueeze(-1), # self.cabinet_dof_vel[:, 3].unsqueeze(-1), ), dim=-1, ) observations = { self._frankas.name: { "obs_buf": self.obs_buf } } return observations def pre_physics_step(self, actions) -> None: if not self._env._world.is_playing(): return reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) # if action noise if self.is_action_noise is True: # Gaussian white noise with 0.01 variance self.actions = self.actions + (0.5)*torch.randn_like(self.actions) targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits) env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device) # Release condition location_cube_pos, location_cube_rot = self._location_cube.get_world_poses() location_cube_pos = torch.nan_to_num(location_cube_pos) cloths_pos = self._cloths.get_world_positions() cloths_pos = torch.nan_to_num(cloths_pos) cloth_mean_x = torch.mean(cloths_pos[:,:,0], dim=1).reshape(self.num_envs, 1) cloth_mean_y = torch.mean(cloths_pos[:,:,1], dim=1).reshape(self.num_envs, 1) cloth_mean_z = torch.mean(cloths_pos[:,:,2], dim=1).reshape(self.num_envs, 1) cloths_pos_mean = torch.cat((cloth_mean_x, cloth_mean_y, cloth_mean_z),1) center_dist = torch.norm(location_cube_pos[:,0:2] - cloths_pos_mean[:,0:2], p=2, dim=-1) cloth_vel = self._cloths.get_velocities() cloth_vel = torch.nan_to_num(cloth_vel) cloth_vel_x = torch.mean(cloth_vel[:,:,0], dim=1).reshape(self.num_envs, 1) cloth_vel_y = torch.mean(cloth_vel[:,:,1], dim=1).reshape(self.num_envs, 1) cloth_vel_z = torch.mean(cloth_vel[:,:,2], dim=1).reshape(self.num_envs, 1) cloths_vel_mean = torch.cat((cloth_vel_x, cloth_vel_y, cloth_vel_z),1) vel = torch.norm(cloths_vel_mean, p=2, dim=-1) release_condition = torch.logical_and(center_dist<0.07, cloths_pos_mean[:,2] > location_cube_pos[:,2]) release_condition = torch.logical_and(release_condition, vel<0.1) self.franka_dof_targets[:,7] = torch.where(release_condition, 0.15, self.franka_dof_targets[:,7]) self.franka_dof_targets[:,8] = torch.where(release_condition, 0.15, self.franka_dof_targets[:,8]) self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids): indices = env_ids.to(dtype=torch.int32) num_indices = len(indices) # reset franka pos = torch.clamp( self.franka_default_dof_pos.unsqueeze(0), #+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5), self.franka_dof_lower_limits, self.franka_dof_upper_limits, ) dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device) dof_pos[:, :] = pos self.franka_dof_targets[env_ids, :] = pos self.franka_dof_pos[env_ids, :] = pos self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices) self._frankas.set_joint_positions(dof_pos, indices=indices) self._frankas.set_joint_velocities(dof_vel, indices=indices) # Reset cloth self._cloths.set_world_positions(self.default_cloth_pos, indices=indices) self._cloths.set_velocities(self.default_cloth_vel, indices=indices) if not self.is_test: # Reset cloth bin # reset positions: x: [-0.1,0.2], y:[-0.35,0.35] random_x = (0.2 + 0.1) * torch.rand(self._num_envs, device=self._device) - 0.1 random_y = (0.35 + 0.35) * torch.rand(self._num_envs, device=self._device) - 0.35 self.new_location_cube_pos = self.default_target_table_pos.clone().detach() self.new_location_cube_pos[:,0] = self.default_target_table_pos[:,0] + random_x self.new_location_cube_pos[:,1] = self.default_target_table_pos[:,1] + random_y self._target_table.set_world_poses(self.new_location_cube_pos[env_ids], self.default_target_table_rot[env_ids], indices = indices) self._target_table.set_velocities(self.default_target_table_velocity[env_ids], indices = indices) else: random_x = self.initial_test_value[0] random_y = self.initial_test_value[1] self.new_location_cube_pos = self.default_target_table_pos.clone().detach() self.new_location_cube_pos[:,0] = self.default_target_table_pos[:,0] + random_x self.new_location_cube_pos[:,1] = self.default_target_table_pos[:,1] + random_y self._target_table.set_world_poses(self.new_location_cube_pos[env_ids], self.default_target_table_rot[env_ids], indices = indices) self._target_table.set_velocities(self.default_target_table_velocity[env_ids], indices = indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): # Franka self.num_franka_dofs = self._frankas.num_dof self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device) dof_limits = self._frankas.get_dof_limits() self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits) self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1 self.franka_dof_targets = torch.zeros( (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device ) # Cloth self.default_cloth_pos = self._cloths.get_world_positions() self.default_cloth_vel = torch.zeros([self._num_envs, self._cloths.max_particles_per_cloth, 3], device=self._device) # Target table self.default_target_table_pos, self.default_target_table_rot = self._target_table.get_world_poses() self.default_target_table_velocity = self._target_table.get_velocities() # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def initialize_cloth(self, franka_positions): stage = get_current_stage() # parameters dimx = 10 dimy = 10 scale = 0.3 for i in range(self._num_envs): # Note here: cannot put into the same envs (env/env_i) due to unknown bugs env_path = "/World/Env" + str(i) env = UsdGeom.Xform.Define(stage, env_path) # set up the geometry cloth_path = env.GetPrim().GetPath().AppendChild("cloth") plane_mesh = UsdGeom.Mesh.Define(stage, cloth_path) tri_points, tri_indices = deformableUtils.create_triangle_mesh_square(dimx=dimx, dimy=dimy, scale=scale) initial_positions = torch.zeros((self.num_envs, len(tri_points), 3)) plane_mesh.GetPointsAttr().Set(tri_points) plane_mesh.GetFaceVertexIndicesAttr().Set(tri_indices) plane_mesh.GetFaceVertexCountsAttr().Set([3] * (len(tri_indices) // 3)) # initial locations of the cloth franka_positions_np = franka_positions.detach().to('cpu').numpy() init_loc = Gf.Vec3f(float(franka_positions_np[i][0] - 0.5), float(franka_positions_np[i][1] ), float(franka_positions_np[i][2] + 0.65)) physicsUtils.setup_transform_as_scale_orient_translate(plane_mesh) physicsUtils.set_or_add_translate_op(plane_mesh, init_loc) physicsUtils.set_or_add_orient_op(plane_mesh, Gf.Rotation(Gf.Vec3d([1, 0, 0]), 90).GetQuat()) initial_positions[i] = torch.tensor(init_loc) + torch.tensor(plane_mesh.GetPointsAttr().Get()) particle_system_path = env.GetPrim().GetPath().AppendChild("particleSystem") particle_material_path = env.GetPrim().GetPath().AppendChild("particleMaterial") particle_material = ParticleMaterial( prim_path=particle_material_path, drag=0.1, lift=0.3, friction=10.0 ) # parameters for the properties of the cloth # radius = 0.005 radius = 0.5 * (scale / dimx) # size rest offset according to plane resolution and width so that particles are just touching at rest restOffset = radius contactOffset = restOffset * 1.5 particle_system = ParticleSystem( prim_path=particle_system_path, simulation_owner=self._env._world.get_physics_context().prim_path, rest_offset=restOffset, contact_offset=contactOffset, solid_rest_offset=restOffset, fluid_rest_offset=restOffset, particle_contact_offset=contactOffset, ) # note that no particle material is applied to the particle system at this point. # this can be done manually via self.particle_system.apply_particle_material(self.particle_material) # or to pass the material to the clothPrim which binds it internally to the particle system stretch_stiffness = 100000.0 bend_stiffness = 100.0 shear_stiffness = 100.0 spring_damping = 0.1 particle_mass = 0.005 cloth = ClothPrim( name="clothPrim" + str(i), prim_path=str(cloth_path), particle_system=particle_system, particle_material=particle_material, stretch_stiffness=stretch_stiffness, bend_stiffness=bend_stiffness, shear_stiffness=shear_stiffness, spring_damping=spring_damping, particle_mass = particle_mass, self_collision=True, self_collision_filter=True, ) self._scene.add(cloth) def calculate_metrics(self) -> None: # center_dist = torch.norm(self.location_cube_pos - self.cloths_pos_mean, p=2, dim=-1) location_cube_pos = self.location_cube_pos center_dist = torch.norm(location_cube_pos - self.cloths_pos_mean, p=2, dim=-1) center_dist_reward = 1.0/(1.0+center_dist) # finger reward # franka_lfinger_pos = torch.nan_to_num(self.franka_lfinger_pos) # franka_rfinger_pos = torch.nan_to_num(self.franka_rfinger_pos) # finger_center = (franka_lfinger_pos + franka_rfinger_pos)/2 # target = self.location_cube_pos # target[:,2] = target[:,2] + 0.3 # finger_dist = torch.norm(finger_center - target, p=2, dim=-1) # finger_dist_reward = 1.0/(1.0+finger_dist) lfinger_vel = torch.nan_to_num(self._frankas._lfingers.get_velocities()) rfinger_vel = torch.nan_to_num(self._frankas._rfingers.get_velocities()) finger_vel = (lfinger_vel + rfinger_vel)/2 finger_vel_norm = torch.norm(finger_vel, p=2, dim=-1) finger_vel_reward = 1.0/(1.0+finger_vel_norm) # finger rotation franka_lfinger_rot = torch.nan_to_num(self.franka_lfinger_rot) franka_rfinger_rot = torch.nan_to_num(self.franka_rfinger_rot) mean_rot = (franka_lfinger_rot + franka_rfinger_rot)/2 rot_target = torch.zeros_like(franka_lfinger_rot) rot_target[:,2] = 1 rot_distance = torch.norm(mean_rot - rot_target, p=2, dim=-1) rot_distance_reward = 1.0/(1.0+rot_distance) # cloth velocities cloth_vel = self._cloths.get_velocities() cloth_vel = torch.nan_to_num(cloth_vel) cloth_vel_x = torch.mean(cloth_vel[:,:,0], dim=1).reshape(self.num_envs, 1) cloth_vel_y = torch.mean(cloth_vel[:,:,1], dim=1).reshape(self.num_envs, 1) cloth_vel_z = torch.mean(cloth_vel[:,:,2], dim=1).reshape(self.num_envs, 1) cloths_vel_mean = torch.cat((cloth_vel_x, cloth_vel_y, cloth_vel_z),1) vel = torch.norm(cloths_vel_mean, p=2, dim=-1) vel_reward = 1.0/(1.0+vel) # stay alive live_reward = torch.where(self.cloths_pos_mean[:,2] > 0.3, torch.ones_like(self.cloths_pos_mean[:,2]), torch.zeros_like(self.cloths_pos_mean[:,2])) # franka velocities # franka_dof_vel = self._frankas.get_joint_velocities() # franka_dof_vel = torch.nan_to_num(franka_dof_vel) # dof_vel_mean = torch.norm(franka_dof_vel, p=2, dim=-1) # dof_vel_reward = 1.0/(1.0+dof_vel_mean) # is complete is_complete = torch.where(torch.logical_and(center_dist < 0.05, vel<0.1), torch.ones_like(center_dist), torch.zeros_like(center_dist)) # if torch.any(torch.isnan(self.cloths_pos_mean)): # print("NAN", self.cloths_pos_mean) # x_dist = torch.abs(self.location_cube_pos[:,0] - self.cloths_pos_mean[:,0]) # x_dist_reward = 1.0/(1.0+x_dist) # y_dist = torch.abs(self.location_cube_pos[:,1] - self.cloths_pos_mean[:,1]) # y_dist_reward = 1.0/(1.0+y_dist) # z_dist = torch.abs(self.location_cube_pos[:,2] - self.cloths_pos_mean[:,2]) # z_dist_reward = 1.0/(1.0+z_dist) final_reward = 7.0*center_dist_reward + 10.0*is_complete + 1.0*rot_distance_reward + 1.0*live_reward \ + 1.0*vel_reward + 1.0*finger_vel_reward # TO BE IMPLEMENTED self.rew_buf[:] = final_reward # log additional info self.episode_sums["center_dist"] += center_dist # self.episode_sums["y_dist"] += y_dist # self.episode_sums["z_dist"] += z_dist def is_done(self) -> None: if not self.is_test: cloths_pos_z = self.cloths_pos_mean[:,2] center_dist = torch.norm(self.location_cube_pos- self.cloths_pos_mean, p=2, dim=-1) # if cloth falls to the ground self.reset_buf = torch.where( (cloths_pos_z < 0.1), torch.ones_like(self.reset_buf), self.reset_buf) # if error in franka positions franka_dof_pos = self._frankas.get_joint_positions() is_pos_nan = torch.isnan(franka_dof_pos) is_pos_fault = torch.any(is_pos_nan,1) self.reset_buf = torch.where( is_pos_fault == True, torch.ones_like(self.reset_buf), self.reset_buf) franka_dof_vel = self._frankas.get_joint_velocities() is_vel_nan = torch.isnan(franka_dof_vel) is_vel_fault = torch.any(is_vel_nan,1) self.reset_buf = torch.where( is_vel_fault == True, torch.ones_like(self.reset_buf), self.reset_buf) # or complete the task # reset if max length reached self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) else: self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/utils/task_util.py
def initialize_task(config, env, init_sim=True): from Tasks.Franka_Door_Open import FrankaDoorOpenTask from Tasks.Franka_Cloth_Placing import FrankaClothPlacingTask from Tasks.Franka_Cube_Stacking import FrankaCubeStackingTask from Tasks.Franka_Ball_Pushing import FrankaBallPushingTask from Tasks.Franka_Ball_Balancing import FrankaBallBalancingTask from Tasks.Franka_Ball_Catching import FrankaBallCatchingTask from Tasks.Franka_Peg_In_Hole import FrankaPegInHoleTask from Tasks.Franka_Point_Reaching import FrankaPointReachingTask # Mappings from strings to environments task_map = { "FrankaDoorOpen": FrankaDoorOpenTask, "FrankaBallPushing": FrankaBallPushingTask, "FrankaBallBalancing": FrankaBallBalancingTask, "FrankaBallCatching": FrankaBallCatchingTask, "FrankaPegInHole": FrankaPegInHoleTask, "FrankaClothPlacing": FrankaClothPlacingTask, "FrankaCubeStacking": FrankaCubeStackingTask, "FrankaPointReaching": FrankaPointReachingTask, } from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig sim_config = SimConfig(config) cfg = sim_config.config task = task_map[cfg["task_name"]]( name=cfg["task_name"], sim_config=sim_config, env=env ) env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=init_sim) return task
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Final_Policy/README.md
This folder contains all the final policies after training. Used for evaluating the learning performance.
zhehuazhou/ai-cps-robotics-benchmark/Falsification_Tool/manipulator_testing.py
from model.skrl_oige_model import skrl_oige_model from monitor.stl_dense_offline import stl_dense_offline_monitor from optimizer.optimizer import Optimizer import os if __name__ == "__main__": # Task choice: PointReaching, PegInHole, DoorOpen, # BallBalancing, BallPushing, BallCatching # CubeStacking, ClothPlacing task_name = "FrankaBallBalancing" # agent agent_type = "PPO" # TRPO, PPO omniisaacgymenvs_path = os.path.realpath( os.path.join(os.path.realpath(__file__), "../../Gym_Envs") ) agent_path = ( omniisaacgymenvs_path + "/Final_Policy/BallBalancing/BallBalancing_skrl_" + agent_type + "/checkpoints/best_agent.pt" ) # config simulation_max_steps = 100 num_envs = 1 opt_types = [ # "random", "NelderMead", # "DualAnnealing", ] # random, NelderMead, DualAnnealing global_budget = 4 local_budget = 300 # Load model under test (drl agent + oige env) test_model = skrl_oige_model( agent_path=agent_path, agent_type=agent_type, task_name=task_name, num_envs=num_envs, timesteps=simulation_max_steps, ) for opt_type in opt_types: # Load STL monitor based on task monitor = stl_dense_offline_monitor(task_name=task_name, agent_type=agent_type) # global search for i in range(global_budget): print("Global trial: " + str(i)) # Create optimizer optimizer = Optimizer( task_name, test_model, monitor, opt_type=opt_type, budget_size=local_budget, ) # local search results = optimizer.optimize() print(results) # close simulation environment test_model.close_env()
zhehuazhou/ai-cps-robotics-benchmark/Falsification_Tool/model/skrl_oige_model.py
import os import torch from typing import Optional from .load_oige import load_oige_test_env from .agent.PPO_agent import create_skrl_ppo_agent from .agent.TRPO_agent import create_skrl_trpo_agent from skrl.envs.torch import wrap_env class skrl_oige_model(object): """Testing environment model based on SKRL and Omniverse Isaac Gym Environments (OIGE) agent_path: the path to the agent parameters (checkpoint) oige_path: path to the OIGE environment; agent_type: type of DRL agent (PPO, DDPG, TRPO) task_name: the name of the task num_envs: the number of parallel running environments headless: if show the GUI """ def __init__( self, agent_path: str, oige_path: Optional[str] = None, agent_type: Optional[str] = None, task_name: Optional[str] = None, timesteps: Optional[int] = 10000, num_envs: Optional[int] = 1, headless: Optional[bool] = False, ): # setup if oige_path is not None: self.oige_path = oige_path else: self.oige_path = os.path.realpath( os.path.join(os.path.realpath(__file__), "../../../Gym_Envs") ) if agent_type is not None: self.agent_type = agent_type else: self.agent_type = "PPO" if task_name is not None: self.task_name = task_name else: self.task_name = "FrankaBallPushing" self.agent_path = agent_path self.timesteps = timesteps self.headless = headless # Load OIGE env with skrl wrapper self.num_envs = num_envs # for testing, we use only 1 env for now env = load_oige_test_env( task_name=self.task_name, omniisaacgymenvs_path=self.oige_path, num_envs=self.num_envs, ) self.env = wrap_env(env) self.env._env.set_as_test() # Load agent if self.agent_type is "PPO": self.agent = create_skrl_ppo_agent(self.env, self.agent_path) elif self.agent_type is "TRPO": self.agent = create_skrl_trpo_agent(self.env, self.agent_path) else: raise ValueError("Agent type unknown.") # Initialize agent # cfg_trainer = {"timesteps": self.timesteps, "headless": self.headless} self.agent.init() if self.num_envs == 1: self.agent.set_running_mode("eval") else: raise ValueError("Currently only one environment (agent) is supported") # close env def close_env(self): self.env.close() # Compute the trace w.r.t a given initial condition def compute_trace(self, initial_value): # set initial configuration self.env._env.set_initial_test_value(initial_value) # reset env states, infos = self.env.reset() # initialize trace trace = states # simulation loop for timestep in range(self.timesteps): # compute actions with torch.no_grad(): actions = self.agent.act( states, timestep=timestep, timesteps=self.timesteps )[0] # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() # record trace states.copy_(next_states) trace = torch.vstack([trace, states]) # terminate simulation with torch.no_grad(): if terminated.any() or truncated.any(): break return trace # Merge trace based on the task type def merge_trace(self, trace): if self.task_name is "FrankaBallPushing": # Ball hole distance ball_hole_distance = trace[:, 24:27].detach().cpu() ball_hole_distance = torch.norm(ball_hole_distance, p=2, dim=-1) # create index trace_length = list(ball_hole_distance.size())[0] times = torch.linspace(1, trace_length, steps=trace_length) # convert to list for computing robustness indexed_trace = torch.vstack((times, ball_hole_distance)) indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist() elif self.task_name is "FrankaBallBalancing": # Ball tool distance ball_tool_distance = trace[:, 21:23].detach().cpu() ball_tool_distance = torch.norm(ball_tool_distance, p=2, dim=-1) # create index trace_length = list(ball_tool_distance.size())[0] times = torch.linspace(1, trace_length, steps=trace_length) # convert to list for computing robustness indexed_trace = torch.vstack((times, ball_tool_distance)) indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist() elif self.task_name is "FrankaBallCatching": # Ball tool distance ball_tool_distance = trace[:, 21:23].detach().cpu() ball_tool_distance = torch.norm(ball_tool_distance, p=2, dim=-1) # create index trace_length = list(ball_tool_distance.size())[0] times = torch.linspace(1, trace_length, steps=trace_length) # convert to list for computing robustness indexed_trace = torch.vstack((times, ball_tool_distance)) indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist() elif self.task_name is "FrankaCubeStacking": # Cube distance cube_distance = trace[:, 25:27].detach().cpu() cube_distance = torch.norm(cube_distance, p=2, dim=-1) # Cube height cube_height_distance = trace[:, 27].detach().cpu() # create index trace_length = list(cube_distance.size())[0] times = torch.linspace(1, trace_length, steps=trace_length) # convert to list for computing robustness indexed_cube_distance = torch.vstack((times, cube_distance)) indexed_cube_distance = torch.transpose( indexed_cube_distance, 0, 1 ).tolist() indexed_cube_height_distance = torch.vstack((times, cube_height_distance)) indexed_cube_height_distance = torch.transpose( indexed_cube_height_distance, 0, 1 ).tolist() indexed_trace = { "distance_cube": indexed_cube_distance, "z_cube_distance": indexed_cube_height_distance, } elif self.task_name is "FrankaDoorOpen": # Ball tool distance handle_rot = trace[:, 21:25].detach().cpu() handle_yaw = torch.atan2( 2.0 * ( handle_rot[:, 0] * handle_rot[:, 3] + handle_rot[:, 1] * handle_rot[:, 2] ), 1.0 - 2.0 * ( handle_rot[:, 2] * handle_rot[:, 2] + handle_rot[:, 3] * handle_rot[:, 3] ), ) handle_yaw = torch.rad2deg(handle_yaw) # create index trace_length = list(handle_yaw.size())[0] times = torch.linspace(1, trace_length, steps=trace_length) # convert to list for computing robustness indexed_trace = torch.vstack((times, handle_yaw)) indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist() elif self.task_name is "FrankaPegInHole": # Ball tool distance tool_hole_distance = trace[:, 25:27].detach().cpu() tool_hole_distance = torch.norm(tool_hole_distance, p=2, dim=-1) # print(tool_hole_distance) # create index trace_length = list(tool_hole_distance.size())[0] times = torch.linspace(1, trace_length, steps=trace_length) # convert to list for computing robustness indexed_trace = torch.vstack((times, tool_hole_distance)) indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist() elif self.task_name is "FrankaPointReaching": # Ball tool distance finger_target_distance = trace[:, 24:27].detach().cpu() finger_target_distance = torch.norm(finger_target_distance, p=2, dim=-1) # create index trace_length = list(finger_target_distance.size())[0] times = torch.linspace(1, trace_length, steps=trace_length) # convert to list for computing robustness indexed_trace = torch.vstack((times, finger_target_distance)) indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist() elif self.task_name is "FrankaClothPlacing": # Cube distance cloth_target_distance = trace[:, 21:24].detach().cpu() cloth_target_distance = torch.norm(cloth_target_distance, p=2, dim=-1) # Cube height cloth_height = trace[:, 20].detach().cpu() # create index trace_length = list(cloth_target_distance.size())[0] times = torch.linspace(1, trace_length, steps=trace_length) # convert to list for computing robustness indexed_distance_cloth_target = torch.vstack((times, cloth_target_distance)) indexed_distance_cloth_target = torch.transpose( indexed_distance_cloth_target, 0, 1 ).tolist() indexed_cloth_height = torch.vstack((times, cloth_height)) indexed_cloth_height = torch.transpose( indexed_cloth_height, 0, 1 ).tolist() indexed_trace = { "distance_cloth_target": indexed_distance_cloth_target, "cloth_height": indexed_cloth_height, } else: raise ValueError("Task name unknown for merging the trace") return indexed_trace
zhehuazhou/ai-cps-robotics-benchmark/Falsification_Tool/model/load_oige.py
""" This is a copy from SKRL's implementation of loading oige environment, with modifications for generating testing oige environment """ import sys import os from contextlib import contextmanager def _omegaconf_to_dict(config) -> dict: """Convert OmegaConf config to dict :param config: The OmegaConf config :type config: OmegaConf.Config :return: The config as dict :rtype: dict """ # return config.to_container(dict) from omegaconf import DictConfig d = {} for k, v in config.items(): d[k] = _omegaconf_to_dict(v) if isinstance(v, DictConfig) else v return d def _print_cfg(d, indent=0) -> None: """Print the environment configuration :param d: The dictionary to print :type d: dict :param indent: The indentation level (default: 0) :type indent: int, optional """ for key, value in d.items(): if isinstance(value, dict): _print_cfg(value, indent + 1) else: print(' | ' * indent + " |-- {}: {}".format(key, value)) def load_oige_test_env(task_name: str = "", omniisaacgymenvs_path: str = "", num_envs: int = 1, show_cfg: bool = True, timeout: int = 30): """Load an Omniverse Isaac Gym environment, this is a slight modification of SKRL's implementation :param task_name: The name of the task (default: ""). If not specified, the task name is taken from the command line argument (``task=TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param omniisaacgymenvs_path: The path to the ``omniisaacgymenvs`` directory (default: ""). If empty, the path will obtained from omniisaacgymenvs package metadata :type omniisaacgymenvs_path: str, optional :param show_cfg: Whether to print the configuration (default: True) :type show_cfg: bool, optional :param timeout: Seconds to wait for data when queue is empty in multi-threaded environment (default: 30) :type timeout: int, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The omniisaacgymenvs package is not installed or the path is wrong :return: Omniverse Isaac Gym environment :rtype: omni.isaac.gym.vec_env.vec_env_base.VecEnvBase or omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT """ import torch from hydra.types import RunMode from hydra._internal.hydra import Hydra from hydra._internal.utils import create_automatic_config_search_path, get_args_parser from omegaconf import OmegaConf from omni.isaac.gym.vec_env import VecEnvBase, TaskStopException import omniisaacgymenvs sys.argv.append("task={}".format(task_name)) sys.argv.append("num_envs={}".format(num_envs)) # get omniisaacgymenvs path from omniisaacgymenvs package metadata if omniisaacgymenvs_path == "": if not hasattr(omniisaacgymenvs, "__path__"): raise RuntimeError("omniisaacgymenvs package is not installed") omniisaacgymenvs_path = list(omniisaacgymenvs.__path__)[0] config_path = os.path.join(omniisaacgymenvs_path, "cfg") # set omegaconf resolvers OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower()) OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower()) OmegaConf.register_new_resolver('if', lambda condition, a, b: a if condition else b) OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg) # get hydra config without use @hydra.main config_file = "config" args = get_args_parser().parse_args() search_path = create_automatic_config_search_path(config_file, None, config_path) hydra_object = Hydra.create_main_hydra2(task_name='load_omniisaacgymenv', config_search_path=search_path) config = hydra_object.compose_config(config_file, args.overrides, run_mode=RunMode.RUN) cfg = {} cfg["task"] = _omegaconf_to_dict(config.task) cfg["task_name"] = config.task_name cfg["experiment"] = config.experiment cfg["num_envs"] = config.num_envs cfg["seed"] = config.seed cfg["torch_deterministic"] = config.torch_deterministic cfg["max_iterations"] = config.max_iterations cfg["physics_engine"] = config.physics_engine cfg["pipeline"] = config.pipeline cfg["sim_device"] = config.sim_device cfg["device_id"] = config.device_id cfg["rl_device"] = config.rl_device cfg["num_threads"] = config.num_threads cfg["solver_type"] = config.solver_type cfg["test"] = config.test cfg["checkpoint"] = config.checkpoint cfg["headless"] = config.headless # print config if show_cfg: print("\nOmniverse Isaac Gym environment ({})".format(config.task.name)) _print_cfg(cfg) # internal classes class _OmniIsaacGymVecEnv(VecEnvBase): def step(self, actions): actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone() self._task.pre_physics_step(actions) for _ in range(self._task.control_frequency_inv): self._world.step(render=self._render) self.sim_frame_count += 1 observations, rewards, dones, info = self._task.post_physics_step() return {"obs": torch.clamp(observations, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()}, \ rewards.to(self._task.rl_device).clone(), dones.to(self._task.rl_device).clone(), info.copy() def set_as_test(self): self._task.set_as_test() def set_initial_test_value(self, value): self._task.set_initial_test_value(value) def reset(self): self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device) return self.step(actions)[0] # load environment sys.path.append(omniisaacgymenvs_path) from utils.task_util import initialize_task env = _OmniIsaacGymVecEnv(headless=config.headless) task = initialize_task(cfg, env, init_sim=True) return env
zhehuazhou/ai-cps-robotics-benchmark/Falsification_Tool/model/agent/TRPO_agent.py
""" Create PPO agent based on SKRL implementation """ import torch.nn as nn import torch from skrl.models.torch import Model, GaussianMixin, DeterministicMixin from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG from skrl.resources.preprocessors.torch import RunningStandardScaler # Define the models (stochastic and deterministic models) for the agent using mixins. # - Policy: takes as input the environment's observation/state and returns an action # - Value: takes the state as input and provides a value to guide the policy class Policy_2_Layers(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class Policy_3_Layers(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class Value_2_Layers(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} class Value_3_Layers(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # Create SKRL PPO agent def create_skrl_trpo_agent(env, agent_path): device = env.device models_trpo_2_layer = {} models_trpo_2_layer["policy"] = Policy_2_Layers(env.observation_space, env.action_space, device) models_trpo_2_layer["value"] = Value_2_Layers(env.observation_space, env.action_space, device) models_trpo_3_layer = {} models_trpo_3_layer["policy"] = Policy_3_Layers(env.observation_space, env.action_space, device) models_trpo_3_layer["value"] = Value_3_Layers(env.observation_space, env.action_space, device) # Configs cfg_trpo = TRPO_DEFAULT_CONFIG.copy() cfg_trpo["state_preprocessor"] = RunningStandardScaler cfg_trpo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg_trpo["value_preprocessor"] = RunningStandardScaler cfg_trpo["value_preprocessor_kwargs"] = {"size": 1, "device": device} # no log to TensorBoard and write checkpoints cfg_trpo["experiment"]["write_interval"] = 0 cfg_trpo["experiment"]["checkpoint_interval"] = 0 try: # Initialize and load agent with 2 layers agent = TRPO(models=models_trpo_2_layer, memory=None, cfg=cfg_trpo, observation_space=env.observation_space, action_space=env.action_space, device=device) agent.load(agent_path) except: # Initialize and load agent with 3 layers agent = TRPO(models=models_trpo_3_layer, memory=None, cfg=cfg_trpo, observation_space=env.observation_space, action_space=env.action_space, device=device) agent.load(agent_path) return agent
zhehuazhou/ai-cps-robotics-benchmark/Falsification_Tool/model/agent/PPO_agent.py
""" Create PPO agent based on SKRL implementation """ import torch.nn as nn import torch from skrl.models.torch import Model, GaussianMixin, DeterministicMixin from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.resources.preprocessors.torch import RunningStandardScaler # Define the shared model (stochastic and deterministic models) for the agent using mixins. class Shared(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU()) self.mean_layer = nn.Linear(128, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) self.value_layer = nn.Linear(128, 1) def act(self, inputs, role): if role == "policy": return GaussianMixin.act(self, inputs, role) elif role == "value": return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if role == "policy": return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {} elif role == "value": return self.value_layer(self.net(inputs["states"])), {} # Create SKRL PPO agent def create_skrl_ppo_agent(env, agent_path): device = env.device models_ppo = {} models_ppo["policy"] = Shared(env.observation_space, env.action_space, device) models_ppo["value"] = models_ppo["policy"] # same instance: shared model # Configs cfg_ppo = PPO_DEFAULT_CONFIG.copy() cfg_ppo["state_preprocessor"] = RunningStandardScaler cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg_ppo["value_preprocessor"] = RunningStandardScaler cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard each 100 timesteps and ignore checkpoints cfg_ppo["experiment"]["write_interval"] = 0 cfg_ppo["experiment"]["checkpoint_interval"] = 0 # Initialize and load agent agent = PPO(models=models_ppo, memory=None, cfg=cfg_ppo, observation_space=env.observation_space, action_space=env.action_space, device=device) agent.load(agent_path) return agent
zhehuazhou/ai-cps-robotics-benchmark/Falsification_Tool/monitor/stl_dense_offline.py
from rtamt import STLDenseTimeSpecification from typing import Optional import sys class stl_dense_offline_monitor(object): """STL dense time offline monitor based rtamt agent_path: the path to the agent parameters (checkpoint) oige_path: path to the OIGE environment; task_name: the name of the task """ def __init__( self, task_name: Optional[str] = None, agent_type: Optional[str] = None, oige_path: Optional[str] = None, ): if task_name is not None: self.task_name = task_name else: self.task_name = "FrankaBallPushing" self.agent_type = agent_type self.generate_spec() # generate specification based on task name def generate_spec(self): # Initialization self.spec = STLDenseTimeSpecification() self.spec.name = "STL Dense-time Offline Monitor" ############################################### # Specification according to task # Ball Pushing if self.task_name is "FrankaBallPushing": self.spec.declare_var("distance_ball_hole", "float") self.spec.spec = "eventually[1:299](distance_ball_hole <= 0.3) " # Ball Balancing elif self.task_name is "FrankaBallBalancing": self.spec.declare_var("distance_ball_tool", "float") self.spec.spec = "always[50:200]( distance_ball_tool <= 0.25)" # Ball Catching elif self.task_name is "FrankaBallCatching": self.spec.declare_var("distance_ball_tool", "float") self.spec.spec = "always[50:299]( distance_ball_tool <= 0.1)" # Cube Stacking elif self.task_name is "FrankaCubeStacking": self.spec.declare_var("distance_cube", "float") self.spec.declare_var("z_cube_distance", "float") self.spec.spec = ( "eventually[1:299]((distance_cube<= 0.024) and (z_cube_distance>0) )" ) # Door Open elif self.task_name is "FrankaDoorOpen": self.spec.declare_var("yaw_door", "float") self.spec.spec = "eventually[1:299]( yaw_door >= 20)" # Peg In Hole elif self.task_name is "FrankaPegInHole": self.spec.declare_var("distance_tool_hole", "float") self.spec.spec = "always[250:299]( distance_tool_hole <= 0.1)" # Point Reaching elif self.task_name is "FrankaPointReaching": self.spec.declare_var("distance_finger_target", "float") self.spec.spec = "always[50:299]( distance_finger_target <= 0.12)" # fixed # Cloth Placing elif self.task_name is "FrankaClothPlacing": self.spec.declare_var("distance_cloth_target", "float") self.spec.declare_var("cloth_height", "float") self.spec.spec = "eventually[1:299]( (distance_cloth_target <= 0.25))" # and (cloth_height > 0.1) )" else: raise ValueError("Task name unknown for defining the specification") ################################################ # Load specification try: self.spec.parse() except rtamt.STLParseException as err: print("STL Parse Exception: {}".format(err)) sys.exit() # Compute the robustness given trace def compute_robustness(self, trace): if self.task_name is "FrankaBallPushing": # print(trace) robustness = self.spec.evaluate(["distance_ball_hole", trace]) # print(robustness) elif self.task_name is "FrankaBallBalancing": robustness = self.spec.evaluate(["distance_ball_tool", trace]) elif self.task_name is "FrankaBallCatching": robustness = self.spec.evaluate(["distance_ball_tool", trace]) elif self.task_name is "FrankaCubeStacking": distance_cube = trace["distance_cube"] z_cube_distance = trace["z_cube_distance"] robustness = self.spec.evaluate( ["distance_cube", distance_cube], ["z_cube_distance", z_cube_distance] ) elif self.task_name is "FrankaDoorOpen": robustness = self.spec.evaluate(["yaw_door", trace]) elif self.task_name is "FrankaPegInHole": robustness = self.spec.evaluate(["distance_tool_hole", trace]) elif self.task_name is "FrankaPointReaching": robustness = self.spec.evaluate(["distance_finger_target", trace]) elif self.task_name is "FrankaClothPlacing": distance_cloth_target = trace["distance_cloth_target"] cloth_height = trace["cloth_height"] robustness = self.spec.evaluate( ["distance_cloth_target", distance_cloth_target]#, ["cloth_height", cloth_height] ) else: raise ValueError("Task name unknown for defining the specification") return robustness
zhehuazhou/ai-cps-robotics-benchmark/Falsification_Tool/optimizer/optimizer.py
from typing import Optional import sys import numpy as np import torch import time from scipy.optimize import minimize from scipy.optimize import dual_annealing class Optimizer(object): """Optimizer class for testing task_name: the task name of environment test_model: the model under test monitor: the monitor for the STL specification opt_type: type of the optimizer budget_size: local budget size """ def __init__( self, task_name, test_model, monitor, opt_type: Optional[str] = "random", budget_size: Optional[int] = 1000, ): self.task_name = task_name self.test_model = test_model self.monitor = monitor self.opt_type = opt_type self.budget_size = budget_size self.fal_succ = False self.start_time = time.time() self.fal_time = 0 self.fal_sim = 0 self.worst_rob = 1000 # initial value bounds if self.task_name is "FrankaBallPushing": self.bnds = ((-0.1, 0.1), (-0.1, 0.1)) elif self.task_name is "FrankaBallBalancing": self.bnds = ((-0.15, 0.15), (-0.15, 0.15)) elif self.task_name is "FrankaBallCatching": # self.bnds = ((-0.1, 0.1), (-0.2, 0.2), (1.0, 3.0), (-1.0, 1.0)) self.bnds = ((-0.05, 0.05), (-0.05, 0.05), (1.0, 1.001), (0.0, 0.001)) elif self.task_name is "FrankaCubeStacking": self.bnds = ((-0.2, 0.2), (-0.2, 0.2)) elif self.task_name is "FrankaDoorOpen": self.bnds = ((-0.025, 0.025), (-0.05, 0.05)) elif self.task_name is "FrankaPegInHole": self.bnds = ((-0.1, 0.1), (-0.1, 0.1)) elif self.task_name is "FrankaPointReaching": self.bnds = ((-0.2, 0.2), (-0.4, 0.4), (-0.2, 0.2)) elif self.task_name is "FrankaClothPlacing": self.bnds = ((-0.1, 0.2), (-0.35, 0.35)) else: raise ValueError("Task name unknown for generating the initial values") # generate initial values based on the task type def generate_initial(self): if self.task_name is "FrankaBallPushing": # ball inside an area x:[-0.1,0.1], y:[-0.1,0.1] value_1 = np.random.rand(1) * (0.1 + 0.1) - 0.1 value_2 = np.random.rand(1) * (0.1 + 0.1) - 0.1 initial_value = np.hstack((value_1, value_2)) elif self.task_name is "FrankaBallBalancing": # ball inside an area x:[-0.15,0.15], y:[-0.15,0.15] value_1 = np.random.rand(1) * (0.15 + 0.15) - 0.15 value_2 = np.random.rand(1) * (0.15 + 0.15) - 0.15 initial_value = np.hstack((value_1, value_2)) elif self.task_name is "FrankaBallCatching": # ball inside an area x:[-0.1,0.1], y:[-0.1,0.1] # ball velociry: vx: [1.0,1.5], vy: [0.0,0.2] value_1 = np.random.rand(1) * (0.05 + 0.05) - 0.05 value_2 = np.random.rand(1) * (0.05 + 0.05) - 0.05 value_3 = np.random.rand(1) * (1.0 - 1.0) + 1.0 value_4 = np.random.rand(1) * (0.0 + 0.0) + 0.0 initial_value = np.hstack((value_1, value_2, value_3, value_4)) elif self.task_name is "FrankaCubeStacking": # target cube inside an area x:[-0.2,0.2], y:[-0.2,0.2] value_1 = np.random.rand(1) * (0.2 + 0.2) - 0.2 value_2 = np.random.rand(1) * (0.2 + 0.2) - 0.2 initial_value = np.hstack((value_1, value_2)) elif self.task_name is "FrankaDoorOpen": # target inside an area x:[-0.1,0.1], y:[-0.4,0.4] value_1 = np.random.rand(1) * (0.005 + 0.005) - 0.005 value_2 = np.random.rand(1) * (0.025 + 0.025) - 0.025 initial_value = np.hstack((value_1, value_2)) elif self.task_name is "FrankaPegInHole": # target inside an area x:[-0.2,0.2], y:[-0.2,0.2] value_1 = np.random.rand(1) * (0.1 + 0.1) - 0.1 value_2 = np.random.rand(1) * (0.1 + 0.1) - 0.1 initial_value = np.hstack((value_1, value_2)) elif self.task_name is "FrankaPointReaching": # target inside an area x:[-0.2,0.2], y:[-0.4,0.4], z:[-0.2,0.2] value_1 = np.random.rand(1) * (0.2 + 0.2) - 0.2 value_2 = np.random.rand(1) * (0.4 + 0.4) - 0.4 value_3 = np.random.rand(1) * (0.2 + 0.2) - 0.2 initial_value = np.hstack((value_1, value_2, value_3)) elif self.task_name is "FrankaClothPlacing": # target inside an area x:[-0.1,0.2], y:[-0.35,0.35] value_1 = np.random.rand(1) * (0.2 + 0.1) - 0.1 value_2 = np.random.rand(1) * (0.35 + 0.35) - 0.35 initial_value = np.hstack((value_1, value_2)) else: raise ValueError("Task name unknown for generating the initial values") return initial_value # Generate one function (input: initial values, output: robustness) for testing algorithms def robustness_function(self, initial_value): # print("Initial Value:", initial_value) # Get trace trace = self.test_model.compute_trace(initial_value) indexed_trace = self.test_model.merge_trace(trace) # compute robustness rob_sequence = self.monitor.compute_robustness(indexed_trace) rob_sequence = np.array(rob_sequence) # RTAMT is for monitoring, so for eventually, the robustness computed from the current timepoint to the end # workaround to compute the maximum if ( self.task_name is "FrankaBallPushing" or self.task_name is "FrankaCubeStacking" or self.task_name is "FrankaDoorOpen" or self.task_name is "FrankaPegInHole" or self.task_name is "FrankaClothPlacing" ): min_rob = np.max(rob_sequence[:, 1]) else: min_rob = np.min(rob_sequence[:, 1]) # print("Min Robustness:", min_rob) if min_rob < self.worst_rob: self.worst_rob = min_rob if min_rob < 0 and self.fal_succ == False: self.fal_succ = True self.fal_time = time.time() - self.start_time elif self.fal_succ == False: self.fal_sim += 1 return min_rob # optimization based on the optimizer type def optimize(self): if self.opt_type is "random": results = self.optimize_random() return results elif self.opt_type is "NelderMead": results = self.optimize_nelder_mead() return results elif self.opt_type is "DualAnnealing": results = self.optimize_dual_annealing() return results else: raise ValueError("Optimizer type undefined!") # Random optimization def optimize_random(self): # worst_initial = None # worst_trace = None initial_value_record = None rob_value_record = None # Random optimizer for i in range(self.budget_size): # random initial value initial_value = self.generate_initial() # compute robustness min_rob = self.robustness_function(initial_value) # update record if i == 0: initial_value_record = initial_value rob_value_record = np.array([min_rob]) # worst_initial = initial_value # worst_trace = trace self.worst_rob = min_rob else: initial_value_record = np.vstack((initial_value_record, initial_value)) rob_value_record = np.vstack((rob_value_record, np.array([min_rob]))) if min_rob < self.worst_rob: # worst_initial = initial_value # worst_trace = trace self.worst_rob = min_rob if min_rob < 0: # self.fal_succ = True # self.fal_time = time.time() - self.start_time if i == 0: self.fal_sim = 1 break # results = {'worst_initial': worst_initial, 'worst_rob': worst_rob, # 'initial_value_record': initial_value_record, 'rob_value_record': rob_value_record} if self.fal_succ == False: self.fal_time = time.time() - self.start_time results = [self.fal_succ, self.fal_time, self.fal_sim, self.worst_rob] return results # Nelder Mead optimization def optimize_nelder_mead(self): initial_guess = self.generate_initial() # minimization results = minimize( self.robustness_function, initial_guess, method="Nelder-Mead", bounds=self.bnds, options={"maxfev": self.budget_size, "disp": True}, ) if self.fal_succ == False: self.fal_time = time.time() - self.start_time results = [self.fal_succ, self.fal_time, self.fal_sim, self.worst_rob] return results # Dual Annealing optimization def optimize_dual_annealing(self): # minimization results = dual_annealing( self.robustness_function, bounds=self.bnds, # maxiter=self.budget_size, # global search number maxfun=self.budget_size, # local search number # no_local_search = True, ) if self.fal_succ == False: self.fal_time = time.time() - self.start_time results = [self.fal_succ, self.fal_time, self.fal_sim, self.worst_rob] return results
StanfordVL/OmniGibson/mkdocs.yml
yaml-language-server: $schema=https://squidfunk.github.io/mkdocs-material/schema.json site_name: OmniGibson Documentation repo_name: StanfordVL/OmniGibson repo_url: https://github.com/StanfordVL/OmniGibson theme: name: material logo: assets/OmniGibson_logo.png favicon: assets/OmniGibson_logo.png icon: repo: fontawesome/brands/git-alt features: - navigation.tracking - navigation.tabs - content.code.copy extra: homepage: https://behavior.stanford.edu custom_dir: .overrides # color info palette: # Palette toggle for light mode - media: "(prefers-color-scheme: light)" primary: white accent: indigo scheme: default toggle: icon: material/toggle-switch name: Switch to dark mode # Palette toggle for dark mode - media: "(prefers-color-scheme: dark)" primary: blue grey accent: indigo scheme: slate toggle: icon: material/toggle-switch-off-outline name: Switch to light mode extra_css: - stylesheets/extra.css markdown_extensions: - pymdownx.emoji: emoji_index: !!python/name:material.extensions.emoji.twemoji emoji_generator: !!python/name:materialx.emoji.to_svg - admonition - pymdownx.details - pymdownx.superfences - pymdownx.tabbed: alternate_style: true - pymdownx.highlight: anchor_linenums: true - pymdownx.inlinehilite - pymdownx.snippets: base_path: omnigibson - pymdownx.tasklist: custom_checkbox: true - attr_list - md_in_html # extra plugins plugins: - search - autorefs - mkdocstrings: handlers: python: paths: [omnigibson] # - social - gen-files: scripts: - docs/gen_ref_pages.py - literate-nav: nav_file: SUMMARY.md - section-index - offline nav: - Home: index.md - Getting Started: - Installation: getting_started/installation.md - Quickstart: getting_started/quickstart.md - Examples: getting_started/examples.md - Running on SLURM: getting_started/slurm.md - Modules: - Overview: modules/overview.md - Prim: modules/prim.md - Object: modules/object.md - Object States: modules/object_states.md - Robots: modules/robots.md - Controllers: modules/controllers.md - Sensor: modules/sensor.md - Scene: modules/scene.md - Environment: modules/environment.md - Tutorials: - Demo Collection: tutorials/demo_collection.md - API Reference: reference/* - Miscellaneous: - FAQ: miscellaneous/faq.md - Known Issues & Troubleshooting: miscellaneous/known_issues.md - Contributing: miscellaneous/contributing.md - Changelog: https://github.com/StanfordVL/OmniGibson/releases - Contact Us: miscellaneous/contact.md extra: analytics: provider: google property: G-6L1G6GMR63
StanfordVL/OmniGibson/pyproject.toml
[tool.black] line-length = 120 target-version = ['py27', 'py36', 'py37'] force-exclude = 'omnigibson/(data|external)' [tool.isort] profile = "black" line_length = 120 py_version = 'all' filter_files = true extend_skip_glob = [ 'omnigibson/data/*', 'omnigibson/external/*' ] [tool.pyright] exclude = [ 'omnigibson/data', 'omnigibson/docs', 'omnigibson/docker' ] [tool.pytest.ini_options] testpaths = [ "tests", ] [tool.coverage.run] omit = ["omnigibson/external/*"]
StanfordVL/OmniGibson/requirements-dev.txt
pytest>=6.2.3 pytest-cov>=3.0.0 mkdocs mkdocs-autorefs mkdocs-gen-files mkdocs-material mkdocs-material-extensions mkdocstrings[python] mkdocs-section-index mkdocs-literate-nav pynvml telemoma~=0.1.2
StanfordVL/OmniGibson/setup.py
# read the contents of your README file from os import path from setuptools import find_packages, setup this_directory = path.abspath(path.dirname(__file__)) with open(path.join(this_directory, "README.md"), encoding="utf-8") as f: lines = f.readlines() # remove images from README lines = [x for x in lines if ".png" not in x] long_description = "".join(lines) setup( name="omnigibson", version="1.0.0", author="Stanford University", long_description_content_type="text/markdown", long_description=long_description, url="https://github.com/StanfordVL/OmniGibson", zip_safe=False, packages=find_packages(), install_requires=[ "gym~=0.26.2", "numpy~=1.23.5", "scipy~=1.10.1", "GitPython~=3.1.40", "transforms3d~=0.4.1", "networkx~=3.2.1", "PyYAML~=6.0.1", "addict~=2.4.0", "ipython~=8.20.0", "future~=0.18.3", "trimesh~=4.0.8", "h5py~=3.10.0", "cryptography~=41.0.7", "bddl~=3.5.0", "opencv-python~=4.8.1", "nest_asyncio~=1.5.8", "imageio~=2.33.1", "imageio-ffmpeg~=0.4.9", "termcolor~=2.4.0", "progressbar~=2.5", "pymeshlab~=2022.2", "click~=8.1.3", "aenum~=3.1.15", "rtree~=1.2.0", ], tests_require=[], python_requires=">=3", package_data={"": ["omnigibson/global_config.yaml"]}, include_package_data=True, ) # yapf: disable
StanfordVL/OmniGibson/README.md
![splash](./docs/assets/splash.png) # <h1><img height="40" src="./docs/assets/OmniGibson_logo.png" style="float:left;padding-right:10px"> OmniGibson</h1> [![Tests](https://github.com/StanfordVL/OmniGibson/actions/workflows/tests.yml/badge.svg?branch=og-develop&event=push)](https://github.com/StanfordVL/OmniGibson/actions/workflows/tests.yml) [![Docker Image Version (latest by date)](https://img.shields.io/docker/v/stanfordvl/omnigibson?label=docker&sort=semver)](https://hub.docker.com/r/stanfordvl/omnigibson) [![Realtime Speed](https://behavior.stanford.edu/knowledgebase/profile/badge.svg)](https://stanfordvl.github.io/OmniGibson/profiling/) ------- ### Need support? Join our Discord! <a href="https://discord.gg/bccR5vGFEx"><img src="https://discordapp.com/api/guilds/1166422812160966707/widget.png?style=banner3"></a> ------- ### Latest Updates - [03/17/24] **v1.0.0**: First full release with 1,004 pre-sampled tasks, all 50 scenes, and many new objects! [[release notes]](https://github.com/StanfordVL/OmniGibson/releases/tag/v1.0.0) - [08/04/23] **v0.2.0**: More assets! 600 pre-sampled tasks, 7 new scenes, and many new objects 📈 [[release notes]](https://github.com/StanfordVL/OmniGibson/releases/tag/v0.2.0) - [04/10/22] **v0.1.0**: Significantly improved stability, performance, and ease of installation :wrench: [[release notes]](https://github.com/StanfordVL/OmniGibson/releases/tag/v0.1.0) ------- **`OmniGibson`** is a platform for accelerating Embodied AI research built upon NVIDIA's [Omniverse](https://www.nvidia.com/en-us/omniverse/) platform, featuring: * 📸 Photorealistic Visuals and 📐 Physical Realism * 🌊 Fluid and 👕 Soft Body Support * 🏔️ Large-Scale, High-Quality Scenes and 🎾 Objects * 🌡️ Dynamic Kinematic and Semantic Object States * 🤖 Mobile Manipulator Robots with Modular ⚙️ Controllers * 🌎 OpenAI Gym Interface Check out [**`OmniGibson`**'s documentation](https://behavior.stanford.edu/omnigibson/getting_started/installation.html) to get started! ### Citation If you use **`OmniGibson`** or its assets and models, please cite: ``` @inproceedings{ li2022behavior, title={{BEHAVIOR}-1K: A Benchmark for Embodied {AI} with 1,000 Everyday Activities and Realistic Simulation}, author={Chengshu Li and Ruohan Zhang and Josiah Wong and Cem Gokmen and Sanjana Srivastava and Roberto Mart{\'\i}n-Mart{\'\i}n and Chen Wang and Gabrael Levine and Michael Lingelbach and Jiankai Sun and Mona Anvari and Minjune Hwang and Manasi Sharma and Arman Aydin and Dhruva Bansal and Samuel Hunter and Kyu-Young Kim and Alan Lou and Caleb R Matthews and Ivan Villa-Renteria and Jerry Huayang Tang and Claire Tang and Fei Xia and Silvio Savarese and Hyowon Gweon and Karen Liu and Jiajun Wu and Li Fei-Fei}, booktitle={6th Annual Conference on Robot Learning}, year={2022}, url={https://openreview.net/forum?id=_8DoIe8G3t} } ``` ### Profiling Click on the plot to access our profiling page with more examples. [![Profiling](https://behavior.stanford.edu/knowledgebase/profile/plot.png)](https://stanfordvl.github.io/OmniGibson/profiling/)
StanfordVL/OmniGibson/scripts/profiling.css
h1 { display: flex; align-items: center; justify-content: center; padding: 10vh 0 3vh 0; width: 100vw; } h2 { display: flex; align-items: center; justify-content: center; padding: 20vh 0 0 0; } h5 { display: flex; align-items: center; justify-content: center; padding: 1vh 0 9vh 0; font-size: small; font-weight: normal; } .list-group { display: inline-block; position: relative; left: 7vw; width: 13vw; text-align: center; } .tab-content { display: inline-block; width: 60vw; } .row { width: 100vw; } #dl-button { color: #fff; background-color: #3298dc; border-color: transparent; cursor: pointer; text-align: center; width: 10vw; border-radius: 3px; } button:hover { background-color: #2793da; flex: none; } .spacer { flex: auto; } header { margin-bottom: 8px; display: flex; flex-direction: column; justify-content: center; align-items: center; } .header-item { margin-top: 2vh; }
StanfordVL/OmniGibson/scripts/build_docs.sh
#!/usr/bin/env bash # Remove source directory and copy over source files to docs folder rm -rf docs/src mkdir docs/src cp -r omnigibson/* docs/src # Update code source references rm -rf docs/reference python docs/gen_ref_pages.py # Build the docs (written to ./site) mkdocs build
StanfordVL/OmniGibson/scripts/download_datasets.py
""" Helper script to download OmniGibson dataset and assets. """ import os os.environ["OMNIGIBSON_NO_OMNIVERSE"] = "1" from omnigibson.macros import gm from omnigibson.utils.asset_utils import download_og_dataset, download_assets import click def main(): # Only execute if the dataset path or asset path does not exist dataset_exists, assets_exist = os.path.exists(gm.DATASET_PATH), os.path.exists(gm.ASSET_PATH) if not (dataset_exists and assets_exist): # Ask user which dataset to install print(f"OmniGibson will now install data under the following locations:") print(f" dataset (~25GB): {gm.DATASET_PATH}") print(f" assets (~2.5GB): {gm.ASSET_PATH}") print(f"If you want to install data under a different path, please change the DATA_PATH variable in omnigibson/macros.py and rerun scripts/download_dataset.py.") if click.confirm("Do you want to continue?"): # Only download if the dataset path doesn't exist if not dataset_exists: print("Downloading dataset...") download_og_dataset() # Only download if the asset path doesn't exist if not assets_exist: print("Downloading assets...") download_assets() print("\nOmniGibson setup completed!\n") else: print("You chose not to install dataset for now. You can install it later by running python scripts/download_dataset.py.") if __name__ == "__main__": main()
StanfordVL/OmniGibson/scripts/profiling.html
<!DOCTYPE html> <html> <head> <title>OmniGibson Profiling</title> <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css"> <link rel="stylesheet" href="profiling.css"> <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js"></script> <script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script> <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/Chart.min.js"></script> </head> <body> <header id="header"> <h1>OmniGibson Profiling</h1> <div class="header-item"> <strong class="header-label">Last Update:</strong> <span id="last-update"></span> </div> <div class="header-item"> <strong class="header-label">Repository:</strong> <a id="repository-link" rel="noopener"></a> </div> <br> <div class="header-item"> <button id="dl-button">Download data as JSON</button> </div> </header> <main id="main"> <!-- Basics --> <h2>Baselines</h2> <h5>*Ran with GPU dynamics on</h5> <div class="row"> <div class="col-3"> <div class="list-group list-group-light" id="baseline_tab" role="tablist"> <a class="list-group-item list-group-item-action active px-2 border-0" id="baseline_total_tab" data-toggle="list" href="#baseline_total" role="tab" aria-controls="baseline_total">FPS</a> <a class="list-group-item list-group-item-action px-2 border-0" id="baseline_loading_tab" data-toggle="list" href="#baseline_loading" role="tab" aria-controls="baseline_loading">Loading Time</a> <a class="list-group-item list-group-item-action px-2 border-0" id="baseline_omni_tab" data-toggle="list" href="#baseline_omni" role="tab" aria-controls="baseline_omni">Native Omni Step Time</a> <a class="list-group-item list-group-item-action px-2 border-0" id="baseline_non_omni_tab" data-toggle="list" href="#baseline_non_omni" role="tab" aria-controls="baseline_non_omni">OmniGibson Step Time</a> <a class="list-group-item list-group-item-action px-2 border-0" id="baseline_mem_tab" data-toggle="list" href="#baseline_mem" role="tab" aria-controls="baseline_mem">Memory Usage</a> <a class="list-group-item list-group-item-action px-2 border-0" id="baseline_vram_tab" data-toggle="list" href="#baseline_vram" role="tab" aria-controls="baseline_vram">VRAM Usage</a> </div> </div> <div class="col-9"> <div class="tab-content" id="nav-tabContent"> <div class="tab-pane fade show active" id="baseline_total" role="tabpanel" aria-labelledby="baseline_total_tab"> <canvas id="baseline_total_canvas"></canvas> </div> <div class="tab-pane fade" id="baseline_loading" role="tabpanel" aria-labelledby="baseline_loading_tab"> <canvas id="baseline_loading_canvas"></canvas> </div> <div class="tab-pane fade" id="baseline_omni" role="tabpanel" aria-labelledby="baseline_omni_tab"> <canvas id="baseline_omni_canvas"></canvas> </div> <div class="tab-pane fade" id="baseline_non_omni" role="tabpanel" aria-labelledby="baseline_non_omni_tab"> <canvas id="baseline_non_omni_canvas"></canvas> </div> <div class="tab-pane fade" id="baseline_mem" role="tabpanel" aria-labelledby="baseline_mem_tab"> <canvas id="baseline_mem_canvas"></canvas> </div> <div class="tab-pane fade" id="baseline_vram" role="tabpanel" aria-labelledby="baseline_vram_tab"> <canvas id="baseline_vram_canvas"></canvas> </div> </div> </div> </div> <!-- scenes --> <h2>Scenes</h2> <h5>* Ran with GPU dynamics off</h5> <div class="row"> <div class="col-3"> <div class="list-group list-group-light" id="scene_tab" role="tablist"> <a class="list-group-item list-group-item-action active px-2 border-0" id="scene_total_tab" data-toggle="list" href="#scene_total" role="tab" aria-controls="scene_total">FPS</a> <a class="list-group-item list-group-item-action px-2 border-0" id="scene_loading_tab" data-toggle="list" href="#scene_loading" role="tab" aria-controls="scene_loading">Loading Time</a> <a class="list-group-item list-group-item-action px-2 border-0" id="scene_omni_tab" data-toggle="list" href="#scene_omni" role="tab" aria-controls="scene_omni">Native Omni Step Time</a> <a class="list-group-item list-group-item-action px-2 border-0" id="scene_non_omni_tab" data-toggle="list" href="#scene_non_omni" role="tab" aria-controls="scene_non_omni">OmniGibson Step Time</a> <a class="list-group-item list-group-item-action px-2 border-0" id="scene_mem_tab" data-toggle="list" href="#scene_mem" role="tab" aria-controls="scene_mem">Memory Usage</a> <a class="list-group-item list-group-item-action px-2 border-0" id="scene_vram_tab" data-toggle="list" href="#scene_vram" role="tab" aria-controls="scene_vram">VRAM Usage</a> </div> </div> <div class="col-9"> <div class="tab-content" id="nav-tabContent"> <div class="tab-pane fade show active" id="baseline_total" role="tabpanel" aria-labelledby="scene_total_tab"> <canvas id="scene_total_canvas"></canvas> </div> <div class="tab-pane fade" id="scene_loading" role="tabpanel" aria-labelledby="scene_loading_tab"> <canvas id="scene_loading_canvas"></canvas> </div> <div class="tab-pane fade" id="scene_omni" role="tabpanel" aria-labelledby="scene_omni_tab"> <canvas id="scene_omni_canvas"></canvas> </div> <div class="tab-pane fade" id="scene_non_omni" role="tabpanel" aria-labelledby="scene_non_omni_tab"> <canvas id="scene_non_omni_canvas"></canvas> </div> <div class="tab-pane fade" id="scene_mem" role="tabpanel" aria-labelledby="scene_mem_tab"> <canvas id="scene_mem_canvas"></canvas> </div> <div class="tab-pane fade" id="scene_vram" role="tabpanel" aria-labelledby="scene_vram_tab"> <canvas id="scene_vram_canvas"></canvas> </div> </div> </div> </div> <!-- Non-physics --> <h2>Non-physics Features</h2> <h5>* Ran with GPU dynamics on</h5> <div class="row"> <div class="col-3"> <div class="list-group list-group-light" id="np_tab" role="tablist"> <a class="list-group-item list-group-item-action active px-2 border-0" id="np_total_tab" data-toggle="list" href="#np_total" role="tab" aria-controls="np_total">FPS</a> <a class="list-group-item list-group-item-action px-2 border-0" id="np_loading_tab" data-toggle="list" href="#np_loading" role="tab" aria-controls="np_loading">Loading Time</a> <a class="list-group-item list-group-item-action px-2 border-0" id="np_omni_tab" data-toggle="list" href="#np_omni" role="tab" aria-controls="np_omni">Native Omni Step Time</a> <a class="list-group-item list-group-item-action px-2 border-0" id="np_non_omni_tab" data-toggle="list" href="#np_non_omni" role="tab" aria-controls="np_non_omni">OmniGibson Step Time</a> <a class="list-group-item list-group-item-action px-2 border-0" id="np_mem_tab" data-toggle="list" href="#np_mem" role="tab" aria-controls="np_mem">Memory Usage</a> <a class="list-group-item list-group-item-action px-2 border-0" id="np_vram_tab" data-toggle="list" href="#np_vram" role="tab" aria-controls="np_vram">VRAM Usage</a> </div> </div> <div class="col-9"> <div class="tab-content" id="nav-tabContent"> <div class="tab-pane fade show active" id="np_total" role="tabpanel" aria-labelledby="np_total_tab"> <canvas id="np_total_canvas"></canvas> </div> <div class="tab-pane fade" id="np_loading" role="tabpanel" aria-labelledby="np_loading_tab"> <canvas id="np_loading_canvas"></canvas> </div> <div class="tab-pane fade" id="np_omni" role="tabpanel" aria-labelledby="np_omni_tab"> <canvas id="np_omni_canvas"></canvas> </div> <div class="tab-pane fade" id="np_non_omni" role="tabpanel" aria-labelledby="np_non_omni_tab"> <canvas id="np_non_omni_canvas"></canvas> </div> <div class="tab-pane fade" id="np_mem" role="tabpanel" aria-labelledby="np_mem_tab"> <canvas id="np_mem_canvas"></canvas> </div> <div class="tab-pane fade" id="np_vram" role="tabpanel" aria-labelledby="np_vram_tab"> <canvas id="np_vram_canvas"></canvas> </div> </div> </div> </div> </main> <footer> <br> <div class="spacer"></div> </footer> <script src="data.js"></script> <script src="profiling.js"></script> </body> </html>
StanfordVL/OmniGibson/scripts/profiling.js
const canvasDict = { 'baseline_total_canvas': ["FPS", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch", "Rs_int, with 3 Fetch"]], 'baseline_loading_canvas': ["Loading time", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch", "Rs_int, with 3 Fetch"]], 'baseline_omni_canvas': ["Omni step time", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch", "Rs_int, with 3 Fetch"]], 'baseline_non_omni_canvas': ["Non-omni step time", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch", "Rs_int, with 3 Fetch"]], 'baseline_mem_canvas': ["Memory usage", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch", "Rs_int, with 3 Fetch"]], 'baseline_vram_canvas': ["Vram usage", ["Empty scene", "Rs_int", "Rs_int, with 1 Fetch", "Rs_int, with 3 Fetch"]], 'np_total_canvas': ["FPS", ["Empty scene, with 1 Fetch, fluids", "Empty scene, with 1 Fetch, cloth", "Empty scene, with 1 Fetch, macro particles", "Empty scene, with 1 Fetch, cloth, fluids, macro particles"]], 'np_loading_canvas': ["Loading time", ["Empty scene, with 1 Fetch, fluids", "Empty scene, with 1 Fetch, cloth", "Empty scene, with 1 Fetch, macro particles", "Empty scene, with 1 Fetch, cloth, fluids, macro particles"]], 'np_omni_canvas': ["Omni step time", ["Empty scene, with 1 Fetch, fluids", "Empty scene, with 1 Fetch, cloth", "Empty scene, with 1 Fetch, macro particles", "Empty scene, with 1 Fetch, cloth, fluids, macro particles"]], 'np_non_omni_canvas': ["Non-omni step time", ["Empty scene, with 1 Fetch, fluids", "Empty scene, with 1 Fetch, cloth", "Empty scene, with 1 Fetch, macro particles", "Empty scene, with 1 Fetch, cloth, fluids, macro particles"]], 'np_mem_canvas': ["Memory usage", ["Empty scene, with 1 Fetch, fluids", "Empty scene, with 1 Fetch, cloth", "Empty scene, with 1 Fetch, macro particles", "Empty scene, with 1 Fetch, cloth, fluids, macro particles"]], 'np_vram_canvas': ["Vram usage", ["Empty scene, with 1 Fetch, fluids", "Empty scene, with 1 Fetch, cloth", "Empty scene, with 1 Fetch, macro particles", "Empty scene, with 1 Fetch, cloth, fluids, macro particles"]], 'scene_total_canvas': ["FPS", ["Ihlen_0_int, with 1 Fetch", "Pomaria_0_garden, with 1 Fetch", "house_single_floor, with 1 Fetch", "grocery_store_cafe, with 1 Fetch"]], 'scene_loading_canvas': ["Loading time", ["Ihlen_0_int, with 1 Fetch", "Pomaria_0_garden, with 1 Fetch", "house_single_floor, with 1 Fetch", "grocery_store_cafe, with 1 Fetch"]], 'scene_omni_canvas': ["Omni step time", ["Ihlen_0_int, with 1 Fetch", "Pomaria_0_garden, with 1 Fetch", "house_single_floor, with 1 Fetch", "grocery_store_cafe, with 1 Fetch"]], 'scene_non_omni_canvas': ["Non-omni step time", ["Ihlen_0_int, with 1 Fetch", "Pomaria_0_garden, with 1 Fetch", "house_single_floor, with 1 Fetch", "grocery_store_cafe, with 1 Fetch"]], 'scene_mem_canvas': ["Memory usage", ["Ihlen_0_int, with 1 Fetch", "Pomaria_0_garden, with 1 Fetch", "house_single_floor, with 1 Fetch", "grocery_store_cafe, with 1 Fetch"]], 'scene_vram_canvas': ["Vram usage", ["Ihlen_0_int, with 1 Fetch", "Pomaria_0_garden, with 1 Fetch", "house_single_floor, with 1 Fetch", "grocery_store_cafe, with 1 Fetch"]], } $('#baseline_tab a').on('click', function (e) { e.preventDefault() $(this).tab('show') }) $('#np_tab a').on('click', function (e) { e.preventDefault() $(this).tab('show') }) $('#scene_tab a').on('click', function (e) { e.preventDefault() $(this).tab('show') }) function init() { function collectBenchesPerTestCase(entries) { const map = new Map(); for (const entry of entries) { const {commit, date, tool, benches} = entry; for (const bench of benches) { const result = { commit, date, tool, bench }; const title_map = map.get(bench.extra[0]); if (title_map === undefined) { const temp_map = new Map(); temp_map.set(bench.name, [result]); map.set(bench.extra[0], temp_map); } else { const name_map = title_map.get(bench.name); if (name_map === undefined) { title_map.set(bench.name, [result]); } else { name_map.push(result); } } } } return map; } const data = window.BENCHMARK_DATA; // Render header document.getElementById('last-update').textContent = new Date(data.lastUpdate).toString(); const repoLink = document.getElementById('repository-link'); repoLink.href = data.repoUrl; repoLink.textContent = data.repoUrl; // Render footer document.getElementById('dl-button').onclick = () => { const a = document.createElement('a'); a.href = URL.createObjectURL(new Blob([JSON.stringify(data, null, 2)], {type: "application/json"})); a.download = 'OmniGibson Profiling.json'; a.click(); }; // Prepare data points for charts return collectBenchesPerTestCase(data.entries['Benchmark']); } function renderGraph(canvasName, fieldName, runNames) { // get filtered data let filteredData = new Map(Array.from(allData.get(fieldName)).filter(([key, _value]) => { return runNames.includes(key); })); const canvas = document.getElementById(canvasName); const color = ['#178600', '#00add8', '#ffa500', '#ff3838']; const data = { labels: Array.from(filteredData.values())[0].map(value => value.commit.id.slice(0, 7)), datasets: Array.from(filteredData).map(([name, value], index) => { return { label: name, data: value.map(d => ({ 'x': d.commit.id.slice(0, 7), 'y': d.bench.value } )), borderColor: color[index], backgroundColor: 'rgba(0, 0, 0, 0.01)' }; }) }; const options = { tooltips: { callbacks: { afterTitle: items => { const {datasetIndex, index} = items[0]; const data = Array.from(filteredData.values())[datasetIndex][index]; return '\n' + data.commit.message + '\n\n' + data.commit.timestamp + ' committed by @' + data.commit.committer.username + '\n'; }, label: item => { let label = item.value; const { range, unit } = filteredData.values().next().value[item.index].bench; label += ' ' + unit; if (range) { label += ' (' + range + ')'; } return label; }, afterLabel: item => { const { extra } = filteredData.values().next().value[item.index].bench; return extra ? '\n' + extra[0] : ''; } } }, onClick: (_mouseEvent) => { const points = myChart.getElementsAtEventForMode(_mouseEvent, 'nearest', { intersect: true }, true); if (points.length === 0) { return; } const url = Array.from(filteredData.values())[points[0]._datasetIndex][points[0]._index].commit.url; window.open(url, '_blank'); }, title: { display: true, text: fieldName, }, layout: { padding: 0 }, responsive: true, maintainAspectRatio: true }; const myChart = new Chart(canvas, { type: 'line', data, options, }); return myChart; } const allData = init() for (const [canvasName, [fieldName, runNames]] of Object.entries(canvasDict)) { renderGraph(canvasName, fieldName, runNames); }
StanfordVL/OmniGibson/scripts/setup.bat
@echo off :: Make sure that the ISAAC_SIM_PATH variable is set correctly dir /b /o:-n %userprofile%\AppData\Local\ov\pkg\isaac_sim* > NUL if errorlevel 0 ( for /f "tokens=* usebackq" %%f in (`dir /b /o:n %userprofile%\AppData\Local\ov\pkg\isaac_sim*`) do set ISAAC_SIM_PATH=%userprofile%\AppData\Local\ov\pkg\%%f setlocal enabledelayedexpansion echo We found Isaac Sim installed at !ISAAC_SIM_PATH!. OmniGibson will use it by default. endlocal set /p ISAAC_SIM_PATH=If you want to use a different one, please type in the path containing isaac-sim.bat here ^(press enter to skip^) ^>^>^> ) else ( echo We did not find Isaac Sim under %userprofile%\AppData\Local\ov\pkg. echo If you haven't installed Isaac Sim yet, please do so before running this setup script. set /p ISAAC_SIM_PATH=If you have already installed it in a custom location, please type in the path containing isaac-sim.bat here ^>^>^> ) :check_isaac_sim_path if not exist %ISAAC_SIM_PATH%\isaac-sim.bat ( set /p ISAAC_SIM_PATH=isaac-sim.bat not found in %ISAAC_SIM_PATH%! Make sure you have entered the correct path ^>^>^> goto :check_isaac_sim_path ) echo: echo Using Isaac Sim at %ISAAC_SIM_PATH% echo: :: Choose venv name set conda_name=omnigibson echo The new conda environment will be named omnigibson by default. set /p conda_name=If you want to use a different name, please type in here ^(press enter to skip^) ^>^>^> echo: echo Using %conda_name% as the conda environment name echo: :: Get Python version from Isaac Sim FOR /F "tokens=*" %%g IN ('%ISAAC_SIM_PATH%\python.bat -c "import platform; print(platform.python_version())"') do (SET ISAAC_PYTHON_VERSION=%%g) echo Using Python version %ISAAC_PYTHON_VERSION% matching your current Isaac Sim version :: Create a conda environment with the appropriate python version call conda create -y -n %conda_name% python=%ISAAC_PYTHON_VERSION% || goto :error call conda activate %conda_name% || goto :error :: We add some preprocessing information so that the Isaac Sim paths are linked to this environment upon startup :: See https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#macos-and-linux for reference mkdir %CONDA_PREFIX%\etc\conda\activate.d mkdir %CONDA_PREFIX%\etc\conda\deactivate.d type NUL>%CONDA_PREFIX%\etc\conda\activate.d\env_vars.bat type NUL>%CONDA_PREFIX%\etc\conda\deactivate.d\env_vars.bat type NUL>%CONDA_PREFIX%\etc\conda\activate.d\env_vars.ps1 type NUL>%CONDA_PREFIX%\etc\conda\deactivate.d\env_vars.ps1 :: Add support for cmd set CONDA_ACT_FILE_CMD=%CONDA_PREFIX%\etc\conda\activate.d\env_vars.bat echo @echo off>>%CONDA_ACT_FILE_CMD% echo set PYTHONPATH_OLD=%%PYTHONPATH%%>>%CONDA_ACT_FILE_CMD% echo set PYTHONPATH=%%PYTHONPATH%%;%ISAAC_SIM_PATH%\site>>%CONDA_ACT_FILE_CMD% echo set CARB_APP_PATH=%ISAAC_SIM_PATH%\kit>>%CONDA_ACT_FILE_CMD% echo set EXP_PATH=%ISAAC_SIM_PATH%\apps>>%CONDA_ACT_FILE_CMD% echo set ISAAC_PATH=%ISAAC_SIM_PATH%>>%CONDA_ACT_FILE_CMD% set CONDA_DEACT_FILE_CMD=%CONDA_PREFIX%\etc\conda\deactivate.d\env_vars.bat echo @echo off>>%CONDA_DEACT_FILE_CMD% echo set PYTHONPATH=%%PYTHONPATH_OLD%%>>%CONDA_DEACT_FILE_CMD% echo set PYTHONPATH_OLD="">>%CONDA_DEACT_FILE_CMD% :: Add support for powershell set CONDA_ACT_FILE_PWSH=%CONDA_PREFIX%\etc\conda\activate.d\env_vars.ps1 echo $env:PYTHONPATH_OLD="$env:PYTHONPATH">>%CONDA_ACT_FILE_PWSH% echo $env:PYTHONPATH="$env:PYTHONPATH;%ISAAC_SIM_PATH%\site">>%CONDA_ACT_FILE_PWSH% echo $env:CARB_APP_PATH="%ISAAC_SIM_PATH%\kit">>%CONDA_ACT_FILE_PWSH% echo $env:EXP_PATH="%ISAAC_SIM_PATH%\apps">>%CONDA_ACT_FILE_PWSH% echo $env:ISAAC_PATH="%ISAAC_SIM_PATH%">>%CONDA_ACT_FILE_PWSH% set CONDA_DEACT_FILE_PWSH=%CONDA_PREFIX%\etc\conda\deactivate.d\env_vars.ps1 echo $env:PYTHONPATH="$env:PYTHONPATH_OLD">>%CONDA_DEACT_FILE_PWSH% echo $env:PYTHONPATH_OLD="$null">>%CONDA_DEACT_FILE_PWSH% :: Install omnigibson! call pip install -e . || goto :error :: Cycle conda environment so that all dependencies are propagated call conda deactivate || goto :error goto :end :error echo: echo An error occurred during installation. Please check the error message above. echo: exit /b :end echo: echo OmniGibson successfully installed! Please run conda activate %conda_name% to activate the environment. echo:
StanfordVL/OmniGibson/scripts/setup.sh
#!/usr/bin/env bash set -eo &> /dev/null # Make sure that the ISAAC_SIM_PATH variable is set correctly if [[ -d ~/.local/share/ov/pkg ]] && [[ $(ls ~/.local/share/ov/pkg | grep isaac_sim) ]]; then FOUND_ISAAC_SIM_PATH=$(ls -d ~/.local/share/ov/pkg/* | grep isaac_sim | tail -n 1) echo "We found Isaac Sim installed at $FOUND_ISAAC_SIM_PATH. OmniGibson will use it by default." read -p "If you want to use a different one, please type in the path containing isaac-sim.sh here (press enter to skip) >>> " ISAAC_SIM_PATH ISAAC_SIM_PATH=${ISAAC_SIM_PATH:-$FOUND_ISAAC_SIM_PATH} else echo "We did not find Isaac Sim under ~/.local/share/ov/pkg." echo "If you haven't installed Isaac Sim yet, please do so before running this setup script." read -p "If you have already installed it in a custom location, please type in the path containing isaac-sim.sh here >>> " ISAAC_SIM_PATH fi while [[ ! -f "${ISAAC_SIM_PATH}/isaac-sim.sh" ]]; do read -p "isaac-sim.sh not found in $ISAAC_SIM_PATH! Make sure you have entered the correct path >>> " ISAAC_SIM_PATH done echo -e "\nUsing Isaac Sim at $ISAAC_SIM_PATH\n" # Choose venv name echo "The new conda environment will be named omnigibson by default." read -p "If you want to use a different name, please type in here (press enter to skip) >>> " conda_name conda_name=${conda_name:-omnigibson} echo -e "\nUsing $conda_name as the conda environment name\n" # Get Python version from Isaac Sim ISAAC_PYTHON_VERSION=$(${ISAAC_SIM_PATH}/python.sh -c "import platform; print(platform.python_version())") ISAAC_PYTHON_VERSION="${ISAAC_PYTHON_VERSION##*$'\n'}" # get rid of conda activation warnings echo Using Python version $ISAAC_PYTHON_VERSION matching your current Isaac Sim version # Create a conda environment with the appropriate python version source $(conda info --base)/etc/profile.d/conda.sh conda create -y -n $conda_name python=${ISAAC_PYTHON_VERSION} # Now activate the omnigibson environment conda activate $conda_name mkdir -p ${CONDA_PREFIX}/etc/conda/activate.d mkdir -p ${CONDA_PREFIX}/etc/conda/deactivate.d touch ${CONDA_PREFIX}/etc/conda/activate.d/env_vars.sh touch ${CONDA_PREFIX}/etc/conda/deactivate.d/env_vars.sh # We add some preprocessing information so that the Isaac Sim paths are linked to this environment upon startup # See https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html#macos-and-linux for reference CONDA_ACT_FILE="${CONDA_PREFIX}/etc/conda/activate.d/env_vars.sh" echo '#!/bin/sh' > ${CONDA_ACT_FILE} echo "export LD_LIBRARY_PATH_OLD=\$LD_LIBRARY_PATH" >> ${CONDA_ACT_FILE} echo "export PYTHONPATH_OLD=\$PYTHONPATH" >> ${CONDA_ACT_FILE} echo "source ${ISAAC_SIM_PATH}/setup_conda_env.sh" >> ${CONDA_ACT_FILE} CONDA_DEACT_FILE="${CONDA_PREFIX}/etc/conda/deactivate.d/env_vars.sh" echo '#!/bin/sh' > ${CONDA_DEACT_FILE} echo "export LD_LIBRARY_PATH=\$LD_LIBRARY_PATH_OLD" >> ${CONDA_DEACT_FILE} echo "export PYTHONPATH=\$PYTHONPATH_OLD" >> ${CONDA_DEACT_FILE} echo "unset ISAAC_PATH" >> ${CONDA_DEACT_FILE} echo "unset CARB_APP_PATH" >> ${CONDA_DEACT_FILE} echo "unset LD_LIBRARY_PATH_OLD" >> ${CONDA_DEACT_FILE} echo "unset PYTHONPATH_OLD" >> ${CONDA_DEACT_FILE} # Install omnigibson! pip install -e . # Cycle conda environment so that all dependencies are propagated conda deactivate echo -e "\nOmniGibson successfully installed! Please run conda activate $conda_name to activate the environment.\n"
StanfordVL/OmniGibson/scripts/profiling.sh
# warm up isaac sim python tests/benchmark/profiling.py -s Rs_int -g rm output.json # 1st batch: baselines python tests/benchmark/profiling.py -g # baseline python tests/benchmark/profiling.py -g -s Rs_int # for vision research python tests/benchmark/profiling.py -g -s Rs_int -r 1 # for robotics research python tests/benchmark/profiling.py -g -s Rs_int -r 3 # for multi-agent research # 2nd batch: compare different scenes python tests/benchmark/profiling.py -r 1 -s Ihlen_0_int python tests/benchmark/profiling.py -r 1 -s Pomaria_0_garden python tests/benchmark/profiling.py -r 1 -s house_single_floor python tests/benchmark/profiling.py -r 1 -s grocery_store_cafe # 3rd batch: OG non-physics features python tests/benchmark/profiling.py -g -r 1 -w # fluids (water) python tests/benchmark/profiling.py -g -r 1 -c # soft body (cloth) python tests/benchmark/profiling.py -g -r 1 -p # macro particle system (diced objects) python tests/benchmark/profiling.py -g -r 1 -w -c -p # everything
StanfordVL/OmniGibson/omnigibson/simulator.py
from collections import defaultdict import itertools import contextlib import logging import os import shutil import socket from pathlib import Path import atexit import signal from contextlib import nullcontext import numpy as np import json import omnigibson as og import omnigibson.lazy as lazy from omnigibson.macros import gm, create_module_macros from omnigibson.utils.constants import LightingMode from omnigibson.utils.config_utils import NumpyEncoder from omnigibson.utils.python_utils import clear as clear_pu, create_object_from_init_info, Serializable from omnigibson.utils.sim_utils import meets_minimum_isaac_version from omnigibson.utils.usd_utils import clear as clear_uu, FlatcacheAPI, RigidContactAPI, PoseAPI from omnigibson.utils.ui_utils import (CameraMover, disclaimer, create_module_logger, suppress_omni_log, print_icon, print_logo, logo_small) from omnigibson.scenes import Scene from omnigibson.objects.object_base import BaseObject from omnigibson.objects.stateful_object import StatefulObject from omnigibson.object_states.contact_subscribed_state_mixin import ContactSubscribedStateMixin from omnigibson.object_states.joint_break_subscribed_state_mixin import JointBreakSubscribedStateMixin from omnigibson.object_states.factory import get_states_by_dependency_order from omnigibson.object_states.update_state_mixin import UpdateStateMixin, GlobalUpdateStateMixin from omnigibson.prims.material_prim import MaterialPrim from omnigibson.sensors.vision_sensor import VisionSensor from omnigibson.systems.macro_particle_system import MacroPhysicalParticleSystem from omnigibson.transition_rules import TransitionRuleAPI # Create module logger log = create_module_logger(module_name=__name__) # Create settings for this module m = create_module_macros(module_path=__file__) m.DEFAULT_VIEWER_CAMERA_POS = (-0.201028, -2.72566 , 1.0654) m.DEFAULT_VIEWER_CAMERA_QUAT = (0.68196617, -0.00155408, -0.00166678, 0.73138017) m.OBJECT_GRAVEYARD_POS = (100.0, 100.0, 100.0) # Helper functions for starting omnigibson def print_save_usd_warning(_): log.warning("Exporting individual USDs has been disabled in OG due to copyrights.") def _launch_app(): log.info(f"{'-' * 5} Starting {logo_small()}. This will take 10-30 seconds... {'-' * 5}") # If multi_gpu is used, og.sim.render() will cause a segfault when called during on_contact callbacks, # e.g. when an attachment joint is being created due to contacts (create_joint calls og.sim.render() internally). gpu_id = None if gm.GPU_ID is None else int(gm.GPU_ID) config_kwargs = {"headless": gm.HEADLESS or bool(gm.REMOTE_STREAMING), "multi_gpu": False} if gpu_id is not None: config_kwargs["active_gpu"] = gpu_id config_kwargs["physics_gpu"] = gpu_id # Omni's logging is super annoying and overly verbose, so suppress it by modifying the logging levels if not gm.DEBUG: import sys from numba.core.errors import NumbaPerformanceWarning import warnings # TODO: Find a more elegant way to prune omni logging # sys.argv.append("--/log/level=warning") # sys.argv.append("--/log/fileLogLevel=warning") # sys.argv.append("--/log/outputStreamLevel=error") warnings.simplefilter("ignore", category=NumbaPerformanceWarning) # Copy the OmniGibson kit file to the Isaac Sim apps directory. This is necessary because the Isaac Sim app # expects the extensions to be reachable in the parent directory of the kit file. We copy on every launch to # ensure that the kit file is always up to date. assert "EXP_PATH" in os.environ, "The EXP_PATH variable is not set. Are you in an Isaac Sim installed environment?" kit_file = Path(__file__).parent / "omnigibson.kit" kit_file_target = Path(os.environ["EXP_PATH"]) / "omnigibson.kit" try: shutil.copy(kit_file, kit_file_target) except Exception as e: raise e from ValueError("Failed to copy omnigibson.kit to Isaac Sim apps directory.") launch_context = nullcontext if gm.DEBUG else suppress_omni_log with launch_context(None): app = lazy.omni.isaac.kit.SimulationApp(config_kwargs, experience=str(kit_file_target.resolve(strict=True))) assert meets_minimum_isaac_version("2023.1.1"), "This version of OmniGibson supports Isaac Sim 2023.1.1 and above. Please update Isaac Sim." # Omni overrides the global logger to be DEBUG, which is very annoying, so we re-override it to the default WARN # TODO: Remove this once omniverse fixes it logging.getLogger().setLevel(logging.WARNING) # Enable additional extensions we need lazy.omni.isaac.core.utils.extensions.enable_extension("omni.flowusd") lazy.omni.isaac.core.utils.extensions.enable_extension("omni.particle.system.bundle") # Additional import for windows if os.name == "nt": lazy.omni.isaac.core.utils.extensions.enable_extension("omni.kit.window.viewport") # Default Livestream settings if gm.REMOTE_STREAMING: app.set_setting("/app/window/drawMouse", True) app.set_setting("/app/livestream/proto", "ws") app.set_setting("/app/livestream/websocket/framerate_limit", 120) app.set_setting("/ngx/enabled", False) # Find our IP address s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect(("8.8.8.8", 80)) ip = s.getsockname()[0] s.close() # Note: Only one livestream extension can be enabled at a time if gm.REMOTE_STREAMING == "native": # Enable Native Livestream extension # Default App: Streaming Client from the Omniverse Launcher lazy.omni.isaac.core.utils.extensions.enable_extension("omni.kit.livestream.native") print(f"Now streaming on {ip} via Omniverse Streaming Client") elif gm.REMOTE_STREAMING == "webrtc": # Enable WebRTC Livestream extension app.set_setting("/exts/omni.services.transport.server.http/port", gm.HTTP_PORT) app.set_setting("/app/livestream/port", gm.WEBRTC_PORT) lazy.omni.isaac.core.utils.extensions.enable_extension("omni.services.streamclient.webrtc") print(f"Now streaming on: http://{ip}:{gm.HTTP_PORT}/streaming/webrtc-client?server={ip}") else: raise ValueError(f"Invalid REMOTE_STREAMING option {gm.REMOTE_STREAMING}. Must be one of None, native, webrtc.") # If we're headless, suppress all warnings about GLFW if gm.HEADLESS: og_log = lazy.omni.log.get_log() og_log.set_channel_enabled("carb.windowing-glfw.plugin", False, lazy.omni.log.SettingBehavior.OVERRIDE) # Globally suppress certain logging modules (unless we're in debug mode) since they produce spurious warnings if not gm.DEBUG: og_log = lazy.omni.log.get_log() for channel in ["omni.hydra.scene_delegate.plugin", "omni.kit.manipulator.prim.model"]: og_log.set_channel_enabled(channel, False, lazy.omni.log.SettingBehavior.OVERRIDE) # Possibly hide windows if in debug mode hide_window_names = [] if not gm.RENDER_VIEWER_CAMERA: hide_window_names.append("Viewport") if gm.GUI_VIEWPORT_ONLY: hide_window_names.extend(["Console", "Main ToolBar", "Stage", "Layer", "Property", "Render Settings", "Content", "Flow", "Semantics Schema Editor"]) for name in hide_window_names: window = lazy.omni.ui.Workspace.get_window(name) if window is not None: window.visible = False app.update() lazy.omni.kit.widget.stage.context_menu.ContextMenu.save_prim = print_save_usd_warning # TODO: Automated cleanup in callback doesn't work for some reason. Need to investigate. shutdown_stream = lazy.omni.kit.app.get_app().get_shutdown_event_stream() sub = shutdown_stream.create_subscription_to_pop(og.cleanup, name="og_cleanup", order=0) # Loading Isaac Sim disables Ctrl+C, so we need to re-enable it signal.signal(signal.SIGINT, og.shutdown_handler) return app def launch_simulator(*args, **kwargs): if not og.app: og.app = _launch_app() class Simulator(lazy.omni.isaac.core.simulation_context.SimulationContext, Serializable): """ Simulator class for directly interfacing with the physx physics engine. NOTE: This is a monolithic class. All created Simulator() instances will reference the same underlying Simulator object Args: gravity (float): gravity on z direction. physics_dt (float): dt between physics steps. Defaults to 1.0 / 120.0. rendering_dt (float): dt between rendering steps. Note: rendering means rendering a frame of the current application and not only rendering a frame to the viewports/ cameras. So UI elements of Isaac Sim will be refreshed with this dt as well if running non-headless. Defaults to 1.0 / 30.0. stage_units_in_meters (float): The metric units of assets. This will affect gravity value..etc. Defaults to 0.01. viewer_width (int): width of the camera image, in pixels viewer_height (int): height of the camera image, in pixels device (None or str): specifies the device to be used if running on the gpu with torch backend """ _world_initialized = False def __init__( self, gravity=9.81, physics_dt=1.0 / 120.0, rendering_dt=1.0 / 30.0, stage_units_in_meters=1.0, viewer_width=gm.DEFAULT_VIEWER_WIDTH, viewer_height=gm.DEFAULT_VIEWER_HEIGHT, device=None, ): # Store vars needed for initialization self.gravity = gravity self._viewer_camera = None self._camera_mover = None # Run super init super().__init__( physics_dt=physics_dt, rendering_dt=rendering_dt, stage_units_in_meters=stage_units_in_meters, device=device, ) if self._world_initialized: return Simulator._world_initialized = True # Store other references to variables that will be initialized later self._scene = None self._physx_interface = None self._physx_simulation_interface = None self._physx_scene_query_interface = None self._contact_callback = None self._simulation_event_callback = None # List of objects that need to be initialized during whenever the next sim step occurs self._objects_to_initialize = [] self._objects_require_contact_callback = False self._objects_require_joint_break_callback = False # Maps callback name to callback self._callbacks_on_play = dict() self._callbacks_on_stop = dict() self._callbacks_on_import_obj = dict() self._callbacks_on_remove_obj = dict() # Mapping from link IDs assigned from omni to the object that they reference self._link_id_to_objects = dict() # Set of categories that can be grasped by assisted grasping self.object_state_types = get_states_by_dependency_order() self.object_state_types_requiring_update = \ [state for state in self.object_state_types if (issubclass(state, UpdateStateMixin) or issubclass(state, GlobalUpdateStateMixin))] self.object_state_types_on_contact = \ {state for state in self.object_state_types if issubclass(state, ContactSubscribedStateMixin)} self.object_state_types_on_joint_break = \ {state for state in self.object_state_types if issubclass(state, JointBreakSubscribedStateMixin)} # Auto-load the dummy stage self.clear() # Set the viewer dimensions if gm.RENDER_VIEWER_CAMERA: self.viewer_width = viewer_width self.viewer_height = viewer_height # Toggle simulator state once so that downstream omni features can be used without bugs # e.g.: particle sampling, which for some reason requires sim.play() to be called at least once self.play() self.stop() # Update the physics settings # This needs to be done now, after an initial step + stop for some reason if we want to use GPU # dynamics, otherwise we get very strange behavior, e.g., PhysX complains about invalid transforms # and crashes self._set_physics_engine_settings() def __new__( cls, gravity=9.81, physics_dt=1.0 / 120.0, rendering_dt=1.0 / 30.0, stage_units_in_meters=1.0, viewer_width=gm.DEFAULT_VIEWER_WIDTH, viewer_height=gm.DEFAULT_VIEWER_HEIGHT, device_idx=0, ): # Overwrite since we have different kwargs if Simulator._instance is None: Simulator._instance = object.__new__(cls) else: lazy.carb.log_info("Simulator is defined already, returning the previously defined one") return Simulator._instance def _set_viewer_camera(self, prim_path="/World/viewer_camera", viewport_name="Viewport"): """ Creates a camera prim dedicated for this viewer at @prim_path if it doesn't exist, and sets this camera as the active camera for the viewer Args: prim_path (str): Path to check for / create the viewer camera viewport_name (str): Name of the viewport this camera should attach to. Default is "Viewport", which is the default viewport's name in Isaac Sim """ self._viewer_camera = VisionSensor( prim_path=prim_path, name=prim_path.split("/")[-1], # Assume name is the lowest-level name in the prim_path modalities="rgb", image_height=self.viewer_height, image_width=self.viewer_width, viewport_name=viewport_name, ) if not self._viewer_camera.loaded: self._viewer_camera.load() # We update its clipping range and focal length so we get a good FOV and so that it doesn't clip # nearby objects (default min is 1 m) self._viewer_camera.clipping_range = [0.001, 10000000.0] self._viewer_camera.focal_length = 17.0 # Initialize the sensor self._viewer_camera.initialize() # Also need to potentially update our camera mover if it already exists if self._camera_mover is not None: self._camera_mover.set_cam(cam=self._viewer_camera) def _set_physics_engine_settings(self): """ Set the physics engine with specified settings """ assert self.is_stopped(), f"Cannot set simulator physics settings while simulation is playing!" self._physics_context.set_gravity(value=-self.gravity) # Also make sure we don't invert the collision group filter settings so that different collision groups by # default collide with each other, and modify settings for speed optimization self._physics_context.set_invert_collision_group_filter(False) self._physics_context.enable_ccd(gm.ENABLE_CCD) self._physics_context.enable_fabric(gm.ENABLE_FLATCACHE) # Enable GPU dynamics based on whether we need omni particles feature if gm.USE_GPU_DYNAMICS: self._physics_context.enable_gpu_dynamics(True) self._physics_context.set_broadphase_type("GPU") else: self._physics_context.enable_gpu_dynamics(False) self._physics_context.set_broadphase_type("MBP") # Set GPU Pairs capacity and other GPU settings self._physics_context.set_gpu_found_lost_pairs_capacity(gm.GPU_PAIRS_CAPACITY) self._physics_context.set_gpu_found_lost_aggregate_pairs_capacity(gm.GPU_AGGR_PAIRS_CAPACITY) self._physics_context.set_gpu_total_aggregate_pairs_capacity(gm.GPU_AGGR_PAIRS_CAPACITY) self._physics_context.set_gpu_max_particle_contacts(gm.GPU_MAX_PARTICLE_CONTACTS) self._physics_context.set_gpu_max_rigid_contact_count(gm.GPU_MAX_RIGID_CONTACT_COUNT) self._physics_context.set_gpu_max_rigid_patch_count(gm.GPU_MAX_RIGID_PATCH_COUNT) def _set_renderer_settings(self): if gm.ENABLE_HQ_RENDERING: lazy.carb.settings.get_settings().set_bool("/rtx/reflections/enabled", True) lazy.carb.settings.get_settings().set_bool("/rtx/indirectDiffuse/enabled", True) lazy.carb.settings.get_settings().set_int("/rtx/post/dlss/execMode", 3) # "Auto" lazy.carb.settings.get_settings().set_bool("/rtx/ambientOcclusion/enabled", True) lazy.carb.settings.get_settings().set_bool("/rtx/directLighting/sampledLighting/enabled", False) else: lazy.carb.settings.get_settings().set_bool("/rtx/reflections/enabled", False) lazy.carb.settings.get_settings().set_bool("/rtx/indirectDiffuse/enabled", False) lazy.carb.settings.get_settings().set_int("/rtx/post/dlss/execMode", 0) # "Performance" lazy.carb.settings.get_settings().set_bool("/rtx/ambientOcclusion/enabled", False) lazy.carb.settings.get_settings().set_bool("/rtx/directLighting/sampledLighting/enabled", True) lazy.carb.settings.get_settings().set_int("/rtx/raytracing/showLights", 1) lazy.carb.settings.get_settings().set_float("/rtx/sceneDb/ambientLightIntensity", 0.1) @property def viewer_visibility(self): """ Returns: bool: Whether the viewer is visible or not """ return self._viewer_camera.viewer_visibility @viewer_visibility.setter def viewer_visibility(self, visible): """ Sets whether the viewer should be visible or not in the Omni UI Args: visible (bool): Whether the viewer should be visible or not """ self._viewer_camera.viewer_visibility = visible @property def viewer_height(self): """ Returns: int: viewer height of this sensor, in pixels """ # If the viewer camera hasn't been created yet, utilize the default width return gm.DEFAULT_VIEWER_HEIGHT if self._viewer_camera is None else self._viewer_camera.image_height @viewer_height.setter def viewer_height(self, height): """ Sets the viewer height @height for this sensor Args: height (int): viewer height, in pixels """ self._viewer_camera.image_height = height @property def viewer_width(self): """ Returns: int: viewer width of this sensor, in pixels """ # If the viewer camera hasn't been created yet, utilize the default height return gm.DEFAULT_VIEWER_WIDTH if self._viewer_camera is None else self._viewer_camera.image_width @viewer_width.setter def viewer_width(self, width): """ Sets the viewer width @width for this sensor Args: width (int): viewer width, in pixels """ self._viewer_camera.image_width = width def set_lighting_mode(self, mode): """ Sets the active lighting mode in the current simulator. Valid options are one of LightingMode Args: mode (LightingMode): Lighting mode to set """ lazy.omni.kit.commands.execute("SetLightingMenuModeCommand", lighting_mode=mode) def enable_viewer_camera_teleoperation(self): """ Enables keyboard control of the active viewer camera for this simulation """ assert gm.RENDER_VIEWER_CAMERA, "Viewer camera must be enabled to enable teleoperation!" self._camera_mover = CameraMover(cam=self._viewer_camera) self._camera_mover.print_info() return self._camera_mover def import_scene(self, scene): """ Import a scene into the simulator. A scene could be a synthetic one or a realistic Gibson Environment. Args: scene (Scene): a scene object to load """ assert self.is_stopped(), "Simulator must be stopped while importing a scene!" assert isinstance(scene, Scene), "import_scene can only be called with Scene" # Clear the existing scene if any self.clear() # Initialize all global updatable object states for state in self.object_state_types_requiring_update: if issubclass(state, GlobalUpdateStateMixin): state.global_initialize() self._scene = scene self._scene.load() # Make sure simulator is not running, then start it so that we can initialize the scene assert self.is_stopped(), "Simulator must be stopped after importing a scene!" self.play() # Initialize the scene self._scene.initialize() # Need to one more step for particle systems to work self.step() self.stop() log.info("Imported scene.") def initialize_object_on_next_sim_step(self, obj): """ Initializes the object upon the next simulation step Args: obj (BasePrim): Object to initialize as soon as a new sim step is called """ self._objects_to_initialize.append(obj) def import_object(self, obj, register=True): """ Import an object into the simulator. Args: obj (BaseObject): an object to load register (bool): whether to register this object internally in the scene registry """ assert isinstance(obj, BaseObject), "import_object can only be called with BaseObject" # Make sure scene is loaded -- objects should not be loaded unless we have a reference to a scene assert self.scene is not None, "import_object needs to be called after import_scene" # Load the object in omniverse by adding it to the scene self.scene.add_object(obj, register=register, _is_call_from_simulator=True) # Run any callbacks for callback in self._callbacks_on_import_obj.values(): callback(obj) # Cache the mapping from link IDs to object for link in obj.links.values(): self._link_id_to_objects[lazy.pxr.PhysicsSchemaTools.sdfPathToInt(link.prim_path)] = obj # Lastly, additionally add this object automatically to be initialized as soon as another simulator step occurs self.initialize_object_on_next_sim_step(obj=obj) def remove_object(self, obj): """ Remove one or a list of non-robot object from the simulator. Args: obj (BaseObject or Iterable[BaseObject]): one or a list of non-robot objects to remove """ objs = [obj] if isinstance(obj, BaseObject) else obj if self.is_playing(): state = self.dump_state() # Omniverse has a strange bug where if GPU dynamics is on and the object to remove is in contact with # with another object (in some specific configuration only, not always), the simulator crashes. Therefore, # we first move the object to a safe location, then remove it. pos = list(m.OBJECT_GRAVEYARD_POS) for ob in objs: ob.set_position_orientation(pos, [0, 0, 0, 1]) pos[0] += max(ob.aabb_extent) # One physics timestep will elapse self.step_physics() for ob in objs: self._remove_object(ob) if self.is_playing(): # Update all handles that are now broken because objects have changed self.update_handles() # Load the state back self.load_state(state) # Refresh all current rules TransitionRuleAPI.prune_active_rules() def _remove_object(self, obj): """ Remove a non-robot object from the simulator. Should not be called directly by the user. Args: obj (BaseObject): a non-robot object to remove """ # Run any callbacks for callback in self._callbacks_on_remove_obj.values(): callback(obj) # pop all link ids for link in obj.links.values(): self._link_id_to_objects.pop(lazy.pxr.PhysicsSchemaTools.sdfPathToInt(link.prim_path)) # If it was queued up to be initialized, remove it from the queue as well for i, initialize_obj in enumerate(self._objects_to_initialize): if obj.name == initialize_obj.name: self._objects_to_initialize.pop(i) break self._scene.remove_object(obj) def remove_prim(self, prim): """ Remove a prim from the simulator. Args: prim (BasePrim): a prim to remove """ # [omni.physx.tensors.plugin] prim '[prim_path]' was deleted while being used by a shape in a tensor view # class. The physics.tensors simulationView was invalidated. with suppress_omni_log(channels=["omni.physx.tensors.plugin"]): # Remove prim prim.remove() # Update all handles that are now broken because prims have changed self.update_handles() def _reset_variables(self): """ Reset internal variables when a new stage is loaded """ def render(self): super().render() # During rendering, the Fabric API is updated, so we can mark it as clean PoseAPI.mark_valid() def update_handles(self): # Handles are only relevant when physx is running if not self.is_playing(): return # First, refresh the physics sim view self._physics_sim_view = lazy.omni.physics.tensors.create_simulation_view(self.backend) self._physics_sim_view.set_subspace_roots("/") # Then update the handles for all objects if self.scene is not None and self.scene.initialized: for obj in self.scene.objects: # Only need to update if object is already initialized as well if obj.initialized: obj.update_handles() for system in self.scene.systems: if issubclass(system, MacroPhysicalParticleSystem): system.refresh_particles_view() # Finally update any unified views RigidContactAPI.initialize_view() def _non_physics_step(self): """ Complete any non-physics steps such as state updates. """ # If we don't have a valid scene, immediately return if self._scene is None: return # Update omni self._omni_update_step() # If we're playing we, also run additional logic if self.is_playing(): # Check to see if any objects should be initialized (only done IF we're playing) n_objects_to_initialize = len(self._objects_to_initialize) if n_objects_to_initialize > 0 and self.is_playing(): # We iterate through the objects to initialize # Note that we don't explicitly do for obj in self._objects_to_initialize because additional objects # may be added mid-iteration!! # For this same reason, after we finish the loop, we keep any objects that are yet to be initialized # First call zero-physics step update, so that handles are properly propagated og.sim.pi.update_simulation(elapsedStep=0, currentTime=og.sim.current_time) for i in range(n_objects_to_initialize): obj = self._objects_to_initialize[i] obj.initialize() if len(obj.states.keys() & self.object_state_types_on_contact) > 0: self._objects_require_contact_callback = True if len(obj.states.keys() & self.object_state_types_on_joint_break) > 0: self._objects_require_joint_break_callback = True self._objects_to_initialize = self._objects_to_initialize[n_objects_to_initialize:] # Re-initialize the physics view because the number of objects has changed self.update_handles() # Also refresh the transition rules that are currently active TransitionRuleAPI.refresh_all_rules() # Update any system-related state for system in self.scene.systems: system.update() # Propagate states if the feature is enabled if gm.ENABLE_OBJECT_STATES: # Step the object states in global topological order (if the scene exists) for state_type in self.object_state_types_requiring_update: if issubclass(state_type, GlobalUpdateStateMixin): state_type.global_update() if issubclass(state_type, UpdateStateMixin): for obj in self.scene.get_objects_with_state(state_type): # Update the state (object should already be initialized since # this step will only occur after objects are initialized and sim # is playing obj.states[state_type].update() for obj in self.scene.objects: # Only update visuals for objects that have been initialized so far if isinstance(obj, StatefulObject) and obj.initialized: obj.update_visuals() # Possibly run transition rule step if gm.ENABLE_TRANSITION_RULES: TransitionRuleAPI.step() def _omni_update_step(self): """ Step any omni-related things """ # Clear the bounding box and contact caches so that they get updated during the next time they're called RigidContactAPI.clear() def play(self): if not self.is_playing(): # Track whether we're starting the simulator fresh -- i.e.: whether we were stopped previously was_stopped = self.is_stopped() # Run super first # We suppress warnings from omni.usd because it complains about values set in the native USD # These warnings occur because the native USD file has some type mismatch in the `scale` property, # where the property expects a double but for whatever reason the USD interprets its values as floats # We suppress omni.physicsschema.plugin when kinematic_only objects are placed with scale ~1.0, to suppress # the following error: # [omni.physicsschema.plugin] ScaleOrientation is not supported for rigid bodies, prim path: [...] You may # ignore this if the scale is close to uniform. # We also need to suppress the following error when flat cache is used: # [omni.physx.plugin] Transformation change on non-root links is not supported. channels = ["omni.usd", "omni.physicsschema.plugin"] if gm.ENABLE_FLATCACHE: channels.append("omni.physx.plugin") with suppress_omni_log(channels=channels): super().play() # Take a render step -- this is needed so that certain (unknown, maybe omni internal state?) is populated # correctly. self.render() # Update all object handles, unless this is a play during initialization if og.sim is not None: self.update_handles() if was_stopped: # We need to update controller mode because kp and kd were set to the original (incorrect) values when # sim was stopped. We need to reset them to default_kp and default_kd defined in ControllableObject. # We also need to take an additional sim step to make sure simulator is functioning properly. # We need to do this because for some reason omniverse exhibits strange behavior if we do certain # operations immediately after playing; e.g.: syncing USD poses when flatcache is enabled if self.scene is not None and self.scene.initialized: for robot in self.scene.robots: if robot.initialized: robot.update_controller_mode() # Also refresh any transition rules that became stale while sim was stopped TransitionRuleAPI.refresh_all_rules() # Additionally run non physics things self._non_physics_step() # Run all callbacks for callback in self._callbacks_on_play.values(): callback() def pause(self): if not self.is_paused(): super().pause() def stop(self): if not self.is_stopped(): super().stop() # If we're using flatcache, we also need to reset its API if gm.ENABLE_FLATCACHE: FlatcacheAPI.reset() # Run all callbacks for callback in self._callbacks_on_stop.values(): callback() @property def n_physics_timesteps_per_render(self): """ Number of physics timesteps per rendering timestep. rendering_dt has to be a multiple of physics_dt. Returns: int: Discrete number of physics timesteps to take per step """ n_physics_timesteps_per_render = self.get_rendering_dt() / self.get_physics_dt() assert n_physics_timesteps_per_render.is_integer(), "render_timestep must be a multiple of physics_timestep" return int(n_physics_timesteps_per_render) def step(self, render=True): """ Step the simulation at self.render_timestep Args: render (bool): Whether rendering should occur or not """ # If we have imported any objects within the last timestep, we render the app once, since otherwise calling # step() may not step physics if len(self._objects_to_initialize) > 0: self.render() if render: super().step(render=True) else: for i in range(self.n_physics_timesteps_per_render): super().step(render=False) # Additionally run non physics things self._non_physics_step() # TODO (eric): After stage changes (e.g. pose, texture change), it will take two super().step(render=True) for # the result to propagate to the rendering. We could have called super().render() here but it will introduce # a big performance regression. def step_physics(self): """ Step the physics a single step. """ self._physics_context._step(current_time=self.current_time) self._omni_update_step() PoseAPI.invalidate() def _on_contact(self, contact_headers, contact_data): """ This callback will be invoked after every PHYSICS step if there is any contact. For each of the pair of objects in each contact, we invoke the on_contact function for each of its states that subclass ContactSubscribedStateMixin. These states update based on contact events. """ if gm.ENABLE_OBJECT_STATES and self._objects_require_contact_callback: headers = defaultdict(list) for contact_header in contact_headers: actor0_obj = self._link_id_to_objects.get(contact_header.actor0, None) actor1_obj = self._link_id_to_objects.get(contact_header.actor1, None) # If any of the objects cannot be found, skip if actor0_obj is None or actor1_obj is None: continue # If any of the objects is not initialized, skip if not actor0_obj.initialized or not actor1_obj.initialized: continue # If any of the objects is not stateful, skip if not isinstance(actor0_obj, StatefulObject) or not isinstance(actor1_obj, StatefulObject): continue # If any of the objects doesn't have states that require on_contact callbacks, skip if len(actor0_obj.states.keys() & self.object_state_types_on_contact) == 0 or len(actor1_obj.states.keys() & self.object_state_types_on_contact) == 0: continue headers[tuple(sorted((actor0_obj, actor1_obj), key=lambda x: x.uuid))].append(contact_header) for (actor0_obj, actor1_obj) in headers: for obj0, obj1 in [(actor0_obj, actor1_obj), (actor1_obj, actor0_obj)]: for state_type in self.object_state_types_on_contact: if state_type in obj0.states: obj0.states[state_type].on_contact(obj1, headers[(actor0_obj, actor1_obj)], contact_data) def _on_simulation_event(self, event): """ This callback will be invoked if there is any simulation event. Currently it only processes JOINT_BREAK event. """ if gm.ENABLE_OBJECT_STATES: if event.type == int(lazy.omni.physx.bindings._physx.SimulationEvent.JOINT_BREAK) and self._objects_require_joint_break_callback: joint_path = str(lazy.pxr.PhysicsSchemaTools.decodeSdfPath(event.payload["jointPath"][0], event.payload["jointPath"][1])) obj = None # TODO: recursively try to find the parent object of this joint tokens = joint_path.split("/") for i in range(2, len(tokens) + 1): obj = self._scene.object_registry("prim_path", "/".join(tokens[:i])) if obj is not None: break if obj is None or not obj.initialized or not isinstance(obj, StatefulObject): return if len(obj.states.keys() & self.object_state_types_on_joint_break) == 0: return for state_type in self.object_state_types_on_joint_break: if state_type in obj.states: obj.states[state_type].on_joint_break(joint_path) def is_paused(self): """ Returns: bool: True if the simulator is paused, otherwise False """ return not (self.is_stopped() or self.is_playing()) @contextlib.contextmanager def stopped(self): """ A context scope for making sure the simulator is stopped during execution within this scope. Upon leaving the scope, the prior simulator state is restored. """ # Infer what state we're currently in, then stop, yield, and then restore the original state sim_is_playing, sim_is_paused = self.is_playing(), self.is_paused() if sim_is_playing or sim_is_paused: self.stop() yield if sim_is_playing: self.play() elif sim_is_paused: self.pause() @contextlib.contextmanager def playing(self): """ A context scope for making sure the simulator is playing during execution within this scope. Upon leaving the scope, the prior simulator state is restored. """ # Infer what state we're currently in, then stop, yield, and then restore the original state sim_is_stopped, sim_is_paused = self.is_stopped(), self.is_paused() if sim_is_stopped or sim_is_paused: self.play() yield if sim_is_stopped: self.stop() elif sim_is_paused: self.pause() @contextlib.contextmanager def paused(self): """ A context scope for making sure the simulator is paused during execution within this scope. Upon leaving the scope, the prior simulator state is restored. """ # Infer what state we're currently in, then stop, yield, and then restore the original state sim_is_stopped, sim_is_playing = self.is_stopped(), self.is_playing() if sim_is_stopped or sim_is_playing: self.pause() yield if sim_is_stopped: self.stop() elif sim_is_playing: self.play() @contextlib.contextmanager def slowed(self, dt): """ A context scope for making the simulator simulation dt slowed, e.g.: for taking micro-steps for propagating instantaneous kinematics with minimal impact on physics propagation. NOTE: This will set both the physics dt and rendering dt to the same value during this scope. Upon leaving the scope, the prior simulator state is restored. """ # Set dt, yield, then restore the original dt physics_dt, rendering_dt = self.get_physics_dt(), self.get_rendering_dt() self.set_simulation_dt(physics_dt=dt, rendering_dt=dt) yield self.set_simulation_dt(physics_dt=physics_dt, rendering_dt=rendering_dt) def add_callback_on_play(self, name, callback): """ Adds a function @callback, referenced by @name, to be executed every time sim.play() is called Args: name (str): Name of the callback callback (function): Callback function. Function signature is expected to be: def callback() --> None """ self._callbacks_on_play[name] = callback def add_callback_on_stop(self, name, callback): """ Adds a function @callback, referenced by @name, to be executed every time sim.stop() is called Args: name (str): Name of the callback callback (function): Callback function. Function signature is expected to be: def callback() --> None """ self._callbacks_on_stop[name] = callback def add_callback_on_import_obj(self, name, callback): """ Adds a function @callback, referenced by @name, to be executed every time sim.import_object() is called Args: name (str): Name of the callback callback (function): Callback function. Function signature is expected to be: def callback(obj: BaseObject) --> None """ self._callbacks_on_import_obj[name] = callback def add_callback_on_remove_obj(self, name, callback): """ Adds a function @callback, referenced by @name, to be executed every time sim.remove_object() is called Args: name (str): Name of the callback callback (function): Callback function. Function signature is expected to be: def callback(obj: BaseObject) --> None """ self._callbacks_on_remove_obj[name] = callback def remove_callback_on_play(self, name): """ Remove play callback whose reference is @name Args: name (str): Name of the callback """ self._callbacks_on_play.pop(name, None) def remove_callback_on_stop(self, name): """ Remove stop callback whose reference is @name Args: name (str): Name of the callback """ self._callbacks_on_stop.pop(name, None) def remove_callback_on_import_obj(self, name): """ Remove stop callback whose reference is @name Args: name (str): Name of the callback """ self._callbacks_on_import_obj.pop(name, None) def remove_callback_on_remove_obj(self, name): """ Remove stop callback whose reference is @name Args: name (str): Name of the callback """ self._callbacks_on_remove_obj.pop(name, None) @classmethod def clear_instance(cls): lazy.omni.isaac.core.simulation_context.SimulationContext.clear_instance() Simulator._world_initialized = None return def __del__(self): lazy.omni.isaac.core.simulation_context.SimulationContext.__del__(self) Simulator._world_initialized = None return @property def pi(self): """ Returns: PhysX: Physx Interface (pi) for controlling low-level physx engine """ return self._physx_interface @property def psi(self): """ Returns: IPhysxSimulation: Physx Simulation Interface (psi) for controlling low-level physx simulation """ return self._physx_simulation_interface @property def psqi(self): """ Returns: PhysXSceneQuery: Physx Scene Query Interface (psqi) for running low-level scene queries """ return self._physx_scene_query_interface @property def scene(self): """ Returns: None or Scene: Scene currently loaded in this simulator. If no scene is loaded, returns None """ return self._scene @property def viewer_camera(self): """ Returns: VisionSensor: Active camera sensor corresponding to the active viewport window instance shown in the omni UI """ return self._viewer_camera @property def camera_mover(self): """ Returns: None or CameraMover: If enabled, the teleoperation interface for controlling the active viewer camera """ return self._camera_mover @property def world_prim(self): """ Returns: Usd.Prim: Prim at /World """ return lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path="/World") def clear(self) -> None: """ Clears the stage leaving the PhysicsScene only if under /World. """ # Stop the physics self.stop() # Clear any pre-existing scene if it exists if self._scene is not None: self.scene.clear() self._scene = None # Clear all vision sensors and remove viewer camera reference and camera mover reference VisionSensor.clear() self._viewer_camera = None if self._camera_mover is not None: self._camera_mover.clear() self._camera_mover = None # Clear all global update states for state in self.object_state_types_requiring_update: if issubclass(state, GlobalUpdateStateMixin): state.global_clear() # Clear all materials MaterialPrim.clear() # Clear all transition rules TransitionRuleAPI.clear() # Clear uniquely named items and other internal states clear_pu() clear_uu() self._objects_to_initialize = [] self._objects_require_contact_callback = False self._objects_require_joint_break_callback = False self._link_id_to_objects = dict() self._callbacks_on_play = dict() self._callbacks_on_stop = dict() self._callbacks_on_import_obj = dict() self._callbacks_on_remove_obj = dict() # Load dummy stage, but don't clear sim to prevent circular loops self._open_new_stage() def write_metadata(self, key, data): """ Writes metadata @data to the current global metadata dict using key @key Args: key (str): Keyword entry in the global metadata dictionary to use data (dict): Data to write to @key in the global metadata dictionary """ self.world_prim.SetCustomDataByKey(key, data) def get_metadata(self, key): """ Grabs metadata from the current global metadata dict using key @key Args: key (str): Keyword entry in the global metadata dictionary to use """ return self.world_prim.GetCustomDataByKey(key) def restore(self, json_path): """ Restore a simulation environment from @json_path. Args: json_path (str): Full path of JSON file to load, which contains information to recreate a scene. """ if not json_path.endswith(".json"): log.error(f"You have to define the full json_path to load from. Got: {json_path}") return # Load the info from the json with open(json_path, "r") as f: scene_info = json.load(f) init_info = scene_info["init_info"] state = scene_info["state"] # Override the init info with our json path init_info["args"]["scene_file"] = json_path # Also make sure we have any additional modifications necessary from the specific scene og.REGISTERED_SCENES[init_info["class_name"]].modify_init_info_for_restoring(init_info=init_info) # Recreate and import the saved scene og.sim.stop() recreated_scene = create_object_from_init_info(init_info) self.import_scene(scene=recreated_scene) # Start the simulation and restore the dynamic state of the scene and then pause again self.play() self.load_state(state, serialized=False) log.info("The saved simulation environment loaded.") return def save(self, json_path): """ Saves the current simulation environment to @json_path. Args: json_path (str): Full path of JSON file to save (should end with .json), which contains information to recreate the current scene. """ # Make sure the sim is not stopped, since we need to grab joint states assert not self.is_stopped(), "Simulator cannot be stopped when saving to USD!" # Make sure there are no objects in the initialization queue, if not, terminate early and notify user # Also run other sanity checks before saving if len(self._objects_to_initialize) > 0: log.error("There are still objects to initialize! Please take one additional sim step and then save.") return if not self.scene: log.warning("Scene has not been loaded. Nothing to save.") return if not json_path.endswith(".json"): log.error(f"You have to define the full json_path to save the scene to. Got: {json_path}") return # Update scene info self.scene.update_objects_info() # Dump saved current state and also scene init info scene_info = { "metadata": self.world_prim.GetCustomData(), "state": self.scene.dump_state(serialized=False), "init_info": self.scene.get_init_info(), "objects_info": self.scene.get_objects_info(), } # Write this to the json file Path(os.path.dirname(json_path)).mkdir(parents=True, exist_ok=True) with open(json_path, "w+") as f: json.dump(scene_info, f, cls=NumpyEncoder, indent=4) log.info("The current simulation environment saved.") def _open_new_stage(self): """ Opens a new stage """ # Stop the physics if we're playing if not self.is_stopped(): log.warning("Stopping simulation in order to open new stage.") self.stop() # Store physics dt and rendering dt to reuse later # Note that the stage may have been deleted previously; if so, we use the default values # of 1/120, 1/30 try: physics_dt = self.get_physics_dt() except: print("WARNING: Invalid or non-existent physics scene found. Setting physics dt to 1/120.") physics_dt = 1 / 120. rendering_dt = self.get_rendering_dt() # Open new stage -- suppressing warning that we're opening a new stage with suppress_omni_log(None): lazy.omni.isaac.core.utils.stage.create_new_stage() # Clear physics context self._physics_context = None self._physx_fabric_interface = None # Create world prim self.stage.DefinePrim("/World", "Xform") self._init_stage(physics_dt=physics_dt, rendering_dt=rendering_dt) def _load_stage(self, usd_path): """ Open the stage specified by USD file at @usd_path Args: usd_path (str): Absolute filepath to USD stage that should be loaded """ # Stop the physics if we're playing if not self.is_stopped(): log.warning("Stopping simulation in order to load stage.") self.stop() # Store physics dt and rendering dt to reuse later # Note that the stage may have been deleted previously; if so, we use the default values # of 1/120, 1/30 try: physics_dt = self.get_physics_dt() except: print("WARNING: Invalid or non-existent physics scene found. Setting physics dt to 1/120.") physics_dt = 1/120. rendering_dt = self.get_rendering_dt() # Open new stage -- suppressing warning that we're opening a new stage with suppress_omni_log(None): lazy.omni.isaac.core.utils.stage.open_stage(usd_path=usd_path) self._init_stage(physics_dt=physics_dt, rendering_dt=rendering_dt) def _init_stage( self, physics_dt=None, rendering_dt=None, stage_units_in_meters=None, physics_prim_path="/physicsScene", sim_params=None, set_defaults=True, backend="numpy", device=None, ): # Run super first super()._init_stage( physics_dt=physics_dt, rendering_dt=rendering_dt, stage_units_in_meters=stage_units_in_meters, physics_prim_path=physics_prim_path, sim_params=sim_params, set_defaults=set_defaults, backend=backend, device=device, ) # Update internal vars self._physx_interface = lazy.omni.physx.get_physx_interface() self._physx_simulation_interface = lazy.omni.physx.get_physx_simulation_interface() self._physx_scene_query_interface = lazy.omni.physx.get_physx_scene_query_interface() # Update internal settings self._set_physics_engine_settings() self._set_renderer_settings() # Update internal callbacks self._setup_default_callback_fns() self._stage_open_callback = ( lazy.omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(self._stage_open_callback_fn) ) self._contact_callback = self._physics_context._physx_sim_interface.subscribe_contact_report_events(self._on_contact) self._simulation_event_callback = self._physx_interface.get_simulation_event_stream_v2().create_subscription_to_pop(self._on_simulation_event) # Set the lighting mode to be stage by default self.set_lighting_mode(mode=LightingMode.STAGE) # Set the viewer camera, and then set its default pose if gm.RENDER_VIEWER_CAMERA: self._set_viewer_camera() self.viewer_camera.set_position_orientation( position=np.array(m.DEFAULT_VIEWER_CAMERA_POS), orientation=np.array(m.DEFAULT_VIEWER_CAMERA_QUAT), ) def close(self): """ Shuts down the OmniGibson application """ self._app.shutdown() @property def stage_id(self): """ Returns: int: ID of the current active stage """ return lazy.pxr.UsdUtils.StageCache.Get().GetId(self.stage).ToLongInt() @property def device(self): """ Returns: device (None or str): Device used in simulation backend """ return self._device @device.setter def device(self, device): """ Sets the device used for sim backend Args: device (None or str): Device to set for the simulation backend """ self._device = device if self._device is not None and "cuda" in self._device: device_id = self._settings.get_as_int("/physics/cudaDevice") self._device = f"cuda:{device_id}" @property def state_size(self): # Total state size is the state size of our scene return self._scene.state_size def _dump_state(self): # Default state is from the scene return self._scene.dump_state(serialized=False) def _load_state(self, state): # Default state is from the scene self._scene.load_state(state=state, serialized=False) def load_state(self, state, serialized=False): # We need to make sure the simulator is playing since joint states only get updated when playing assert self.is_playing() # Run super super().load_state(state=state, serialized=serialized) # Highlight that at the current step, the non-kinematic states are potentially inaccurate because a sim # step is needed to propagate specific states in physics backend # TODO: This should be resolved in a future omniverse release! disclaimer("Attempting to load simulator state.\n" "Currently, omniverse does not support exclusively stepping kinematics, so we cannot update some " "of our object states relying on updated kinematics until a simulator step is taken!\n" "Object states such as OnTop, Inside, etc. relying on relative spatial information will inaccurate" "until a single sim step is taken.\n" "This should be resolved by the next NVIDIA Isaac Sim release.") def _serialize(self, state): # Default state is from the scene return self._scene.serialize(state=state) def _deserialize(self, state): # Default state is from the scene return self._scene.deserialize(state=state), self._scene.state_size if not og.sim: og.sim = Simulator(*args, **kwargs) print() print_icon() print_logo() print() log.info(f"{'-' * 10} Welcome to {logo_small()}! {'-' * 10}") return og.sim
StanfordVL/OmniGibson/omnigibson/transition_rules.py
import operator from abc import ABCMeta, abstractmethod from collections import namedtuple, defaultdict import numpy as np import json from copy import copy import itertools import os from collections import defaultdict import networkx as nx import omnigibson as og from omnigibson.macros import gm, create_module_macros from omnigibson.systems import get_system, is_system_active, PhysicalParticleSystem, VisualParticleSystem, REGISTERED_SYSTEMS from omnigibson.objects.dataset_object import DatasetObject from omnigibson.object_states import * from omnigibson.object_states.factory import get_system_states from omnigibson.object_states.object_state_base import AbsoluteObjectState, RelativeObjectState from omnigibson.utils.asset_utils import get_all_object_category_models from omnigibson.utils.constants import PrimType from omnigibson.utils.python_utils import Registerable, classproperty, subclass_factory from omnigibson.utils.registry_utils import Registry import omnigibson.utils.transform_utils as T from omnigibson.utils.ui_utils import disclaimer, create_module_logger from omnigibson.utils.usd_utils import RigidContactAPI from omnigibson.utils.bddl_utils import translate_bddl_recipe_to_og_recipe, translate_bddl_washer_rule_to_og_washer_rule import bddl # Create module logger log = create_module_logger(module_name=__name__) # Create settings for this module m = create_module_macros(module_path=__file__) # Default melting temperature m.MELTING_TEMPERATURE = 100.0 # Default "trash" system if an invalid mixing rule transition occurs m.DEFAULT_GARBAGE_SYSTEM = "sludge" # Tuple of attributes of objects created in transitions. # `states` field is dict mapping object state class to arguments to pass to setter for that class _attrs_fields = ["category", "model", "name", "scale", "obj", "pos", "orn", "bb_pos", "bb_orn", "states", "callback"] # States: dict: mapping state nameargs to pass to the state setter for @obj in order to set the object state # callback: function: signature callback(obj) -> None to execute after states are set, if any ObjectAttrs = namedtuple( "ObjectAttrs", _attrs_fields, defaults=(None,) * len(_attrs_fields)) # Tuple of lists of objects to be added or removed returned from transitions, if not None TransitionResults = namedtuple( "TransitionResults", ["add", "remove"], defaults=(None, None)) # Mapping from transition rule json files to rule classe names _JSON_FILES_TO_RULES = { "heat_cook.json": ["CookingObjectRule", "CookingSystemRule"], "mixing_stick.json": ["MixingToolRule"], "single_toggleable_machine.json": ["ToggleableMachineRule"], "substance_cooking.json": ["CookingPhysicalParticleRule"], "substance_watercooking.json": ["CookingPhysicalParticleRule"], "washer.json": ["WasherRule"], } # Global dicts that will contain mappings REGISTERED_RULES = dict() class TransitionRuleAPI: """ Monolithic class containing methods to check and execute arbitrary discrete state transitions within the simulator """ # Set of active rules ACTIVE_RULES = set() # Maps BaseObject instances to dictionary with the following keys: # "states": None or dict mapping object states to arguments to set for that state when the object is initialized # "callback": None or function to execute when the object is initialized _INIT_INFO = dict() @classmethod def get_rule_candidates(cls, rule, objects): """ Computes valid input object candidates for transition rule @rule, if any exist Args: rule (BaseTransitionRule): Transition rule whose candidates should be computed objects (list of BaseObject): List of objects that will be used to compute object candidates Returns: None or dict: None if no valid candidates are found, otherwise mapping from filter key to list of object instances that satisfy that filter """ obj_candidates = rule.get_object_candidates(objects=objects) n_filters_satisfied = sum(len(candidates) > 0 for candidates in obj_candidates.values()) # Return object candidates if all filters are met, otherwise return None return obj_candidates if n_filters_satisfied == len(rule.candidate_filters) else None @classmethod def prune_active_rules(cls): """ Prunes the active transition rules, removing any whose filter requirements are not satisfied by all current objects on the scene. Useful when the current object set changes, e.g.: an object is removed from the simulator """ # Need explicit tuple to iterate over because refresh_rules mutates the ACTIVE_RULES set in place cls.refresh_rules(rules=tuple(cls.ACTIVE_RULES)) @classmethod def refresh_all_rules(cls): """ Refreshes all registered rules given the current set of objects in the scene """ global RULES_REGISTRY # Clear all active rules cls.ACTIVE_RULES = set() # Refresh all registered rules cls.refresh_rules(rules=RULES_REGISTRY.objects) @classmethod def refresh_rules(cls, rules): """ Refreshes the specified transition rules @rules based on current set of objects in the simulator. This will prune any pre-existing rules in cls.ACTIVE_RULES if no valid candidates are found, or add / update the entry if valid candidates are found Args: rules (list of BaseTransitionRule): List of transition rules whose candidate lists should be refreshed """ objects = og.sim.scene.objects for rule in rules: # Check if rule is still valid, if so, update its entry object_candidates = cls.get_rule_candidates(rule=rule, objects=objects) # Update candidates if valid, otherwise pop the entry if it exists in cls.ACTIVE_RULES if object_candidates is not None: # We have a valid rule which should be active, so grab and initialize all of its conditions # NOTE: The rule may ALREADY exist in ACTIVE_RULES, but we still need to refresh its candidates because # the relevant candidate set / information for the rule + its conditions may have changed given the # new set of objects rule.refresh(object_candidates=object_candidates) cls.ACTIVE_RULES.add(rule) elif rule in cls.ACTIVE_RULES: cls.ACTIVE_RULES.remove(rule) @classmethod def step(cls): """ Steps all active transition rules, checking if any are satisfied, and if so, executing their transition """ # First apply any transition object init states from before, and then clear the dictionary for obj, info in cls._INIT_INFO.items(): if info["states"] is not None: for state, args in info["states"].items(): obj.states[state].set_value(*args) if info["callback"] is not None: info["callback"](obj) cls._INIT_INFO = dict() # Iterate over all active rules and process the rule for every valid object candidate combination # Cast to list before iterating since ACTIVE_RULES may get updated mid-iteration added_obj_attrs = [] removed_objs = [] for rule in tuple(cls.ACTIVE_RULES): output = rule.step() # Store objects to be added / removed if we have a valid output if output is not None: added_obj_attrs += output.add removed_objs += output.remove cls.execute_transition(added_obj_attrs=added_obj_attrs, removed_objs=removed_objs) @classmethod def execute_transition(cls, added_obj_attrs, removed_objs): """ Executes the transition for the given added and removed objects. :param added_obj_attrs: List of ObjectAttrs instances to add to the scene :param removed_objs: List of BaseObject instances to remove from the scene """ # Process all transition results if len(removed_objs) > 0: # First remove pre-existing objects og.sim.remove_object(removed_objs) # Then add new objects if len(added_obj_attrs) > 0: state = og.sim.dump_state() for added_obj_attr in added_obj_attrs: new_obj = added_obj_attr.obj og.sim.import_object(new_obj) # By default, added_obj_attr is populated with all Nones -- so these will all be pass-through operations # unless pos / orn (or, conversely, bb_pos / bb_orn) is specified if added_obj_attr.pos is not None or added_obj_attr.orn is not None: new_obj.set_position_orientation(position=added_obj_attr.pos, orientation=added_obj_attr.orn) elif isinstance(new_obj, DatasetObject) and \ (added_obj_attr.bb_pos is not None or added_obj_attr.bb_orn is not None): new_obj.set_bbox_center_position_orientation(position=added_obj_attr.bb_pos, orientation=added_obj_attr.bb_orn) else: raise ValueError("Expected at least one of pos, orn, bb_pos, or bb_orn to be specified in ObjectAttrs!") # Additionally record any requested states if specified to be updated during the next transition step if added_obj_attr.states is not None or added_obj_attr.callback is not None: cls._INIT_INFO[new_obj] = { "states": added_obj_attr.states, "callback": added_obj_attr.callback, } @classmethod def clear(cls): """ Clears any internal state when the simulator is restarted (e.g.: when a new stage is opened) """ global RULES_REGISTRY # Clear internal dictionaries cls.ACTIVE_RULES = set() cls._INIT_INFO = dict() class ObjectCandidateFilter(metaclass=ABCMeta): """ Defines a filter to apply for inferring which objects are valid candidates for checking a transition rule's condition requirements. NOTE: These filters should describe STATIC properties about an object -- i.e.: properties that should NOT change at runtime, once imported """ @abstractmethod def __call__(self, obj): """Returns true if the given object passes the filter.""" return False class CategoryFilter(ObjectCandidateFilter): """Filter for object categories.""" def __init__(self, category): self.category = category def __call__(self, obj): return obj.category == self.category class AbilityFilter(ObjectCandidateFilter): """Filter for object abilities.""" def __init__(self, ability): self.ability = ability def __call__(self, obj): return self.ability in obj._abilities class NameFilter(ObjectCandidateFilter): """Filter for object names.""" def __init__(self, name): self.name = name def __call__(self, obj): return self.name in obj.name class NotFilter(ObjectCandidateFilter): """Logical-not of a filter""" def __init__(self, f): self.f = f def __call__(self, obj): return not self.f(obj) class OrFilter(ObjectCandidateFilter): """Logical-or of a set of filters.""" def __init__(self, filters): self.filters = filters def __call__(self, obj): return any(f(obj) for f in self.filters) class AndFilter(ObjectCandidateFilter): """Logical-and of a set of filters.""" def __init__(self, filters): self.filters = filters def __call__(self, obj): return all(f(obj) for f in self.filters) class RuleCondition: """ Defines a transition rule condition for filtering a given set of input object candidates. NOTE: These filters should describe DYNAMIC properties about object candidates -- i.e.: properties that MAY change at runtime, once imported """ def refresh(self, object_candidates): """ Refreshes any internal state for this rule condition, given set of input object candidates @object_candidates Args: object_candidates (dict): Maps filter name to valid object(s) that satisfy that filter """ # No-op by default pass @abstractmethod def __call__(self, object_candidates): """ Filters @object_candidates and updates the candidates in-place, returning True if there are still valid candidates Args: object_candidates (dict): Maps filter name to valid object(s) that satisfy that filter Returns: bool: Whether there are still valid candidates in @object_candidates """ # Default is False return False @property def modifies_filter_names(self): """ Returns: set: Filter name(s) whose values may be modified in-place by this condition """ raise NotImplementedError class TouchingAnyCondition(RuleCondition): """ Rule condition that prunes object candidates from @filter_1_name, only keeping any that are touching any object from @filter_2_name """ def __init__(self, filter_1_name, filter_2_name): """ Args: filter_1_name (str): Name of the filter whose object candidates will be pruned based on whether or not they are touching any object from @filter_2_name filter_2_name (str): Name of the filter whose object candidates will be used to prune the candidates from @filter_1_name """ self._filter_1_name = filter_1_name self._filter_2_name = filter_2_name # Will be filled in during self.initialize # Maps object to the list of rigid body idxs in the global contact matrix corresponding to filter 1 self._filter_1_idxs = None # If optimized, filter_2_idxs will be used, otherwise filter_2_bodies will be used! # Maps object to the list of rigid body idxs in the global contact matrix corresponding to filter 2 self._filter_2_idxs = None # Maps object to set of rigid bodies corresponding to filter 2 self._filter_2_bodies = None # Flag whether optimized call can be used self._optimized = None def refresh(self, object_candidates): # Check whether we can use optimized computation or not -- this is determined by whether or not any objects # in our collision set are kinematic only self._optimized = not np.any([obj.kinematic_only or obj.prim_type == PrimType.CLOTH for f in (self._filter_1_name, self._filter_2_name) for obj in object_candidates[f]]) if self._optimized: # Register idx mappings self._filter_1_idxs = {obj: [RigidContactAPI.get_body_row_idx(link.prim_path) for link in obj.links.values()] for obj in object_candidates[self._filter_1_name]} self._filter_2_idxs = {obj: [RigidContactAPI.get_body_col_idx(link.prim_path) for link in obj.links.values()] for obj in object_candidates[self._filter_2_name]} else: # Register body mappings self._filter_2_bodies = {obj: set(obj.links.values()) for obj in object_candidates[self._filter_2_name]} def __call__(self, object_candidates): # Keep any object that has non-zero impulses between itself and any of the @filter_2_name's objects objs = [] if self._optimized: # Get all impulses impulses = RigidContactAPI.get_all_impulses() idxs_to_check = np.concatenate([self._filter_2_idxs[obj] for obj in object_candidates[self._filter_2_name]]) # Batch check for each object for obj in object_candidates[self._filter_1_name]: if np.any(impulses[self._filter_1_idxs[obj]][:, idxs_to_check]): objs.append(obj) else: # Manually check contact filter_2_bodies = set.union(*(self._filter_2_bodies[obj] for obj in object_candidates[self._filter_2_name])) for obj in object_candidates[self._filter_1_name]: if len(obj.states[ContactBodies].get_value().intersection(filter_2_bodies)) > 0: objs.append(obj) # Update candidates object_candidates[self._filter_1_name] = objs # If objs is empty, return False, otherwise, True return len(objs) > 0 @property def modifies_filter_names(self): # Only modifies values from filter 1 return {self._filter_1_name} class StateCondition(RuleCondition): """ Rule condition that checks all objects from @filter_name whether a state condition is equal to @val for """ def __init__( self, filter_name, state, val, op=operator.eq, ): """ Args: filter_name (str): Name of the filter whose object candidates will be pruned based on whether or not the state @state's value is equal to @val state (BaseObjectState): Object state whose value should be queried as a rule condition val (any): The value @state should be in order for this condition to be satisfied op (function): Binary operator to apply between @state's getter and @val. Default is operator.eq, which does state.get_value() == val. Expected signature: def op(state_getter, val) --> bool """ self._filter_name = filter_name self._state = state self._val = val self._op = op def __call__(self, object_candidates): # Keep any object whose states are satisfied object_candidates[self._filter_name] = \ [obj for obj in object_candidates[self._filter_name] if self._op(obj.states[self._state].get_value(), self._val)] # Condition met if any object meets the condition return len(object_candidates[self._filter_name]) > 0 @property def modifies_filter_names(self): return {self._filter_name} class ChangeConditionWrapper(RuleCondition): """ Rule condition wrapper that checks whether the output from @condition """ def __init__( self, condition, ): """ Args: condition (RuleCondition): Condition whose output will be additionally filtered whether or not its relevant values have changed since the previous time this condition was called """ self._condition = condition self._last_valid_candidates = {filter_name: set() for filter_name in self.modifies_filter_names} def refresh(self, object_candidates): # Refresh nested condition self._condition.refresh(object_candidates=object_candidates) def __call__(self, object_candidates): # Call wrapped method first valid = self._condition(object_candidates=object_candidates) # Iterate over all current candidates -- if there's a mismatch in last valid candidates and current, # then we store it, otherwise, we don't for filter_name in self.modifies_filter_names: # Compute current valid candidates objs = [obj for obj in object_candidates[filter_name] if obj not in self._last_valid_candidates[filter_name]] # Store last valid objects -- these are all candidates that were validated by self._condition at the # current timestep self._last_valid_candidates[filter_name] = set(object_candidates[filter_name]) # Update current object candidates with the change-filtered ones object_candidates[filter_name] = objs valid = valid and len(objs) > 0 # Valid if any object conditions have changed and we still have valid objects return valid @property def modifies_filter_names(self): # Return wrapped names return self._condition.modifies_filter_names class OrConditionWrapper(RuleCondition): """ Logical OR between multiple RuleConditions """ def __init__(self, conditions): """ Args: conditions (list of RuleConditions): Conditions to take logical OR over. This will generate the UNION of all candidates. """ self._conditions = conditions def refresh(self, object_candidates): # Refresh nested conditions for condition in self._conditions: condition.refresh(object_candidates=object_candidates) def __call__(self, object_candidates): # Iterate over all conditions and aggregate their results pruned_candidates = dict() for condition in self._conditions: # Copy the candidates because they get modified in place pruned_candidates[condition] = copy(object_candidates) condition(object_candidates=pruned_candidates[condition]) # For each filter, take the union over object candidates across each condition. # If the result is empty, we immediately return False. for filter_name in object_candidates: object_candidates[filter_name] = \ list(set.union(*[set(candidates[filter_name]) for candidates in pruned_candidates.values()])) if len(object_candidates[filter_name]) == 0: return False return True @property def modifies_filter_names(self): # Return all wrapped names return set.union(*(condition.modifies_filter_names for condition in self._conditions)) class AndConditionWrapper(RuleCondition): """ Logical AND between multiple RuleConditions """ def __init__(self, conditions): """ Args: conditions (list of RuleConditions): Conditions to take logical AND over. This will generate the INTERSECTION of all candidates. """ self._conditions = conditions def refresh(self, object_candidates): # Refresh nested conditions for condition in self._conditions: condition.refresh(object_candidates=object_candidates) def __call__(self, object_candidates): # Iterate over all conditions and aggregate their results pruned_candidates = dict() for condition in self._conditions: # Copy the candidates because they get modified in place pruned_candidates[condition] = copy(object_candidates) condition(object_candidates=pruned_candidates[condition]) # For each filter, take the intersection over object candidates across each condition. # If the result is empty, we immediately return False. for filter_name in object_candidates: object_candidates[filter_name] = \ list(set.intersection(*[set(candidates[filter_name]) for candidates in pruned_candidates.values()])) if len(object_candidates[filter_name]) == 0: return False return True @property def modifies_filter_names(self): # Return all wrapped names return set.union(*(condition.modifies_filter_names for condition in self._conditions)) class BaseTransitionRule(Registerable): """ Defines a set of categories of objects and how to transition their states. """ conditions = None candidates = None def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) # Register this system, and # make sure at least one filter is specified -- in general, there should never be a rule # where no filter is specified # Only run this check for actual rules that are being registered if cls.__name__ not in cls._do_not_register_classes: global RULES_REGISTRY RULES_REGISTRY.add(obj=cls) assert len(cls.candidate_filters) > 0, \ "At least one of individual_filters or group_filters must be specified!" # Store conditions cls.conditions = cls._generate_conditions() @classproperty def candidate_filters(cls): """ Object candidate filters that this transition rule cares about. For each name, filter key-value pair, the global transition rule step will produce a single dictionary of valid filtered objects. For example, if the group filters are: {"apple": CategoryFilter("apple"), "knife": CategoryFilter("knife")}, the transition rule step will produce the following dictionary: {"apple": [apple0, apple1, ...], "knife": [knife0, knife1, ...]} based on the current instances of each object type in the scene and pass them to conditions in @self.conditions NOTE: There should always be at least one filter applied for every rule! Returns: dict: Maps filter name to filter for inferring valid object candidates for this transition rule """ raise NotImplementedError @classmethod def _generate_conditions(cls): """ Generates rule condition(s)s for this transition rule. These conditions are used to prune object candidates at runtime, to determine whether a transition rule should occur at the given timestep Returns: list of RuleCondition: Condition(s) to enforce to determine whether a transition rule should occur """ raise NotImplementedError @classmethod def get_object_candidates(cls, objects): """ Given the set of objects @objects, compute the valid object candidate combinations that may be valid for this TransitionRule Args: objects (list of BaseObject): Objects to filter for valid transition rule candidates Returns: dict: Maps filter name to valid object(s) that satisfy that filter """ # Iterate over all objects and add to dictionary if valid filters = cls.candidate_filters obj_dict = {filter_name: [] for filter_name in filters.keys()} for obj in objects: for fname, f in filters.items(): if f(obj): obj_dict[fname].append(obj) return obj_dict @classmethod def refresh(cls, object_candidates): """ Refresh any internal state for this rule, given set of input object candidates @object_candidates Args: object_candidates (dict): Maps filter name to valid object(s) that satisfy that filter """ # Store candidates cls.candidates = object_candidates # Refresh all conditions for condition in cls.conditions: condition.refresh(object_candidates=object_candidates) @classmethod def transition(cls, object_candidates): """ Rule to apply for each set of objects satisfying the condition. Args: object_candidates (dict): Dictionary mapping corresponding keys from @cls.filters to list of individual object instances where the filter is satisfied Returns: TransitionResults: results from the executed transition """ raise NotImplementedError() @classmethod def step(cls): """ Takes a step for this transition rule, checking if all of @cls.conditions are satisified, and if so, taking a transition via @cls.transition() Returns: None or TransitionResults: If a transition occurs, returns its results, otherwise, returns None """ # Copy the candidates dictionary since it may be mutated in place by @conditions object_candidates = {filter_name: candidates.copy() for filter_name, candidates in cls.candidates.items()} for condition in cls.conditions: if not condition(object_candidates=object_candidates): # Condition was not met, so immediately terminate return # All conditions are met, take the transition return cls.transition(object_candidates=object_candidates) @classproperty def _do_not_register_classes(cls): # Don't register this class since it's an abstract template classes = super()._do_not_register_classes classes.add("BaseTransitionRule") return classes @classproperty def _cls_registry(cls): # Global registry global REGISTERED_RULES return REGISTERED_RULES # Global dicts that will contain mappings. Must be placed here immediately AFTER BaseTransitionRule! RULES_REGISTRY = Registry( name="TransitionRuleRegistry", class_types=BaseTransitionRule, default_key="__name__", ) class WasherDryerRule(BaseTransitionRule): """ Transition rule to apply to cloth washers and dryers. """ @classmethod def _generate_conditions(cls): assert len(cls.candidate_filters.keys()) == 1 machine_type = list(cls.candidate_filters.keys())[0] return [ChangeConditionWrapper( condition=AndConditionWrapper(conditions=[ StateCondition(filter_name=machine_type, state=ToggledOn, val=True, op=operator.eq), StateCondition(filter_name=machine_type, state=Open, val=False, op=operator.eq), ]) )] @classmethod def _compute_global_rule_info(cls): """ Helper function to compute global information necessary for checking rules. This is executed exactly once per cls.transition() step Returns: dict: Keyword-mapped global rule information """ # Compute all obj obj_positions = np.array([obj.aabb_center for obj in og.sim.scene.objects]) return dict(obj_positions=obj_positions) @classmethod def _compute_container_info(cls, object_candidates, container, global_info): """ Helper function to compute container-specific information necessary for checking rules. This is executed once per container per cls.transition() step Args: object_candidates (dict): Dictionary mapping corresponding keys from @cls.filters to list of individual object instances where the filter is satisfied container (StatefulObject): Relevant container object for computing information global_info (dict): Output of @cls._compute_global_rule_info(); global information which may be relevant for computing container information Returns: dict: Keyword-mapped container information """ del object_candidates obj_positions = global_info["obj_positions"] in_volume = container.states[ContainedParticles].check_in_volume(obj_positions) in_volume_objs = list(np.array(og.sim.scene.objects)[in_volume]) # Remove the container itself if container in in_volume_objs: in_volume_objs.remove(container) return dict(in_volume_objs=in_volume_objs) @classproperty def _do_not_register_classes(cls): # Don't register this class since it's an abstract template classes = super()._do_not_register_classes classes.add("WasherDryerRule") return classes class WasherRule(WasherDryerRule): """ Transition rule to apply to cloth washers. 1. remove "dirty" particles from the washer if the necessary solvent is present. 2. wet the objects inside by making them either Saturated with or Covered by water. """ cleaning_conditions = None @classmethod def register_cleaning_conditions(cls, conditions): """ Register cleaning conditions for this rule. Args: conditions (dict): ictionary mapping the system name (str) to None or list of system names (str). None represents "never", empty list represents "always", or non-empty list represents at least one of the systems in the list needs to be present in the washer for the key system to be removed. E.g. "rust" -> None: "never remove rust from the washer" E.g. "dust" -> []: "always remove dust from the washer" E.g. "cooking_oil" -> ["sodium_carbonate", "vinegar"]: "remove cooking_oil from the washer if either sodium_carbonate or vinegar is present" For keys not present in the dictionary, the default is []: "always remove" """ cls.cleaning_conditions = conditions @classproperty def candidate_filters(cls): return { "washer": CategoryFilter("washer"), } @classmethod def transition(cls, object_candidates): water = get_system("water") global_info = cls._compute_global_rule_info() for washer in object_candidates["washer"]: # Remove the systems if the conditions are met systems_to_remove = [] for system in ParticleRemover.supported_active_systems.values(): # Never remove if system.name in cls.cleaning_conditions and cls.cleaning_conditions[system.name] is None: continue if not washer.states[Contains].get_value(system): continue solvents = cls.cleaning_conditions.get(system.name, []) # Always remove if len(solvents) == 0: systems_to_remove.append(system) else: solvents = [get_system(solvent) for solvent in solvents if is_system_active(solvent)] # If any of the solvents are present if any(washer.states[Contains].get_value(solvent) for solvent in solvents): systems_to_remove.append(system) for system in systems_to_remove: washer.states[Contains].set_value(system, False) # Make the objects wet container_info = cls._compute_container_info(object_candidates=object_candidates, container=washer, global_info=global_info) in_volume_objs = container_info["in_volume_objs"] for obj in in_volume_objs: if Saturated in obj.states: obj.states[Saturated].set_value(water, True) else: obj.states[Covered].set_value(water, True) return TransitionResults(add=[], remove=[]) class DryerRule(WasherDryerRule): """ Transition rule to apply to cloth dryers. 1. dry the objects inside by making them not Saturated with water. 2. remove all water from the dryer. """ @classproperty def candidate_filters(cls): return { "dryer": CategoryFilter("clothes_dryer"), } @classmethod def transition(cls, object_candidates): water = get_system("water") global_info = cls._compute_global_rule_info() for dryer in object_candidates["dryer"]: container_info = cls._compute_container_info(object_candidates=object_candidates, container=dryer, global_info=global_info) in_volume_objs = container_info["in_volume_objs"] for obj in in_volume_objs: if Saturated in obj.states: obj.states[Saturated].set_value(water, False) dryer.states[Contains].set_value(water, False) return TransitionResults(add=[], remove=[]) class SlicingRule(BaseTransitionRule): """ Transition rule to apply to sliced / slicer object pairs. """ @classproperty def candidate_filters(cls): return { "sliceable": AbilityFilter("sliceable"), "slicer": AbilityFilter("slicer"), } @classmethod def _generate_conditions(cls): # sliceables should be touching any slicer return [TouchingAnyCondition(filter_1_name="sliceable", filter_2_name="slicer"), StateCondition(filter_name="slicer", state=SlicerActive, val=True, op=operator.eq)] @classmethod def transition(cls, object_candidates): objs_to_add, objs_to_remove = [], [] for sliceable_obj in object_candidates["sliceable"]: # Object parts offset annotation are w.r.t the base link of the whole object. pos, orn = sliceable_obj.get_position_orientation() # Load object parts for i, part in enumerate(sliceable_obj.metadata["object_parts"].values()): # List of dicts gets replaced by {'0':dict, '1':dict, ...} # Get bounding box info part_bb_pos = np.array(part["bb_pos"]) part_bb_orn = np.array(part["bb_orn"]) # Determine the relative scale to apply to the object part from the original object # Note that proper (rotated) scaling can only be applied when the relative orientation of # the object part is a multiple of 90 degrees wrt the parent object, so we assert that here assert T.check_quat_right_angle(part_bb_orn), "Sliceable objects should only have relative object part orientations that are factors of 90 degrees!" # Scale the offset accordingly. scale = np.abs(T.quat2mat(part_bb_orn) @ sliceable_obj.scale) # Calculate global part bounding box pose. part_bb_pos = pos + T.quat2mat(orn) @ (part_bb_pos * scale) part_bb_orn = T.quat_multiply(orn, part_bb_orn) part_obj_name = f"half_{sliceable_obj.name}_{i}" part_obj = DatasetObject( name=part_obj_name, category=part["category"], model=part["model"], bounding_box=part["bb_size"] * scale, # equiv. to scale=(part["bb_size"] / self.native_bbox) * (scale) ) sliceable_obj_state = sliceable_obj.dump_state() # Propagate non-physical states of the whole object to the half objects, e.g. cooked, saturated, etc. # Add the new object to the results. new_obj_attrs = ObjectAttrs( obj=part_obj, bb_pos=part_bb_pos, bb_orn=part_bb_orn, callback=lambda obj: obj.load_non_kin_state(sliceable_obj_state), ) objs_to_add.append(new_obj_attrs) # Delete original object from stage. objs_to_remove.append(sliceable_obj) return TransitionResults(add=objs_to_add, remove=objs_to_remove) class DicingRule(BaseTransitionRule): """ Transition rule to apply to diceable / slicer object pairs. """ @classproperty def candidate_filters(cls): return { "diceable": AbilityFilter("diceable"), "slicer": AbilityFilter("slicer"), } @classmethod def _generate_conditions(cls): # sliceables should be touching any slicer return [TouchingAnyCondition(filter_1_name="diceable", filter_2_name="slicer"), StateCondition(filter_name="slicer", state=SlicerActive, val=True, op=operator.eq)] @classmethod def transition(cls, object_candidates): objs_to_remove = [] for diceable_obj in object_candidates["diceable"]: obj_category = diceable_obj.category # We expect all diced particle systems to follow the naming convention (cooked__)diced__<category> system_name = "diced__" + diceable_obj.category.removeprefix("half_") if Cooked in diceable_obj.states and diceable_obj.states[Cooked].get_value(): system_name = "cooked__" + system_name system = get_system(system_name) system.generate_particles_from_link(diceable_obj, diceable_obj.root_link, check_contact=False, use_visual_meshes=False) # Delete original object from stage. objs_to_remove.append(diceable_obj) return TransitionResults(add=[], remove=objs_to_remove) class MeltingRule(BaseTransitionRule): """ Transition rule to apply to meltable objects to simulate melting Once the object reaches the melting temperature, remove the object and spawn the melted substance in its place. """ @classproperty def candidate_filters(cls): # We want to find all meltable objects return {"meltable": AbilityFilter("meltable")} @classmethod def _generate_conditions(cls): return [StateCondition(filter_name="meltable", state=MaxTemperature, val=m.MELTING_TEMPERATURE, op=operator.ge)] @classmethod def transition(cls, object_candidates): objs_to_remove = [] # Convert the meltable object into its melted substance for meltable_obj in object_candidates["meltable"]: # All meltable xyz, half_xyz and diced__xyz transform into melted__xyz root_category = meltable_obj.category.removeprefix("half_").removeprefix("diced__") system_name = f"melted__{root_category}" system = get_system(system_name) system.generate_particles_from_link(meltable_obj, meltable_obj.root_link, check_contact=False, use_visual_meshes=False) # Delete original object from stage. objs_to_remove.append(meltable_obj) return TransitionResults(add=[], remove=objs_to_remove) class RecipeRule(BaseTransitionRule): """ Transition rule to approximate recipe-based transitions """ # Maps recipe name to recipe information _RECIPES = None # Maps active recipe name to recipe information _ACTIVE_RECIPES = None # Maps object category name to indices in the flattened object array for efficient computation _CATEGORY_IDXS = None # Flattened array of all simulator objects, sorted by category _OBJECTS = None # Maps object to idx within the _OBJECTS array _OBJECTS_TO_IDX = None def __init_subclass__(cls, **kwargs): # Run super first super().__init_subclass__(**kwargs) # Initialize recipes cls._RECIPES = dict() @classmethod def add_recipe( cls, name, input_objects, input_systems, output_objects, output_systems, input_states=None, output_states=None, fillable_categories=None, **kwargs, ): """ Adds a recipe to this recipe rule to check against. This defines a valid mapping of inputs that will transform into the outputs Args: name (str): Name of the recipe input_objects (dict): Maps object categories to number of instances required for the recipe input_systems (list): List of system names required for the recipe output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1. input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system"] to a list of states that should be set after the output objects are spawned fillable_categories (None or set of str): If specified, set of fillable categories which are allowed for this recipe. If None, any fillable is allowed kwargs (dict): Any additional keyword-arguments to be stored as part of this recipe """ input_states = input_states if input_states is not None else defaultdict(lambda: defaultdict(list)) output_states = output_states if output_states is not None else defaultdict(lambda: defaultdict(list)) input_object_tree = None if cls.is_multi_instance and len(input_objects) > 0: # Build a tree of input object categories according to the kinematic binary states # Example: 'raw_egg': {'binary_object': [(OnTop, 'bagel_dough', True)]} results in an edge # from 'bagel_dough' to 'raw_egg', i.e. 'bagel_dough' is the parent of 'raw_egg'. input_object_tree = nx.DiGraph() for obj_category, state_checks in input_states.items(): for state_class, second_obj_category, state_value in state_checks["binary_object"]: input_object_tree.add_edge(second_obj_category, obj_category) if nx.is_empty(input_object_tree): input_object_tree = None else: assert nx.is_tree(input_object_tree), f"Input object tree must be a tree! Now: {input_object_tree}." root_nodes = [node for node in input_object_tree.nodes() if input_object_tree.in_degree(node) == 0] assert len(root_nodes) == 1, f"Input object tree must have exactly one root node! Now: {root_nodes}." assert input_objects[root_nodes[0]] == 1, f"Input object tree root node must have exactly one instance! Now: {cls._RECIPES[name]['input_objects'][root_nodes[0]]}." # Store information for this recipe cls._RECIPES[name] = { "name": name, "input_objects": input_objects, "input_systems": input_systems, "output_objects": output_objects, "output_systems": output_systems, "input_states": input_states, "output_states": output_states, "fillable_categories": fillable_categories, "input_object_tree": input_object_tree, **kwargs, } @classmethod def _validate_recipe_container_is_valid(cls, recipe, container): """ Validates that @container's category satisfies @recipe's fillable_categories Args: recipe (dict): Recipe whose fillable_categories should be checked against @container container (StatefulObject): Container whose category should match one of @recipe's fillable_categories, if specified Returns: bool: True if @container is valid, else False """ fillable_categories = recipe["fillable_categories"] return fillable_categories is None or container.category in fillable_categories @classmethod def _validate_recipe_systems_are_contained(cls, recipe, container): """ Validates whether @recipe's input_systems are all contained in @container or not Args: recipe (dict): Recipe whose systems should be checked container (BaseObject): Container object that should contain all of @recipe's input systems Returns: bool: True if all the input systems are contained """ for system_name in recipe["input_systems"]: system = get_system(system_name=system_name) if not container.states[Contains].get_value(system=system): return False return True @classmethod def _validate_nonrecipe_systems_not_contained(cls, recipe, container): """ Validates whether all systems not relevant to @recipe are not contained in @container Args: recipe (dict): Recipe whose systems should be checked container (BaseObject): Container object that should contain all of @recipe's input systems Returns: bool: True if none of the non-relevant systems are contained """ for system in og.sim.scene.system_registry.objects: # Skip cloth system if system.name == "cloth": continue if system.name not in recipe["input_systems"] and container.states[Contains].get_value(system=system): return False return True @classmethod def _validate_recipe_objects_are_contained_and_states_satisfied(cls, recipe, container_info): """ Validates whether @recipe's input_objects are contained in the container and whether their states are satisfied Args: recipe (dict): Recipe whose objects should be checked container_info (dict): Output of @cls._compute_container_info(); container-specific information which may be relevant for computing whether recipe is executable. This will be populated with execution info. Returns: bool: True if all the input object quantities are contained """ in_volume = container_info["in_volume"] # Store necessary information for execution container_info["execution_info"] = dict() category_to_valid_indices = cls._filter_input_objects_by_unary_and_binary_system_states(recipe=recipe) container_info["execution_info"]["category_to_valid_indices"] = category_to_valid_indices if not cls.is_multi_instance: return cls._validate_recipe_objects_non_multi_instance( recipe=recipe, category_to_valid_indices=category_to_valid_indices, in_volume=in_volume, ) else: return cls._validate_recipe_objects_multi_instance( recipe=recipe, category_to_valid_indices=category_to_valid_indices, container_info=container_info, ) @classmethod def _filter_input_objects_by_unary_and_binary_system_states(cls, recipe): # Filter input objects based on a subset of input states (unary states and binary system states) # Map object categories (str) to valid indices (np.ndarray) category_to_valid_indices = dict() for obj_category in recipe["input_objects"]: if obj_category not in recipe["input_states"]: # If there are no input states, all objects of this category are valid category_to_valid_indices[obj_category] = cls._CATEGORY_IDXS[obj_category] else: category_to_valid_indices[obj_category] = [] for idx in cls._CATEGORY_IDXS[obj_category]: obj = cls._OBJECTS[idx] success = True # Check if unary states are satisfied for state_class, state_value in recipe["input_states"][obj_category]["unary"]: if obj.states[state_class].get_value() != state_value: success = False break if not success: continue # Check if binary system states are satisfied for state_class, system_name, state_value in recipe["input_states"][obj_category]["binary_system"]: if obj.states[state_class].get_value(system=get_system(system_name)) != state_value: success = False break if not success: continue category_to_valid_indices[obj_category].append(idx) # Convert to numpy array for faster indexing category_to_valid_indices[obj_category] = np.array(category_to_valid_indices[obj_category], dtype=int) return category_to_valid_indices @classmethod def _validate_recipe_objects_non_multi_instance(cls, recipe, category_to_valid_indices, in_volume): # Check if sufficiently number of objects are contained for obj_category, obj_quantity in recipe["input_objects"].items(): if np.sum(in_volume[category_to_valid_indices[obj_category]]) < obj_quantity: return False return True @classmethod def _validate_recipe_objects_multi_instance(cls, recipe, category_to_valid_indices, container_info): in_volume = container_info["in_volume"] input_object_tree = recipe["input_object_tree"] # Map object category to a set of objects that are used in this execution relevant_objects = defaultdict(set) # Map system name to a set of particle indices that are used in this execution relevant_systems = defaultdict(set) # Number of instances of this recipe that can be produced num_instances = 0 # Define a recursive function to check the kinematic tree def check_kinematic_tree(obj, should_check_in_volume=False): """ Recursively check if the kinematic tree is satisfied. Return True/False, and a set of objects that belong to the subtree rooted at the current node Args: obj (BaseObject): Subtree root node to check should_check_in_volume (bool): Whether to check if the object is in the volume or not Returns: bool: True if the subtree rooted at the current node is satisfied set: Set of objects that belong to the subtree rooted at the current node """ # Check if obj is in volume if should_check_in_volume and not in_volume[cls._OBJECTS_TO_IDX[obj]]: return False, set() # If the object is a leaf node, return True and the set containing the object if input_object_tree.out_degree(obj.category) == 0: return True, set([obj]) children_categories = list(input_object_tree.successors(obj.category)) all_subtree_objs = set() for child_cat in children_categories: assert len(input_states[child_cat]["binary_object"]) == 1, \ "Each child node should have exactly one binary object state, i.e. one parent in the input_object_tree" state_class, _, state_value = input_states[child_cat]["binary_object"][0] num_valid_children = 0 children_objs = cls._OBJECTS[category_to_valid_indices[child_cat]] for child_obj in children_objs: # If the child doesn't satisfy the binary object state, skip if child_obj.states[state_class].get_value(obj) != state_value: continue # Recursively check if the subtree rooted at the child is valid subtree_valid, subtree_objs = check_kinematic_tree(child_obj) # If the subtree is valid, increment the number of valid children and aggregate the objects if subtree_valid: num_valid_children += 1 all_subtree_objs |= subtree_objs # If there are not enough valid children, return False if num_valid_children < recipe["input_objects"][child_cat]: return False, set() # If all children categories have sufficient number of objects that satisfy the binary object state, # e.g. five pieces of pepperoni and two pieces of basil on the pizza, the subtree rooted at the # current node is valid. Return True and the set of objects in the subtree (all descendants plus # the current node) return True, all_subtree_objs | {obj} # If multi-instance is True but doesn't require kinematic states between objects if input_object_tree is None: num_instances = np.inf # Compute how many instances of this recipe can be produced. # Example: if a recipe requires 1 apple and 2 bananas, and there are 3 apples and 4 bananas in the # container, then 2 instance of the recipe can be produced. for obj_category, obj_quantity in recipe["input_objects"].items(): quantity_in_volume = np.sum(in_volume[category_to_valid_indices[obj_category]]) num_inst = quantity_in_volume // obj_quantity if num_inst < 1: return False num_instances = min(num_instances, num_inst) # If at least one instance of the recipe can be executed, add all valid objects to be relevant_objects. # This can be considered as a special case of below where there are no binary kinematic states required. for obj_category in recipe["input_objects"]: relevant_objects[obj_category] = set(cls._OBJECTS[category_to_valid_indices[obj_category]]) # If multi-instance is True and requires kinematic states between objects else: root_node_category = [node for node in input_object_tree.nodes() if input_object_tree.in_degree(node) == 0][0] # A list of objects belonging to the root node category root_nodes = cls._OBJECTS[category_to_valid_indices[root_node_category]] input_states = recipe["input_states"] for root_node in root_nodes: # should_check_in_volume is True only for the root nodes. # Example: the bagel dough needs to be in_volume of the container, but the raw egg on top doesn't. tree_valid, relevant_object_set = check_kinematic_tree(obj=root_node, should_check_in_volume=True) if tree_valid: # For each valid tree, increment the number of instances and aggregate the objects num_instances += 1 for obj in relevant_object_set: relevant_objects[obj.category].add(obj) # If there are no valid trees, return False if num_instances == 0: return False # Note that for multi instance recipes, the relevant system particles are NOT the ones in the container. # Instead, they are the ones that are related to the relevant objects, e.g. salt covering the bagel dough. for obj_category, objs in relevant_objects.items(): for state_class, system_name, state_value in recipe["input_states"][obj_category]["binary_system"]: # If the state value is False, skip if not state_value: continue for obj in objs: if state_class in [Filled, Contains]: contained_particle_idx = obj.states[ContainedParticles].get_value(get_system(system_name)).in_volume.nonzero()[0] relevant_systems[system_name] |= contained_particle_idx elif state_class in [Covered]: covered_particle_idx = obj.states[ContactParticles].get_value(get_system(system_name)) relevant_systems[system_name] |= covered_particle_idx # Now we populate the execution info with the relevant objects and systems as well as the number of # instances of the recipe that can be produced. container_info["execution_info"]["relevant_objects"] = relevant_objects container_info["execution_info"]["relevant_systems"] = relevant_systems container_info["execution_info"]["num_instances"] = num_instances return True @classmethod def _validate_nonrecipe_objects_not_contained(cls, recipe, container_info): """ Validates whether all objects not relevant to @recipe are not contained in the container represented by @in_volume Args: recipe (dict): Recipe whose systems should be checked container_info (dict): Output of @cls._compute_container_info(); container-specific information which may be relevant for computing whether recipe is executable Returns: bool: True if none of the non-relevant objects are contained """ in_volume = container_info["in_volume"] # These are object indices whose objects satisfy the input states category_to_valid_indices = container_info["execution_info"]["category_to_valid_indices"] nonrecipe_objects_in_volume = in_volume if len(recipe["input_objects"]) == 0 else \ np.delete(in_volume, np.concatenate([category_to_valid_indices[obj_category] for obj_category in category_to_valid_indices])) return not np.any(nonrecipe_objects_in_volume) @classmethod def _validate_recipe_systems_exist(cls, recipe): """ Validates whether @recipe's input_systems are all active or not Args: recipe (dict): Recipe whose systems should be checked Returns: bool: True if all the input systems are active """ for system_name in recipe["input_systems"]: if not is_system_active(system_name=system_name): return False return True @classmethod def _validate_recipe_objects_exist(cls, recipe): """ Validates whether @recipe's input_objects exist in the current scene or not Args: recipe (dict): Recipe whose objects should be checked Returns: bool: True if all the input objects exist in the scene """ for obj_category, obj_quantity in recipe["input_objects"].items(): if len(og.sim.scene.object_registry("category", obj_category, default_val=set())) < obj_quantity: return False return True @classmethod def _validate_recipe_fillables_exist(cls, recipe): """ Validates that recipe @recipe's necessary fillable categorie(s) exist in the current scene Args: recipe (dict): Recipe whose fillable categories should be checked Returns: bool: True if there is at least a single valid fillable category in the current scene, else False """ fillable_categories = recipe["fillable_categories"] if fillable_categories is None: # Any is valid return True # Otherwise, at least one valid type must exist for category in fillable_categories: if len(og.sim.scene.object_registry("category", category, default_val=set())) > 0: return True # None found, return False return False @classmethod def _is_recipe_active(cls, recipe): """ Helper function to determine whether a given recipe @recipe should be actively checked for or not. Args: recipe (dict): Maps relevant keyword to corresponding recipe info Returns: bool: True if the recipe is active, else False """ # Check valid active systems if not cls._validate_recipe_systems_exist(recipe=recipe): return False # Check valid object quantities if not cls._validate_recipe_objects_exist(recipe=recipe): return False # Check valid fillable categories if not cls._validate_recipe_fillables_exist(recipe=recipe): return False return True @classmethod def _is_recipe_executable(cls, recipe, container, global_info, container_info): """ Helper function to determine whether a given recipe @recipe should be immediately executed or not. Args: recipe (dict): Maps relevant keyword to corresponding recipe info container (StatefulObject): Container in which @recipe may be executed global_info (dict): Output of @cls._compute_global_rule_info(); global information which may be relevant for computing whether recipe is executable container_info (dict): Output of @cls._compute_container_info(); container-specific information which may be relevant for computing whether recipe is executable Returns: bool: True if the recipe is active, else False """ in_volume = container_info["in_volume"] # Verify the container category is valid if not cls._validate_recipe_container_is_valid(recipe=recipe, container=container): return False # Verify all required systems are contained in the container if not cls.relax_recipe_systems and not cls._validate_recipe_systems_are_contained(recipe=recipe, container=container): return False # Verify all required object quantities are contained in the container and their states are satisfied if not cls._validate_recipe_objects_are_contained_and_states_satisfied(recipe=recipe, container_info=container_info): return False # Verify no non-relevant system is contained if not cls.ignore_nonrecipe_systems and not cls._validate_nonrecipe_systems_not_contained(recipe=recipe, container=container): return False # Verify no non-relevant object is contained if we're not ignoring them if not cls.ignore_nonrecipe_objects and not cls._validate_nonrecipe_objects_not_contained(recipe=recipe, container_info=container_info): return False return True @classmethod def _compute_global_rule_info(cls): """ Helper function to compute global information necessary for checking rules. This is executed exactly once per cls.transition() step Returns: dict: Keyword-mapped global rule information """ # Compute all relevant object AABB positions obj_positions = np.array([obj.aabb_center for obj in cls._OBJECTS]) return dict(obj_positions=obj_positions) @classmethod def _compute_container_info(cls, object_candidates, container, global_info): """ Helper function to compute container-specific information necessary for checking rules. This is executed once per container per cls.transition() step Args: object_candidates (dict): Dictionary mapping corresponding keys from @cls.filters to list of individual object instances where the filter is satisfied container (StatefulObject): Relevant container object for computing information global_info (dict): Output of @cls._compute_global_rule_info(); global information which may be relevant for computing container information Returns: dict: Keyword-mapped container information """ del object_candidates obj_positions = global_info["obj_positions"] # Compute in volume for all relevant object positions # We check for either the object AABB being contained OR the object being on top of the container, in the # case that the container is too flat for the volume to contain the object in_volume = container.states[ContainedParticles].check_in_volume(obj_positions) | \ np.array([obj.states[OnTop].get_value(container) for obj in cls._OBJECTS]) # Container itself is never within its own volume in_volume[cls._OBJECTS_TO_IDX[container]] = False return dict(in_volume=in_volume) @classmethod def refresh(cls, object_candidates): # Run super first super().refresh(object_candidates=object_candidates) # Cache active recipes given the current set of objects cls._ACTIVE_RECIPES = dict() cls._CATEGORY_IDXS = dict() cls._OBJECTS = [] cls._OBJECTS_TO_IDX = dict() # Prune any recipes whose objects / system requirements are not met by the current set of objects / systems objects_by_category = og.sim.scene.object_registry.get_dict("category") for name, recipe in cls._RECIPES.items(): # If all pre-requisites met, add to active recipes if cls._is_recipe_active(recipe=recipe): cls._ACTIVE_RECIPES[name] = recipe # Finally, compute relevant objects and category mapping based on relevant categories i = 0 for category, objects in objects_by_category.items(): cls._CATEGORY_IDXS[category] = i + np.arange(len(objects)) cls._OBJECTS += list(objects) for obj in objects: cls._OBJECTS_TO_IDX[obj] = i i += 1 # Wrap relevant objects as numpy array so we can index into it efficiently cls._OBJECTS = np.array(cls._OBJECTS) @classproperty def candidate_filters(cls): # Fillable object required return {"container": AbilityFilter(ability="fillable")} @classmethod def transition(cls, object_candidates): objs_to_add, objs_to_remove = [], [] # Compute global info global_info = cls._compute_global_rule_info() # Iterate over all fillable objects, to execute recipes for each one for container in object_candidates["container"]: recipe_results = None # Compute container info container_info = cls._compute_container_info( object_candidates=object_candidates, container=container, global_info=global_info, ) # Check every recipe to find if any is valid for name, recipe in cls._ACTIVE_RECIPES.items(): if cls._is_recipe_executable(recipe=recipe, container=container, global_info=global_info, container_info=container_info): # Otherwise, all conditions met, we found a valid recipe and so we execute and terminate early og.log.info(f"Executing recipe: {name} in container {container.name}!") # Take the transform and terminate early recipe_results = cls._execute_recipe( container=container, recipe=recipe, container_info=container_info, ) objs_to_add += recipe_results.add objs_to_remove += recipe_results.remove break # Otherwise, if we didn't find a valid recipe, we execute a garbage transition instead if requested if recipe_results is None and cls.use_garbage_fallback_recipe: og.log.info(f"Did not find a valid recipe for rule {cls.__name__}; generating {m.DEFAULT_GARBAGE_SYSTEM} in {container.name}!") # Generate garbage fluid garbage_results = cls._execute_recipe( container=container, recipe=dict( name="garbage", input_objects=dict(), input_systems=[], output_objects=dict(), output_systems=[m.DEFAULT_GARBAGE_SYSTEM], output_states=defaultdict(lambda: defaultdict(list)), ), container_info=container_info, ) objs_to_add += garbage_results.add objs_to_remove += garbage_results.remove return TransitionResults(add=objs_to_add, remove=objs_to_remove) @classmethod def _execute_recipe(cls, container, recipe, container_info): """ Transforms all items contained in @container into @output_system, generating volume of @output_system proportional to the number of items transformed. Args: container (BaseObject): Container object which will have its contained elements transformed into @output_system recipe (dict): Recipe to execute. Should include, at the minimum, "input_objects", "input_systems", "output_objects", and "output_systems" keys container_info (dict): Output of @cls._compute_container_info(); container-specific information which may be relevant for computing whether recipe is executable. Returns: TransitionResults: Results of the executed recipe transition """ objs_to_add, objs_to_remove = [], [] in_volume = container_info["in_volume"] if cls.is_multi_instance: execution_info = container_info["execution_info"] # Compute total volume of all contained items volume = 0 if not cls.is_multi_instance: # Remove either all systems or only the ones specified in the input systems of the recipe contained_particles_state = container.states[ContainedParticles] for system in PhysicalParticleSystem.get_active_systems().values(): if not cls.ignore_nonrecipe_systems or system.name in recipe["input_systems"]: if container.states[Contains].get_value(system): volume += contained_particles_state.get_value(system).n_in_volume * np.pi * (system.particle_radius ** 3) * 4 / 3 container.states[Contains].set_value(system, False) for system in VisualParticleSystem.get_active_systems().values(): if not cls.ignore_nonrecipe_systems or system.name in recipe["input_systems"]: if container.states[Contains].get_value(system): container.states[Contains].set_value(system, False) else: # Remove the particles that are involved in this execution for system_name, particle_idxs in execution_info["relevant_systems"].items(): system = get_system(system_name) volume += len(particle_idxs) * np.pi * (system.particle_radius ** 3) * 4 / 3 system.remove_particles(idxs=np.array(list(particle_idxs))) if not cls.is_multi_instance: # Remove either all objects or only the ones specified in the input objects of the recipe object_mask = in_volume.copy() if cls.ignore_nonrecipe_objects: object_category_mask = np.zeros_like(object_mask, dtype=bool) for obj_category in recipe["input_objects"].keys(): object_category_mask[cls._CATEGORY_IDXS[obj_category]] = True object_mask &= object_category_mask objs_to_remove.extend(cls._OBJECTS[object_mask]) else: # Remove the objects that are involved in this execution for obj_category, objs in execution_info["relevant_objects"].items(): objs_to_remove.extend(objs) volume += sum(obj.volume for obj in objs_to_remove) # Define callback for spawning new objects inside container def _spawn_object_in_container(obj): # For simplicity sake, sample only OnTop # TODO: Can we sample inside intelligently? state = OnTop # TODO: What to do if setter fails? if not obj.states[state].set_value(container, True): log.warning(f"Failed to spawn object {obj.name} in container {container.name}! Directly placing on top instead.") pos = np.array(container.aabb_center) + np.array([0, 0, container.aabb_extent[2] / 2.0 + obj.aabb_extent[2] / 2.0]) obj.set_bbox_center_position_orientation(position=pos) # Spawn in new objects for category, n_instances in recipe["output_objects"].items(): # Multiply by number of instances of execution if this is a multi-instance recipe if cls.is_multi_instance: n_instances *= execution_info["num_instances"] output_states = dict() for state_type, state_value in recipe["output_states"][category]["unary"]: output_states[state_type] = (state_value,) for state_type, system_name, state_value in recipe["output_states"][category]["binary_system"]: output_states[state_type] = (get_system(system_name), state_value) n_category_objs = len(og.sim.scene.object_registry("category", category, [])) models = get_all_object_category_models(category=category) for i in range(n_instances): obj = DatasetObject( name=f"{category}_{n_category_objs + i}", category=category, model=np.random.choice(models), ) new_obj_attrs = ObjectAttrs( obj=obj, callback=_spawn_object_in_container, states=output_states, pos=np.ones(3) * (100.0 + i), ) objs_to_add.append(new_obj_attrs) # Spawn in new fluid if len(recipe["output_systems"]) > 0: # Only one system is allowed to be spawned assert len(recipe["output_systems"]) == 1, "Only a single output system can be spawned for a given recipe!" out_system = get_system(recipe["output_systems"][0]) out_system.generate_particles_from_link( obj=container, link=contained_particles_state.link, # When ignore_nonrecipe_objects is True, we don't necessarily remove all objects in the container. # Therefore, we need to check for contact when generating output systems. check_contact=cls.ignore_nonrecipe_objects, max_samples=int(volume / (np.pi * (out_system.particle_radius ** 3) * 4 / 3)), ) # Return transition results return TransitionResults(add=objs_to_add, remove=objs_to_remove) @classproperty def relax_recipe_systems(cls): """ Returns: bool: Whether to relax the requirement of having all systems in the recipe contained in the container """ raise NotImplementedError("Must be implemented by subclass!") @classproperty def ignore_nonrecipe_systems(cls): """ Returns: bool: Whether contained systems not relevant to the recipe should be ignored or not """ raise NotImplementedError("Must be implemented by subclass!") @classproperty def ignore_nonrecipe_objects(cls): """ Returns: bool: Whether contained rigid objects not relevant to the recipe should be ignored or not """ raise NotImplementedError("Must be implemented by subclass!") @classproperty def use_garbage_fallback_recipe(cls): """ Returns: bool: Whether this recipe rule should use a garbage fallback recipe if all conditions are met but no valid recipe is found for a given container """ raise NotImplementedError("Must be implemented by subclass!") @classproperty def is_multi_instance(cls): """ Returns: bool: Whether this rule can be applied multiple times to the same container, e.g. to cook multiple doughs """ return False @classproperty def _do_not_register_classes(cls): # Don't register this class since it's an abstract template classes = super()._do_not_register_classes classes.add("RecipeRule") return classes class CookingPhysicalParticleRule(RecipeRule): """ Transition rule to apply to "cook" physical particles. It comes with two forms of recipes: 1. xyz -> cooked__xyz, e.g. diced__chicken -> cooked__diced__chicken 2. xyz + cooked__water -> cooked__xyz, e.g. rice + cooked__water -> cooked__rice During execution, we replace the input particles (xyz) with the output particles (cooked__xyz), and remove the cooked__water if it was used as an input. """ @classmethod def add_recipe( cls, name, input_objects, input_systems, output_objects, output_systems, **kwargs, ): """ Adds a recipe to this recipe rule to check against. This defines a valid mapping of inputs that will transform into the outputs Args: name (str): Name of the recipe input_objects (dict): Maps object categories to number of instances required for the recipe input_systems (list): List of system names required for the recipe output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1. """ assert len(input_objects) == 0, f"No input objects can be specified for {cls.__name__}, recipe: {name}!" assert len(output_objects) == 0, f"No output objects can be specified for {cls.__name__}, recipe: {name}!" assert len(input_systems) == 1 or len(input_systems) == 2, \ f"Only one or two input systems can be specified for {cls.__name__}, recipe: {name}!" if len(input_systems) == 2: assert input_systems[1] == "cooked__water", \ f"Second input system must be cooked__water for {cls.__name__}, recipe: {name}!" assert len(output_systems) == 1, \ f"Exactly one output system needs to be specified for {cls.__name__}, recipe: {name}!" super().add_recipe( name=name, input_objects=input_objects, input_systems=input_systems, output_objects=output_objects, output_systems=output_systems, **kwargs, ) @classproperty def candidate_filters(cls): # Modify the container filter to include the heatable ability as well candidate_filters = super().candidate_filters candidate_filters["container"] = AndFilter(filters=[candidate_filters["container"], AbilityFilter(ability="heatable")]) return candidate_filters @classmethod def _generate_conditions(cls): # Only heated objects are valid return [StateCondition(filter_name="container", state=Heated, val=True, op=operator.eq)] @classproperty def relax_recipe_systems(cls): return False @classproperty def ignore_nonrecipe_systems(cls): return True @classproperty def ignore_nonrecipe_objects(cls): return True @classproperty def use_garbage_fallback_recipe(cls): return False @classmethod def _execute_recipe(cls, container, recipe, container_info): system = get_system(recipe["input_systems"][0]) contained_particles_state = container.states[ContainedParticles].get_value(system) in_volume_idx = np.where(contained_particles_state.in_volume)[0] assert len(in_volume_idx) > 0, "No particles found in the container when executing recipe!" # Remove uncooked particles system.remove_particles(idxs=in_volume_idx) # Generate cooked particles cooked_system = get_system(recipe["output_systems"][0]) particle_positions = contained_particles_state.positions[in_volume_idx] cooked_system.generate_particles(positions=particle_positions) # Remove water if the cooking requires water if len(recipe["input_systems"]) > 1: cooked_water_system = get_system(recipe["input_systems"][1]) container.states[Contains].set_value(cooked_water_system, False) return TransitionResults(add=[], remove=[]) class ToggleableMachineRule(RecipeRule): """ Transition mixing rule that leverages a single toggleable machine (e.g. electric mixer, coffee machine, blender), which require toggledOn in order to trigger the recipe event. It comes with two forms of recipes: 1. output is a single object, e.g. flour + butter + sugar -> dough, machine is electric mixer 2. output is a system, e.g. strawberry + milk -> smoothie, machine is blender """ @classmethod def add_recipe( cls, name, input_objects, input_systems, output_objects, output_systems, input_states=None, output_states=None, fillable_categories=None, **kwargs, ): """ Adds a recipe to this recipe rule to check against. This defines a valid mapping of inputs that will transform into the outputs Args: name (str): Name of the recipe input_objects (dict): Maps object categories to number of instances required for the recipe input_systems (list): List of system names required for the recipe output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1. input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system"] to a list of states that should be set after the output objects are spawned fillable_categories (None or set of str): If specified, set of fillable categories which are allowed for this recipe. If None, any fillable is allowed """ if len(output_objects) > 0: assert len(output_objects) == 1, f"Only one category of output object can be specified for {cls.__name__}, recipe: {name}!" assert output_objects[list(output_objects.keys())[0]] == 1, f"Only one instance of output object can be specified for {cls.__name__}, recipe: {name}!" super().add_recipe( name=name, input_objects=input_objects, input_systems=input_systems, output_objects=output_objects, output_systems=output_systems, input_states=input_states, output_states=output_states, fillable_categories=fillable_categories, **kwargs, ) @classproperty def candidate_filters(cls): # Modify the container filter to include toggleable ability as well candidate_filters = super().candidate_filters candidate_filters["container"] = AndFilter(filters=[ candidate_filters["container"], AbilityFilter(ability="toggleable"), # Exclude washer and clothes dryer because they are handled by WasherRule and DryerRule NotFilter(CategoryFilter("washer")), NotFilter(CategoryFilter("clothes_dryer")), NotFilter(CategoryFilter("hot_tub")), ]) return candidate_filters @classmethod def _generate_conditions(cls): # Container must be toggledOn, and should only be triggered once return [ChangeConditionWrapper( condition=StateCondition(filter_name="container", state=ToggledOn, val=True, op=operator.eq) )] @classproperty def relax_recipe_systems(cls): return False @classproperty def ignore_nonrecipe_systems(cls): return False @classproperty def ignore_nonrecipe_objects(cls): return False @classproperty def use_garbage_fallback_recipe(cls): return True class MixingToolRule(RecipeRule): """ Transition mixing rule that leverages "mixingTool" ability objects, which require touching between a mixing tool and a container in order to trigger the recipe event. Example: water + lemon_juice + sugar -> lemonade, mixing tool is spoon """ @classmethod def add_recipe( cls, name, input_objects, input_systems, output_objects, output_systems, input_states=None, output_states=None, **kwargs, ): """ Adds a recipe to this recipe rule to check against. This defines a valid mapping of inputs that will transform into the outputs Args: name (str): Name of the recipe input_objects (dict): Maps object categories to number of instances required for the recipe input_systems (list): List of system names required for the recipe output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1. input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system"] to a list of states that should be set after the output objects are spawned """ assert len(output_objects) == 0, f"No output objects can be specified for {cls.__name__}, recipe: {name}!" assert len(input_systems) > 0, f"Some input systems need to be specified for {cls.__name__}, recipe: {name}!" assert len(output_systems) == 1, \ f"Exactly one output system needs to be specified for {cls.__name__}, recipe: {name}!" super().add_recipe( name=name, input_objects=input_objects, input_systems=input_systems, output_objects=output_objects, output_systems=output_systems, input_states=input_states, output_states=output_states, **kwargs, ) @classproperty def candidate_filters(cls): # Add mixing tool filter as well candidate_filters = super().candidate_filters candidate_filters["mixingTool"] = AbilityFilter(ability="mixingTool") return candidate_filters @classmethod def _generate_conditions(cls): # Mixing tool must be touching the container, and should only be triggered once return [ChangeConditionWrapper( condition=TouchingAnyCondition(filter_1_name="container", filter_2_name="mixingTool") )] @classproperty def relax_recipe_systems(cls): return False @classproperty def ignore_nonrecipe_systems(cls): return False @classproperty def ignore_nonrecipe_objects(cls): return True @classproperty def use_garbage_fallback_recipe(cls): return True class CookingRule(RecipeRule): """ Transition mixing rule that approximates cooking recipes via a container and heatsource. It is subclassed by CookingObjectRule and CookingSystemRule. """ # Counter that increments monotonically COUNTER = 0 # Maps recipe name to current number of consecutive heating steps _HEAT_STEPS = None # Maps recipe name to the last timestep that it was active _LAST_HEAT_TIMESTEP = None @classmethod def refresh(cls, object_candidates): # Run super first super().refresh(object_candidates=object_candidates) # Iterate through all (updated) active recipes and store in internal variables if not already recorded cls._HEAT_STEPS = dict() if cls._HEAT_STEPS is None else cls._HEAT_STEPS cls._LAST_HEAT_TIMESTEP = dict() if cls._LAST_HEAT_TIMESTEP is None else cls._LAST_HEAT_TIMESTEP for name in cls._ACTIVE_RECIPES.keys(): if name not in cls._HEAT_STEPS: cls._HEAT_STEPS[name] = 0 cls._LAST_HEAT_TIMESTEP[name] = -1 @classmethod def _validate_recipe_fillables_exist(cls, recipe): """ Validates that recipe @recipe's necessary fillable categorie(s) exist in the current scene Args: recipe (dict): Recipe whose fillable categories should be checked Returns: bool: True if there is at least a single valid fillable category in the current scene, else False """ fillable_categories = recipe["fillable_categories"] if fillable_categories is None: # Any is valid return True # Otherwise, at least one valid type must exist for category in fillable_categories: if len(og.sim.scene.object_registry("category", category, default_val=set())) > 0: return True # None found, return False return False @classmethod def _validate_recipe_heatsources_exist(cls, recipe): """ Validates that recipe @recipe's necessary heatsource categorie(s) exist in the current scene Args: recipe (dict): Recipe whose heatsource categories should be checked Returns: bool: True if there is at least a single valid heatsource category in the current scene, else False """ heatsource_categories = recipe["heatsource_categories"] if heatsource_categories is None: # Any is valid return True # Otherwise, at least one valid type must exist for category in heatsource_categories: if len(og.sim.scene.object_registry("category", category, default_val=set())) > 0: return True # None found, return False return False @classmethod def _validate_recipe_heatsource_is_valid(cls, recipe, heatsource_categories): """ Validates that there is a valid heatsource category in @heatsource_categories compatible with @recipe Args: recipe (dict): Recipe whose heatsource_categories should be checked against @heatsource_categories heatsource_categories (set of str): Set of potential heatsource categories Returns: bool: True if there is a compatible category in @heatsource_categories, else False """ required_heatsource_categories = recipe["heatsource_categories"] # Either no specific required and there is at least 1 heatsource or there is at least 1 matching heatsource # between the required and available return (required_heatsource_categories is None and len(heatsource_categories) > 0) or \ len(required_heatsource_categories.intersection(heatsource_categories)) > 0 @classmethod def _compute_container_info(cls, object_candidates, container, global_info): # Run super first info = super()._compute_container_info(object_candidates=object_candidates, container=container, global_info=global_info) # Compute whether each heatsource is affecting the container info["heatsource_categories"] = set(obj.category for obj in object_candidates["heatSource"] if obj.states[HeatSourceOrSink].affects_obj(container)) return info @classmethod def _is_recipe_active(cls, recipe): # Check for heatsource categories first if not cls._validate_recipe_heatsources_exist(recipe=recipe): return False # Otherwise, run super normally return super()._is_recipe_active(recipe=recipe) @classmethod def _is_recipe_executable(cls, recipe, container, global_info, container_info): # Check for heatsource compatibility first if not cls._validate_recipe_heatsource_is_valid(recipe=recipe, heatsource_categories=container_info["heatsource_categories"]): return False # Run super executable = super()._is_recipe_executable( recipe=recipe, container=container, global_info=global_info, container_info=container_info, ) # If executable, increment heat counter by 1, if we were also active last timestep, else, reset to 1 if executable: name = recipe["name"] cls._HEAT_STEPS[name] = cls._HEAT_STEPS[name] + 1 if \ cls._LAST_HEAT_TIMESTEP[name] == cls.COUNTER - 1 else 1 cls._LAST_HEAT_TIMESTEP[name] = cls.COUNTER # If valid number of timesteps met, recipe is indeed executable executable = cls._HEAT_STEPS[name] >= recipe["timesteps"] return executable @classmethod def add_recipe( cls, name, input_objects, input_systems, output_objects, output_systems, input_states=None, output_states=None, fillable_categories=None, heatsource_categories=None, timesteps=None, ): """ Adds a recipe to this cooking recipe rule to check against. This defines a valid mapping of inputs that will transform into the outputs Args: name (str): Name of the recipe input_objects (dict): Maps object categories to number of instances required for the recipe input_systems (list): List of system names required for the recipe output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1. input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system"] to a list of states that should be set after the output objects are spawned fillable_categories (None or set of str): If specified, set of fillable categories which are allowed for this recipe. If None, any fillable is allowed heatsource_categories (None or set of str): If specified, set of heatsource categories which are allowed for this recipe. If None, any heatsource is allowed timesteps (None or int): Number of subsequent heating steps required for the recipe to execute. If None, it will be set to be 1, i.e.: instantaneous execution """ super().add_recipe( name=name, input_objects=input_objects, input_systems=input_systems, output_objects=output_objects, output_systems=output_systems, input_states=input_states, output_states=output_states, fillable_categories=fillable_categories, heatsource_categories=heatsource_categories, timesteps=1 if timesteps is None else timesteps, ) @classproperty def candidate_filters(cls): # Add mixing tool filter as well candidate_filters = super().candidate_filters candidate_filters["heatSource"] = AbilityFilter(ability="heatSource") return candidate_filters @classmethod def _generate_conditions(cls): # Define a class to increment this class's internal time counter every time it is triggered class TimeIncrementCondition(RuleCondition): def __init__(self, cls): self.cls = cls def __call__(self, object_candidates): # This is just a pass-through, but also increment the time self.cls.COUNTER += 1 return True def modifies_filter_names(self): return set() # Any heatsource must be active return [ TimeIncrementCondition(cls=cls), StateCondition(filter_name="heatSource", state=HeatSourceOrSink, val=True, op=operator.eq), ] @classproperty def use_garbage_fallback_recipe(cls): return False @classproperty def _do_not_register_classes(cls): # Don't register this class since it's an abstract template classes = super()._do_not_register_classes classes.add("CookingRule") return classes class CookingObjectRule(CookingRule): """ Cooking rule when output is objects (e.g. one dough can produce many bagels as output). Example: bagel_dough + egg + sesame_seed -> bagel, heat source is oven, fillable is baking_sheet. This is the only rule where is_multi_instance is True, where multiple copies of the recipe can be executed. """ @classmethod def add_recipe( cls, name, input_objects, input_systems, output_objects, output_systems, input_states=None, output_states=None, fillable_categories=None, heatsource_categories=None, timesteps=None, ): """ Adds a recipe to this cooking recipe rule to check against. This defines a valid mapping of inputs that will transform into the outputs Args: name (str): Name of the recipe input_objects (dict): Maps object categories to number of instances required for the recipe input_systems (list): List of system names required for the recipe output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1. input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system"] to a list of states that should be set after the output objects are spawned fillable_categories (None or set of str): If specified, set of fillable categories which are allowed for this recipe. If None, any fillable is allowed heatsource_categories (None or set of str): If specified, set of heatsource categories which are allowed for this recipe. If None, any heatsource is allowed timesteps (None or int): Number of subsequent heating steps required for the recipe to execute. If None, it will be set to be 1, i.e.: instantaneous execution """ assert len(output_systems) == 0, f"No output systems can be specified for {cls.__name__}, recipe: {name}!" super().add_recipe( name=name, input_objects=input_objects, input_systems=input_systems, output_objects=output_objects, output_systems=output_systems, input_states=input_states, output_states=output_states, fillable_categories=fillable_categories, heatsource_categories=heatsource_categories, timesteps=timesteps, ) @classproperty def relax_recipe_systems(cls): # We don't require systems like seasoning/cheese/sesame seeds/etc. to be contained in the baking sheet return True @classproperty def ignore_nonrecipe_systems(cls): return True @classproperty def ignore_nonrecipe_objects(cls): return True @classproperty def is_multi_instance(cls): return True class CookingSystemRule(CookingRule): """ Cooking rule when output is a system. Example: beef + tomato + chicken_stock -> stew, heat source is stove, fillable is stockpot. """ @classmethod def add_recipe( cls, name, input_objects, input_systems, output_objects, output_systems, input_states=None, output_states=None, fillable_categories=None, heatsource_categories=None, timesteps=None, ): """ Adds a recipe to this cooking recipe rule to check against. This defines a valid mapping of inputs that will transform into the outputs Args: name (str): Name of the recipe input_objects (dict): Maps object categories to number of instances required for the recipe input_systems (list): List of system names required for the recipe output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1. input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to ["unary", "bianry_system"] to a list of states that should be set after the output objects are spawned fillable_categories (None or set of str): If specified, set of fillable categories which are allowed for this recipe. If None, any fillable is allowed heatsource_categories (None or set of str): If specified, set of heatsource categories which are allowed for this recipe. If None, any heatsource is allowed timesteps (None or int): Number of subsequent heating steps required for the recipe to execute. If None, it will be set to be 1, i.e.: instantaneous execution """ assert len(output_objects) == 0, f"No output objects can be specified for {cls.__name__}, recipe: {name}!" super().add_recipe( name=name, input_objects=input_objects, input_systems=input_systems, output_objects=output_objects, output_systems=output_systems, input_states=input_states, output_states=output_states, fillable_categories=fillable_categories, heatsource_categories=heatsource_categories, timesteps=timesteps, ) @classproperty def relax_recipe_systems(cls): return False @classproperty def ignore_nonrecipe_systems(cls): return False @classproperty def ignore_nonrecipe_objects(cls): return False def import_recipes(): for json_file, rule_names in _JSON_FILES_TO_RULES.items(): recipe_fpath = os.path.join(os.path.dirname(bddl.__file__), "generated_data", "transition_map", "tm_jsons", json_file) if not os.path.exists(recipe_fpath): log.warning(f"Cannot find recipe file at {recipe_fpath}. Skipping importing recipes.") with open(recipe_fpath, "r") as f: rule_recipes = json.load(f) for rule_name in rule_names: rule = REGISTERED_RULES[rule_name] if rule == WasherRule: rule.register_cleaning_conditions(translate_bddl_washer_rule_to_og_washer_rule(rule_recipes)) elif issubclass(rule, RecipeRule): log.info(f"Adding recipes of rule {rule_name}...") for recipe in rule_recipes: if "rule_name" in recipe: recipe["name"] = recipe.pop("rule_name") if "container" in recipe: recipe["fillable_synsets"] = set(recipe.pop("container").keys()) if "heat_source" in recipe: recipe["heatsource_synsets"] = set(recipe.pop("heat_source").keys()) if "machine" in recipe: recipe["fillable_synsets"] = set(recipe.pop("machine").keys()) # Route the recipe to the correct rule: CookingObjectRule or CookingSystemRule satisfied = True og_recipe = translate_bddl_recipe_to_og_recipe(**recipe) has_output_system = len(og_recipe["output_systems"]) > 0 if (rule == CookingObjectRule and has_output_system) or (rule == CookingSystemRule and not has_output_system): satisfied = False if satisfied: rule.add_recipe(**og_recipe) log.info(f"All recipes of rule {rule_name} imported successfully.") import_recipes()
StanfordVL/OmniGibson/omnigibson/__init__.py
import logging import os import shutil import signal import tempfile import builtins # TODO: Need to fix somehow -- omnigibson gets imported first BEFORE we can actually modify the macros from omnigibson.macros import gm from omnigibson.envs import Environment from omnigibson.scenes import REGISTERED_SCENES from omnigibson.objects import REGISTERED_OBJECTS from omnigibson.robots import REGISTERED_ROBOTS from omnigibson.controllers import REGISTERED_CONTROLLERS from omnigibson.tasks import REGISTERED_TASKS from omnigibson.sensors import ALL_SENSOR_MODALITIES from omnigibson.simulator import launch_simulator as launch # Create logger logging.basicConfig(format='[%(levelname)s] [%(name)s] %(message)s') log = logging.getLogger(__name__) builtins.ISAAC_LAUNCHED_FROM_JUPYTER = ( os.getenv("ISAAC_JUPYTER_KERNEL") is not None ) # We set this in the kernel.json file # Always enable nest_asyncio because MaterialPrim calls asyncio.run() import nest_asyncio nest_asyncio.apply() __version__ = "1.0.0" log.setLevel(logging.DEBUG if gm.DEBUG else logging.INFO) root_path = os.path.dirname(os.path.realpath(__file__)) # Store paths to example configs example_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs") # Initialize global variables app = None # (this is a singleton so it's okay that it's global) sim = None # (this is a singleton so it's okay that it's global) # Create and expose a temporary directory for any use cases. It will get destroyed upon omni # shutdown by the shutdown function. tempdir = tempfile.mkdtemp() def cleanup(*args, **kwargs): # TODO: Currently tempfile removal will fail due to CopyPrim command (for example, GranularSystem in dicing_apple example.) try: shutil.rmtree(tempdir) except PermissionError: log.info("Permission error when removing temp files. Ignoring") from omnigibson.simulator import logo_small log.info(f"{'-' * 10} Shutting Down {logo_small()} {'-' * 10}") def shutdown(due_to_signal=False): if app is not None: # If Isaac is running, we do the cleanup in its shutdown callback to avoid open handles. # TODO: Automated cleanup in callback doesn't work for some reason. Need to investigate. # Manually call cleanup for now. cleanup() app.close() else: # Otherwise, we do the cleanup here. cleanup() # If we're not shutting down due to a signal, we need to manually exit if not due_to_signal: exit(0) def shutdown_handler(*args, **kwargs): shutdown(due_to_signal=True) return signal.default_int_handler(*args, **kwargs) # Something somewhere disables the default SIGINT handler, so we need to re-enable it signal.signal(signal.SIGINT, shutdown_handler)
StanfordVL/OmniGibson/omnigibson/omnigibson.kit
[package] title = "OmniGibson" description = "A platform for accelerating Embodied AI research" version = "2023.1.1" # That makes it browsable in UI with "experience" filter keywords = ["experience", "app", "usd"] [dependencies] # The Main UI App "omni.kit.uiapp" = {} "omni.kit.renderer.core" = {} # Livestream - OV Streaming Client "omni.kit.streamsdk.plugins" = {version = "2.5.2", exact = true} # Status Bar "omni.kit.window.status_bar" = {} "omni.stats" = {} "omni.kit.telemetry" = {} "omni.kit.menu.utils" = {} "omni.kit.menu.file" = {} "omni.kit.menu.edit" = {} "omni.kit.menu.create" = {} "omni.kit.menu.common" = {} "omni.kit.menu.stage" = {} "omni.kit.window.file" = {} "omni.kit.context_menu" = {} "omni.kit.selection" = {} "omni.kit.stage_templates" = {} # "omni.kit.stage.mdl_converter" = {} # Animation # "omni.anim.skelvis" = {} # PhysX "omni.physx.bundle" = {} "omni.physx.tensors" = {} # "omni.physx.fabric" = {} # "omni.physx.zerogravity" = {} # "omni.kit.search.service" = {} "omni.kit.primitive.mesh" = {} # Create Windows "omni.kit.window.title" = {} "omni.kit.widget.live" = {} "omni.kit.window.stage" = {} "omni.kit.widget.layers" = {} "omni.kit.window.cursor" = {} "omni.kit.window.toolbar" = {} "omni.kit.window.commands" = {} # New Viewport, load the default bundle of extensions "omni.kit.viewport.bundle" = {} "omni.kit.viewport.menubar.lighting" = {} # Load the rendering extensions # "omni.renderer" = { tag = "rtx" } # Load the RTX rendering bundle "omni.kit.viewport.rtx" = {} # Load the Storm rendering bundle "omni.kit.viewport.pxr" = {} # Needed for Fabric delegate "omni.resourcemonitor" = {} # Additional Viewport features (legacy grid etc, HUD GPU stats) "omni.kit.viewport.legacy_gizmos" = {} "omni.kit.viewport.ready" = {} "omni.hydra.engine.stats" = {} "omni.rtx.settings.core" = {} # this is the new Render Settings 2.0 # "omni.kit.window.movie_capture" = { } "omni.kit.profiler.window" = {} "omni.kit.stage_column.variant" = {} "omni.kit.stage_column.payload" = {} # Viewport Widgets and Collaboration # "omni.kit.viewport_widgets_manager" = {} "omni.kit.collaboration.channel_manager" = {} # "omni.kit.widgets.custom" = {} # utils window # "omni.kit.window.about" = {} # Isaac Sim: disable this and replace with our own # "omni.kit.window.privacy" = {} # "omni.kit.window.provide_feedback" = {} # Isaac Sim: disable this and replace with our own # "omni.kit.material.library" = {} # "omni.kit.window.imageviewer" = {} "omni.kit.widget.filebrowser" = {} "omni.kit.window.filepicker" = {} "omni.kit.window.content_browser" = {} "omni.kit.window.stats" = { order = 1000 } "omni.kit.window.script_editor" = {} "omni.kit.window.console" = {} "omni.kit.window.extensions" = {} # browsers "omni.kit.browser.sample" = {} # "omni.kit.browser.asset" = {} # "omni.kit.browser.asset_store" = {} # "omni.kit.browser.asset_provider.local" = {} # "omni.kit.browser.asset_provider.sketchfab" = {} # "omni.kit.browser.asset_provider.turbosquid" = {} # "omni.kit.browser.asset_provider.actorcore" = {} # "omni.kit.window.environment" = {} # potentially increases startup times # Material # "omni.kit.window.material" = { } # "omni.kit.graph.delegate.default" = { } # "omni.kit.window.material_graph" = { } # "omni.kit.window.usd_paths" = {} # "omni.kit.window.preferences" = { order = 1000 } # so the menu is in the correct place # "omni.kit.renderer.capture" = {} # "omni.kit.thumbnails.usd" = {} # "omni.kit.thumbnails.images" = {} # bring all the property Widgets and Window "omni.kit.window.property" = {} "omni.kit.property.bundle" = {} "omni.kit.property.layer" = {} # tool # "omni.kit.asset_converter" = {} # "omni.kit.tool.asset_importer" = {} # "omni.kit.tool.asset_exporter" = {} # "omni.kit.tool.collect" = {} # "omni.kit.tool.remove_unused.core" = {} # "omni.kit.tool.remove_unused.controller" = {} # Iray # "omni.iray.settings.core" = {} # "omni.hydra.iray" = { order = -1000 } #Particle/PointCloud FileFormat # "omni.usd.fileformat.e57" = { } # "omni.kit.pointclouds" = {} # External Scene # "omni.geo.streaming.bundle" = {} # All QuickSearch # "omni.kit.window.quicksearch" = {} # "omni.kit.quicksearch.actions" = {} # "omni.kit.quicksearch.settings" = {} # "omni.kit.quicksearch.select" = {} # "omni.kit.quicksearch.commands" = {} # "omni.kit.quicksearch.menu" = {} # "omni.kit.quicksearch.material" = {} # "omni.kit.quicksearch.hdri" = {} # "omni.kit.quicksearch.props" = {} # "omni.kit.search.files" = {} # Compatibility Checker # "omni.kit.compatibility_checker" = {} # VERSIONING # "omni.kit.widget.versioning" = {} # Paint Default now # "omni.paint.system.bundle" = {} # Manipulator "omni.kit.manipulator.prim" = {} "omni.kit.manipulator.transform" = {} "omni.kit.manipulator.viewport" = {} # "omni.kit.manipulator.tool.mesh_snap" = {} # Destruction schema # "omni.usd.schema.destruction" = {} # Animation # "omni.anim.skelJoint" = { } # "omni.anim.curve" = { } # "omni.kit.widget.timeline" = { } # "omni.anim.curve_editor" = { } # "omni.anim.window.timeline" = { } # "omni.anim.shared.core" = {} # "omni.anim.timeline" = { } # "omni.anim.graph.bundle" = {} # "omni.anim.graph.core" = {} # "omni.anim.graph.ui" = {} # "omni.anim.retarget.bundle" = {} # "omni.anim.retarget.core" = {} # "omni.anim.retarget.ui" = {} #"omni.anim.camera_tool" = {} # Needed to properly load navigation mesh "omni.anim.graph.schema" = {} "omni.anim.navigation.schema" = {} # OmniGraph "omni.graph.bundle.action" = {} "omni.graph.window.action" = {} "omni.graph.window.generic" = {} "omni.graph.visualization.nodes" = {} # Python Scripting Component # "omni.kit.scripting" = {} # kit-testing # "omni.kit.tests.usd_stress" = {} # Curves # "omni.curve.manipulator" = {} # General Proceduralism # "omni.genproc.bundle" = {} # Sequencer # "omni.kit.window.sequencer" = {} # "omni.services.usd" = {} # SBSAR # "omni.kit.property.sbsar" = {} # "omni.usd.fileformat.sbsar" = {} # Thumbnails # "omni.kit.thumbnails.mdl" = {} # Quicklayout # "omni.kit.quicklayout" = {} # AOV # "omni.kit.menu.aov" = {} # "omni.graph.examples.cpp" = {} # Collections # "omni.kit.window.collection" = {} # "omni.kit.widget.collection" = {} # "omni.kit.property.collection" = {} # Extended Searchfield # "omni.kit.widget.extended_searchfield" = {} # Particle # "omni.particle.system.bundle" = {} # Scene Visualization "omni.usd.schema.scene.visualization" = {} # "omni.scene.visualization.bundle" = {} #Section Tool # "omni.kit.window.section" = {} # startfleet auth enabled for cloud_share to work on the receiver # "omni.services.starfleet.auth" = {} # Array Tool # "omni.tools.array" = {} # "omni.tools.pivot" = {} # Randomizer # "omni.tools.randomizer" = {} # Deepsearch # "omni.kit.browser.deepsearch" = {} # Actions # "omni.kit.actions.window" = {} # "omni.kit.viewport.actions" = {} # Scene Optimizer (formerly Data Adapter) # "omni.scene.optimizer.bundle" = {} # Hotkeys "omni.kit.hotkeys.window" = {} # USDA # "omni.kit.usda_edit" = {} # "omni.rakis" = {} "omni.warp" = {} # needed for omni.kit.viewport.ready.viewport_ready "omni.activity.profiler" = {} "omni.activity.pump" = {} "omni.kit.widget.cache_indicator" = {} [settings] renderer.active = "rtx" exts."omni.kit.viewport.menubar.camera".expand = true # Expand the extra-camera settings by default exts."omni.kit.window.file".useNewFilePicker = true exts."omni.kit.tool.asset_importer".useNewFilePicker = true exts."omni.kit.tool.collect".useNewFilePicker = true exts."omni.kit.widget.layers".useNewFilePicker = true exts."omni.kit.renderer.core".imgui.enableMips = true exts."omni.kit.browser.material".enabled = false exts."omni.kit.browser.asset".visible_after_startup = false exts."omni.kit.window.material".load_after_startup = true exts."omni.kit.widget.cloud_share".require_access_code = false exts."omni.kit.pipapi".installCheckIgnoreVersion = true exts."omni.kit.viewport.window".startup.windowName="Viewport" # Rename from Viewport Next exts."omni.kit.menu.utils".logDeprecated = false # app.content.emptyStageOnStart = false app.file.ignoreUnsavedOnExit = true # prevents save dialog when exiting # deprecate support for old kit.ui.menu app.menu.legacy_mode = false # use omni.ui.Menu for the MenuBar app.menu.compatibility_mode = false # Setting the port for the embedded http server exts."omni.services.transport.server.http".port = 8211 # default viewport is fill app.runLoops.rendering_0.fillResolution = false exts."omni.kit.window.viewport".blockingGetViewportDrawable = false exts."omni.kit.test".includeTests.1 = "*isaac*" [settings.app.python] # These disable the kit app from also printing out python output, which gets confusing interceptSysStdOutput = false logSysStdOutput = false [settings.app.settings] persistent = false dev_build = false fabricDefaultStageFrameHistoryCount = 3 # needed for omni.syntheticdata TODO105 Still True? [settings.app.window] title = "OmniGibson" hideUi = false _iconSize = 256 iconPath = "${app}/../exts/omni.isaac.app.setup/data/nvidia-omniverse-isaacsim.ico" # width = 1700 # height = 900 # x = -1 # y = -1 # Fonts [setting.app.font] file = "${fonts}/OpenSans-SemiBold.ttf" size = 16 # [setting.app.runLoops] # main.rateLimitEnabled = false # main.rateLimitFrequency = 60 # main.rateLimitUseBusyLoop = false # rendering_0.rateLimitEnabled = false [settings.exts.'omni.kit.window.extensions'] # List extensions here we want to show as featured when extension manager is opened featuredExts = [] [settings] # MGPU is always on, you can turn it from the settings, and force this off to save even more resource if you # only want to use a single GPU on your MGPU system # False for Isaac Sim renderer.multiGpu.enabled = true renderer.multiGpu.autoEnable = true 'rtx-transient'.resourcemanager.enableTextureStreaming = true # app.hydra.aperture.conform = 4 # in 105.1 pixels are square by default app.hydraEngine.waitIdle = false rtx.newDenoiser.enabled = true # Enable Iray and pxr by setting this to "rtx,iray,pxr" renderer.enabled = "rtx" physics.autoPopupSimulationOutputWindow=false ### async rendering settings omni.replicator.asyncRendering = false app.asyncRendering = false app.asyncRenderingLowLatency = false ### Render thread settings app.runLoops.main.rateLimitEnabled = false app.runLoops.main.rateLimitFrequency = 120 app.runLoops.main.rateLimitUsePrecisionSleep = true app.runLoops.main.syncToPresent = false app.runLoops.present.rateLimitFrequency = 120 app.runLoops.present.rateLimitUsePrecisionSleep = true app.runLoops.rendering_0.rateLimitFrequency = 120 app.runLoops.rendering_0.rateLimitUsePrecisionSleep = true app.runLoops.rendering_0.syncToPresent = false app.runLoops.rendering_1.rateLimitFrequency = 120 app.runLoops.rendering_1.rateLimitUsePrecisionSleep = true app.runLoops.rendering_1.syncToPresent = false app.runLoopsGlobal.syncToPresent = false app.vsync = false exts.omni.kit.renderer.core.present.enabled = false exts.omni.kit.renderer.core.present.presentAfterRendering = false persistent.app.viewport.defaults.tickRate = 120 rtx-transient.dlssg.enabled = false privacy.externalBuild = true # Basic Kit App ################################ app.versionFile = "${app}/../VERSION" app.name = "Isaac-Sim" app.version = "2023.1.1" # hide NonToggleable Exts exts."omni.kit.window.extensions".hideNonToggleableExts = true exts."omni.kit.window.extensions".showFeatureOnly = false # Hang Detector ################################ # app.hangDetector.enabled = false # app.hangDetector.timeout = 120 # Browsers exts."omni.kit.browser.material".folders = [ "Base::http://omniverse-content-production.s3-us-west-2.amazonaws.com/Materials/Base", "vMaterials::http://omniverse-content-production.s3.us-west-2.amazonaws.com/Materials/vMaterials_2/", "Twinbru Fabrics::https://twinbru.s3.eu-west-1.amazonaws.com/omniverse/Twinbru Fabrics/" ] exts."omni.kit.window.environment".folders = [ "https://omniverse-content-production.s3.us-west-2.amazonaws.com/Assets/Skies/2022_1/Skies", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Scenes/Templates", ] exts."omni.kit.browser.sample".folders = [ "http://omniverse-content-production.s3-us-west-2.amazonaws.com//Samples" ] exts."omni.kit.browser.asset".folders = [ "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Vegetation", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/ArchVis/Commercial", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/ArchVis/Industrial", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/ArchVis/Residential", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Equipment", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Safety", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Shipping", "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/DigitalTwin/Assets/Warehouse/Storage", ] exts."omni.kit.browser.texture".folders = [ "http://omniverse-content-production.s3-us-west-2.amazonaws.com/Assets/Vegetation", ] #RTX Settings [settings.rtx] translucency.worldEps = 0.005 # Content Browser ############################### [settings.exts."omni.kit.window.content_browser"] enable_thumbnail_generation_images = false # temp fix to avoid leaking python processes # Extensions ############################### [settings.exts."omni.kit.registry.nucleus"] registries = [ { name = "kit/default", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/shared" }, { name = "kit/sdk", url = "https://ovextensionsprod.blob.core.windows.net/exts/kit/prod/sdk/${kit_version_short}/${kit_git_hash}" }, { name = "kit/community", url = "https://dw290v42wisod.cloudfront.net/exts/kit/community" }, ] [settings.app.extensions] skipPublishVerification = false registryEnabled = true [settings.exts."omni.kit.window.modifier.titlebar"] titleFormatString = " Isaac Sim {version:${app}/../SHORT_VERSION,font_color=0x909090,font_size=16} {separator} {file, board=true}" showFileFullPath = true icon.file = "${app}/../exts/omni.isaac.app.setup/data/nvidia-omniverse-isaacsim.ico" icon.size = 256 defaultFont.name = "Arial" defaultFont.size = 16 defaultFont.color = 0xD0D0D0 separator.color = 0x00B976 separator.width = 1 windowBorder.color = 0x0F0F0F windowBorder.width = 2 colors.caption = 0x0F0F0F colors.client = 0x0F0F0F respondOnMouseUp = true changeWindowRegion = true # Register extension folder from this repo in kit [settings.app.exts] folders = ["${app}/../exts", "${app}/../extscache", "${app}/../extsPhysics"] [settings.crashreporter.data] experience = "Isaac Sim Python" # Isaac Sim Settings ############################### [settings.app.renderer] skipWhileMinimized = false sleepMsOnFocus = 0 sleepMsOutOfFocus = 0 resolution.width=1280 resolution.height=720 [settings.app.livestream] proto = "ws" allowResize = true outDirectory = "${data}" # default camera position in meters [settings.app.viewport] defaultCamPos.x = 5 defaultCamPos.y = 5 defaultCamPos.z = 5 [settings.rtx] raytracing.fractionalCutoutOpacity = false hydra.enableSemanticSchema = true # descriptorSets=60000 # reservedDescriptors=500000 # sceneDb.maxInstances=1000000 # Enable this for static scenes, improves visual quality # directLighting.sampledLighting.enabled = true [settings.persistent] app.file.recentFiles = [] app.stage.upAxis = "Z" app.stage.movePrimInPlace = false app.stage.instanceableOnCreatingReference = false app.stage.materialStrength = "weakerThanDescendants" app.transform.gizmoUseSRT = true app.viewport.grid.scale = 1.0 app.viewport.pickingMode = "kind:model.ALL" app.viewport.camMoveVelocity = 0.05 # 5 m/s app.viewport.gizmo.scale = 0.01 # scaled to meters app.viewport.previewOnPeek = false app.viewport.snapToSurface = false app.viewport.displayOptions = 31887 # Disable Frame Rate and Resolution by default app.window.uiStyle = "NvidiaDark" app.primCreation.DefaultXformOpType = "Scale, Orient, Translate" app.primCreation.DefaultXformOpOrder="xformOp:translate, xformOp:orient, xformOp:scale" app.primCreation.typedDefaults.camera.clippingRange = [0.01, 10000000.0] simulation.minFrameRate = 15 simulation.defaultMetersPerUnit = 1.0 omnigraph.updateToUsd = false omnigraph.useSchemaPrims = true omnigraph.disablePrimNodes = true physics.updateToUsd = true physics.updateVelocitiesToUsd = true physics.useFastCache = false physics.visualizationDisplayJoints = false physics.visualizationSimulationOutput = false omni.replicator.captureOnPlay = true exts."omni.anim.navigation.core".navMesh.viewNavMesh = false renderer.startupMessageDisplayed = true # hides the IOMMU popup window # Make Detail panel visible by default app.omniverse.content_browser.options_menu.show_details = true app.omniverse.filepicker.options_menu.show_details = true [settings.ngx] enabled=true # Enable this for DLSS # Isaac Sim Extensions ############################### [dependencies] "omni.isaac.core_archive" = {} "omni.pip.compute" = {} "omni.pip.cloud" = {} "omni.isaac.ml_archive" = {} "omni.isaac.urdf" = {} "omni.isaac.mjcf" = {} "omni.isaac.utils" = {} "omni.isaac.range_sensor" = {} "omni.isaac.dynamic_control" = {} "omni.isaac.kit" = {} "omni.isaac.core" = {} "omni.isaac.core_nodes" = {} "omni.isaac.cloner" = {} "omni.isaac.cortex" = {} "omni.isaac.cortex.sample_behaviors" = {} "omni.isaac.dofbot" = {} "omni.isaac.surface_gripper" = {} # "omni.kit.property.isaac" = {} "omni.isaac.scene_blox" = {} "omni.isaac.sensor" = {} "omni.isaac.debug_draw" = {} "omni.isaac.gym" = {} "omni.isaac.franka" = {} "omni.isaac.manipulators" = {} "omni.isaac.quadruped" = {} "omni.isaac.wheeled_robots" = {} "omni.isaac.lula" = {} "omni.isaac.motion_generation" = {} "omni.isaac.universal_robots" = {} "omni.isaac.occupancy_map" = {} "omni.replicator.isaac" = {} "omni.kit.loop-isaac" = {} #linux only extensions [dependencies."filter:platform"."linux-x86_64"] # "omni.isaac.ocs2" = {} # Non Isaac Sim Extensions ###################### [dependencies] "omni.syntheticdata" = {} "semantics.schema.editor" = {} "semantics.schema.property" = {} "omni.replicator.core" = {} "omni.replicator.replicator_yaml" = {} "omni.replicator.composer" = {} "omni.importer.mjcf" = {} "omni.importer.urdf" = {}
StanfordVL/OmniGibson/omnigibson/macros.py
""" Set of macros to use globally for OmniGibson. These are generally magic numbers that were tuned heuristically. NOTE: This is generally decentralized -- the monolithic @settings variable is created here with some global values, but submodules within OmniGibson may import this dictionary and add to it dynamically """ import os import pathlib from addict import Dict # Initialize settings macros = Dict() gm = macros.globals # Path (either relative to OmniGibson/omnigibson directory or global absolute path) for data # Assets correspond to non-objects / scenes (e.g.: robots), and dataset incliudes objects + scene # can override assets_path and dataset_path from environment variable gm.ASSET_PATH = "data/assets" if "OMNIGIBSON_ASSET_PATH" in os.environ: gm.ASSET_PATH = os.environ["OMNIGIBSON_ASSET_PATH"] gm.ASSET_PATH = os.path.expanduser(gm.ASSET_PATH) if not os.path.isabs(gm.ASSET_PATH): gm.ASSET_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), gm.ASSET_PATH) gm.DATASET_PATH = "data/og_dataset" if "OMNIGIBSON_DATASET_PATH" in os.environ: gm.DATASET_PATH = os.environ["OMNIGIBSON_DATASET_PATH"] gm.DATASET_PATH = os.path.expanduser(gm.DATASET_PATH) if not os.path.isabs(gm.DATASET_PATH): gm.DATASET_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), gm.DATASET_PATH) gm.KEY_PATH = "data/omnigibson.key" if "OMNIGIBSON_KEY_PATH" in os.environ: gm.KEY_PATH = os.environ["OMNIGIBSON_KEY_PATH"] gm.KEY_PATH = os.path.expanduser(gm.KEY_PATH) if not os.path.isabs(gm.KEY_PATH): gm.KEY_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), gm.KEY_PATH) # Which GPU to use -- None will result in omni automatically using an appropriate GPU. Otherwise, set with either # integer or string-form integer gm.GPU_ID = os.getenv("OMNIGIBSON_GPU_ID", None) # Whether to generate a headless or non-headless application upon OmniGibson startup gm.HEADLESS = (os.getenv("OMNIGIBSON_HEADLESS", 'False').lower() in ('true', '1', 't')) # Whether to enable remote streaming. None disables it, other valid options are "native", "webrtc". gm.REMOTE_STREAMING = os.getenv("OMNIGIBSON_REMOTE_STREAMING", None) # What port the webrtc and http servers should run on. This is only used if REMOTE_STREAMING is set to "webrtc" gm.HTTP_PORT = os.getenv("OMNIGIBSON_HTTP_PORT", 8211) gm.WEBRTC_PORT = os.getenv("OMNIGIBSON_WEBRTC_PORT", 49100) # Whether only the viewport should be shown in the GUI or not (if not, other peripherals are additionally shown) # CANNOT be set at runtime gm.GUI_VIEWPORT_ONLY = False # Whether to use the viewer camera or not gm.RENDER_VIEWER_CAMERA = True # Do not suppress known omni warnings / errors, and also put omnigibson in a debug state # This includes extra information for things such as object sampling, and also any debug # logging messages gm.DEBUG = (os.getenv("OMNIGIBSON_DEBUG", 'False').lower() in ('true', '1', 't')) # Whether to print out disclaimers (i.e.: known failure cases resulting from Omniverse's current bugs / limitations) gm.SHOW_DISCLAIMERS = False # Whether to use omni's GPU dynamics # This is necessary for certain features; e.g. particles (fluids / cloth) gm.USE_GPU_DYNAMICS = False # Whether to use high-fidelity rendering (this includes, e.g., isosurfaces) gm.ENABLE_HQ_RENDERING = False # Whether to use omni's flatcache feature or not (can speed up simulation) gm.ENABLE_FLATCACHE = False # Whether to use continuous collision detection or not (slower simulation, but can prevent # objects from tunneling through each other) gm.ENABLE_CCD = False # Pairs setting -- USD default is 256 * 1024, physx default apparently is 32 * 1024. gm.GPU_PAIRS_CAPACITY = 256 * 1024 # Aggregate pairs setting -- default is 1024, but is often insufficient for large scenes gm.GPU_AGGR_PAIRS_CAPACITY = (2 ** 14) * 1024 # Maximum particle contacts allowed gm.GPU_MAX_PARTICLE_CONTACTS = 1024 * 1024 # Maximum rigid contacts -- 524288 is default value from omni, but increasing too much can sometimes lead to crashes gm.GPU_MAX_RIGID_CONTACT_COUNT = 524288 * 4 # Maximum rigid patches -- 81920 is default value from omni, but increasing too much can sometimes lead to crashes gm.GPU_MAX_RIGID_PATCH_COUNT = 81920 * 4 # Whether to enable object state logic or not gm.ENABLE_OBJECT_STATES = True # Whether to enable transition rules or not gm.ENABLE_TRANSITION_RULES = True # Default settings for the omni UI viewer gm.DEFAULT_VIEWER_WIDTH = 1280 gm.DEFAULT_VIEWER_HEIGHT = 720 # (Demo-purpose) Whether to activate Assistive Grasping mode for Cloth (it's handled differently from RigidBody) gm.AG_CLOTH = False # Forced light intensity for all DatasetObjects. None if the USD-provided intensities should be respected. gm.FORCE_LIGHT_INTENSITY = 150000 # Forced roughness for all DatasetObjects. None if the USD-provided roughness maps should be respected. gm.FORCE_ROUGHNESS = 0.7 # Create helper function for generating sub-dictionaries def create_module_macros(module_path): """ Creates a dictionary that can be populated with module macros based on the module's @module_path Args: module_path (str): Relative path from the package root directory pointing to the module. This will be parsed to generate the appropriate sub-macros dictionary, e.g., for module "dirty" in omnigibson/object_states_dirty.py, this would generate a dictionary existing at macros.object_states.dirty Returns: Dict: addict dictionary which can be populated with values """ # Sanity check module path, make sure omnigibson/ is in the path module_path = pathlib.Path(module_path) omnigibson_path = pathlib.Path(__file__).parent # Trim the .py, and anything before and including omnigibson/, and split into its appropriate parts try: subsections = module_path.with_suffix("").relative_to(omnigibson_path).parts except ValueError: raise ValueError("module_path is expected to be a filepath including the omnigibson root directory, got: {module_path}!") # Create and return the generated sub-dictionary def _recursively_get_or_create_dict(dic, keys): # If no entry is in @keys, it returns @dic # Otherwise, checks whether the dictionary contains the first entry in @keys, if so, it grabs the # corresponding nested dictionary, otherwise, generates a new Dict() as the value # It then recurisvely calls this function with the new dic and the remaining keys if len(keys) == 0: return dic else: key = keys[0] if key not in dic: dic[key] = Dict() return _recursively_get_or_create_dict(dic=dic[key], keys=keys[1:]) return _recursively_get_or_create_dict(dic=macros, keys=subsections)
StanfordVL/OmniGibson/omnigibson/lazy.py
import sys from omnigibson.utils.lazy_import_utils import LazyImporter sys.modules[__name__] = LazyImporter("", None)
StanfordVL/OmniGibson/omnigibson/scenes/__init__.py
from omnigibson.scenes.scene_base import Scene, REGISTERED_SCENES from omnigibson.scenes.traversable_scene import TraversableScene from omnigibson.scenes.static_traversable_scene import StaticTraversableScene from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
StanfordVL/OmniGibson/omnigibson/scenes/static_traversable_scene.py
import os import numpy as np from omnigibson.scenes.traversable_scene import TraversableScene from omnigibson.prims.geom_prim import CollisionVisualGeomPrim from omnigibson.utils.asset_utils import get_scene_path from omnigibson.utils.usd_utils import add_asset_to_stage from omnigibson.utils.ui_utils import create_module_logger # Create module logger log = create_module_logger(module_name=__name__) class StaticTraversableScene(TraversableScene): """ Static traversable scene class for OmniGibson, where scene is defined by a singular mesh (no intereactable objects) """ def __init__( self, scene_model, scene_file=None, trav_map_resolution=0.1, default_erosion_radius=0.0, trav_map_with_objects=True, num_waypoints=10, waypoint_resolution=0.2, floor_plane_visible=False, floor_plane_color=(1.0, 1.0, 1.0), ): """ Args: scene_model (str): Scene model name, e.g.: Adrian scene_file (None or str): If specified, full path of JSON file to load (with .json). None results in no additional objects being loaded into the scene trav_map_resolution (float): traversability map resolution default_erosion_radius (float): default map erosion radius in meters trav_map_with_objects (bool): whether to use objects or not when constructing graph num_waypoints (int): number of way points returned waypoint_resolution (float): resolution of adjacent way points floor_plane_visible (bool): whether to render the additionally added floor plane floor_plane_color (3-array): if @floor_plane_visible is True, this determines the (R,G,B) color assigned to the generated floor plane """ # Store and initialize additional variables self._floor_heights = None self._scene_mesh = None # Run super init super().__init__( scene_model=scene_model, scene_file=scene_file, trav_map_resolution=trav_map_resolution, default_erosion_radius=default_erosion_radius, trav_map_with_objects=trav_map_with_objects, num_waypoints=num_waypoints, waypoint_resolution=waypoint_resolution, use_floor_plane=True, floor_plane_visible=floor_plane_visible, floor_plane_color=floor_plane_color, ) def _load(self): # Run super first super()._load() # Load the scene mesh (use downsampled one if available) filename = os.path.join(get_scene_path(self.scene_model), "mesh_z_up_downsampled.obj") if not os.path.isfile(filename): filename = os.path.join(get_scene_path(self.scene_model), "mesh_z_up.obj") scene_prim = add_asset_to_stage( asset_path=filename, prim_path=f"/World/scene_{self.scene_model}", ) # Grab the actual mesh prim self._scene_mesh = CollisionVisualGeomPrim( prim_path=f"/World/scene_{self.scene_model}/mesh_z_up/{self.scene_model}_mesh_texture", name=f"{self.scene_model}_mesh", ) # Load floor metadata floor_height_path = os.path.join(get_scene_path(self.scene_model), "floors.txt") assert os.path.isfile(floor_height_path), f"floor_heights.txt cannot be found in model: {self.scene_model}" with open(floor_height_path, "r") as f: self.floor_heights = sorted(list(map(float, f.readlines()))) log.debug("Floors {}".format(self.floor_heights)) # Move the floor plane to the first floor by default self.move_floor_plane(floor=0) # Filter the collision between the scene mesh and the floor plane self._scene_mesh.add_filtered_collision_pair(prim=self._floor_plane) # Load the traversability map self._trav_map.load_map(get_scene_path(self.scene_model)) def move_floor_plane(self, floor=0, additional_elevation=0.02, height=None): """ Resets the floor plane to a new floor Args: floor (int): Integer identifying the floor to move the floor plane to additional_elevation (float): Additional elevation with respect to the height of the floor height (None or float): If specified, alternative parameter to directly control the height of the ground plane. Note that this will override @additional_elevation and @floor! """ height = height if height is not None else self.floor_heights[floor] + additional_elevation self._floor_plane.set_position(np.array([0, 0, height])) def get_floor_height(self, floor=0): """ Return the current floor height (in meter) Returns: int: current floor height """ return self.floor_heights[floor] @property def n_floors(self): return len(self._floor_heights)
StanfordVL/OmniGibson/omnigibson/scenes/traversable_scene.py
from omnigibson.scenes.scene_base import Scene from omnigibson.maps.traversable_map import TraversableMap from omnigibson.utils.ui_utils import create_module_logger # Create module logger log = create_module_logger(module_name=__name__) class TraversableScene(Scene): """ Traversable scene class. Contains the functionalities for navigation such as shortest path computation """ def __init__( self, scene_model, scene_file=None, trav_map_resolution=0.1, default_erosion_radius=0.0, trav_map_with_objects=True, num_waypoints=10, waypoint_resolution=0.2, use_floor_plane=True, floor_plane_visible=True, floor_plane_color=(1.0, 1.0, 1.0), ): """ Args: scene_model (str): Scene model name, e.g.: Adrian or Rs_int scene_file (None or str): If specified, full path of JSON file to load (with .json). None results in no additional objects being loaded into the scene trav_map_resolution (float): traversability map resolution default_erosion_radius (float): default map erosion radius in meters trav_map_with_objects (bool): whether to use objects or not when constructing graph num_waypoints (int): number of way points returned waypoint_resolution (float): resolution of adjacent way points use_floor_plane (bool): whether to load a flat floor plane into the simulator floor_plane_visible (bool): whether to render the additionally added floor plane floor_plane_color (3-array): if @floor_plane_visible is True, this determines the (R,G,B) color assigned to the generated floor plane """ log.info("TraversableScene model: {}".format(scene_model)) self.scene_model = scene_model # Create traversable map self._trav_map = TraversableMap( map_resolution=trav_map_resolution, default_erosion_radius=default_erosion_radius, trav_map_with_objects=trav_map_with_objects, num_waypoints=num_waypoints, waypoint_resolution=waypoint_resolution, ) # Run super init super().__init__( scene_file=scene_file, use_floor_plane=use_floor_plane, floor_plane_visible=floor_plane_visible, floor_plane_color=floor_plane_color, ) @property def trav_map(self): """ Returns: TraversableMap: Map for computing connectivity between nodes for this scene """ return self._trav_map def get_random_point(self, floor=None, reference_point=None, robot=None): return self._trav_map.get_random_point(floor=floor, reference_point=reference_point, robot=robot) def get_shortest_path(self, floor, source_world, target_world, entire_path=False, robot=None): return self._trav_map.get_shortest_path( floor=floor, source_world=source_world, target_world=target_world, entire_path=entire_path, robot=robot, )
StanfordVL/OmniGibson/omnigibson/scenes/interactive_traversable_scene.py
import os from omnigibson.robots.robot_base import REGISTERED_ROBOTS from omnigibson.robots.robot_base import m as robot_macros from omnigibson.scenes.traversable_scene import TraversableScene from omnigibson.maps.segmentation_map import SegmentationMap from omnigibson.utils.asset_utils import get_og_scene_path from omnigibson.utils.constants import STRUCTURE_CATEGORIES from omnigibson.utils.ui_utils import create_module_logger # Create module logger log = create_module_logger(module_name=__name__) class InteractiveTraversableScene(TraversableScene): """ Create an interactive scene defined from a scene json file. In general, this supports curated, pre-defined scene layouts with annotated objects. This adds semantic support via a segmentation map generated for this specific scene. """ def __init__( self, scene_model, scene_instance=None, scene_file=None, trav_map_resolution=0.1, default_erosion_radius=0.0, trav_map_with_objects=True, num_waypoints=10, waypoint_resolution=0.2, load_object_categories=None, not_load_object_categories=None, load_room_types=None, load_room_instances=None, load_task_relevant_only=False, seg_map_resolution=0.1, include_robots=True, ): """ Args: scene_model (str): Scene model name, e.g.: Rs_int scene_instance (None or str): name of json file to load (without .json); if None, defaults to og_dataset/scenes/<scene_model>/json/<scene_instance>.urdf scene_file (None or str): If specified, full path of JSON file to load (with .json). This will override scene_instance and scene_model! trav_map_resolution (float): traversability map resolution default_erosion_radius (float): default map erosion radius in meters trav_map_with_objects (bool): whether to use objects or not when constructing graph num_waypoints (int): number of way points returned waypoint_resolution (float): resolution of adjacent way points load_object_categories (None or list): if specified, only load these object categories into the scene not_load_object_categories (None or list): if specified, do not load these object categories into the scene load_room_types (None or list): only load objects in these room types into the scene load_room_instances (None or list): if specified, only load objects in these room instances into the scene load_task_relevant_only (bool): Whether only task relevant objects (and building structure) should be loaded seg_map_resolution (float): room segmentation map resolution include_robots (bool): whether to also include the robot(s) defined in the scene """ # Store attributes from inputs self.include_robots = include_robots # Infer scene directory self.scene_dir = get_og_scene_path(scene_model) # Other values that will be loaded at runtime self.load_object_categories = None self.not_load_object_categories = None self.load_room_instances = None self.load_task_relevant_only = load_task_relevant_only # Get scene information if scene_file is None: scene_file = self.get_scene_loading_info( scene_model=scene_model, scene_instance=scene_instance, ) # Load room semantic and instance segmentation map (must occur AFTER inferring scene directory) self._seg_map = SegmentationMap(scene_dir=self.scene_dir, map_resolution=seg_map_resolution) # Decide which room(s) and object categories to load self.filter_rooms_and_object_categories( load_object_categories, not_load_object_categories, load_room_types, load_room_instances ) # Run super init first super().__init__( scene_model=scene_model, scene_file=scene_file, trav_map_resolution=trav_map_resolution, default_erosion_radius=default_erosion_radius, trav_map_with_objects=trav_map_with_objects, num_waypoints=num_waypoints, waypoint_resolution=waypoint_resolution, use_floor_plane=False, ) def get_scene_loading_info(self, scene_model, scene_instance=None): """ Gets scene loading info to know what single USD file to load, specified indirectly via @scene_instance if it is specified, otherwise, will grab the "best" scene file to load. Args: scene_model (str): Name of the scene to load, e.g, Rs_int, etc. scene_instance (None or str): If specified, should be name of json file to load. (without .json), default to og_dataset/scenes/<scene_model>/json/<scene_instance>.json Returns: str: Absolute path to the desired scene file (.json) to load """ # Infer scene file from model and directory fname = "{}_best".format(scene_model) if scene_instance is None else scene_instance return os.path.join(self.scene_dir, "json", "{}.json".format(fname)) def filter_rooms_and_object_categories( self, load_object_categories, not_load_object_categories, load_room_types, load_room_instances ): """ Handle partial scene loading based on object categories, room types or room instances Args: load_object_categories (None or list): if specified, only load these object categories into the scene not_load_object_categories (None or list): if specified, do not load these object categories into the scene load_room_types (None or list): only load objects in these room types into the scene load_room_instances (None or list): if specified, only load objects in these room instances into the scene """ self.load_object_categories = [load_object_categories] if \ isinstance(load_object_categories, str) else load_object_categories self.not_load_object_categories = [not_load_object_categories] if \ isinstance(not_load_object_categories, str) else not_load_object_categories if load_room_instances is not None: if isinstance(load_room_instances, str): load_room_instances = [load_room_instances] load_room_instances_filtered = [] for room_instance in load_room_instances: if room_instance in self._seg_map.room_ins_name_to_ins_id: load_room_instances_filtered.append(room_instance) else: log.warning("room_instance [{}] does not exist.".format(room_instance)) self.load_room_instances = load_room_instances_filtered elif load_room_types is not None: if isinstance(load_room_types, str): load_room_types = [load_room_types] load_room_instances_filtered = [] for room_type in load_room_types: if room_type in self._seg_map.room_sem_name_to_ins_name: load_room_instances_filtered.extend(self._seg_map.room_sem_name_to_ins_name[room_type]) else: log.warning("room_type [{}] does not exist.".format(room_type)) self.load_room_instances = load_room_instances_filtered else: self.load_room_instances = None def _load(self): # Run super first super()._load() # Load the traversability map if we have the connectivity graph maps_path = os.path.join(self.scene_dir, "layout") self._trav_map.load_map(maps_path) def _should_load_object(self, obj_info, task_metadata): name = obj_info["args"]["name"] category = obj_info["args"].get("category", "object") in_rooms = obj_info["args"].get("in_rooms", None) if isinstance(in_rooms, str): assert "," not in in_rooms in_rooms = [in_rooms] if isinstance(in_rooms, str) else in_rooms # Do not load these object categories (can blacklist building structures as well) not_blacklisted = self.not_load_object_categories is None or category not in self.not_load_object_categories # Only load these object categories (no need to white list building structures) task_relevant_names = set(task_metadata["inst_to_name"].values()) if "inst_to_name" in task_metadata else set() is_task_relevant = name in task_relevant_names or category in STRUCTURE_CATEGORIES whitelisted = ( # Either no whitelisting-only mode is on (self.load_object_categories is None and not self.load_task_relevant_only) or # Or the object is in the whitelist (self.load_object_categories is not None and category in self.load_object_categories) or # Or it's in the task relevant list (self.load_task_relevant_only and is_task_relevant) ) # This object is not located in one of the selected rooms, skip valid_room = self.load_room_instances is None or len(set(self.load_room_instances) & set(in_rooms)) > 0 # Check whether this is an agent and we allow agents agent_ok = self.include_robots or obj_info["class_name"] not in REGISTERED_ROBOTS # We only load this model if all the above conditions are met return not_blacklisted and whitelisted and valid_room and agent_ok @property def seg_map(self): """ Returns: SegmentationMap: Map for segmenting this scene """ return self._seg_map @classmethod def modify_init_info_for_restoring(cls, init_info): # Run super first super().modify_init_info_for_restoring(init_info=init_info) # We also make sure we load in any robots, and also pop any filters that were stored init_info["args"]["include_robots"] = True init_info["args"]["load_object_categories"] = None init_info["args"]["not_load_object_categories"] = None init_info["args"]["load_room_types"] = None init_info["args"]["load_room_instances"] = None
StanfordVL/OmniGibson/omnigibson/scenes/scene_base.py
import json from abc import ABC from itertools import combinations import numpy as np import omnigibson as og import omnigibson.lazy as lazy from omnigibson.macros import create_module_macros, gm from omnigibson.prims.xform_prim import XFormPrim from omnigibson.prims.material_prim import MaterialPrim from omnigibson.utils.constants import STRUCTURE_CATEGORIES from omnigibson.utils.python_utils import classproperty, Serializable, Registerable, Recreatable, \ create_object_from_init_info from omnigibson.utils.registry_utils import SerializableRegistry from omnigibson.utils.ui_utils import create_module_logger from omnigibson.utils.usd_utils import CollisionAPI from omnigibson.objects.object_base import BaseObject from omnigibson.objects.dataset_object import DatasetObject from omnigibson.systems.system_base import SYSTEM_REGISTRY, clear_all_systems, get_system from omnigibson.objects.light_object import LightObject from omnigibson.robots.robot_base import m as robot_macros # Create module logger log = create_module_logger(module_name=__name__) # Create settings for this module m = create_module_macros(module_path=__file__) # Default texture to use for skybox m.DEFAULT_SKYBOX_TEXTURE = f"{gm.ASSET_PATH}/models/background/sky.jpg" # Global dicts that will contain mappings REGISTERED_SCENES = dict() class Scene(Serializable, Registerable, Recreatable, ABC): """ Base class for all Scene objects. Contains the base functionalities for an arbitrary scene with an arbitrary set of added objects """ def __init__( self, scene_file=None, use_floor_plane=True, floor_plane_visible=True, use_skybox=True, floor_plane_color=(1.0, 1.0, 1.0), ): """ Args: scene_file (None or str): If specified, full path of JSON file to load (with .json). None results in no additional objects being loaded into the scene use_floor_plane (bool): whether to load a flat floor plane into the simulator floor_plane_visible (bool): whether to render the additionally added floor plane floor_plane_color (3-array): if @floor_plane_visible is True, this determines the (R,G,B) color assigned to the generated floor plane """ # Store internal variables self.scene_file = scene_file self._loaded = False # Whether this scene exists in the stage or not self._initialized = False # Whether this scene has its internal handles / info initialized or not (occurs AFTER and INDEPENDENTLY from loading!) self._registry = None self._world_prim = None self._initial_state = None self._objects_info = None # Information associated with this scene self._use_floor_plane = use_floor_plane self._floor_plane_visible = floor_plane_visible self._floor_plane_color = floor_plane_color self._floor_plane = None self._use_skybox = use_skybox self._skybox = None # Call super init super().__init__() @property def registry(self): """ Returns: SerializableRegistry: Master registry containing sub-registries of objects, robots, systems, etc. """ return self._registry @property def skybox(self): """ Returns: None or LightObject: Skybox light associated with this scene, if it is used """ return self._skybox @property def floor_plane(self): """ Returns: None or XFormPrim: Generated floor plane prim, if it is used """ return self._floor_plane @property def object_registry(self): """ Returns: SerializableRegistry: Object registry containing all active standalone objects in the scene """ return self._registry(key="name", value="object_registry") @property def system_registry(self): """ Returns: SerializableRegistry: System registry containing all systems in the scene (e.g.: water, dust, etc.) """ return self._registry(key="name", value="system_registry") @property def objects(self): """ Get the objects in the scene. Returns: list of BaseObject: Standalone object(s) that are currently in this scene """ return self.object_registry.objects @property def robots(self): """ Robots in the scene Returns: list of BaseRobot: Robot(s) that are currently in this scene """ return list(self.object_registry("category", robot_macros.ROBOT_CATEGORY, [])) @property def systems(self): """ Systems in the scene Returns: list of BaseSystem: System(s) that are available to use in this scene """ return self.system_registry.objects @property def object_registry_unique_keys(self): """ Returns: list of str: Keys with which to index into the object registry. These should be valid public attributes of prims that we can use as unique IDs to reference prims, e.g., prim.prim_path, prim.name, etc. """ return ["name", "prim_path", "uuid"] @property def object_registry_group_keys(self): """ Returns: list of str: Keys with which to index into the object registry. These should be valid public attributes of prims that we can use as grouping IDs to reference prims, e.g., prim.in_rooms """ return ["prim_type", "states", "category", "fixed_base", "in_rooms", "abilities"] @property def loaded(self): return self._loaded @property def initialized(self): return self._initialized def _load(self): """ Load the scene into simulator The elements to load may include: floor, building, objects, etc. """ # Create collision group for fixed base objects' non root links, root links, and building structures CollisionAPI.create_collision_group(col_group="fixed_base_nonroot_links", filter_self_collisions=False) # Disable collision between root links of fixed base objects CollisionAPI.create_collision_group(col_group="fixed_base_root_links", filter_self_collisions=True) # Disable collision between building structures CollisionAPI.create_collision_group(col_group="structures", filter_self_collisions=True) # Disable collision between building structures and 1. fixed base objects, 2. attached objects CollisionAPI.add_group_filter(col_group="structures", filter_group="fixed_base_nonroot_links") CollisionAPI.add_group_filter(col_group="structures", filter_group="fixed_base_root_links") # We just add a ground plane if requested if self._use_floor_plane: self.add_ground_plane(color=self._floor_plane_color, visible=self._floor_plane_visible) # Also add skybox if requested if self._use_skybox: self._skybox = LightObject( prim_path="/World/skybox", name="skybox", category="background", light_type="Dome", intensity=1500, fixed_base=True, ) og.sim.import_object(self._skybox, register=False) self._skybox.color = (1.07, 0.85, 0.61) self._skybox.texture_file_path = m.DEFAULT_SKYBOX_TEXTURE def _load_objects_from_scene_file(self): """ Loads scene objects based on metadata information found in the current USD stage's scene info (information stored in the world prim's CustomData) """ # Grab objects info from the scene file with open(self.scene_file, "r") as f: scene_info = json.load(f) init_info = scene_info["objects_info"]["init_info"] init_state = scene_info["state"]["object_registry"] init_systems = scene_info["state"]["system_registry"].keys() task_metadata = {} try: task_metadata = scene_info["metadata"]["task"] except: pass # Create desired systems for system_name in init_systems: if gm.USE_GPU_DYNAMICS: get_system(system_name) else: log.warning(f"System {system_name} is not supported without GPU dynamics! Skipping...") # Iterate over all scene info, and instantiate object classes linked to the objects found on the stage # accordingly for obj_name, obj_info in init_info.items(): # Check whether we should load the object or not if not self._should_load_object(obj_info=obj_info, task_metadata=task_metadata): continue # Create object class instance obj = create_object_from_init_info(obj_info) # Import into the simulator og.sim.import_object(obj) # Set the init pose accordingly obj.set_position_orientation( position=init_state[obj_name]["root_link"]["pos"], orientation=init_state[obj_name]["root_link"]["ori"], ) def _load_metadata_from_scene_file(self): """ Loads metadata from self.scene_file and stores it within the world prim's CustomData """ with open(self.scene_file, "r") as f: scene_info = json.load(f) # Write the metadata for key, data in scene_info.get("metadata", dict()).items(): og.sim.write_metadata(key=key, data=data) def _should_load_object(self, obj_info, task_metadata): """ Helper function to check whether we should load an object given its init_info. Useful for potentially filtering objects based on, e.g., their category, size, etc. Subclasses can implement additional logic. By default, this returns True Args: obj_info (dict): Dictionary of object kwargs that will be used to load the object Returns: bool: Whether this object should be loaded or not """ return True def load(self): """ Load the scene into simulator The elements to load may include: floor, building, objects, etc. """ # Make sure simulator is stopped assert og.sim.is_stopped(), "Simulator should be stopped when loading this scene!" # Do not override this function. Override _load instead. if self._loaded: raise ValueError("This scene is already loaded.") # Create the registry for tracking all objects in the scene self._registry = self._create_registry() # Store world prim and load the scene into the simulator self._world_prim = og.sim.world_prim self._load() # If we have any scene file specified, use it to load the objects within it and also update the initial state # and metadata if self.scene_file is not None: self._load_objects_from_scene_file() self._load_metadata_from_scene_file() # We're now loaded self._loaded = True # Always stop the sim if we started it internally if not og.sim.is_stopped(): og.sim.stop() def clear(self): """ Clears any internal state before the scene is destroyed """ # Clears systems so they can be re-initialized clear_all_systems() def _initialize(self): """ Initializes state of this scene and sets up any references necessary post-loading. Should be implemented by sub-class for extended utility """ pass def initialize(self): """ Initializes state of this scene and sets up any references necessary post-loading. Subclasses should implement / extend the _initialize() method. """ assert not self._initialized, "Scene can only be initialized once! (It is already initialized)" self._initialize() # Grab relevant objects info self.update_objects_info() self.wake_scene_objects() self._initialized = True # Store initial state, which may be loaded from a scene file if specified if self.scene_file is None: init_state = self.dump_state(serialized=False) else: with open(self.scene_file, "r") as f: scene_info = json.load(f) init_state = scene_info["state"] og.sim.load_state(init_state, serialized=False) self._initial_state = init_state def _create_registry(self): """ Creates the internal registry used for tracking all objects Returns: SerializableRegistry: registry for tracking all objects """ # Create meta registry and populate with internal registries for robots, objects, and systems registry = SerializableRegistry( name="master_registry", class_types=SerializableRegistry, ) # Add registry for systems -- this is already created externally, so we just update it and pull it directly registry.add(obj=SYSTEM_REGISTRY) # Add registry for objects registry.add(obj=SerializableRegistry( name="object_registry", class_types=BaseObject, default_key="name", unique_keys=self.object_registry_unique_keys, group_keys=self.object_registry_group_keys, )) return registry def wake_scene_objects(self): """ Force wakeup sleeping objects """ for obj in self.objects: obj.wake() def get_objects_with_state(self, state): """ Get the objects with a given state in the scene. Args: state (BaseObjectState): state of the objects to get Returns: set: all objects with the given state """ return self.object_registry("states", state, set()) def get_objects_with_state_recursive(self, state): """ Get the objects with a given state and its subclasses in the scene. Args: state (BaseObjectState): state of the objects to get Returns: set: all objects with the given state and its subclasses """ objs = set() states = {state} while states: next_states = set() for state in states: objs |= self.object_registry("states", state, set()) next_states |= set(state.__subclasses__()) states = next_states return objs def _add_object(self, obj): """ Add an object to the scene's internal object tracking mechanisms. Note that if the scene is not loaded, it should load this added object alongside its other objects when scene.load() is called. The object should also be accessible through scene.objects. Args: obj (BaseObject): the object to load into the simulator """ pass def add_object(self, obj, register=True, _is_call_from_simulator=False): """ Add an object to the scene, loading it if the scene is already loaded. Note that calling add_object to an already loaded scene should only be done by the simulator's import_object() function. Args: obj (BaseObject): the object to load register (bool): whether to track this object internally in the scene registry _is_call_from_simulator (bool): whether the caller is the simulator. This should **not** be set by any callers that are not the Simulator class Returns: Usd.Prim: the prim of the loaded object if the scene was already loaded, or None if the scene is not loaded (in that case, the object is stored to be loaded together with the scene) """ # Make sure the simulator is the one calling this function assert _is_call_from_simulator, "Use import_object() for adding objects to a simulator and scene!" # If the scene is already loaded, we need to load this object separately. Otherwise, don't do anything now, # let scene._load() load the object when called later on. prim = obj.load() # If this object is fixed and is NOT an agent, disable collisions between the fixed links of the fixed objects # This is to account for cases such as Tiago, which has a fixed base which is needed for its global base joints # We do this by adding the object to our tracked collision groups if obj.fixed_base and obj.category != robot_macros.ROBOT_CATEGORY and not obj.visual_only: # TODO: Remove structure hotfix once asset collision meshes are fixed!! if obj.category in STRUCTURE_CATEGORIES: CollisionAPI.add_to_collision_group(col_group="structures", prim_path=obj.prim_path) else: for link in obj.links.values(): CollisionAPI.add_to_collision_group( col_group="fixed_base_root_links" if link == obj.root_link else "fixed_base_nonroot_links", prim_path=link.prim_path, ) # Add this object to our registry based on its type, if we want to register it if register: self.object_registry.add(obj) # Run any additional scene-specific logic with the created object self._add_object(obj) return prim def remove_object(self, obj): """ Method to remove an object from the simulator Args: obj (BaseObject): Object to remove """ # Remove from the appropriate registry if registered. # Sometimes we don't register objects to the object registry during import_object (e.g. particle templates) if self.object_registry.object_is_registered(obj): self.object_registry.remove(obj) # Remove from omni stage obj.remove() def reset(self): """ Resets this scene """ # Make sure the simulator is playing assert og.sim.is_playing(), "Simulator must be playing in order to reset the scene!" # Reset the states of all objects (including robots), including (non-)kinematic states and internal variables. assert self._initial_state is not None self.load_state(self._initial_state) og.sim.step_physics() @property def n_floors(self): """ Returns: int: Number of floors in this scene """ # Default is a single floor return 1 @property def n_objects(self): """ Returns: int: number of objects """ return len(self.objects) @property def fixed_objects(self): """ Returns: dict: Keyword-mapped objects that are fixed in the scene, IGNORING any robots. Maps object name to their object class instances (DatasetObject) """ return {obj.name: obj for obj in self.object_registry("fixed_base", True, default_val=[]) if obj.category != robot_macros.ROBOT_CATEGORY} def get_random_floor(self): """ Sample a random floor among all existing floor_heights in the scene. Most scenes in OmniGibson only have a single floor. Returns: int: an integer between 0 and self.n_floors-1 """ return np.random.randint(0, self.n_floors) def get_random_point(self, floor=None, reference_point=None, robot=None): """ Sample a random point on the given floor number. If not given, sample a random floor number. If @reference_point is given, sample a point in the same connected component as the previous point. Args: floor (None or int): floor number. None means the floor is randomly sampled Warning: if @reference_point is given, @floor must be given; otherwise, this would lead to undefined behavior reference_point (3-array): (x,y,z) if given, sample a point in the same connected component as this point Returns: 2-tuple: - int: floor number. This is the sampled floor number if @floor is None - 3-array: (x,y,z) randomly sampled point """ raise NotImplementedError() def get_shortest_path(self, floor, source_world, target_world, entire_path=False, robot=None): """ Get the shortest path from one point to another point. Args: floor (int): floor number source_world (2-array): (x,y) 2D source location in world reference frame (metric) target_world (2-array): (x,y) 2D target location in world reference frame (metric) entire_path (bool): whether to return the entire path robot (None or BaseRobot): if given, erode the traversability map to account for the robot's size Returns: 2-tuple: - (N, 2) array: array of path waypoints, where N is the number of generated waypoints - float: geodesic distance of the path """ raise NotImplementedError() def get_floor_height(self, floor=0): """ Get the height of the given floor. Default is 0.0, since we only have a single floor Args: floor: an integer identifying the floor Returns: int: height of the given floor """ return 0.0 def add_ground_plane( self, size=None, z_position: float = 0, name="ground_plane", prim_path: str = "/World/groundPlane", static_friction: float = 0.5, dynamic_friction: float = 0.5, restitution: float = 0.8, color=None, visible=True, ): """ Generate a ground plane into the simulator Args: size (None or float): If specified, sets the (x,y) size of the generated plane z_position (float): Z position of the generated plane name (str): Name to assign to the generated plane prim_path (str): Prim path for the generated plane static_friction (float): Static friction of the generated plane dynamic_friction (float): Dynamics friction of the generated plane restitution (float): Restitution of the generated plane color (None or 3-array): If specified, sets the (R,G,B) color of the generated plane visible (bool): Whether the plane should be visible or not """ plane = lazy.omni.isaac.core.objects.ground_plane.GroundPlane( prim_path=prim_path, name=name, z_position=z_position, size=size, color=None if color is None else np.array(color), visible=visible, # TODO: update with new PhysicsMaterial API # static_friction=static_friction, # dynamic_friction=dynamic_friction, # restitution=restitution, ) self._floor_plane = XFormPrim( prim_path=plane.prim_path, name=plane.name, ) # Assign floors category to the floor plane lazy.omni.isaac.core.utils.semantics.add_update_semantics( prim=self._floor_plane.prim, semantic_label="floors", type_label="class", ) def update_initial_state(self, state=None): """ Updates the initial state for this scene (which the scene will get reset to upon calling reset()) Args: state (None or dict): If specified, the state to set internally. Otherwise, will set the initial state to be the current state """ self._initial_state = self.dump_state(serialized=False) if state is None else state def update_objects_info(self): """ Updates the scene-relevant information and saves it to the active USD. Useful for reloading a scene directly from a saved USD in this format. """ # Save relevant information # Iterate over all objects and save their init info init_info = {obj.name: obj.get_init_info() for obj in self.object_registry.objects} # Compose as single dictionary and store internally self._objects_info = dict(init_info=init_info) def get_objects_info(self): """ Stored information, if any, for this scene. Structure is: "init_info": "<obj0>": <obj0> init kw/args ... "<robot0>": <robot0> init kw/args ... Returns: None or dict: If it exists, nested dictionary of relevant objects' information """ return self._objects_info @property def state_size(self): # Total state size is the state size of our registry return self._registry.state_size def _dump_state(self): # Default state for the scene is from the registry alone return self._registry.dump_state(serialized=False) def _load_state(self, state): # Default state for the scene is from the registry alone self._registry.load_state(state=state, serialized=False) def _serialize(self, state): # Default state for the scene is from the registry alone return self._registry.serialize(state=state) def _deserialize(self, state): # Default state for the scene is from the registry alone # We split this into two explicit steps, because the actual registry state size might dynamically change # as we're deserializing state_dict = self._registry.deserialize(state=state) return state_dict, self._registry.state_size @classproperty def _cls_registry(cls): # Global registry global REGISTERED_SCENES return REGISTERED_SCENES @classmethod def modify_init_info_for_restoring(cls, init_info): """ Helper function to modify a given init info for restoring a scene from corresponding scene info. Note that this function modifies IN-PLACE! Args: init_info (dict): Information for this scene from @self.get_init_info() """ # Default is pass pass
StanfordVL/OmniGibson/omnigibson/examples/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/README.md
### Code Examples The following examples illustrate the use of OmniGibson. If you are interested in just getting started as an end-user, you only need check out `./environments`. If you are looking for examples of BEHAVIOR, the benchmark of household activities that uses OmniGibson, please check the BEHAVIOR repository at https://github.com/StanfordVL/behavior. - environments: how to instantiate OmniGibson environments with interactive or static scenes, optionally with a scene selector. - learning: how to train RL policies for robot navigation using stable baselines 3, and how to save and replay demos of agents for imitation learning. - objects: how to create, load, and place objects to predefined locations or using a logic sampler (e.g. onTop(A, B)), how to change texture as a function of the temperature, and how to generate the minimum volume bounding boxes of objects. - object_states: how to change various objects states, including dusty, stained, (water sources) toggled on, (cleaning tool) soaked, sliced, and temprature, and how to save and reload object states. - observations: how to generate different observation modalities such as RGB, depth, LiDAR, segmentation, etc. - renderer: how to use the renderer directly, without the physics engine. - robots: how to (keyboard) control robots with differential drive controllers, IK controllers and sampling-based motion planners. - ros: how to run ROS with OmniGibson as if it is the real world. - scenes: how to load interactive and non-interactive scenes, how to use domain randomization (of object models and/or texture), and how to create a tour video of the scenes. - vr: how to use OmniGibson with VR. - web_ui: how to start a web server that hosts OmniGibson environments.
StanfordVL/OmniGibson/omnigibson/examples/scenes/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/scenes/scene_selector.py
import omnigibson as og from omnigibson.macros import gm from omnigibson.utils.asset_utils import get_available_g_scenes, get_available_og_scenes from omnigibson.utils.ui_utils import choose_from_options # Configure macros for maximum performance gm.USE_GPU_DYNAMICS = True gm.ENABLE_FLATCACHE = True gm.ENABLE_OBJECT_STATES = False gm.ENABLE_TRANSITION_RULES = False def main(random_selection=False, headless=False, short_exec=False): """ Prompts the user to select any available interactive scene and loads a turtlebot into it. It steps the environment 100 times with random actions sampled from the action space, using the Gym interface, resetting it 10 times. """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Choose the scene type to load scene_options = { "InteractiveTraversableScene": "Procedurally generated scene with fully interactive objects", # "StaticTraversableScene": "Monolithic scene mesh with no interactive objects", } scene_type = choose_from_options(options=scene_options, name="scene type", random_selection=random_selection) # Choose the scene model to load scenes = get_available_og_scenes() if scene_type == "InteractiveTraversableScene" else get_available_g_scenes() scene_model = choose_from_options(options=scenes, name="scene model", random_selection=random_selection) cfg = { "scene": { "type": scene_type, "scene_model": scene_model, }, "robots": [ { "type": "Turtlebot", "obs_modalities": ["scan", "rgb", "depth"], "action_type": "continuous", "action_normalize": True, }, ], } # If the scene type is interactive, also check if we want to quick load or full load the scene if scene_type == "InteractiveTraversableScene": load_options = { "Quick": "Only load the building assets (i.e.: the floors, walls, ceilings)", "Full": "Load all interactive objects in the scene", } load_mode = choose_from_options(options=load_options, name="load mode", random_selection=random_selection) if load_mode == "Quick": cfg["scene"]["load_object_categories"] = ["floors", "walls", "ceilings"] # Load the environment env = og.Environment(configs=cfg) # Allow user to move camera more easily if not gm.HEADLESS: og.sim.enable_viewer_camera_teleoperation() # Run a simple loop and reset periodically max_iterations = 10 if not short_exec else 1 for j in range(max_iterations): og.log.info("Resetting environment") env.reset() for i in range(100): action = env.action_space.sample() state, reward, done, info = env.step(action) if done: og.log.info("Episode finished after {} timesteps".format(i + 1)) break # Always close the environment at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/scenes/scene_tour_demo.py
import numpy as np import omnigibson as og import omnigibson.lazy as lazy from omnigibson.macros import gm from omnigibson.utils.asset_utils import get_available_g_scenes, get_available_og_scenes from omnigibson.utils.ui_utils import choose_from_options, KeyboardEventHandler def main(random_selection=False, headless=False, short_exec=False): """ Prompts the user to select any available interactive scene and loads it. It sets the camera to various poses and records images, and then generates a trajectory from a set of waypoints and records the resulting video. """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Make sure the example is not being run headless. If so, terminate early if gm.HEADLESS: print("This demo should only be run not headless! Exiting early.") og.shutdown() # Choose the scene type to load scene_options = { "InteractiveTraversableScene": "Procedurally generated scene with fully interactive objects", # "StaticTraversableScene": "Monolithic scene mesh with no interactive objects", } scene_type = choose_from_options(options=scene_options, name="scene type", random_selection=random_selection) # Choose the scene model to load scenes = get_available_og_scenes() if scene_type == "InteractiveTraversableScene" else get_available_g_scenes() scene_model = choose_from_options(options=scenes, name="scene model", random_selection=random_selection) print(f"scene model: {scene_model}") cfg = { "scene": { "type": scene_type, "scene_model": scene_model, }, } # If the scene type is interactive, also check if we want to quick load or full load the scene if scene_type == "InteractiveTraversableScene": load_options = { "Quick": "Only load the building assets (i.e.: the floors, walls, ceilings)", "Full": "Load all interactive objects in the scene", } load_mode = choose_from_options(options=load_options, name="load mode", random_selection=random_selection) if load_mode == "Quick": cfg["scene"]["load_object_categories"] = ["floors", "walls", "ceilings"] # Load the environment env = og.Environment(configs=cfg) # Allow user to teleoperate the camera cam_mover = og.sim.enable_viewer_camera_teleoperation() # Create a keyboard event handler for generating waypoints waypoints = [] def add_waypoint(): nonlocal waypoints pos = cam_mover.cam.get_position() print(f"Added waypoint at {pos}") waypoints.append(pos) def clear_waypoints(): nonlocal waypoints print(f"Cleared all waypoints!") waypoints = [] KeyboardEventHandler.initialize() KeyboardEventHandler.add_keyboard_callback( key=lazy.carb.input.KeyboardInput.X, callback_fn=add_waypoint, ) KeyboardEventHandler.add_keyboard_callback( key=lazy.carb.input.KeyboardInput.C, callback_fn=clear_waypoints, ) KeyboardEventHandler.add_keyboard_callback( key=lazy.carb.input.KeyboardInput.J, callback_fn=lambda: cam_mover.record_trajectory_from_waypoints( waypoints=np.array(waypoints), per_step_distance=0.02, fps=30, steps_per_frame=1, fpath=None, # This corresponds to the default path inferred from cam_mover.save_dir ), ) KeyboardEventHandler.add_keyboard_callback( key=lazy.carb.input.KeyboardInput.ESCAPE, callback_fn=lambda: env.close(), ) # Print out additional keyboard commands print(f"\t X: Save the current camera pose as a waypoint") print(f"\t C: Clear all waypoints") print(f"\t J: Record the camera trajectory from the current set of waypoints") print(f"\t ESC: Terminate the demo") # Loop indefinitely while True: env.step([]) if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/scenes/traversability_map_example.py
import os import cv2 import matplotlib.pyplot as plt import numpy as np from PIL import Image import omnigibson as og from omnigibson.utils.asset_utils import get_og_scene_path, get_available_og_scenes from omnigibson.utils.ui_utils import choose_from_options def main(random_selection=False, headless=False, short_exec=False): """ Traversable map demo Loads the floor plan and obstacles for the requested scene, and overlays them in a visual figure such that the highlighted area reflects the traversable (free-space) area """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) scenes = get_available_og_scenes() scene_model = choose_from_options(options=scenes, name="scene model", random_selection=random_selection) print(f"Generating traversability map for scene {scene_model}") trav_map_size = 200 trav_map_erosion = 2 trav_map = Image.open(os.path.join(get_og_scene_path(scene_model), "layout", "floor_trav_0.png")) trav_map = np.array(trav_map.resize((trav_map_size, trav_map_size))) trav_map = cv2.erode(trav_map, np.ones((trav_map_erosion, trav_map_erosion))) if not headless: plt.figure(figsize=(12, 12)) plt.imshow(trav_map) plt.title(f"Traversable area of {scene_model} scene") if not headless: plt.show() # Shut down omnigibson at the end og.shutdown() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/learning/navigation_policy_demo.py
""" Example training code using stable-baselines3 PPO for one BEHAVIOR activity. Note that due to the sparsity of the reward, this training code will not converge and achieve task success. This only serves as a starting point that users can further build upon. """ import argparse import os, time, cv2 import yaml import omnigibson as og from omnigibson import example_config_path from omnigibson.macros import gm from omnigibson.utils.python_utils import meets_minimum_version try: import gym import torch as th import torch.nn as nn import tensorboard from stable_baselines3 import PPO from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.preprocessing import maybe_transpose from stable_baselines3.common.torch_layers import BaseFeaturesExtractor from stable_baselines3.common.utils import set_random_seed from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback except ModuleNotFoundError: og.log.error("torch, stable-baselines3, or tensorboard is not installed. " "See which packages are missing, and then run the following for any missing packages:\n" "pip install stable-baselines3[extra]\n" "pip install tensorboard\n" "pip install shimmy>=0.2.1\n" "Also, please update gym to >=0.26.1 after installing sb3: pip install gym>=0.26.1") exit(1) assert meets_minimum_version(gym.__version__, "0.26.1"), "Please install/update gym to version >= 0.26.1" # We don't need object states nor transitions rules, so we disable them now, and also enable flatcache for maximum speed gm.ENABLE_OBJECT_STATES = False gm.ENABLE_TRANSITION_RULES = False gm.ENABLE_FLATCACHE = True class CustomCombinedExtractor(BaseFeaturesExtractor): def __init__(self, observation_space: gym.spaces.Dict): # We do not know features-dim here before going over all the items, # so put something dummy for now. PyTorch requires calling super().__init__(observation_space, features_dim=1) extractors = {} self.step_index = 0 self.img_save_dir = 'img_save_dir' os.makedirs(self.img_save_dir, exist_ok=True) total_concat_size = 0 feature_size = 128 for key, subspace in observation_space.spaces.items(): # For now, only keep RGB observations if "rgb" in key: og.log.info(f"obs {key} shape: {subspace.shape}") n_input_channels = subspace.shape[0] # channel first cnn = nn.Sequential( nn.Conv2d(n_input_channels, 4, kernel_size=8, stride=4, padding=0), nn.ReLU(), nn.MaxPool2d(2), nn.Conv2d(4, 8, kernel_size=4, stride=2, padding=0), nn.ReLU(), nn.MaxPool2d(2), nn.Conv2d(8, 4, kernel_size=3, stride=1, padding=0), nn.ReLU(), nn.Flatten(), ) test_tensor = th.zeros(subspace.shape) with th.no_grad(): n_flatten = cnn(test_tensor[None]).shape[1] fc = nn.Sequential(nn.Linear(n_flatten, feature_size), nn.ReLU()) extractors[key] = nn.Sequential(cnn, fc) total_concat_size += feature_size self.extractors = nn.ModuleDict(extractors) # Update the features dim manually self._features_dim = total_concat_size def forward(self, observations) -> th.Tensor: encoded_tensor_list = [] self.step_index += 1 # self.extractors contain nn.Modules that do all the processing. for key, extractor in self.extractors.items(): encoded_tensor_list.append(extractor(observations[key])) feature = th.cat(encoded_tensor_list, dim=1) return feature def main(): # Parse args parser = argparse.ArgumentParser(description="Train or evaluate a PPO agent in BEHAVIOR") parser.add_argument( "--checkpoint", type=str, default=None, help="Absolute path to desired PPO checkpoint to load for evaluation", ) parser.add_argument( "--eval", action="store_true", help="If set, will evaluate the PPO agent found from --checkpoint", ) args = parser.parse_args() tensorboard_log_dir = os.path.join("log_dir", time.strftime("%Y%m%d-%H%M%S")) os.makedirs(tensorboard_log_dir, exist_ok=True) prefix = '' seed = 0 # Load config with open(f"{example_config_path}/turtlebot_nav.yaml", "r") as f: cfg = yaml.load(f, Loader=yaml.FullLoader) # Make sure flattened obs and action space is used cfg["env"]["flatten_action_space"] = True cfg["env"]["flatten_obs_space"] = True # Only use RGB obs cfg["robots"][0]["obs_modalities"] = ["rgb"] # If we're not eval, turn off the start / goal markers so the agent doesn't see them if not args.eval: cfg["task"]["visualize_goal"] = False env = og.Environment(configs=cfg) # If we're evaluating, hide the ceilings and enable camera teleoperation so the user can easily # visualize the rollouts dynamically if args.eval: ceiling = env.scene.object_registry("name", "ceilings") ceiling.visible = False og.sim.enable_viewer_camera_teleoperation() # Set the set set_random_seed(seed) env.reset() policy_kwargs = dict( features_extractor_class=CustomCombinedExtractor, ) os.makedirs(tensorboard_log_dir, exist_ok=True) if args.eval: assert args.checkpoint is not None, "If evaluating a PPO policy, @checkpoint argument must be specified!" model = PPO.load(args.checkpoint) og.log.info("Starting evaluation...") mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=50) og.log.info("Finished evaluation!") og.log.info(f"Mean reward: {mean_reward} +/- {std_reward:.2f}") else: model = PPO( "MultiInputPolicy", env, verbose=1, tensorboard_log=tensorboard_log_dir, policy_kwargs=policy_kwargs, n_steps=20 * 10, batch_size=8, device='cuda', ) checkpoint_callback = CheckpointCallback(save_freq=1000, save_path=tensorboard_log_dir, name_prefix=prefix) eval_callback = EvalCallback(eval_env=env, eval_freq=1000, n_eval_episodes=20) callback = CallbackList([checkpoint_callback, eval_callback]) og.log.debug(model.policy) og.log.info(f"model: {model}") og.log.info("Starting training...") model.learn( total_timesteps=10000000, callback=callback, ) og.log.info("Finished training!") if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/learning/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/simulator/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/simulator/sim_save_load_example.py
import os import numpy as np import omnigibson as og import omnigibson.lazy as lazy from omnigibson.utils.ui_utils import KeyboardEventHandler TEST_OUT_PATH = "" # Define output directory here. def main(random_selection=False, headless=False, short_exec=False): """ Prompts the user to select whether they are saving or loading an environment, and interactively shows how an environment can be saved or restored. """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) cfg = { "scene": { "type": "InteractiveTraversableScene", "scene_model": "Rs_int", "load_object_categories": ["floors", "walls", "bed", "bottom_cabinet", "chair"], }, "robots": [ { "type": "Turtlebot", "obs_modalities": ["rgb", "depth"], }, ], } # Create the environment env = og.Environment(configs=cfg) # Set the camera to a good angle def set_camera_pose(): og.sim.viewer_camera.set_position_orientation( position=np.array([-0.229375, -3.40576 , 7.26143 ]), orientation=np.array([ 0.27619733, -0.00230233, -0.00801152, 0.9610648 ]), ) set_camera_pose() # Give user instructions, and then loop until completed completed = short_exec if not short_exec and not random_selection: # Notify user to manipulate environment until ready, then press Z to exit print() print("Modify the scene by SHIFT + left clicking objects and dragging them. Once finished, press Z.") # Register callback so user knows to press space once they're done manipulating the scene def complete_loop(): nonlocal completed completed = True KeyboardEventHandler.add_keyboard_callback(lazy.carb.input.KeyboardInput.Z, complete_loop) while not completed: env.step(np.random.uniform(-1, 1, env.robots[0].action_dim)) print("Completed scene modification, saving scene...") save_path = os.path.join(TEST_OUT_PATH, "saved_stage.json") og.sim.save(json_path=save_path) print("Re-loading scene...") og.sim.restore(json_path=save_path) # Take a sim step and play og.sim.step() og.sim.play() set_camera_pose() # Loop until user terminates completed = short_exec if not short_exec and not random_selection: # Notify user to manipulate environment until ready, then press Z to exit print() print("View reloaded scene. Once finished, press Z.") # Register callback so user knows to press space once they're done manipulating the scene KeyboardEventHandler.add_keyboard_callback(lazy.carb.input.KeyboardInput.Z, complete_loop) while not completed: env.step(np.zeros(env.robots[0].action_dim)) # Shutdown omnigibson at the end og.shutdown() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/teleoperation/robot_teleoperate_demo.py
""" Example script for using external devices to teleoperate a robot. """ import omnigibson as og from omnigibson.utils.ui_utils import choose_from_options ROBOTS = { "FrankaPanda": "Franka Emika Panda (default)", "Fetch": "Mobile robot with one arm", "Tiago": "Mobile robot with two arms", } TELEOP_METHOD = { "keyboard": "Keyboard (default)", "spacemouse": "SpaceMouse", "oculus": "Oculus Quest", "vision": "Human Keypoints with Camera", } def main(): """ Spawn a robot in an empty scene with a breakfast table and some toys. Users can try pick and place the toy into the basket using selected external devices and robot of their choice. """ from omnigibson.utils.teleop_utils import TeleopSystem from telemoma.utils.camera_utils import RealSenseCamera from telemoma.configs.base_config import teleop_config robot_name = choose_from_options(options=ROBOTS, name="robot") arm_teleop_method = choose_from_options(options=TELEOP_METHOD, name="robot arm teleop method") if robot_name != "FrankaPanda": base_teleop_method = choose_from_options(options=TELEOP_METHOD, name="robot base teleop method") else: base_teleop_method = "keyboard" # Dummy value since FrankaPanda does not have a base # Generate teleop config teleop_config.arm_left_controller = arm_teleop_method teleop_config.arm_right_controller = arm_teleop_method teleop_config.base_controller = base_teleop_method teleop_config.interface_kwargs["keyboard"] = {"arm_speed_scaledown": 0.04} teleop_config.interface_kwargs["spacemouse"] = {"arm_speed_scaledown": 0.04} if arm_teleop_method == "vision" or base_teleop_method == "vision": teleop_config.interface_kwargs["vision"] = {"camera": RealSenseCamera()} # Create the config for generating the environment we want scene_cfg = {"type": "Scene"} # Add the robot we want to load robot_cfg = { "type": robot_name, "obs_modalities": ["rgb"], "action_normalize": False, "grasping_mode": "assisted", } arms = ["left", "right"] if robot_name == "Tiago" else ["0"] robot_cfg["controller_config"] = {} for arm in arms: robot_cfg["controller_config"][f"arm_{arm}"] = { "name": "InverseKinematicsController", "command_input_limits": None, } robot_cfg["controller_config"][f"gripper_{arm}"] = { "name": "MultiFingerGripperController", "command_input_limits": (0.0, 1.0), "mode": "smooth", } object_cfg = [ { "type": "DatasetObject", "prim_path": "/World/breakfast_table", "name": "breakfast_table", "category": "breakfast_table", "model": "kwmfdg", "bounding_box": [2, 1, 0.4], "position": [0.8, 0, 0.3], "orientation": [0, 0, 0.707, 0.707], }, { "type": "DatasetObject", "prim_path": "/World/frail", "name": "frail", "category": "frail", "model": "zmjovr", "scale": [2, 2, 2], "position": [0.6, -0.35, 0.5], }, { "type": "DatasetObject", "prim_path": "/World/toy_figure1", "name": "toy_figure1", "category": "toy_figure", "model": "issvzv", "scale": [0.75, 0.75, 0.75], "position": [0.6, 0, 0.5], }, { "type": "DatasetObject", "prim_path": "/World/toy_figure2", "name": "toy_figure2", "category": "toy_figure", "model": "nncqfn", "scale": [0.75, 0.75, 0.75], "position": [0.6, 0.15, 0.5], }, { "type": "DatasetObject", "prim_path": "/World/toy_figure3", "name": "toy_figure3", "category": "toy_figure", "model": "eulekw", "scale": [0.25, 0.25, 0.25], "position": [0.6, 0.3, 0.5], } ] cfg = dict(scene=scene_cfg, robots=[robot_cfg], objects=object_cfg) # Create the environment env = og.Environment(configs=cfg) env.reset() # update viewer camera pose og.sim.viewer_camera.set_position_orientation([-0.22, 0.99, 1.09], [-0.14, 0.47, 0.84, -0.23]) # Start teleoperation system robot = env.robots[0] # Initialize teleoperation system teleop_sys = TeleopSystem(config=teleop_config, robot=robot, show_control_marker=True) teleop_sys.start() # main simulation loop for _ in range(10000): action = teleop_sys.get_action(teleop_sys.get_obs()) env.step(action) # Shut down the environment cleanly at the end teleop_sys.stop() env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/teleoperation/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/teleoperation/vr_simple_demo.py
""" Example script for interacting with OmniGibson scenes with VR and BehaviorRobot. """ import omnigibson as og from omnigibson.utils.teleop_utils import OVXRSystem def main(): """ Spawn a BehaviorRobot in Rs_int and users can navigate around and interact with the scene using VR. """ # Create the config for generating the environment we want scene_cfg = {"type": "Scene"} #"InteractiveTraversableScene", "scene_model": "Rs_int"} robot0_cfg = { "type": "Tiago", "controller_config": { "gripper_left": {"command_input_limits": "default"}, "gripper_right": {"command_input_limits": "default"}, } } cfg = dict(scene=scene_cfg, robots=[robot0_cfg]) # Create the environment env = og.Environment(configs=cfg) env.reset() # start vrsys vrsys = OVXRSystem(robot=env.robots[0], show_control_marker=False, system="SteamVR", align_anchor_to_robot_base=True) vrsys.start() # set headset position to be 1m above ground and facing +x vrsys.set_initial_transform(pos=[0, 0, 1], orn=[0, 0, 0, 1]) # main simulation loop for _ in range(10000): # step the VR system to get the latest data from VR runtime vrsys.update() # generate robot action and step the environment action = vrsys.teleop_data_to_action() env.step(action) # Shut down the environment cleanly at the end vrsys.stop() env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/action_primitives/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/action_primitives/rs_int_example.py
import os import yaml import numpy as np import omnigibson as og from omnigibson.macros import gm from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives, StarterSemanticActionPrimitiveSet # Don't use GPU dynamics and use flatcache for performance boost # gm.USE_GPU_DYNAMICS = True # gm.ENABLE_FLATCACHE = True def execute_controller(ctrl_gen, env): for action in ctrl_gen: env.step(action) def main(): """ Demonstrates how to use the action primitives to pick and place an object in a crowded scene. It loads Rs_int with a Fetch robot, and the robot picks and places an apple. """ # Load the config config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml") config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) # Update it to run a grocery shopping task config["scene"]["scene_model"] = "Rs_int" config["scene"]["not_load_object_categories"] = ["ceilings"] config["objects"] = [ { "type": "DatasetObject", "name": "apple", "category": "apple", "model": "agveuv", "position": [-0.3, -1.1, 0.5], "orientation": [0, 0, 0, 1] }, ] # Load the environment env = og.Environment(configs=config) scene = env.scene robot = env.robots[0] # Allow user to move camera more easily og.sim.enable_viewer_camera_teleoperation() controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False) cabinet = scene.object_registry("name", "bottom_cabinet_slgzfc_0") apple = scene.object_registry("name", "apple") # Grasp apple print("Executing controller") execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, apple), env) print("Finished executing grasp") # Place on cabinet print("Executing controller") execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, cabinet), env) print("Finished executing place") if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/action_primitives/solve_simple_task.py
import os import yaml import numpy as np import omnigibson as og from omnigibson.macros import gm from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives, StarterSemanticActionPrimitiveSet # Don't use GPU dynamics and use flatcache for performance boost # gm.USE_GPU_DYNAMICS = True # gm.ENABLE_FLATCACHE = True def execute_controller(ctrl_gen, env): for action in ctrl_gen: env.step(action) def main(): """ Demonstrates how to use the action primitives to pick and place an object in an empty scene. It loads Rs_int with a Fetch robot, and the robot picks and places a bottle of cologne. """ # Load the config config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml") config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) # Update it to create a custom environment and run some actions config["scene"]["scene_model"] = "Rs_int" config["scene"]["load_object_categories"] = ["floors", "ceilings", "walls", "coffee_table"] config["objects"] = [ { "type": "DatasetObject", "name": "cologne", "category": "bottle_of_cologne", "model": "lyipur", "position": [-0.3, -0.8, 0.5], "orientation": [0, 0, 0, 1] }, { "type": "DatasetObject", "name": "table", "category": "breakfast_table", "model": "rjgmmy", "scale": [0.3, 0.3, 0.3], "position": [-0.7, 0.5, 0.2], "orientation": [0, 0, 0, 1] } ] # Load the environment env = og.Environment(configs=config) scene = env.scene robot = env.robots[0] # Allow user to move camera more easily og.sim.enable_viewer_camera_teleoperation() controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False) # Grasp of cologne grasp_obj = scene.object_registry("name", "cologne") print("Executing controller") execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, grasp_obj), env) print("Finished executing grasp") # Place cologne on another table print("Executing controller") table = scene.object_registry("name", "table") execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, table), env) print("Finished executing place") if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/action_primitives/wip_solve_behavior_task.py
import os import yaml import numpy as np import omnigibson as og from omnigibson.macros import gm from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives, StarterSemanticActionPrimitiveSet # Don't use GPU dynamics and use flatcache for performance boost # gm.USE_GPU_DYNAMICS = True # gm.ENABLE_FLATCACHE = True def execute_controller(ctrl_gen, env): for action in ctrl_gen: env.step(action) def main(): """ Demonstrates how to use the action primitives to solve a simple BEHAVIOR-1K task. It loads Benevolence_1_int with a Fetch robot, and the robot attempts to solve the picking_up_trash task using a hardcoded sequence of primitives. """ # Load the config config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml") config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) # Update it to run a grocery shopping task config["scene"]["scene_model"] = "Benevolence_1_int" config["scene"]["load_task_relevant_only"] = True config["scene"]["not_load_object_categories"] = ["ceilings"] config["task"] = { "type": "BehaviorTask", "activity_name": "picking_up_trash", "activity_definition_id": 0, "activity_instance_id": 0, "predefined_problem": None, "online_object_sampling": False, } # Load the environment env = og.Environment(configs=config) scene = env.scene robot = env.robots[0] # Allow user to move camera more easily og.sim.enable_viewer_camera_teleoperation() controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False) # Grasp can of soda grasp_obj = scene.object_registry("name", "can_of_soda_89") print("Executing controller") execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, grasp_obj), env) print("Finished executing grasp") # Place can in trash can print("Executing controller") trash = scene.object_registry("name", "trash_can_85") execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_INSIDE, trash), env) print("Finished executing place") if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/environments/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/environments/navigation_env_demo.py
import os import yaml import omnigibson as og from omnigibson.utils.ui_utils import choose_from_options def main(random_selection=False, headless=False, short_exec=False): """ Prompts the user to select a type of scene and loads a turtlebot into it, generating a Point-Goal navigation task within the environment. It steps the environment 100 times with random actions sampled from the action space, using the Gym interface, resetting it 10 times. """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Load the config config_filename = os.path.join(og.example_config_path, f"turtlebot_nav.yaml") config = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) # check if we want to quick load or full load the scene load_options = { "Quick": "Only load the building assets (i.e.: the floors, walls, ceilings)", "Full": "Load all interactive objects in the scene", } load_mode = choose_from_options(options=load_options, name="load mode", random_selection=random_selection) if load_mode == "Quick": config["scene"]["load_object_categories"] = ["floors", "walls", "ceilings"] # Load the environment env = og.Environment(configs=config) # Allow user to move camera more easily og.sim.enable_viewer_camera_teleoperation() # Run a simple loop and reset periodically max_iterations = 10 if not short_exec else 1 for j in range(max_iterations): og.log.info("Resetting environment") env.reset() for i in range(100): action = env.action_space.sample() state, reward, done, info = env.step(action) if done: og.log.info("Episode finished after {} timesteps".format(i + 1)) break # Always close the environment at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/environments/behavior_env_demo.py
import os import yaml import omnigibson as og from omnigibson.macros import gm from omnigibson.utils.ui_utils import choose_from_options # Make sure object states are enabled gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = True def main(random_selection=False, headless=False, short_exec=False): """ Generates a BEHAVIOR Task environment in an online fashion. It steps the environment 100 times with random actions sampled from the action space, using the Gym interface, resetting it 10 times. """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Ask the user whether they want online object sampling or not sampling_options = { False: "Use a pre-sampled cached BEHAVIOR activity scene", True: "Sample the BEHAVIOR activity in an online fashion", } should_sample = choose_from_options(options=sampling_options, name="online object sampling", random_selection=random_selection) # Load the pre-selected configuration and set the online_sampling flag config_filename = os.path.join(og.example_config_path, "fetch_behavior.yaml") cfg = yaml.load(open(config_filename, "r"), Loader=yaml.FullLoader) cfg["task"]["online_object_sampling"] = should_sample # Load the environment env = og.Environment(configs=cfg) # Allow user to move camera more easily og.sim.enable_viewer_camera_teleoperation() # Run a simple loop and reset periodically max_iterations = 10 if not short_exec else 1 for j in range(max_iterations): og.log.info("Resetting environment") env.reset() for i in range(100): action = env.action_space.sample() state, reward, done, info = env.step(action) if done: og.log.info("Episode finished after {} timesteps".format(i + 1)) break # Always close the environment at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/attachment_demo.py
import yaml import numpy as np import omnigibson as og from omnigibson.macros import gm # Make sure object states are enabled gm.ENABLE_OBJECT_STATES = True def main(random_selection=False, headless=False, short_exec=False): """ Demo of attachment of different parts of a shelf """ cfg = yaml.load(open(f"{og.example_config_path}/default_cfg.yaml", "r"), Loader=yaml.FullLoader) # Add objects that we want to create obj_cfgs = [] obj_cfgs.append(dict( type="LightObject", name="light", light_type="Sphere", radius=0.01, intensity=5000, position=[0, 0, 1.0], )) base_z = 0.2 delta_z = 0.01 idx = 0 obj_cfgs.append(dict( type="DatasetObject", name="shelf_back_panel", category="shelf_back_panel", model="gjsnrt", position=[0, 0, 0.01], fixed_base=True, abilities={"attachable": {}}, )) idx += 1 obj_cfgs.append(dict( type="DatasetObject", name=f"shelf_side_left", category="shelf_side", model="bxfkjj", position=[-0.4, 0, base_z + delta_z * idx], abilities={"attachable": {}}, )) idx += 1 obj_cfgs.append(dict( type="DatasetObject", name=f"shelf_side_right", category="shelf_side", model="yujrmw", position=[0.4, 0, base_z + delta_z * idx], abilities={"attachable": {}}, )) idx += 1 ys = [-0.93, -0.61, -0.29, 0.03, 0.35, 0.68] for i in range(6): obj_cfgs.append(dict( type="DatasetObject", name=f"shelf_shelf_{i}", category="shelf_shelf", model="ymtnqa", position=[0, ys[i], base_z + delta_z * idx], abilities={"attachable": {}}, )) idx += 1 obj_cfgs.append(dict( type="DatasetObject", name="shelf_top_0", category="shelf_top", model="pfiole", position=[0, 1.0, base_z + delta_z * idx], abilities={"attachable": {}}, )) idx += 1 obj_cfgs.append(dict( type="DatasetObject", name=f"shelf_baseboard", category="shelf_baseboard", model="hlhneo", position=[0, -0.97884506, base_z + delta_z * idx], abilities={"attachable": {}}, )) idx += 1 cfg["objects"] = obj_cfgs env = og.Environment(configs=cfg) # Set viewer camera pose og.sim.viewer_camera.set_position_orientation( position=np.array([-1.689292, -2.11718198, 0.93332228]), orientation=np.array([0.57687967, -0.22995655, -0.29022759, 0.72807814]), ) for _ in range(10): env.step([]) shelf_baseboard = og.sim.scene.object_registry("name", "shelf_baseboard") shelf_baseboard.set_position_orientation([0, -0.979, 0.26], [0, 0, 0, 1]) shelf_baseboard.keep_still() shelf_baseboard.set_linear_velocity(np.array([-0.2, 0, 0])) shelf_side_left = og.sim.scene.object_registry("name", "shelf_side_left") shelf_side_left.set_position_orientation([-0.4, 0.0, 0.2], [0, 0, 0, 1]) shelf_side_left.keep_still() input("\n\nShelf parts fall to their correct poses and get automatically attached to the back panel.\n" "You can try to drag (Shift + Left-CLICK + Drag) parts of the shelf to break it apart (you may need to zoom out and drag with a larger force).\n" "Press [ENTER] to continue.\n") for _ in range(5000): og.sim.step() og.shutdown() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/heat_source_or_sink_demo.py
import numpy as np import omnigibson as og from omnigibson import object_states from omnigibson.macros import gm # Make sure object states are enabled gm.ENABLE_OBJECT_STATES = True def main(): # Create the scene config to load -- empty scene with a stove object added cfg = { "scene": { "type": "Scene", }, "objects": [ { "type": "DatasetObject", "name": "stove", "category": "stove", "model": "qbjiva", "bounding_box": [1.611, 0.769, 1.147], "abilities": { "heatSource": {"requires_toggled_on": True}, "toggleable": {}, }, "position": [0, 0, 0.61], } ], } # Create the environment env = og.Environment(configs=cfg) # Get reference to stove object stove = env.scene.object_registry("name", "stove") # Set camera to appropriate viewing pose og.sim.viewer_camera.set_position_orientation( position=np.array([-0.0792399, -1.30104, 1.51981]), orientation=np.array([0.54897692, 0.00110359, 0.00168013, 0.83583509]), ) # Make sure necessary object states are included with the stove assert object_states.HeatSourceOrSink in stove.states assert object_states.ToggledOn in stove.states # Take a few steps so that visibility propagates for _ in range(5): env.step(np.array([])) # Heat source is off. print("Heat source is OFF.") heat_source_state = stove.states[object_states.HeatSourceOrSink].get_value() assert not heat_source_state # Toggle on stove, notify user input("Heat source will now turn ON: Press ENTER to continue.") stove.states[object_states.ToggledOn].set_value(True) assert stove.states[object_states.ToggledOn].get_value() # Need to take a step to update the state. env.step(np.array([])) # Heat source is on heat_source_state = stove.states[object_states.HeatSourceOrSink].get_value() assert heat_source_state for _ in range(500): env.step(np.array([])) # Toggle off stove, notify user input("Heat source will now turn OFF: Press ENTER to continue.") stove.states[object_states.ToggledOn].set_value(False) assert not stove.states[object_states.ToggledOn].get_value() for _ in range(200): env.step(np.array([])) # Move stove, notify user input("Heat source is now moving: Press ENTER to continue.") stove.set_position(np.array([0, 1.0, 0.61])) for i in range(100): env.step(np.array([])) # Toggle on stove again, notify user input("Heat source will now turn ON: Press ENTER to continue.") stove.states[object_states.ToggledOn].set_value(True) assert stove.states[object_states.ToggledOn].get_value() for i in range(500): env.step(np.array([])) # Shutdown environment at end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/onfire_demo.py
import numpy as np import omnigibson as og from omnigibson import object_states from omnigibson.macros import gm # Make sure object states are enabled gm.ENABLE_OBJECT_STATES = True def main(random_selection=False, headless=False, short_exec=False): """ Demo of on fire state. Loads a stove (toggled on), and two apples. The first apple will be ignited by the stove first, then the second apple will be ignited by the first apple. """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Define specific objects we want to load in with the scene directly obj_configs = [] # Light obj_configs.append(dict( type="LightObject", light_type="Sphere", name="light", radius=0.01, intensity=1e8, position=[-2.0, -2.0, 1.0], )) # Stove obj_configs.append(dict( type="DatasetObject", name="stove", category="stove", model="yhjzwg", bounding_box=[1.185, 0.978, 1.387], position=[0, 0, 0.69], )) # 2 Apples for i in range(2): obj_configs.append(dict( type="DatasetObject", name=f"apple{i}", category="apple", model="agveuv", bounding_box=[0.065, 0.065, 0.077], position=[0, i * 0.07, 2.0], abilities={"flammable": {"ignition_temperature": 100, "distance_threshold": 0.5}}, )) # Create the scene config to load -- empty scene with desired objects cfg = { "scene": { "type": "Scene", }, "objects": obj_configs, } # Create the environment env = og.Environment(configs=cfg) # Get reference to relevant objects stove = env.scene.object_registry("name", "stove") apples = list(env.scene.object_registry("category", "apple")) # Set camera to appropriate viewing pose og.sim.viewer_camera.set_position_orientation( position=np.array([-0.42246569, -0.34745704, 1.56810353]), orientation=np.array([0.50083786, -0.10407796, -0.17482619, 0.84128772]), ) # Let objects settle for _ in range(10): env.step(np.array([])) # Turn on the stove stove.states[object_states.ToggledOn].set_value(True) # The first apple will be affected by the stove apples[0].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0.11, 0, 0.1])) # The second apple will NOT be affected by the stove, but will be affected by the first apple once it's on fire. apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0.32, 0, 0.1])) steps = 0 max_steps = -1 if not short_exec else 1000 # Main recording loop while steps != max_steps: env.step(np.array([])) temps = [f"{apple.states[object_states.Temperature].get_value():>20.2f}" for apple in apples] print(f"{'Apple temperature:':<20}", *temps, end="\r") steps += 1 # Always close env at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/temperature_demo.py
import numpy as np import omnigibson as og from omnigibson import object_states from omnigibson.macros import gm # Make sure object states are enabled gm.ENABLE_OBJECT_STATES = True def main(random_selection=False, headless=False, short_exec=False): """ Demo of temperature change Loads a stove, a microwave and an oven, all toggled on, and five frozen apples The user can move the apples to see them change from frozen, to normal temperature, to cooked and burnt This demo also shows how to load objects ToggledOn and how to set the initial temperature of an object """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Define specific objects we want to load in with the scene directly obj_configs = [] # Light obj_configs.append(dict( type="LightObject", light_type="Sphere", name="light", radius=0.01, intensity=1e8, position=[-2.0, -2.0, 1.0], )) # Stove obj_configs.append(dict( type="DatasetObject", name="stove", category="stove", model="yhjzwg", bounding_box=[1.185, 0.978, 1.387], position=[0, 0, 0.69], )) # Microwave obj_configs.append(dict( type="DatasetObject", name="microwave", category="microwave", model="hjjxmi", bounding_box=[0.384, 0.256, 0.196], position=[2.5, 0, 0.10], )) # Oven obj_configs.append(dict( type="DatasetObject", name="oven", category="oven", model="wuinhm", bounding_box=[1.075, 0.926, 1.552], position=[-1.25, 0, 0.88], )) # Tray obj_configs.append(dict( type="DatasetObject", name="tray", category="tray", model="xzcnjq", bounding_box=[0.319, 0.478, 0.046], position=[-0.25, -0.12, 1.26], )) # Fridge obj_configs.append(dict( type="DatasetObject", name="fridge", category="fridge", model="hivvdf", bounding_box=[1.065, 1.149, 1.528], abilities={ "coldSource": { "temperature": -100.0, "requires_inside": True, } }, position=[1.25, 0, 0.81], )) # 5 Apples for i in range(5): obj_configs.append(dict( type="DatasetObject", name=f"apple{i}", category="apple", model="agveuv", bounding_box=[0.065, 0.065, 0.077], position=[0, i * 0.1, 5.0], )) # Create the scene config to load -- empty scene with desired objects cfg = { "scene": { "type": "Scene", }, "objects": obj_configs, } # Create the environment env = og.Environment(configs=cfg) # Get reference to relevant objects stove = env.scene.object_registry("name", "stove") microwave = env.scene.object_registry("name", "microwave") oven = env.scene.object_registry("name", "oven") tray = env.scene.object_registry("name", "tray") fridge = env.scene.object_registry("name", "fridge") apples = list(env.scene.object_registry("category", "apple")) # Set camera to appropriate viewing pose og.sim.viewer_camera.set_position_orientation( position=np.array([ 0.46938863, -3.97887141, 1.64106008]), orientation=np.array([0.63311689, 0.00127259, 0.00155577, 0.77405359]), ) # Let objects settle for _ in range(25): env.step(np.array([])) # Turn on all scene objects stove.states[object_states.ToggledOn].set_value(True) microwave.states[object_states.ToggledOn].set_value(True) oven.states[object_states.ToggledOn].set_value(True) # Set initial temperature of the apples to -50 degrees Celsius, and move the apples to different objects for apple in apples: apple.states[object_states.Temperature].set_value(-50) apples[0].states[object_states.Inside].set_value(oven, True) apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0, 0, 0.1])) apples[2].states[object_states.OnTop].set_value(tray, True) apples[3].states[object_states.Inside].set_value(fridge, True) apples[4].states[object_states.Inside].set_value(microwave, True) steps = 0 max_steps = -1 if not short_exec else 1000 # Main recording loop locations = [f'{loc:>20}' for loc in ["Inside oven", "On stove", "On tray", "Inside fridge", "Inside microwave"]] print() print(f"{'Apple location:':<20}", *locations) while steps != max_steps: env.step(np.array([])) temps = [f"{apple.states[object_states.Temperature].get_value():>20.2f}" for apple in apples] print(f"{'Apple temperature:':<20}", *temps, end="\r") steps += 1 # Always close env at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/sample_kinematics_demo.py
import os import numpy as np import omnigibson as og from omnigibson import object_states from omnigibson.macros import gm from omnigibson.objects import DatasetObject # Make sure object states are enabled gm.ENABLE_OBJECT_STATES = True def main(random_selection=False, headless=False, short_exec=False): """ Demo to use the raycasting-based sampler to load objects onTop and/or inside another Loads a cabinet, a microwave open on top of it, and two plates with apples on top, one inside and one on top of the cabinet Then loads a shelf and cracker boxes inside of it """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Create the scene config to load -- empty scene cfg = { "scene": { "type": "Scene", }, } # Define objects we want to sample at runtime microwave_cfg = dict( type="DatasetObject", name="microwave", category="microwave", model="hjjxmi", bounding_box=[0.768, 0.512, 0.392], ) cabinet_cfg = dict( type="DatasetObject", name="cabinet", category="bottom_cabinet", model="bamfsz", bounding_box=[1.075, 1.131, 1.355], ) plate_cfgs = [dict( type="DatasetObject", name=f"plate{i}", category="plate", model="iawoof", bounding_box=np.array([0.20, 0.20, 0.05]), ) for i in range(2)] apple_cfgs = [dict( type="DatasetObject", name=f"apple{i}", category="apple", model="agveuv", bounding_box=[0.065, 0.065, 0.077], ) for i in range(4)] shelf_cfg = dict( type="DatasetObject", name=f"shelf", category="shelf", model="pkgbcp", bounding_box=np.array([1.0, 0.4, 2.0]), ) box_cfgs = [dict( type="DatasetObject", name=f"box{i}", category="box_of_crackers", model="cmdigf", bounding_box=np.array([0.2, 0.05, 0.3]), ) for i in range(5)] # Compose objects cfg objects_cfg = [ microwave_cfg, cabinet_cfg, *plate_cfgs, *apple_cfgs, shelf_cfg, *box_cfgs, ] # Update their spawn positions so they don't collide immediately for i, obj_cfg in enumerate(objects_cfg): obj_cfg["position"] = [100 + i, 100 + i, 100 + i] cfg["objects"] = objects_cfg # Create the environment env = og.Environment(configs=cfg) env.step([]) # Sample microwave and boxes sample_boxes_on_shelf(env) sample_microwave_plates_apples(env) max_steps = 100 if short_exec else -1 step = 0 while step != max_steps: env.step(np.array([])) step += 1 # Always close environment at the end env.close() def sample_microwave_plates_apples(env): microwave = env.scene.object_registry("name", "microwave") cabinet = env.scene.object_registry("name", "cabinet") plates = list(env.scene.object_registry("category", "plate")) apples = list(env.scene.object_registry("category", "apple")) # Place the cabinet at a pre-determined location on the floor og.log.info("Placing cabinet on the floor...") cabinet.set_orientation([0, 0, 0, 1.0]) env.step(np.array([])) offset = cabinet.get_position()[2] - cabinet.aabb_center[2] cabinet.set_position(np.array([1.0, 0, cabinet.aabb_extent[2] / 2]) + offset) env.step(np.array([])) # Set microwave on top of the cabinet, open it, and step 100 times og.log.info("Placing microwave OnTop of the cabinet...") assert microwave.states[object_states.OnTop].set_value(cabinet, True) assert microwave.states[object_states.Open].set_value(True) og.log.info("Microwave placed.") for _ in range(50): env.step(np.array([])) og.log.info("Placing plates") n_apples_per_plate = int(len(apples) / len(plates)) for i, plate in enumerate(plates): # Put the 1st plate in the microwave if i == 0: og.log.info(f"Placing plate {i} Inside the microwave...") assert plate.states[object_states.Inside].set_value(microwave, True) else: og.log.info(f"Placing plate {i} OnTop the microwave...") assert plate.states[object_states.OnTop].set_value(microwave, True) og.log.info(f"Plate {i} placed.") for _ in range(50): env.step(np.array([])) og.log.info(f"Placing {n_apples_per_plate} apples OnTop of the plate...") for j in range(n_apples_per_plate): apple_idx = i * n_apples_per_plate + j apple = apples[apple_idx] assert apple.states[object_states.OnTop].set_value(plate, True) og.log.info(f"Apple {apple_idx} placed.") for _ in range(50): env.step(np.array([])) def sample_boxes_on_shelf(env): shelf = env.scene.object_registry("name", "shelf") boxes = list(env.scene.object_registry("category", "box_of_crackers")) # Place the shelf at a pre-determined location on the floor og.log.info("Placing shelf on the floor...") shelf.set_orientation([0, 0, 0, 1.0]) env.step(np.array([])) offset = shelf.get_position()[2] - shelf.aabb_center[2] shelf.set_position(np.array([-1.0, 0, shelf.aabb_extent[2] / 2]) + offset) env.step(np.array([])) # One step is needed for the object to be fully initialized og.log.info("Shelf placed.") for _ in range(50): env.step(np.array([])) og.log.info("Placing boxes...") for i, box in enumerate(boxes): box.states[object_states.Inside].set_value(shelf, True) og.log.info(f"Box {i} placed.") for _ in range(50): env.step(np.array([])) if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/object_state_texture_demo.py
import numpy as np import omnigibson as og from omnigibson import object_states from omnigibson.macros import gm, macros from omnigibson.systems import get_system from omnigibson.utils.constants import ParticleModifyMethod # Make sure object states are enabled, we're using GPU dynamics, and HQ rendering is enabled gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = True gm.ENABLE_HQ_RENDERING = True def main(): # Create the scene config to load -- empty scene plus a cabinet cfg = { "scene": { "type": "Scene", "floor_plane_visible": True, }, "objects": [ { "type": "DatasetObject", "name": "cabinet", "category": "bottom_cabinet", "model": "zuwvdo", "bounding_box": [1.595, 0.537, 1.14], "abilities": { "freezable": {}, "cookable": {}, "burnable": {}, "saturable": {}, "particleRemover": { "method": ParticleModifyMethod.ADJACENCY, "conditions": { # For a specific particle system, this specifies what conditions are required in order for the # particle applier / remover to apply / remover particles associated with that system # The list should contain functions with signature condition() --> bool, # where True means the condition is satisfied # In this case, we only allow our cabinet to absorb water, with no conditions needed. # This is needed for the Saturated ("saturable") state so that we can modify the texture # according to the water. # NOTE: This will only change color if gm.ENABLE_HQ_RENDERING and gm.USE_GPU_DYNAMICS is # enabled! "water": [], }, }, }, "position": [0, 0, 0.59], }, ], } # Create the environment env = og.Environment(configs=cfg) # Set camera to appropriate viewing pose og.sim.viewer_camera.set_position_orientation( position=np.array([ 1.7789 , -1.68822, 1.13551]), orientation=np.array([0.57065614, 0.20331904, 0.267029 , 0.74947212]), ) # Grab reference to object of interest obj = env.scene.object_registry("name", "cabinet") # Make sure all the appropriate states are in the object assert object_states.Frozen in obj.states assert object_states.Cooked in obj.states assert object_states.Burnt in obj.states assert object_states.Saturated in obj.states def report_states(): # Make sure states are propagated before printing for i in range(5): env.step(np.array([])) print("=" * 20) print("temperature:", obj.states[object_states.Temperature].get_value()) print("obj is frozen:", obj.states[object_states.Frozen].get_value()) print("obj is cooked:", obj.states[object_states.Cooked].get_value()) print("obj is burnt:", obj.states[object_states.Burnt].get_value()) print("obj is soaked:", obj.states[object_states.Saturated].get_value(get_system("water"))) print("obj textures:", obj.get_textures()) # Report default states print("==== Initial state ====") report_states() # Notify user that we're about to freeze the object, and then freeze the object input("\nObject will be frozen. Press ENTER to continue.") obj.states[object_states.Temperature].set_value(-50) report_states() # Notify user that we're about to cook the object, and then cook the object input("\nObject will be cooked. Press ENTER to continue.") obj.states[object_states.Temperature].set_value(100) report_states() # Notify user that we're about to burn the object, and then burn the object input("\nObject will be burned. Press ENTER to continue.") obj.states[object_states.Temperature].set_value(250) report_states() # Notify user that we're about to reset the object to its default state, and then reset state input("\nObject will be reset to default state. Press ENTER to continue.") obj.states[object_states.Temperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE) obj.states[object_states.MaxTemperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE) report_states() # Notify user that we're about to soak the object, and then soak the object input("\nObject will be saturated with water. Press ENTER to continue.") obj.states[object_states.Saturated].set_value(get_system("water"), True) report_states() # Notify user that we're about to unsoak the object, and then unsoak the object input("\nObject will be unsaturated with water. Press ENTER to continue.") obj.states[object_states.Saturated].set_value(get_system("water"), False) report_states() # Close environment at the end input("Demo completed. Press ENTER to shutdown environment.") env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/particle_applier_remover_demo.py
import numpy as np import omnigibson as og from omnigibson.object_states import Covered from omnigibson.objects import DatasetObject from omnigibson.macros import gm, macros from omnigibson.systems import get_system from omnigibson.utils.usd_utils import create_joint from omnigibson.utils.ui_utils import choose_from_options from omnigibson.utils.constants import ParticleModifyMethod # Set macros for this example macros.object_states.particle_modifier.VISUAL_PARTICLES_REMOVAL_LIMIT = 1000 macros.object_states.particle_modifier.PHYSICAL_PARTICLES_REMOVAL_LIMIT = 8000 macros.object_states.particle_modifier.MAX_VISUAL_PARTICLES_APPLIED_PER_STEP = 4 macros.object_states.particle_modifier.MAX_PHYSICAL_PARTICLES_APPLIED_PER_STEP = 40 macros.object_states.covered.MAX_VISUAL_PARTICLES = 300 # Make sure object states and GPU dynamics are enabled (GPU dynamics needed for fluids) gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = True gm.ENABLE_HQ_RENDERING = True def main(random_selection=False, headless=False, short_exec=False): """ Demo of ParticleApplier and ParticleRemover object states, which enable objects to either apply arbitrary particles and remove arbitrary particles from the simulator, respectively. Loads an empty scene with a table, and starts clean to allow particles to be applied or pre-covers the table with particles to be removed. The ParticleApplier / ParticleRemover state is applied to an imported cloth object and allowed to interact with the table, applying / removing particles from the table. NOTE: The key difference between ParticleApplier/Removers and ParticleSource/Sinks is that Applier/Removers requires contact (if using ParticleProjectionMethod.ADJACENCY) or overlap (if using ParticleProjectionMethod.PROJECTION) in order to spawn / remove particles, and generally only spawn particles at the contact points. ParticleSource/Sinks are special cases of ParticleApplier/Removers that always use ParticleProjectionMethod.PROJECTION and always spawn / remove particles within their projection volume, irregardless of overlap with other objects! """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Choose what configuration to load modifier_type = choose_from_options( options={ "particleApplier": "Demo object's ability to apply particles in the simulator", "particleRemover": "Demo object's ability to remove particles from the simulator", }, name="particle modifier type", random_selection=random_selection, ) modification_metalink = { "particleApplier": "particleapplier_link", "particleRemover": "particleremover_link", } particle_types = ["stain", "water"] particle_type = choose_from_options( options={name: f"{name} particles will be applied or removed from the simulator" for name in particle_types}, name="particle type", random_selection=random_selection, ) modification_method = { "Adjacency": ParticleModifyMethod.ADJACENCY, "Projection": ParticleModifyMethod.PROJECTION, } projection_mesh_params = { "Adjacency": None, "Projection": { # Either Cone or Cylinder; shape of the projection where particles can be applied / removed "type": "Cone", # Size of the cone "extents": np.array([0.1875, 0.1875, 0.375]), }, } method_type = choose_from_options( options={ "Adjacency": "Close proximity to the object will be used to determine whether particles can be applied / removed", "Projection": "A Cone or Cylinder shape protruding from the object will be used to determine whether particles can be applied / removed", }, name="modifier method type", random_selection=random_selection, ) # Create the ability kwargs to pass to the object state abilities = { modifier_type: { "method": modification_method[method_type], "conditions": { # For a specific particle system, this specifies what conditions are required in order for the # particle applier / remover to apply / remover particles associated with that system # The list should contain functions with signature condition() --> bool, # where True means the condition is satisified particle_type: [], }, "projection_mesh_params": projection_mesh_params[method_type], } } table_cfg = dict( type="DatasetObject", name="table", category="breakfast_table", model="kwmfdg", bounding_box=[3.402, 1.745, 1.175], position=[0, 0, 0.98], ) # Create the scene config to load -- empty scene with a light and table cfg = { "scene": { "type": "Scene", }, "objects": [table_cfg], } # Sanity check inputs: Remover + Adjacency + Fluid will not work because we are using a visual_only # object, so contacts will not be triggered with this object # Load the environment, then immediately stop the simulator since we need to add in the modifier object env = og.Environment(configs=cfg) og.sim.stop() # Grab references to table table = env.scene.object_registry("name", "table") # Set the viewer camera appropriately og.sim.viewer_camera.set_position_orientation( position=np.array([-1.61340969, -1.79803028, 2.53167412]), orientation=np.array([ 0.46291845, -0.12381886, -0.22679218, 0.84790371]), ) # If we're using a projection volume, we manually add in the required metalink required in order to use the volume modifier = DatasetObject( name="modifier", category="dishtowel", model="dtfspn", bounding_box=[0.34245, 0.46798, 0.07], visual_only=method_type == "Projection", # Non-fluid adjacency requires the object to have collision geoms active abilities=abilities, ) modifier_root_link_path = f"{modifier.prim_path}/base_link" modifier._prim = modifier._load() if method_type == "Projection": metalink_path = f"{modifier.prim_path}/{modification_metalink[modifier_type]}" og.sim.stage.DefinePrim(metalink_path, "Xform") create_joint( prim_path=f"{modifier_root_link_path}/{modification_metalink[modifier_type]}_joint", body0=modifier_root_link_path, body1=metalink_path, joint_type="FixedJoint", enabled=True, ) modifier._post_load() modifier._loaded = True og.sim.import_object(modifier) modifier.set_position(np.array([0, 0, 5.0])) # Play the simulator and take some environment steps to let the objects settle og.sim.play() for _ in range(25): env.step(np.array([])) # If we're removing particles, set the table's covered state to be True if modifier_type == "particleRemover": table.states[Covered].set_value(get_system(particle_type), True) # Take a few steps to let particles settle for _ in range(25): env.step(np.array([])) # Enable camera teleoperation for convenience og.sim.enable_viewer_camera_teleoperation() # Set the modifier object to be in position to modify particles if method_type == "Projection": # Higher z to showcase projection volume at work z = 1.85 elif particle_type == "stain": # Lower z needed to allow for adjacency bounding box to overlap properly z = 1.175 else: # Higher z needed for actual physical interaction to accommodate non-negligible particle radius z = 1.22 modifier.keep_still() modifier.set_position_orientation( position=np.array([0, 0.3, z]), orientation=np.array([0, 0, 0, 1.0]), ) # Move object in square around table deltas = [ [130, np.array([-0.01, 0, 0])], [60, np.array([0, -0.01, 0])], [130, np.array([0.01, 0, 0])], [60, np.array([0, 0.01, 0])], ] for t, delta in deltas: for i in range(t): modifier.set_position(modifier.get_position() + delta) env.step(np.array([])) # Always shut down environment at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/object_states/folded_unfolded_state_demo.py
from omnigibson.utils.constants import PrimType from omnigibson.object_states import Folded, Unfolded from omnigibson.macros import gm import numpy as np import omnigibson as og # Make sure object states and GPU dynamics are enabled (GPU dynamics needed for cloth) gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = True def main(random_selection=False, headless=False, short_exec=False): """ Demo of cloth objects that can potentially be folded. """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Create the scene config to load -- empty scene + custom cloth object cfg = { "scene": { "type": "Scene", }, "objects": [ { "type": "DatasetObject", "name": "carpet", "category": "carpet", "model": "ctclvd", "bounding_box": [0.897, 0.568, 0.012], "prim_type": PrimType.CLOTH, "abilities": {"cloth": {}}, "position": [0, 0, 0.5], }, { "type": "DatasetObject", "name": "dishtowel", "category": "dishtowel", "model": "dtfspn", "bounding_box": [0.852, 1.1165, 0.174], "prim_type": PrimType.CLOTH, "abilities": {"cloth": {}}, "position": [1, 1, 0.5], }, { "type": "DatasetObject", "name": "shirt", "category": "t_shirt", "model": "kvidcx", "bounding_box": [0.472, 1.243, 1.158], "prim_type": PrimType.CLOTH, "abilities": {"cloth": {}}, "position": [-1, 1, 0.5], "orientation": [0.7071, 0., 0.7071, 0.], }, ], } # Create the environment env = og.Environment(configs=cfg) # Grab object references carpet = env.scene.object_registry("name", "carpet") dishtowel = env.scene.object_registry("name", "dishtowel") shirt = env.scene.object_registry("name", "shirt") objs = [carpet, dishtowel, shirt] # Set viewer camera og.sim.viewer_camera.set_position_orientation( position=np.array([0.46382895, -2.66703958, 1.22616824]), orientation=np.array([0.58779174, -0.00231237, -0.00318273, 0.80900271]), ) def print_state(): folded = carpet.states[Folded].get_value() unfolded = carpet.states[Unfolded].get_value() info = "carpet: [folded] %d [unfolded] %d" % (folded, unfolded) folded = dishtowel.states[Folded].get_value() unfolded = dishtowel.states[Unfolded].get_value() info += " || dishtowel: [folded] %d [unfolded] %d" % (folded, unfolded) folded = shirt.states[Folded].get_value() unfolded = shirt.states[Unfolded].get_value() info += " || tshirt: [folded] %d [unfolded] %d" % (folded, unfolded) print(f"{info}{' ' * (110 - len(info))}", end="\r") for _ in range(100): og.sim.step() print("\nCloth state:\n") if not short_exec: # Fold all three cloths along the x-axis for i in range(3): obj = objs[i] pos = obj.root_link.compute_particle_positions() x_min, x_max = np.min(pos, axis=0)[0], np.max(pos, axis=0)[0] x_extent = x_max - x_min # Get indices for the bottom 10 percent vertices in the x-axis indices = np.argsort(pos, axis=0)[:, 0][:(pos.shape[0] // 10)] start = np.copy(pos[indices]) # lift up a bit mid = np.copy(start) mid[:, 2] += x_extent * 0.2 # move towards x_max end = np.copy(mid) end[:, 0] += x_extent * 0.9 increments = 25 for ctrl_pts in np.concatenate([np.linspace(start, mid, increments), np.linspace(mid, end, increments)]): obj.root_link.set_particle_positions(ctrl_pts, idxs=indices) og.sim.step() print_state() # Fold the t-shirt twice again along the y-axis for direction in [-1, 1]: obj = shirt pos = obj.root_link.compute_particle_positions() y_min, y_max = np.min(pos, axis=0)[1], np.max(pos, axis=0)[1] y_extent = y_max - y_min if direction == 1: indices = np.argsort(pos, axis=0)[:, 1][:(pos.shape[0] // 20)] else: indices = np.argsort(pos, axis=0)[:, 1][-(pos.shape[0] // 20):] start = np.copy(pos[indices]) # lift up a bit mid = np.copy(start) mid[:, 2] += y_extent * 0.2 # move towards y_max end = np.copy(mid) end[:, 1] += direction * y_extent * 0.4 increments = 25 for ctrl_pts in np.concatenate([np.linspace(start, mid, increments), np.linspace(mid, end, increments)]): obj.root_link.set_particle_positions(ctrl_pts, idxs=indices) env.step(np.array([])) print_state() while True: env.step(np.array([])) print_state() # Shut down env at the end print() env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/overlaid_demo.py
import numpy as np import omnigibson as og from omnigibson.macros import gm from omnigibson.utils.constants import PrimType from omnigibson.object_states import Overlaid # Make sure object states and GPU dynamics are enabled (GPU dynamics needed for cloth) gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = True def main(random_selection=False, headless=False, short_exec=False): """ Demo of cloth objects that can be overlaid on rigid objects. Loads a carpet on top of a table. Initially Overlaid will be True because the carpet largely covers the table. If you drag the carpet off the table or even just fold it into half, Overlaid will become False. """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Create the scene config to load -- empty scene + custom cloth object + custom rigid object cfg = { "scene": { "type": "Scene", }, "objects": [ { "type": "DatasetObject", "name": "carpet", "category": "carpet", "model": "ctclvd", "bounding_box": [1.346, 0.852, 0.017], "prim_type": PrimType.CLOTH, "abilities": {"cloth": {}}, "position": [0, 0, 1.0], }, { "type": "DatasetObject", "name": "breakfast_table", "category": "breakfast_table", "model": "rjgmmy", "bounding_box": [1.36, 1.081, 0.84], "prim_type": PrimType.RIGID, "position": [0, 0, 0.58], }, ], } # Create the environment env = og.Environment(configs=cfg) # Grab object references carpet = env.scene.object_registry("name", "carpet") breakfast_table = env.scene.object_registry("name", "breakfast_table") # Set camera pose og.sim.viewer_camera.set_position_orientation( position=np.array([ 0.88215526, -1.40086216, 2.00311063]), orientation=np.array([0.42013364, 0.12342107, 0.25339685, 0.86258043]), ) max_steps = 100 if short_exec else -1 steps = 0 print("\nTry dragging cloth around with CTRL + Left-Click to see the Overlaid state change:\n") while steps != max_steps: print(f"Overlaid {carpet.states[Overlaid].get_value(breakfast_table)} ", end="\r") env.step(np.array([])) # Shut down env at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/particle_source_sink_demo.py
import numpy as np import omnigibson as og from omnigibson import object_states from omnigibson.macros import gm from omnigibson.utils.constants import ParticleModifyCondition # Make sure object states are enabled and GPU dynamics are used gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = True gm.ENABLE_HQ_RENDERING = True def main(random_selection=False, headless=False, short_exec=False): """ Demo of ParticleSource and ParticleSink object states, which enable objects to either spawn arbitrary particles and remove arbitrary particles from the simulator, respectively. Loads an empty scene with a sink, which is enabled with both the ParticleSource and ParticleSink states. The sink's particle source is located at the faucet spout and spawns a continuous stream of water particles, which is then destroyed ("sunk") by the sink's particle sink located at the drain. NOTE: The key difference between ParticleApplier/Removers and ParticleSource/Sinks is that Applier/Removers requires contact (if using ParticleProjectionMethod.ADJACENCY) or overlap (if using ParticleProjectionMethod.PROJECTION) in order to spawn / remove particles, and generally only spawn particles at the contact points. ParticleSource/Sinks are special cases of ParticleApplier/Removers that always use ParticleProjectionMethod.PROJECTION and always spawn / remove particles within their projection volume, irregardless of overlap with other objects! """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Create the scene config to load -- empty scene cfg = { "scene": { "type": "Scene", } } # Define objects to load into the environment sink_cfg = dict( type="DatasetObject", name="sink", category="sink", model="egwapq", bounding_box=[2.427, 0.625, 1.2], abilities={ "toggleable": {}, "particleSource": { "conditions": { "water": [(ParticleModifyCondition.TOGGLEDON, True)], # Must be toggled on for water source to be active }, "initial_speed": 0.0, # Water merely falls out of the spout }, "particleSink": { "conditions": { "water": [], # No conditions, always sinking nearby particles }, }, }, position=[0.0, 0, 0.42], ) cfg["objects"] = [sink_cfg] # Create the environment! env = og.Environment(configs=cfg) # Set camera to ideal angle for viewing objects og.sim.viewer_camera.set_position_orientation( position=np.array([ 0.37860532, -0.65396566, 1.4067066 ]), orientation=np.array([0.49909498, 0.15201752, 0.24857062, 0.81609284]), ) # Take a few steps to let the objects settle, and then turn on the sink for _ in range(10): env.step(np.array([])) # Empty action since no robots are in the scene sink = env.scene.object_registry("name", "sink") assert sink.states[object_states.ToggledOn].set_value(True) # Take a step, and save the state env.step(np.array([])) initial_state = og.sim.dump_state() # Main simulation loop. max_steps = 1000 max_iterations = -1 if not short_exec else 1 iteration = 0 try: while iteration != max_iterations: # Keep stepping until table or bowl are clean, or we reach 1000 steps steps = 0 while steps != max_steps: steps += 1 env.step(np.array([])) og.log.info("Max steps reached; resetting.") # Reset to the initial state og.sim.load_state(initial_state) iteration += 1 finally: # Always shut down environment at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/slicing_demo.py
import numpy as np import omnigibson as og from omnigibson.macros import gm import omnigibson.utils.transform_utils as T # Make sure object states and transition rules are enabled gm.ENABLE_OBJECT_STATES = True gm.ENABLE_TRANSITION_RULES = True def main(random_selection=False, headless=False, short_exec=False): """ Demo of slicing an apple into two apple slices """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Create the scene config to load -- empty scene with table, knife, and apple table_cfg = dict( type="DatasetObject", name="table", category="breakfast_table", model="rjgmmy", bounding_box=[1.36, 1.081, 0.84], position=[0, 0, 0.58], ) apple_cfg = dict( type="DatasetObject", name="apple", category="apple", model="agveuv", bounding_box=[0.098, 0.098, 0.115], position=[0.085, 0, 0.92], ) knife_cfg = dict( type="DatasetObject", name="knife", category="table_knife", model="lrdmpf", bounding_box=[0.401, 0.044, 0.009], position=[0, 0, 20.0], ) light0_cfg = dict( type="LightObject", name="light0", light_type="Sphere", radius=0.01, intensity=4000.0, position=[1.217, -0.848, 1.388], ) light1_cfg = dict( type="LightObject", name="light1", light_type="Sphere", radius=0.01, intensity=4000.0, position=[-1.217, 0.848, 1.388], ) cfg = { "scene": { "type": "Scene", }, "objects": [table_cfg, apple_cfg, knife_cfg, light0_cfg, light1_cfg] } # Create the environment env = og.Environment(configs=cfg) # Grab reference to apple and knife apple = env.scene.object_registry("name", "apple") knife = env.scene.object_registry("name", "knife") # Update the simulator's viewer camera's pose so it points towards the table og.sim.viewer_camera.set_position_orientation( position=np.array([ 0.544888, -0.412084, 1.11569 ]), orientation=np.array([0.54757518, 0.27792802, 0.35721896, 0.70378409]), ) # Let apple settle for _ in range(50): env.step(np.array([])) knife.keep_still() knife.set_position_orientation( position=apple.get_position() + np.array([-0.15, 0.0, 0.2]), orientation=T.euler2quat([-np.pi / 2, 0, 0]), ) input("The knife will fall on the apple and slice it. Press [ENTER] to continue.") # Step simulation for a bit so that apple is sliced for i in range(1000): env.step(np.array([])) input("Apple has been sliced! Press [ENTER] to terminate the demo.") # Always close environment at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/dicing_demo.py
import numpy as np import omnigibson as og from omnigibson.macros import gm import omnigibson.utils.transform_utils as T # Make sure object states, GPU dynamics, and transition rules are enabled gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = True gm.ENABLE_TRANSITION_RULES = True def main(random_selection=False, headless=False, short_exec=False): """ Demo of dicing an apple into apple dices """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Create the scene config to load -- empty scene with table, knife, and apple table_cfg = dict( type="DatasetObject", name="table", category="breakfast_table", model="rjgmmy", bounding_box=[1.36, 1.08, 0.84], position=[0, 0, 0.58], ) apple_cfg = dict( type="DatasetObject", name="apple", category="apple", model="agveuv", bounding_box=[0.098, 0.098, 0.115], position=[0.085, 0, 0.92], abilities={"diceable": {}} ) knife_cfg = dict( type="DatasetObject", name="knife", category="table_knife", model="lrdmpf", bounding_box=[0.401, 0.044, 0.009], position=[0, 0, 20.0], ) light0_cfg = dict( type="LightObject", name="light0", light_type="Sphere", radius=0.01, intensity=1e7, position=[1.217, -0.848, 1.388], ) light1_cfg = dict( type="LightObject", name="light1", light_type="Sphere", radius=0.01, intensity=1e7, position=[-1.217, 0.848, 1.388], ) cfg = { "scene": { "type": "Scene", }, "objects": [table_cfg, apple_cfg, knife_cfg, light0_cfg, light1_cfg] } # Create the environment env = og.Environment(configs=cfg) # Grab reference to apple and knife apple = env.scene.object_registry("name", "apple") knife = env.scene.object_registry("name", "knife") # Update the simulator's viewer camera's pose so it points towards the table og.sim.viewer_camera.set_position_orientation( position=np.array([ 0.544888, -0.412084, 1.11569 ]), orientation=np.array([0.54757518, 0.27792802, 0.35721896, 0.70378409]), ) # Let apple settle for _ in range(50): env.step(np.array([])) knife.keep_still() knife.set_position_orientation( position=apple.get_position() + np.array([-0.15, 0.0, 0.2]), orientation=T.euler2quat([-np.pi / 2, 0, 0]), ) input("The knife will fall on the apple and dice it. Press [ENTER] to continue.") # Step simulation for a bit so that apple is diced for i in range(1000): env.step(np.array([])) input("Apple has been diced! Press [ENTER] to terminate the demo.") # Always close environment at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/heated_state_demo.py
import numpy as np import omnigibson as og from omnigibson import object_states from omnigibson.macros import gm # Make sure object states are enabled gm.ENABLE_OBJECT_STATES = True def main(): # Define object configurations for objects to load -- we want to load a light and three bowls obj_configs = [] obj_configs.append(dict( type="LightObject", light_type="Sphere", name="light", radius=0.01, intensity=1e8, position=[-2.0, -2.0, 1.0], )) for i, (scale, x) in enumerate(zip([0.5, 1.0, 2.0], [-0.6, 0, 0.8])): obj_configs.append(dict( type="DatasetObject", name=f"bowl{i}", category="bowl", model="ajzltc", bounding_box=np.array([0.329, 0.293, 0.168]) * scale, abilities={"heatable": {}}, position=[x, 0, 0.2], )) # Create the scene config to load -- empty scene with light object and bowls cfg = { "scene": { "type": "Scene", }, "objects": obj_configs, } # Create the environment env = og.Environment(configs=cfg) # Set camera to appropriate viewing pose og.sim.viewer_camera.set_position_orientation( position=np.array([ 0.182103, -2.07295 , 0.14017 ]), orientation=np.array([0.77787037, 0.00267566, 0.00216149, 0.62841535]), ) # Dim the skybox so we can see the bowls' steam effectively env.scene.skybox.intensity = 100.0 # Grab reference to objects of relevance objs = list(env.scene.object_registry("category", "bowl")) def report_states(objs): for obj in objs: print("=" * 20) print("object:", obj.name) print("temperature:", obj.states[object_states.Temperature].get_value()) print("obj is heated:", obj.states[object_states.Heated].get_value()) # Report default states print("==== Initial state ====") report_states(objs) # Notify user that we're about to heat the object input("Objects will be heated, and steam will slowly rise. Press ENTER to continue.") # Heated. for obj in objs: obj.states[object_states.Temperature].set_value(50) env.step(np.array([])) report_states(objs) # Take a look at the steam effect. # After a while, objects will be below the Steam temperature threshold. print("==== Objects are now heated... ====") print() for _ in range(2000): env.step(np.array([])) # Also print temperatures temps = [f"{obj.states[object_states.Temperature].get_value():>7.2f}" for obj in objs] print(f"obj temps:", *temps, end="\r") print() # Objects are not heated anymore. print("==== Objects are no longer heated... ====") report_states(objs) # Close environment at the end input("Demo completed. Press ENTER to shutdown environment.") env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/objects/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/objects/draw_bounding_box.py
import matplotlib.pyplot as plt import numpy as np import omnigibson as og def main(random_selection=False, headless=False, short_exec=False): """ Shows how to obtain the bounding box of an articulated object. Draws the bounding box around the loaded object, a cabinet, and writes the visualized image to disk at the current directory named 'bbox_2d_[loose / tight]_img.png'. NOTE: In the GUI, bounding boxes can be natively viewed by clicking on the sensor ((*)) icon at the top, and then selecting the appropriate bounding box modalities, and clicking "Show". See: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/visualization.html#the-visualizer """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Specify objects to load banana_cfg = dict( type="DatasetObject", name="banana", category="banana", model="vvyyyv", bounding_box=[0.643, 0.224, 0.269], position=[-0.906661, -0.545106, 0.136824], orientation=[0, 0, 0.76040583, -0.6494482], ) door_cfg = dict( type="DatasetObject", name="door", category="door", model="ohagsq", bounding_box=[1.528, 0.064, 1.299], position=[-2.0, 0, 0.70000001], orientation=[0, 0, -0.38268343, 0.92387953], ) # Create the scene config to load -- empty scene with a few objects cfg = { "scene": { "type": "Scene", }, "objects": [banana_cfg, door_cfg], } # Create the environment env = og.Environment(configs=cfg) # Set camera to appropriate viewing pose cam = og.sim.viewer_camera cam.set_position_orientation( position=np.array([-4.62785 , -0.418575, 0.933943]), orientation=np.array([ 0.52196595, -0.4231939 , -0.46640436, 0.5752612 ]), ) # Add bounding boxes to camera sensor bbox_modalities = ["bbox_3d", "bbox_2d_loose", "bbox_2d_tight"] for bbox_modality in bbox_modalities: cam.add_modality(bbox_modality) # Take a few steps to let objects settle for i in range(100): env.step(np.array([])) # Grab observations from viewer camera and write them to disk obs, _ = cam.get_obs() for bbox_modality in bbox_modalities: # Print out each of the modalities og.log.info(f"Observation modality {bbox_modality}:\n{obs[bbox_modality]}") # Also write the 2d loose bounding box to disk if "3d" not in bbox_modality: from omnigibson.utils.deprecated_utils import colorize_bboxes colorized_img = colorize_bboxes(bboxes_2d_data=obs[bbox_modality], bboxes_2d_rgb=obs["rgb"], num_channels=4) fpath = f"{bbox_modality}_img.png" plt.imsave(fpath, colorized_img) og.log.info(f"Saving modality [{bbox_modality}] image to: {fpath}") # Always close environment down at end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/objects/highlight_objects.py
import numpy as np import omnigibson as og def main(random_selection=False, headless=False, short_exec=False): """ Highlights visually all object instances of windows and then removes the highlighting It also demonstrates how to apply an action on all instances of objects of a given category """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Create the scene config to load -- empty scene cfg = { "scene": { "type": "InteractiveTraversableScene", "scene_model": "Rs_int", } } # Create the environment env = og.Environment(configs=cfg) # Grab all window objects windows = og.sim.scene.object_registry("category", "window") # Step environment while toggling window highlighting i = 0 highlighted = False max_steps = -1 if not short_exec else 1000 while i != max_steps: env.step(np.array([])) if i % 50 == 0: highlighted = not highlighted og.log.info(f"Toggling window highlight to: {highlighted}") for window in windows: # Note that this property is R/W! window.highlighted = highlighted i += 1 # Always close the environment at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/objects/load_object_selector.py
import numpy as np import omnigibson as og from omnigibson.utils.asset_utils import ( get_all_object_categories, get_og_avg_category_specs, get_all_object_category_models, ) from omnigibson.utils.ui_utils import choose_from_options def main(random_selection=False, headless=False, short_exec=False): """ This demo shows how to load any scaled objects from the OG object model dataset The user selects an object model to load The objects can be loaded into an empty scene or an interactive scene (OG) The example also shows how to use the Environment API or directly the Simulator API, loading objects and robots and executing actions """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) scene_options = ["Scene", "InteractiveTraversableScene"] scene_type = choose_from_options(options=scene_options, name="scene type", random_selection=random_selection) # -- Choose the object to load -- # Select a category to load available_obj_categories = get_all_object_categories() obj_category = choose_from_options(options=available_obj_categories, name="object category", random_selection=random_selection) # Select a model to load available_obj_models = get_all_object_category_models(obj_category) obj_model = choose_from_options(options=available_obj_models, name="object model", random_selection=random_selection) # Load the specs of the object categories, e.g., common scaling factor avg_category_spec = get_og_avg_category_specs() # Create and load this object into the simulator obj_cfg = dict( type="DatasetObject", name="obj", category=obj_category, model=obj_model, position=[0, 0, 50.0], ) cfg = { "scene": { "type": scene_type, }, "objects": [obj_cfg], } if scene_type == "InteractiveTraversableScene": cfg["scene"]["scene_model"] = "Rs_int" # Create the environment env = og.Environment(configs=cfg) # Place the object so it rests on the floor obj = env.scene.object_registry("name", "obj") center_offset = obj.get_position() - obj.aabb_center + np.array([0, 0, obj.aabb_extent[2] / 2.0]) obj.set_position(center_offset) # Step through the environment max_steps = 100 if short_exec else 10000 for i in range(max_steps): env.step(np.array([])) # Always close the environment at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/objects/visualize_object.py
import argparse import numpy as np import omnigibson as og from omnigibson.utils.asset_utils import ( get_all_object_categories, get_all_object_category_models, ) from omnigibson.utils.ui_utils import choose_from_options import omnigibson.utils.transform_utils as T def main(random_selection=False, headless=False, short_exec=False): """ Visualizes object as specified by its USD path, @usd_path. If None if specified, will instead result in an object selection from OmniGibson's object dataset """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Assuming that if random_selection=True, headless=True, short_exec=True, we are calling it from tests and we # do not want to parse args (it would fail because the calling function is pytest "testfile.py") usd_path = None if not (random_selection and headless and short_exec): parser = argparse.ArgumentParser() parser.add_argument( "--usd_path", default=None, help="USD Model to load", ) args = parser.parse_args() usd_path = args.usd_path # Define objects to load light0_cfg = dict( type="LightObject", light_type="Sphere", name="sphere_light0", radius=0.01, intensity=1e5, position=[-2.0, -2.0, 2.0], ) light1_cfg = dict( type="LightObject", light_type="Sphere", name="sphere_light1", radius=0.01, intensity=1e5, position=[-2.0, 2.0, 2.0], ) # Make sure we have a valid usd path if usd_path is None: # Select a category to load available_obj_categories = get_all_object_categories() obj_category = choose_from_options(options=available_obj_categories, name="object category", random_selection=random_selection) # Select a model to load available_obj_models = get_all_object_category_models(obj_category) obj_model = choose_from_options(options=available_obj_models, name="object model", random_selection=random_selection) kwargs = { "type": "DatasetObject", "category": obj_category, "model": obj_model, } else: kwargs = { "type": "USDObject", "usd_path": usd_path, } # Import the desired object obj_cfg = dict( **kwargs, name="obj", usd_path=usd_path, visual_only=True, position=[0, 0, 10.0], ) # Create the scene config to load -- empty scene cfg = { "scene": { "type": "Scene", }, "objects": [light0_cfg, light1_cfg, obj_cfg], } # Create the environment env = og.Environment(configs=cfg) # Set camera to appropriate viewing pose og.sim.viewer_camera.set_position_orientation( position=np.array([-0.00913503, -1.95750906, 1.36407314]), orientation=np.array([0.6350064 , 0. , 0. , 0.77250687]), ) # Grab the object references obj = env.scene.object_registry("name", "obj") # Standardize the scale of the object so it fits in a [1,1,1] box -- note that we have to stop the simulator # in order to set the scale extents = obj.aabb_extent og.sim.stop() obj.scale = (np.ones(3) / extents).min() og.sim.play() env.step(np.array([])) # Move the object so that its center is at [0, 0, 1] center_offset = obj.get_position() - obj.aabb_center + np.array([0, 0, 1.0]) obj.set_position(center_offset) # Allow the user to easily move the camera around og.sim.enable_viewer_camera_teleoperation() # Rotate the object in place steps_per_rotate = 360 steps_per_joint = steps_per_rotate / 10 max_steps = 100 if short_exec else 10000 for i in range(max_steps): z_angle = (2 * np.pi * (i % steps_per_rotate) / steps_per_rotate) quat = T.euler2quat(np.array([0, 0, z_angle])) pos = T.quat2mat(quat) @ center_offset if obj.n_dof > 0: frac = (i % steps_per_joint) / steps_per_joint j_frac = -1.0 + 2.0 * frac if (i // steps_per_joint) % 2 == 0 else 1.0 - 2.0 * frac obj.set_joint_positions(positions=j_frac * np.ones(obj.n_dof), normalized=True, drive=False) obj.keep_still() obj.set_position_orientation(position=pos, orientation=quat) env.step(np.array([])) # Shut down at the end og.shutdown() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/renderer_settings/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/renderer_settings/renderer_settings_example.py
import numpy as np import omnigibson as og from omnigibson.renderer_settings.renderer_settings import RendererSettings def main(random_selection=False, headless=False, short_exec=False): """ Shows how to use RendererSettings class """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Specify objects to load banana_cfg = dict( type="DatasetObject", name="banana", category="banana", model="vvyyyv", scale=[3.0, 5.0, 2.0], position=[-0.906661, -0.545106, 0.136824], orientation=[0, 0, 0.76040583, -0.6494482 ], ) door_cfg = dict( type="DatasetObject", name="door", category="door", model="ohagsq", position=[-2.0, 0, 0.70000001], orientation=[0, 0, -0.38268343, 0.92387953], ) # Create the scene config to load -- empty scene with a few objects cfg = { "scene": { "type": "Scene", }, "objects": [banana_cfg, door_cfg], } # Create the environment env = og.Environment(configs=cfg) # Set camera to appropriate viewing pose cam = og.sim.viewer_camera cam.set_position_orientation( position=np.array([-4.62785 , -0.418575, 0.933943]), orientation=np.array([ 0.52196595, -0.4231939 , -0.46640436, 0.5752612 ]), ) def steps(n): for _ in range(n): env.step(np.array([])) # Take a few steps to let objects settle steps(25) # Create renderer settings object. renderer_setting = RendererSettings() # RendererSettings is a singleton. renderer_setting2 = RendererSettings() assert renderer_setting == renderer_setting2 # Set current renderer. input("Setting renderer to Real-Time. Press [ENTER] to continue.") renderer_setting.set_current_renderer("Real-Time") assert renderer_setting.get_current_renderer() == "Real-Time" steps(5) input("Setting renderer to Interactive (Path Tracing). Press [ENTER] to continue.") renderer_setting.set_current_renderer("Interactive (Path Tracing)") assert renderer_setting.get_current_renderer() == "Interactive (Path Tracing)" steps(5) # Get all available settings. print(renderer_setting.settings.keys()) input("Showcasing how to use RendererSetting APIs. Please see example script for more information. " "Press [ENTER] to continue.") # Set setting (2 lines below are equivalent). renderer_setting.set_setting(path="/app/renderer/skipMaterialLoading", value=True) renderer_setting.common_settings.materials_settings.skip_material_loading.set(True) # Get setting (3 lines below are equivalent). assert renderer_setting.get_setting_from_path(path="/app/renderer/skipMaterialLoading") == True assert renderer_setting.common_settings.materials_settings.skip_material_loading.value == True assert renderer_setting.common_settings.materials_settings.skip_material_loading.get() == True # Reset setting (2 lines below are equivalent). renderer_setting.reset_setting(path="/app/renderer/skipMaterialLoading") renderer_setting.common_settings.materials_settings.skip_material_loading.reset() assert renderer_setting.get_setting_from_path(path="/app/renderer/skipMaterialLoading") == False # Set setting to an unallowed value using top-level method. # Examples below will use the "top-level" setting method. try: renderer_setting.set_setting(path="/app/renderer/skipMaterialLoading", value="foo") except AssertionError as e: print(e) # All good. We got an AssertionError. # Set setting to a value out-of-range. try: renderer_setting.set_setting(path="/rtx/fog/fogColorIntensity", value=0.0) except AssertionError as e: print(e) # All good. We got an AssertionError. # Set unallowed setting. try: renderer_setting.set_setting(path="foo", value="bar") except NotImplementedError as e: print(e) # All good. We got a NotImplementedError. # Set setting but the setting group is not enabled. # Setting is successful but there will be a warning message printed. renderer_setting.set_setting(path="/rtx/fog/fogColorIntensity", value=1.0) # Shutdown sim input("Completed demo. Press [ENTER] to shutdown simulation.") og.shutdown() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/robots/grasping_mode_example.py
""" Example script demo'ing robot manipulation control with grasping. """ import numpy as np import omnigibson as og from omnigibson.macros import gm from omnigibson.sensors import VisionSensor from omnigibson.utils.ui_utils import choose_from_options, KeyboardRobotController GRASPING_MODES = dict( sticky="Sticky Mitten - Objects are magnetized when they touch the fingers and a CLOSE command is given", assisted="Assisted Grasping - Objects are magnetized when they touch the fingers, are within the hand, and a CLOSE command is given", physical="Physical Grasping - No additional grasping assistance applied", ) # Don't use GPU dynamics and Use flatcache for performance boost gm.USE_GPU_DYNAMICS = False gm.ENABLE_FLATCACHE = True def main(random_selection=False, headless=False, short_exec=False): """ Robot grasping mode demo with selection Queries the user to select a type of grasping mode """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Choose type of grasping grasping_mode = choose_from_options(options=GRASPING_MODES, name="grasping mode", random_selection=random_selection) # Create environment configuration to use scene_cfg = dict(type="Scene") robot0_cfg = dict( type="Fetch", obs_modalities=["rgb"], # we're just doing a grasping demo so we don't need all observation modalities action_type="continuous", action_normalize=True, grasping_mode=grasping_mode, ) # Define objects to load table_cfg = dict( type="DatasetObject", name="table", category="breakfast_table", model="lcsizg", bounding_box=[0.5, 0.5, 0.8], fixed_base=True, position=[0.7, -0.1, 0.6], orientation=[0, 0, 0.707, 0.707], ) chair_cfg = dict( type="DatasetObject", name="chair", category="straight_chair", model="amgwaw", bounding_box=None, fixed_base=False, position=[0.45, 0.65, 0.425], orientation=[0, 0, -0.9990215, -0.0442276], ) box_cfg = dict( type="PrimitiveObject", name="box", primitive_type="Cube", rgba=[1.0, 0, 0, 1.0], size=0.05, position=[0.53, -0.1, 0.97], ) # Compile config cfg = dict(scene=scene_cfg, robots=[robot0_cfg], objects=[table_cfg, chair_cfg, box_cfg]) # Create the environment env = og.Environment(configs=cfg) # Reset the robot robot = env.robots[0] robot.set_position([0, 0, 0]) robot.reset() robot.keep_still() # Make the robot's camera(s) high-res for sensor in robot.sensors.values(): if isinstance(sensor, VisionSensor): sensor.image_height = 720 sensor.image_width = 720 # Update the simulator's viewer camera's pose so it points towards the robot og.sim.viewer_camera.set_position_orientation( position=np.array([-2.39951, 2.26469, 2.66227]), orientation=np.array([-0.23898481, 0.48475231, 0.75464013, -0.37204802]), ) # Create teleop controller action_generator = KeyboardRobotController(robot=robot) # Print out relevant keyboard info if using keyboard teleop action_generator.print_keyboard_teleop_info() # Other helpful user info print("Running demo with grasping mode {}.".format(grasping_mode)) print("Press ESC to quit") # Loop control until user quits max_steps = -1 if not short_exec else 100 step = 0 while step != max_steps: action = action_generator.get_random_action() if random_selection else action_generator.get_teleop_action() for _ in range(10): env.step(action) step += 1 # Always shut down the environment cleanly at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/robots/all_robots_visualizer.py
import numpy as np import omnigibson as og from omnigibson.robots import REGISTERED_ROBOTS def main(random_selection=False, headless=False, short_exec=False): """ Robot demo Loads all robots in an empty scene, generate random actions """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Create empty scene with no robots in it initially cfg = { "scene": { "type": "Scene", } } env = og.Environment(configs=cfg) # Iterate over all robots and demo their motion for robot_name, robot_cls in REGISTERED_ROBOTS.items(): # Create and import robot robot = robot_cls( prim_path=f"/World/{robot_name}", name=robot_name, obs_modalities=[], # We're just moving robots around so don't load any observation modalities ) og.sim.import_object(robot) # At least one step is always needed while sim is playing for any imported object to be fully initialized og.sim.play() og.sim.step() # Reset robot and make sure it's not moving robot.reset() robot.keep_still() # Log information og.log.info(f"Loaded {robot_name}") og.log.info(f"Moving {robot_name}") if not headless: # Set viewer in front facing robot og.sim.viewer_camera.set_position_orientation( position=np.array([ 2.69918369, -3.63686664, 4.57894564]), orientation=np.array([0.39592411, 0.1348514 , 0.29286304, 0.85982 ]), ) og.sim.enable_viewer_camera_teleoperation() # Hold still briefly so viewer can see robot for _ in range(100): og.sim.step() # Then apply random actions for a bit for _ in range(30): action = np.random.uniform(-1, 1, robot.action_dim) if robot_name == "Tiago": action[robot.base_action_idx] = np.random.uniform(-0.1, 0.1, len(robot.base_action_idx)) for _ in range(10): env.step(action) # Stop the simulator and remove the robot og.sim.stop() og.sim.remove_object(obj=robot) # Always shut down the environment cleanly at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/robots/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/robots/robot_control_example.py
""" Example script demo'ing robot control. Options for random actions, as well as selection of robot action space """ import numpy as np import omnigibson as og import omnigibson.lazy as lazy from omnigibson.macros import gm from omnigibson.robots import REGISTERED_ROBOTS from omnigibson.utils.ui_utils import choose_from_options, KeyboardRobotController CONTROL_MODES = dict( random="Use autonomous random actions (default)", teleop="Use keyboard control", ) SCENES = dict( Rs_int="Realistic interactive home environment (default)", empty="Empty environment with no objects", ) # Don't use GPU dynamics and use flatcache for performance boost gm.USE_GPU_DYNAMICS = False gm.ENABLE_FLATCACHE = True def choose_controllers(robot, random_selection=False): """ For a given robot, iterates over all components of the robot, and returns the requested controller type for each component. :param robot: BaseRobot, robot class from which to infer relevant valid controller options :param random_selection: bool, if the selection is random (for automatic demo execution). Default False :return dict: Mapping from individual robot component (e.g.: base, arm, etc.) to selected controller names """ # Create new dict to store responses from user controller_choices = dict() # Grab the default controller config so we have the registry of all possible controller options default_config = robot._default_controller_config # Iterate over all components in robot for component, controller_options in default_config.items(): # Select controller options = list(sorted(controller_options.keys())) choice = choose_from_options( options=options, name="{} controller".format(component), random_selection=random_selection ) # Add to user responses controller_choices[component] = choice return controller_choices def main(random_selection=False, headless=False, short_exec=False): """ Robot control demo with selection Queries the user to select a robot, the controllers, a scene and a type of input (random actions or teleop) """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Choose scene to load scene_model = choose_from_options(options=SCENES, name="scene", random_selection=random_selection) # Choose robot to create robot_name = choose_from_options( options=list(sorted(REGISTERED_ROBOTS.keys())), name="robot", random_selection=random_selection ) scene_cfg = dict() if scene_model == "empty": scene_cfg["type"] = "Scene" else: scene_cfg["type"] = "InteractiveTraversableScene" scene_cfg["scene_model"] = scene_model # Add the robot we want to load robot0_cfg = dict() robot0_cfg["type"] = robot_name robot0_cfg["obs_modalities"] = ["rgb", "depth", "seg_instance", "normal", "scan", "occupancy_grid"] robot0_cfg["action_type"] = "continuous" robot0_cfg["action_normalize"] = True # Compile config cfg = dict(scene=scene_cfg, robots=[robot0_cfg]) # Create the environment env = og.Environment(configs=cfg) # Choose robot controller to use robot = env.robots[0] controller_choices = choose_controllers(robot=robot, random_selection=random_selection) # Choose control mode if random_selection: control_mode = "random" else: control_mode = choose_from_options(options=CONTROL_MODES, name="control mode") # Update the control mode of the robot controller_config = {component: {"name": name} for component, name in controller_choices.items()} robot.reload_controllers(controller_config=controller_config) # Because the controllers have been updated, we need to update the initial state so the correct controller state # is preserved env.scene.update_initial_state() # Update the simulator's viewer camera's pose so it points towards the robot og.sim.viewer_camera.set_position_orientation( position=np.array([1.46949, -3.97358, 2.21529]), orientation=np.array([0.56829048, 0.09569975, 0.13571846, 0.80589577]), ) # Reset environment and robot env.reset() robot.reset() # Create teleop controller action_generator = KeyboardRobotController(robot=robot) # Register custom binding to reset the environment action_generator.register_custom_keymapping( key=lazy.carb.input.KeyboardInput.R, description="Reset the robot", callback_fn=lambda: env.reset(), ) # Print out relevant keyboard info if using keyboard teleop if control_mode == "teleop": action_generator.print_keyboard_teleop_info() # Other helpful user info print("Running demo.") print("Press ESC to quit") # Loop control until user quits max_steps = -1 if not short_exec else 100 step = 0 while step != max_steps: action = action_generator.get_random_action() if control_mode == "random" else action_generator.get_teleop_action() env.step(action=action) step += 1 # Always shut down the environment cleanly at the end env.close() if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/examples/robots/advanced/__init__.py
StanfordVL/OmniGibson/omnigibson/examples/robots/advanced/ik_example.py
import argparse import time import numpy as np import omnigibson as og import omnigibson.lazy as lazy from omnigibson.objects import PrimitiveObject from omnigibson.robots import Fetch from omnigibson.scenes import Scene from omnigibson.utils.control_utils import IKSolver def main(random_selection=False, headless=False, short_exec=False): """ Minimal example of usage of inverse kinematics solver This example showcases how to construct your own IK functionality using omniverse's native lula library without explicitly utilizing all of OmniGibson's class abstractions, and also showcases how to manipulate the simulator at a lower-level than the main Environment entry point. """ og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80) # Assuming that if random_selection=True, headless=True, short_exec=True, we are calling it from tests and we # do not want to parse args (it would fail because the calling function is pytest "testfile.py") if not (random_selection and headless and short_exec): parser = argparse.ArgumentParser() parser.add_argument( "--programmatic", "-p", dest="programmatic_pos", action="store_true", help="if the IK solvers should be used with the GUI or programmatically", ) args = parser.parse_args() programmatic_pos = args.programmatic_pos else: programmatic_pos = True # Import scene and robot (Fetch) scene_cfg = {"type": "Scene"} # Create Fetch robot # Note that since we only care about IK functionality, we fix the base (this also makes the robot more stable) # (any object can also have its fixed_base attribute set to True!) # Note that since we're going to be setting joint position targets, we also need to make sure the robot's arm joints # (which includes the trunk) are being controlled using joint positions robot_cfg = { "type": "Fetch", "fixed_base": True, "controller_config": { "arm_0": { "name": "NullJointController", "motor_type": "position", } } } cfg = dict(scene=scene_cfg, robots=[robot_cfg]) env = og.Environment(configs=cfg) # Update the viewer camera's pose so that it points towards the robot og.sim.viewer_camera.set_position_orientation( position=np.array([4.32248, -5.74338, 6.85436]), orientation=np.array([0.39592, 0.13485, 0.29286, 0.85982]), ) robot = env.robots[0] # Set robot base at the origin robot.set_position_orientation(np.array([0, 0, 0]), np.array([0, 0, 0, 1])) # At least one simulation step while the simulator is playing must occur for the robot (or in general, any object) # to be fully initialized after it is imported into the simulator og.sim.play() og.sim.step() # Make sure none of the joints are moving robot.keep_still() # Since this demo aims to showcase how users can directly control the robot with IK, # we will need to disable the built-in controllers in OmniGibson robot.control_enabled = False # Create the IK solver -- note that we are controlling both the trunk and the arm since both are part of the # controllable kinematic chain for the end-effector! control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]]) ik_solver = IKSolver( robot_description_path=robot.robot_arm_descriptor_yamls[robot.default_arm], robot_urdf_path=robot.urdf_path, reset_joint_pos=robot.get_joint_positions()[control_idx], eef_name=robot.eef_link_names[robot.default_arm], ) # Define a helper function for executing specific end-effector commands using the ik solver def execute_ik(pos, quat=None, max_iter=100): og.log.info("Querying joint configuration to current marker position") # Grab the joint positions in order to reach the desired pose target joint_pos = ik_solver.solve( target_pos=pos, target_quat=quat, tolerance_pos=0.002, tolerance_quat=0.01, weight_pos=20.0, weight_quat=0.05, max_iterations=max_iter, initial_joint_pos=robot.get_joint_positions()[control_idx], ) if joint_pos is not None: og.log.info("Solution found. Setting new arm configuration.") robot.set_joint_positions(joint_pos, indices=control_idx, drive=True) else: og.log.info("EE position not reachable.") og.sim.step() if programmatic_pos or headless: # Sanity check IK using pre-defined hardcoded positions query_positions = [[1, 0, 0.8], [1, 1, 1], [0.5, 0.5, 0], [0.5, 0.5, 0.5]] for query_pos in query_positions: execute_ik(query_pos) time.sleep(2) else: # Create a visual marker to be moved by the user, representing desired end-effector position marker = PrimitiveObject( prim_path=f"/World/marker", name="marker", primitive_type="Sphere", radius=0.03, visual_only=True, rgba=[1.0, 0, 0, 1.0], ) og.sim.import_object(marker) # Get initial EE position and set marker to that location command = robot.get_eef_position() marker.set_position(command) og.sim.step() # Setup callbacks for grabbing keyboard inputs from omni exit_now = False def keyboard_event_handler(event, *args, **kwargs): nonlocal command, exit_now # Check if we've received a key press or repeat if event.type == lazy.carb.input.KeyboardEventType.KEY_PRESS \ or event.type == lazy.carb.input.KeyboardEventType.KEY_REPEAT: if event.input == lazy.carb.input.KeyboardInput.ENTER: # Execute the command execute_ik(pos=command) elif event.input == lazy.carb.input.KeyboardInput.ESCAPE: # Quit og.log.info("Quit.") exit_now = True else: # We see if we received a valid delta command, and if so, we update our command and visualized # marker position delta_cmd = input_to_xyz_delta_command(inp=event.input) if delta_cmd is not None: command = command + delta_cmd marker.set_position(command) og.sim.step() # Callback must return True if valid return True # Hook up the callback function with omni's user interface appwindow = lazy.omni.appwindow.get_default_app_window() input_interface = lazy.carb.input.acquire_input_interface() keyboard = appwindow.get_keyboard() sub_keyboard = input_interface.subscribe_to_keyboard_events(keyboard, keyboard_event_handler) # Print out helpful information to the user print_message() # Loop until the user requests an exit while not exit_now: og.sim.step() # Always shut the simulation down cleanly at the end og.app.close() def input_to_xyz_delta_command(inp, delta=0.01): mapping = { lazy.carb.input.KeyboardInput.W: np.array([delta, 0, 0]), lazy.carb.input.KeyboardInput.S: np.array([-delta, 0, 0]), lazy.carb.input.KeyboardInput.DOWN: np.array([0, 0, -delta]), lazy.carb.input.KeyboardInput.UP: np.array([0, 0, delta]), lazy.carb.input.KeyboardInput.A: np.array([0, delta, 0]), lazy.carb.input.KeyboardInput.D: np.array([0, -delta, 0]), } return mapping.get(inp) def print_message(): print("*" * 80) print("Move the marker to a desired position to query IK and press ENTER") print("W/S: move marker further away or closer to the robot") print("A/D: move marker to the left or the right of the robot") print("UP/DOWN: move marker up and down") print("ESC: quit") if __name__ == "__main__": main()
StanfordVL/OmniGibson/omnigibson/sensors/sensor_noise_base.py
from abc import ABCMeta, abstractmethod from omnigibson.utils.python_utils import classproperty, Registerable # Registered sensor noises REGISTERED_SENSOR_NOISES = dict() class BaseSensorNoise(Registerable, metaclass=ABCMeta): """ Base SensorNoise class. Sensor noise-specific add_noise method is implemented in subclasses Args: enabled (bool): Whether this sensor should be enabled by default """ def __init__(self, enabled=True): # Store whether this noise model is enabled or not self._enabled = enabled def __call__(self, obs): """ If this noise is enabled, corrupts observation @obs by adding sensor noise to sensor reading. This is an identical call to self.corrupt(...) Args: obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0] Returns: np.array: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through """ return self.corrupt(obs=obs) def corrupt(self, obs): """ If this noise is enabled, corrupts observation @obs by adding sensor noise to sensor reading. Args: obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0] Returns: np.array: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through """ # Run sanity check to make sure obs is in acceptable range assert len(obs[(obs < 0.0) | (obs > 1.0)]) == 0, "sensor reading has to be between [0.0, 1.0]" return self._corrupt(obs=obs) if self._enabled else obs @abstractmethod def _corrupt(self, obs): """ Corrupts observation @obs by adding sensor noise to sensor reading Args: obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0] Returns: np.array: Corrupted observation numpy array """ raise NotImplementedError() @property def enabled(self): """ Returns: bool: Whether this noise model is enabled or not """ return self._enabled @enabled.setter def enabled(self, enabled): """ En/disables this noise model Args: enabled (bool): Whether this noise model should be enabled or not """ self._enabled = enabled @classproperty def _do_not_register_classes(cls): # Don't register this class since it's an abstract template classes = super()._do_not_register_classes classes.add("BaseSensorNoise") return classes @classproperty def _cls_registry(cls): # Global registry global REGISTERED_SENSOR_NOISES return REGISTERED_SENSOR_NOISES
StanfordVL/OmniGibson/omnigibson/sensors/dropout_sensor_noise.py
import numpy as np from omnigibson.sensors.sensor_noise_base import BaseSensorNoise class DropoutSensorNoise(BaseSensorNoise): """ Naive dropout sensor noise model Args: dropout_prob (float): Value in [0.0, 1.0] representing fraction of a single observation to be replaced with @dropout_value dropout_value (float): Value in [0.0, 1.0] to replace observations selected to be dropped out enabled (bool): Whether this sensor should be enabled by default """ def __init__( self, dropout_prob=0.05, dropout_value=1.0, enabled=True, ): # Store args, and make sure values are in acceptable range for name, val in zip(("dropout_prob", "dropout_value"), (dropout_prob, dropout_value)): assert 0.0 <= val <= 1.0, f"{name} should be in range [0.0, 1.0], got: {val}" self._dropout_prob = dropout_prob self._dropout_value = dropout_value # Run super method super().__init__(enabled=enabled) def _corrupt(self, obs): # If our noise rate is 0, we just return the obs if self._dropout_prob == 0.0: return obs # Corrupt with randomized dropout valid_mask = np.random.choice(2, obs.shape, p=[self._dropout_prob, 1.0 - self._dropout_prob]) obs[valid_mask == 0] = self._dropout_value return obs @property def dropout_prob(self): """ Returns: float: Value in [0.0, 1.0] representing fraction of a single observation to be replaced with self.dropout_value """ return self._dropout_prob @dropout_prob.setter def dropout_prob(self, p): """ Set the dropout probability for this noise model. Args: p (float): Value in [0.0, 1.0] representing fraction of a single observation to be replaced with self.dropout_value """ assert 0.0 <= p <= 1.0, f"dropout_prob should be in range [0.0, 1.0], got: {p}" self._dropout_prob = p @property def dropout_value(self): """ Returns: float: Value in [0.0, 1.0] to replace observations selected to be dropped out """ return self._dropout_value @dropout_value.setter def dropout_value(self, val): """ Set the dropout value for this noise model. Args: val (float): Value in [0.0, 1.0] to replace observations selected to be dropped out """ assert 0.0 <= val <= 1.0, f"dropout_value should be in range [0.0, 1.0], got: {val}" self._dropout_value = val
StanfordVL/OmniGibson/omnigibson/sensors/__init__.py
from omnigibson.utils.python_utils import assert_valid_key from omnigibson.sensors.sensor_base import BaseSensor, REGISTERED_SENSORS, ALL_SENSOR_MODALITIES from omnigibson.sensors.scan_sensor import ScanSensor from omnigibson.sensors.vision_sensor import VisionSensor from omnigibson.sensors.sensor_noise_base import BaseSensorNoise, REGISTERED_SENSOR_NOISES from omnigibson.sensors.dropout_sensor_noise import DropoutSensorNoise # Map sensor prim names to corresponding sensor classes SENSOR_PRIMS_TO_SENSOR_CLS = { "Lidar": ScanSensor, "Camera": VisionSensor, } def create_sensor( sensor_type, prim_path, name, modalities="all", enabled=True, sensor_kwargs=None, noise_type=None, noise_kwargs=None ): """ Create a sensor of type @sensor_type with optional keyword args @sensor_kwargs that should be passed to the constructor. Also, additionally send noise of type @noise_type with corresponding keyword args @noise_kwargs that should be passed to the noise constructor. Args: sensor_type (str): Type of sensor to create. Should be either one of SENSOR_PRIM_TO_SENSOR.keys() or one of REGISTERED_SENSORS (i.e.: the string name of the desired class to create) prim_path (str): prim path of the Sensor to encapsulate or create. name (str): Name for the sensor. Names need to be unique per scene. modalities (str or list of str): Modality(s) supported by this sensor. Valid options are part of sensor.all_modalities. Default is "all", which corresponds to all modalities being used enabled (bool): Whether this sensor should be enabled or not sensor_kwargs (dict): Any keyword kwargs to pass to the constructor noise_type (str): Type of sensor to create. Should be one of REGISTERED_SENSOR_NOISES (i.e.: the string name of the desired class to create) noise_kwargs (dict): Any keyword kwargs to pass to the constructor Returns: BaseSensor: Created sensor with specified params """ # Run basic sanity check assert isinstance(sensor_type, str), "Inputted sensor_type must be a string!" # Grab the requested sensor class if sensor_type in SENSOR_PRIMS_TO_SENSOR_CLS: sensor_cls = SENSOR_PRIMS_TO_SENSOR_CLS[sensor_type] elif sensor_type in REGISTERED_SENSORS: sensor_cls = REGISTERED_SENSORS[sensor_type] else: # This is an error, we didn't find the requested sensor ): raise ValueError(f"No sensor found with corresponding sensor_type: {sensor_type}") # Create the noise, and sanity check to make sure it's a valid type noise = None if noise_type is not None: assert_valid_key(key=noise_type, valid_keys=REGISTERED_SENSOR_NOISES, name="sensor noise type") noise_kwargs = dict() if noise_kwargs is None else noise_kwargs noise = REGISTERED_SENSOR_NOISES[noise_type](**noise_kwargs) # Create the sensor sensor_kwargs = dict() if sensor_kwargs is None else sensor_kwargs sensor = sensor_cls( prim_path=prim_path, name=name, modalities=modalities, enabled=enabled, noise=noise, **sensor_kwargs, ) return sensor
StanfordVL/OmniGibson/omnigibson/sensors/sensor_base.py
from abc import ABCMeta from omnigibson.prims.xform_prim import XFormPrim from omnigibson.utils.python_utils import classproperty, assert_valid_key, Registerable from omnigibson.utils.gym_utils import GymObservable from gym.spaces import Space # Registered sensors REGISTERED_SENSORS = dict() # All possible modalities across all sensors ALL_SENSOR_MODALITIES = set() class BaseSensor(XFormPrim, GymObservable, Registerable, metaclass=ABCMeta): """ Base Sensor class. Sensor-specific get_obs method is implemented in subclasses Args: prim_path (str): prim path of the Sensor to encapsulate or create. name (str): Name for the sensor. Names need to be unique per scene. modalities (str or list of str): Modality(s) supported by this sensor. Default is "all", which corresponds to all modalities being used. Otherwise, valid options should be part of cls.all_modalities. enabled (bool): Whether this sensor should be enabled by default noise (None or BaseSensorNoise): If specified, sensor noise model to apply to this sensor. load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for loading this sensor's prim at runtime. """ def __init__( self, prim_path, name, modalities="all", enabled=True, noise=None, load_config=None, ): # Store inputs (and sanity check modalities along the way) if modalities == "all": modalities = self.all_modalities else: modalities = [modalities] if isinstance(modalities, str) else modalities for modality in modalities: assert_valid_key(key=modality, valid_keys=self.all_modalities, name="modality") self._modalities = set(modalities) self._enabled = enabled self._noise = noise # Run super method super().__init__( prim_path=prim_path, name=name, load_config=load_config, ) def _load(self): # Sub-sensors must implement this class directly! Cannot use parent XForm class by default raise NotImplementedError("Sensor class must implement _load!") def _post_load(self): # Run super first super()._post_load() # Set the enabled property based on the internal value # This is done so that any subclassed sensors which require simulator specific enabling can handle this now self.enabled = self._enabled def get_obs(self): # Get sensor reading, and optionally corrupt the readings with noise using self.noise if # self.noise.enabled is True. # Note that the returned dictionary will only be filled in if this sensor is enabled! if not self._enabled: return dict() obs, info = self._get_obs() if self._noise is not None: for k, v in obs.items(): if k not in self.no_noise_modalities: obs[k] = self._noise(v) return obs, info def _get_obs(self): """ Get sensor reading. Should generally be extended by subclass. Returns: 2-tuple: dict: Keyword-mapped observations mapping modality names to numpy arrays of arbitrary dimension dict: Additional information about the observations. """ # Default is returning an empty dict return dict(), dict() def _load_observation_space(self): # Fill in observation space based on mapping and active modalities obs_space = dict() for modality, space in self._obs_space_mapping.items(): if modality in self._modalities: if isinstance(space, Space): # Directly add this space obs_space[modality] = space else: # Assume we are procedurally generating a box space shape, low, high, dtype = space obs_space[modality] = self._build_obs_box_space(shape=shape, low=low, high=high, dtype=dtype) return obs_space def add_modality(self, modality): """ Add a modality to this sensor. Must be a valid modality (one of self.all_modalities) Args: modality (str): Name of the modality to add to this sensor """ assert_valid_key(key=modality, valid_keys=self.all_modalities, name="modality") if modality not in self._modalities: self._modalities.add(modality) # Update observation space self.load_observation_space() def remove_modality(self, modality): """ Remove a modality from this sensor. Must be a valid modality that is active (one of self.modalities) Args: modality (str): Name of the modality to remove from this sensor """ assert_valid_key(key=modality, valid_keys=self._modalities, name="modality") if modality in self._modalities: self._modalities.remove(modality) # Update observation space self.load_observation_space() @property def modalities(self): """ Returns: set: Name of modalities provided by this sensor. This should correspond to all the keys provided in self.get_obs() """ return self._modalities @property def _obs_space_mapping(self): """ Returns: dict: Keyword-mapped observation space settings for each modality. For each modality in cls.all_modalities, its name should map directly to the corresponding gym space Space for that modality or a 4-tuple entry (shape, low, high, dtype) for procedurally generating the appropriate Box Space for that modality """ raise NotImplementedError() @classproperty def all_modalities(cls): """ Returns: set: All possible valid modalities for this sensor. Should be implemented by subclass. """ raise NotImplementedError() @property def noise(self): """ Returns: None or BaseSensorNoise: Noise model to use for this sensor """ return self._noise @classproperty def no_noise_modalities(cls): """ Returns: set: Modalities that should NOT be passed through noise, irregardless of whether noise is enabled or not. This is useful for some modalities which are not exclusively numerical arrays. """ raise NotImplementedError() @property def enabled(self): """ Returns: bool: Whether this sensor is enabled or not """ # By default, we simply return the internal value. Subclasses may need to extend this functionality, # e.g. by disabling actual sim functionality for better computational efficiency return self._enabled @enabled.setter def enabled(self, enabled): """ Args: enabled (bool): Whether this sensor should be enabled or not """ # By default, we simply store the value internally. Subclasses may need to extend this functionality, # e.g. by disabling actual sim functionality for better computational efficiency self._enabled = enabled @classproperty def sensor_type(cls): """ Returns: str: Type of this sensor. By default, this is the sensor class name """ return cls.__name__ @classmethod def _register_cls(cls): global ALL_SENSOR_MODALITIES # Run super first super()._register_cls() # Also store modalities from this sensor class if we're registering it if cls.__name__ not in cls._do_not_register_classes: ALL_SENSOR_MODALITIES.union(cls.all_modalities) @classproperty def _do_not_register_classes(cls): # Don't register this class since it's an abstract template classes = super()._do_not_register_classes classes.add("BaseSensor") return classes @classproperty def _cls_registry(cls): # Global registry global REGISTERED_SENSORS return REGISTERED_SENSORS
StanfordVL/OmniGibson/omnigibson/sensors/scan_sensor.py
import cv2 import numpy as np from collections.abc import Iterable from transforms3d.quaternions import quat2mat import omnigibson.lazy as lazy from omnigibson.sensors.sensor_base import BaseSensor from omnigibson.utils.constants import OccupancyGridState from omnigibson.utils.python_utils import classproperty class ScanSensor(BaseSensor): """ General 2D LiDAR range sensor and occupancy grid sensor. Args: prim_path (str): prim path of the Prim to encapsulate or create. name (str): Name for the object. Names need to be unique per scene. modalities (str or list of str): Modality(s) supported by this sensor. Default is "all", which corresponds to all modalities being used. Otherwise, valid options should be part of cls.all_modalities. For this scan sensor, this includes any of: {scan, occupancy_grid} Note that in order for "occupancy_grid" to be used, "scan" must also be included. enabled (bool): Whether this sensor should be enabled by default noise (None or BaseSensorNoise): If specified, sensor noise model to apply to this sensor. load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for loading this sensor's prim at runtime. min_range (float): Minimum range to sense in meters max_range (float): Maximum range to sense in meters horizontal_fov (float): Field of view of sensor, in degrees vertical_fov (float): Field of view of sensor, in degrees yaw_offset (float): Degrees for offsetting this sensors horizontal FOV. Useful in cases where this sensor's forward direction is different than expected horizontal_resolution (float): Degrees in between each horizontal scan hit vertical_resolution (float): Degrees in between each vertical scan hit rotation_rate (float): How fast the range sensor is rotating, in rotations per sec. Set to 0 for all scans be to hit at once draw_points (bool): Whether to draw the points hit by this sensor draw_lines (bool): Whether to draw the lines representing the scans from this sensor occupancy_grid_resolution (int): How many discretized nodes in the occupancy grid. This will specify the height == width of the map occupancy_grid_range (float): Range of the occupancy grid, in meters occupancy_grid_inner_radius (float): Inner range of the occupancy grid that will assumed to be empty, in meters occupancy_grid_local_link (None or XFormPrim): XForm prim that represents the "origin" of any generated occupancy grid, e.g.: if this scan sensor is attached to a robot, then this should possibly be the base link for that robot. If None is specified, then this will default to this own sensor's frame as the origin. """ def __init__( self, prim_path, name, modalities="all", enabled=True, noise=None, load_config=None, # Basic LIDAR kwargs min_range=0.05, max_range=10.0, horizontal_fov=360.0, vertical_fov=1.0, yaw_offset=0.0, horizontal_resolution=1.0, vertical_resolution=1.0, rotation_rate=0.0, draw_points=False, draw_lines=False, # Occupancy Grid kwargs occupancy_grid_resolution=128, occupancy_grid_range=5.0, occupancy_grid_inner_radius=0.5, occupancy_grid_local_link=None, ): # Store settings self.occupancy_grid_resolution = occupancy_grid_resolution self.occupancy_grid_range = occupancy_grid_range self.occupancy_grid_inner_radius = int(occupancy_grid_inner_radius * occupancy_grid_resolution / occupancy_grid_range) self.occupancy_grid_local_link = self if occupancy_grid_local_link is None else occupancy_grid_local_link # Create variables that will be filled in at runtime self._rs = None # Range sensor interface, analagous to others, e.g.: dynamic control interface # Create load config from inputs load_config = dict() if load_config is None else load_config load_config["min_range"] = min_range load_config["max_range"] = max_range load_config["horizontal_fov"] = horizontal_fov load_config["vertical_fov"] = vertical_fov load_config["yaw_offset"] = yaw_offset load_config["horizontal_resolution"] = horizontal_resolution load_config["vertical_resolution"] = vertical_resolution load_config["rotation_rate"] = rotation_rate load_config["draw_points"] = draw_points load_config["draw_lines"] = draw_lines # Sanity check modalities -- if we're using occupancy_grid without scan modality, raise an error if isinstance(modalities, Iterable) and not isinstance(modalities, str) and "occupancy_grid" in modalities: assert "scan" in modalities, f"'scan' modality must be included in order to get occupancy_grid modality!" # Run super method super().__init__( prim_path=prim_path, name=name, modalities=modalities, enabled=enabled, noise=noise, load_config=load_config, ) def _load(self): # Define a LIDAR prim at the current stage result, lidar = lazy.omni.kit.commands.execute("RangeSensorCreateLidar", path=self._prim_path) return lidar.GetPrim() def _post_load(self): # run super first super()._post_load() # Set all the lidar kwargs self.min_range = self._load_config["min_range"] self.max_range = self._load_config["max_range"] self.horizontal_fov = self._load_config["horizontal_fov"] self.vertical_fov = self._load_config["vertical_fov"] self.yaw_offset = self._load_config["yaw_offset"] self.horizontal_resolution = self._load_config["horizontal_resolution"] self.vertical_resolution = self._load_config["vertical_resolution"] self.rotation_rate = self._load_config["rotation_rate"] self.draw_points = self._load_config["draw_points"] self.draw_lines = self._load_config["draw_lines"] def _initialize(self): # run super first super()._initialize() # Initialize lidar sensor interface self._rs = lazy.omni.isaac.range_sensor._range_sensor.acquire_lidar_sensor_interface() @property def _obs_space_mapping(self): # Set the remaining modalities' values # (obs modality, shape, low, high) obs_space_mapping = dict( scan=((self.n_horizontal_rays, self.n_vertical_rays), 0.0, 1.0, np.float32), occupancy_grid=((self.occupancy_grid_resolution, self.occupancy_grid_resolution, 1), 0.0, 1.0, np.float32), ) return obs_space_mapping def get_local_occupancy_grid(self, scan): """ Get local occupancy grid based on current 1D scan Args: n-array: 1D LiDAR scan Returns: 2D-array: (occupancy_grid_resolution, occupancy_grid_resolution)-sized numpy array of the local occupancy grid """ # Run sanity checks first assert "occupancy_grid" in self._modalities, "Occupancy grid is not enabled for this range sensor!" assert self.n_vertical_rays == 1, "Occupancy grid is only valid for a 1D range sensor (n_vertical_rays = 1)!" # Grab vector of corresponding angles for each scan line angles = np.arange( -np.radians(self.horizontal_fov / 2), np.radians(self.horizontal_fov / 2), np.radians(self.horizontal_resolution), ) # Convert into 3D unit vectors for each angle unit_vector_laser = np.array([[np.cos(ang), np.sin(ang), 0.0] for ang in angles]) # Scale unit vectors by corresponding laser scan distnaces assert ((scan >= 0.0) & (scan <= 1.0)).all(), "scan out of valid range [0, 1]" scan_laser = unit_vector_laser * (scan * (self.max_range - self.min_range) + self.min_range) # Convert scans from laser frame to world frame pos, ori = self.get_position_orientation() scan_world = quat2mat(ori).dot(scan_laser.T).T + pos # Convert scans from world frame to local base frame base_pos, base_ori = self.occupancy_grid_local_link.get_position_orientation() scan_local = quat2mat(base_ori).T.dot((scan_world - base_pos).T).T scan_local = scan_local[:, :2] scan_local = np.concatenate([np.array([[0, 0]]), scan_local, np.array([[0, 0]])], axis=0) # flip y axis scan_local[:, 1] *= -1 # Initialize occupancy grid -- default is unknown values occupancy_grid = np.zeros((self.occupancy_grid_resolution, self.occupancy_grid_resolution)).astype(np.uint8) occupancy_grid.fill(int(OccupancyGridState.UNKNOWN * 2.0)) # Convert local scans into the corresponding OG square it should belong to (note now all values are > 0, since # OG ranges from [0, resolution] x [0, resolution]) scan_local_in_map = scan_local / self.occupancy_grid_range * self.occupancy_grid_resolution + \ (self.occupancy_grid_resolution / 2) scan_local_in_map = scan_local_in_map.reshape((1, -1, 1, 2)).astype(np.int32) # For each scan hit, for i in range(scan_local_in_map.shape[1]): cv2.circle( img=occupancy_grid, center=(scan_local_in_map[0, i, 0, 0], scan_local_in_map[0, i, 0, 1]), radius=2, color=int(OccupancyGridState.OBSTACLES * 2.0), thickness=-1, ) cv2.fillPoly( img=occupancy_grid, pts=scan_local_in_map, color=int(OccupancyGridState.FREESPACE * 2.0), lineType=1 ) cv2.circle( img=occupancy_grid, center=(self.occupancy_grid_resolution // 2, self.occupancy_grid_resolution // 2), radius=self.occupancy_grid_inner_radius, color=int(OccupancyGridState.FREESPACE * 2.0), thickness=-1, ) return occupancy_grid[:, :, None].astype(np.float32) / 2.0 def _get_obs(self): # Run super first to grab any upstream obs obs, info = super()._get_obs() # Add scan info (normalized to [0.0, 1.0]) if "scan" in self._modalities: raw_scan = self._rs.get_linear_depth_data(self._prim_path) # Sometimes get_linear_depth_data will return values that are slightly out of range, needs clipping raw_scan = np.clip(raw_scan, self.min_range, self.max_range) obs["scan"] = (raw_scan - self.min_range) / (self.max_range - self.min_range) # Optionally add occupancy grid info if "occupancy_grid" in self._modalities: obs["occupancy_grid"] = self.get_local_occupancy_grid(scan=obs["scan"]) return obs, info @property def n_horizontal_rays(self): """ Returns: int: Number of horizontal rays for this range sensor """ return int(self.horizontal_fov // self.horizontal_resolution) @property def n_vertical_rays(self): """ Returns: int: Number of vertical rays for this range sensor """ return int(self.vertical_fov // self.vertical_resolution) @property def min_range(self): """ Gets this range sensor's min_range (minimum distance in meters which will register a hit) Returns: float: minimum range for this range sensor, in meters """ return self.get_attribute("minRange") @min_range.setter def min_range(self, val): """ Sets this range sensor's min_range (minimum distance in meters which will register a hit) Args: val (float): minimum range for this range sensor, in meters """ self.set_attribute("minRange", val) @property def max_range(self): """ Gets this range sensor's max_range (maximum distance in meters which will register a hit) Returns: float: maximum range for this range sensor, in meters """ return self.get_attribute("maxRange") @max_range.setter def max_range(self, val): """ Sets this range sensor's max_range (maximum distance in meters which will register a hit) Args: val (float): maximum range for this range sensor, in meters """ self.set_attribute("maxRange", val) @property def draw_lines(self): """ Gets whether range lines are drawn for this sensor Returns: bool: Whether range lines are drawn for this sensor """ return self.get_attribute("drawLines") @draw_lines.setter def draw_lines(self, draw): """ Sets whether range lines are drawn for this sensor Args: draw (float): Whether range lines are drawn for this sensor """ self.set_attribute("drawLines", draw) @property def draw_points(self): """ Gets whether range points are drawn for this sensor Returns: bool: Whether range points are drawn for this sensor """ return self.get_attribute("drawPoints") @draw_points.setter def draw_points(self, draw): """ Sets whether range points are drawn for this sensor Args: draw (float): Whether range points are drawn for this sensor """ self.set_attribute("drawPoints", draw) @property def horizontal_fov(self): """ Gets this range sensor's horizontal_fov Returns: float: horizontal field of view for this range sensor """ return self.get_attribute("horizontalFov") @horizontal_fov.setter def horizontal_fov(self, fov): """ Sets this range sensor's horizontal_fov Args: fov (float): horizontal field of view to set """ self.set_attribute("horizontalFov", fov) @property def horizontal_resolution(self): """ Gets this range sensor's horizontal_resolution (degrees in between each horizontal hit) Returns: float: horizontal resolution for this range sensor, in degrees """ return self.get_attribute("horizontalResolution") @horizontal_resolution.setter def horizontal_resolution(self, resolution): """ Sets this range sensor's horizontal_resolution (degrees in between each horizontal hit) Args: resolution (float): horizontal resolution to set, in degrees """ self.set_attribute("horizontalResolution", resolution) @property def vertical_fov(self): """ Gets this range sensor's vertical_fov Returns: float: vertical field of view for this range sensor """ return self.get_attribute("verticalFov") @vertical_fov.setter def vertical_fov(self, fov): """ Sets this range sensor's vertical_fov Args: fov (float): vertical field of view to set """ self.set_attribute("verticalFov", fov) @property def vertical_resolution(self): """ Gets this range sensor's vertical_resolution (degrees in between each vertical hit) Returns: float: vertical resolution for this range sensor, in degrees """ return self.get_attribute("verticalResolution") @vertical_resolution.setter def vertical_resolution(self, resolution): """ Sets this range sensor's vertical_resolution (degrees in between each vertical hit) Args: resolution (float): vertical resolution to set, in degrees """ self.set_attribute("verticalResolution", resolution) @property def yaw_offset(self): """ Gets this range sensor's yaw_offset (used in cases where this sensor's forward direction is different than expected) Returns: float: yaw offset for this range sensor in degrees """ return self.get_attribute("yawOffset") @yaw_offset.setter def yaw_offset(self, offset): """ Sets this range sensor's yaw_offset (used in cases where this sensor's forward direction is different than expected) Args: offset (float): yaw offset to set in degrees. """ self.set_attribute("yawOffset", offset) @property def rotation_rate(self): """ Gets this range sensor's rotation_rate, in degrees per second. Note that a 0 value corresponds to no rotation, and all range hits are assumed to be received at the exact same time. Returns: float: rotation rate for this range sensor in degrees per second """ return self.get_attribute("rotationRate") @rotation_rate.setter def rotation_rate(self, rate): """ Sets this range sensor's rotation_rate, in degrees per second. Note that a 0 value corresponds to no rotation, and all range hits are assumed to be received at the exact same time. Args: rate (float): rotation rate for this range sensor in degrees per second """ self.set_attribute("rotationRate", rate) @classproperty def all_modalities(cls): return {"scan", "occupancy_grid"} @classproperty def no_noise_modalities(cls): # Occupancy grid should have no noise return {"occupancy_grid"} @property def enabled(self): # Just use super return super().enabled @enabled.setter def enabled(self, enabled): # We must use super and additionally directly en/disable the sensor in the simulation # Note: weird syntax below required to "extend" super class's implementation, see: # https://stackoverflow.com/a/37663266 super(ScanSensor, self.__class__).enabled.fset(self, enabled) self.set_attribute("enabled", enabled)
StanfordVL/OmniGibson/omnigibson/sensors/vision_sensor.py
import numpy as np import time import gym import omnigibson as og import omnigibson.lazy as lazy from omnigibson.sensors.sensor_base import BaseSensor from omnigibson.systems.system_base import REGISTERED_SYSTEMS from omnigibson.utils.constants import MAX_CLASS_COUNT, MAX_INSTANCE_COUNT, MAX_VIEWER_SIZE, semantic_class_name_to_id, semantic_class_id_to_name from omnigibson.utils.python_utils import assert_valid_key, classproperty from omnigibson.utils.sim_utils import set_carb_setting from omnigibson.utils.ui_utils import dock_window from omnigibson.utils.vision_utils import Remapper # Duplicate of simulator's render method, used so that this can be done before simulator is created! def render(): """ Refreshes the Isaac Sim app rendering components including UI elements and view ports..etc. """ set_carb_setting(og.app._carb_settings, "/app/player/playSimulations", False) og.app.update() set_carb_setting(og.app._carb_settings, "/app/player/playSimulations", True) class VisionSensor(BaseSensor): """ Vision sensor that handles a variety of modalities, including: - RGB (normal) - Depth (normal, linear) - Normals - Segmentation (semantic, instance) - Optical flow - 2D Bounding boxes (tight, loose) - 3D Bounding boxes - Camera state Args: prim_path (str): prim path of the Prim to encapsulate or create. name (str): Name for the object. Names need to be unique per scene. modalities (str or list of str): Modality(s) supported by this sensor. Default is "all", which corresponds to all modalities being used. Otherwise, valid options should be part of cls.all_modalities. For this vision sensor, this includes any of: {rgb, depth, depth_linear, normal, seg_semantic, seg_instance, flow, bbox_2d_tight, bbox_2d_loose, bbox_3d, camera} enabled (bool): Whether this sensor should be enabled by default noise (None or BaseSensorNoise): If specified, sensor noise model to apply to this sensor. load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for loading this sensor's prim at runtime. image_height (int): Height of generated images, in pixels image_width (int): Width of generated images, in pixels focal_length (float): Focal length to set clipping_range (2-tuple): (min, max) viewing range of this vision sensor viewport_name (None or str): If specified, will link this camera to the specified viewport, overriding its current camera. Otherwise, creates a new viewport """ ALL_MODALITIES = ( "rgb", "depth", "depth_linear", "normal", "seg_semantic", # Semantic segmentation shows the category each pixel belongs to "seg_instance", # Instance segmentation shows the name of the object each pixel belongs to "seg_instance_id", # Instance ID segmentation shows the prim path of the visual mesh each pixel belongs to "flow", "bbox_2d_tight", "bbox_2d_loose", "bbox_3d", "camera_params", ) # Documentation for the different types of segmentation for particle systems: # - Cloth (e.g. `dishtowel`): # - semantic: all shows up under one semantic label (e.g. `"4207839377": "dishtowel"`) # - instance: entire cloth shows up under one label (e.g. `"87": "dishtowel_0"`) # - instance id: entire cloth shows up under one label (e.g. `"31": "/World/dishtowel_0/base_link_cloth"`) # - MicroPhysicalParticleSystem - FluidSystem (e.g. `water`): # - semantic: all shows up under one semantic label (e.g. `"3330677804": "water"`) # - instance: all shows up under one instance label (e.g. `"21": "water"`) # - instance id: all shows up under one instance ID label (e.g. `"36": "water"`) # - MicroPhysicalParticleSystem - GranularSystem (e.g. `sesame seed`): # - semantic: all shows up under one semantic label (e.g. `"2975304485": "sesame_seed"`) # - instance: all shows up under one instance label (e.g. `"21": "sesame_seed"`) # - instance id: all shows up under one instance ID label (e.g. `"36": "sesame_seed"`) # - MacroPhysicalParticleSystem (e.g. `diced__carrot`): # - semantic: all shows up under one semantic label (e.g. `"2419487146": "diced__carrot"`) # - instance: all shows up under one instance label (e.g. `"21": "diced__carrot"`) # - instance id: all shows up under one instance ID label (e.g. `"36": "diced__carrot"`) # - MacroVisualParticleSystem (e.g. `stain`): # - semantic: all shows up under one semantic label (e.g. `"884110082": "stain"`) # - instance: all shows up under one instance label (e.g. `"21": "stain"`) # - instance id: all shows up under one instance ID label (e.g. `"36": "stain"`) # Persistent dictionary of sensors, mapped from prim_path to sensor SENSORS = dict() SEMANTIC_REMAPPER = Remapper() INSTANCE_REMAPPER = Remapper() INSTANCE_ID_REMAPPER = Remapper() INSTANCE_REGISTRY = {0: "background", 1: "unlabelled"} INSTANCE_ID_REGISTRY = {0: "background"} def __init__( self, prim_path, name, modalities="all", enabled=True, noise=None, load_config=None, image_height=128, image_width=128, focal_length=17.0, # Default 17.0 since this is roughly the human eye focal length clipping_range=(0.001, 10000000.0), viewport_name=None, ): # Create load config from inputs load_config = dict() if load_config is None else load_config load_config["image_height"] = image_height load_config["image_width"] = image_width load_config["focal_length"] = focal_length load_config["clipping_range"] = clipping_range load_config["viewport_name"] = viewport_name # Create variables that will be filled in later at runtime self._viewport = None # Viewport from which to grab data self._annotators = None self._render_product = None self._RAW_SENSOR_TYPES = dict( rgb="rgb", depth="distance_to_camera", depth_linear="distance_to_image_plane", normal="normals", # Semantic segmentation shows the category each pixel belongs to seg_semantic="semantic_segmentation", # Instance segmentation shows the name of the object each pixel belongs to seg_instance="instance_segmentation", # Instance ID segmentation shows the prim path of the visual mesh each pixel belongs to seg_instance_id="instance_id_segmentation", flow="motion_vectors", bbox_2d_tight="bounding_box_2d_tight", bbox_2d_loose="bounding_box_2d_loose", bbox_3d="bounding_box_3d", camera_params="camera_params", ) assert {key for key in self._RAW_SENSOR_TYPES.keys() if key != "camera_params"} == set(self.all_modalities), \ "VisionSensor._RAW_SENSOR_TYPES must have the same keys as VisionSensor.all_modalities!" modalities = set([modalities]) if isinstance(modalities, str) else modalities # 1) seg_instance and seg_instance_id require seg_semantic to be enabled (for rendering particle systems) # 2) bounding box observations require seg_semantic to be enabled (for remapping bounding box semantic IDs) semantic_dependent_modalities = {"seg_instance", "seg_instance_id", "bbox_2d_loose", "bbox_2d_tight", "bbox_3d"} # if any of the semantic dependent modalities are enabled, then seg_semantic must be enabled if semantic_dependent_modalities.intersection(modalities) and "seg_semantic" not in modalities: modalities.add("seg_semantic") # Run super method super().__init__( prim_path=prim_path, name=name, modalities=modalities, enabled=enabled, noise=noise, load_config=load_config, ) def _load(self): # Define a new camera prim at the current stage # Note that we can't use og.sim.stage here because the vision sensors get loaded first return lazy.pxr.UsdGeom.Camera.Define(lazy.omni.isaac.core.utils.stage.get_current_stage(), self._prim_path).GetPrim() def _post_load(self): # run super first super()._post_load() # Add this sensor to the list of global sensors self.SENSORS[self._prim_path] = self resolution = (self._load_config["image_width"], self._load_config["image_height"]) self._render_product = lazy.omni.replicator.core.create.render_product(self._prim_path, resolution) # Create a new viewport to link to this camera or link to a pre-existing one viewport_name = self._load_config["viewport_name"] if viewport_name is not None: vp_names_to_handles = {vp.name: vp for vp in lazy.omni.kit.viewport.window.get_viewport_window_instances()} assert_valid_key(key=viewport_name, valid_keys=vp_names_to_handles, name="viewport name") viewport = vp_names_to_handles[viewport_name] else: viewport = lazy.omni.kit.viewport.utility.create_viewport_window() # Take a render step to make sure the viewport is generated before docking it render() # Grab the newly created viewport and dock it to the GUI # The first viewport is always the "main" global camera, and any additional cameras are auxiliary views # These auxiliary views will be stacked in a single column # Thus, the first auxiliary viewport should be generated to the left of the main dockspace, and any # subsequent viewports should be equally spaced according to the number of pre-existing auxiliary views n_auxiliary_sensors = len(self.SENSORS) - 1 if n_auxiliary_sensors == 1: # This is the first auxiliary viewport, dock to the left of the main dockspace dock_window(space=lazy.omni.ui.Workspace.get_window("DockSpace"), name=viewport.name, location=lazy.omni.ui.DockPosition.LEFT, ratio=0.25) elif n_auxiliary_sensors > 1: # This is any additional auxiliary viewports, dock equally-spaced in the auxiliary column # We also need to re-dock any prior viewports! for i in range(2, n_auxiliary_sensors + 1): dock_window(space=lazy.omni.ui.Workspace.get_window(f"Viewport {i - 1}"), name=f"Viewport {i}", location=lazy.omni.ui.DockPosition.BOTTOM, ratio=(1 + n_auxiliary_sensors - i) / (2 + n_auxiliary_sensors - i)) self._viewport = viewport # Link the camera and viewport together self._viewport.viewport_api.set_active_camera(self._prim_path) # Requires 3 render updates to propagate changes for i in range(3): render() # Set the viewer size (requires taking one render step afterwards) self._viewport.viewport_api.set_texture_resolution(resolution) # Also update focal length and clipping range self.focal_length = self._load_config["focal_length"] self.clipping_range = self._load_config["clipping_range"] # Requires 3 render updates to propagate changes for i in range(3): render() def _initialize(self): # Run super first super()._initialize() self._annotators = {modality: None for modality in self._modalities} # Initialize sensors self.initialize_sensors(names=self._modalities) for _ in range(3): render() def initialize_sensors(self, names): """Initializes a raw sensor in the simulation. Args: names (str or list of str): Name of the raw sensor(s) to initialize. If they are not part of self._RAW_SENSOR_TYPES' keys, we will simply pass over them """ names = {names} if isinstance(names, str) else set(names) for name in names: self._add_modality_to_backend(modality=name) def _get_obs(self): # Make sure we're initialized assert self.initialized, "Cannot grab vision observations without first initializing this VisionSensor!" # Run super first to grab any upstream obs obs, info = super()._get_obs() # Reorder modalities to ensure that seg_semantic is always ran before seg_instance or seg_instance_id if "seg_semantic" in self._modalities: reordered_modalities = ["seg_semantic"] + [modality for modality in self._modalities if modality != "seg_semantic"] else: reordered_modalities = self._modalities for modality in reordered_modalities: raw_obs = self._annotators[modality].get_data() # Obs is either a dictionary of {"data":, ..., "info": ...} or a direct array obs[modality] = raw_obs["data"] if isinstance(raw_obs, dict) else raw_obs if modality == "seg_semantic": id_to_labels = raw_obs["info"]["idToLabels"] obs[modality], info[modality] = self._remap_semantic_segmentation(obs[modality], id_to_labels) elif modality == "seg_instance": id_to_labels = raw_obs["info"]["idToLabels"] obs[modality], info[modality] = self._remap_instance_segmentation( obs[modality], id_to_labels, obs["seg_semantic"], info["seg_semantic"], id=False) elif modality == "seg_instance_id": id_to_labels = raw_obs["info"]["idToLabels"] obs[modality], info[modality] = self._remap_instance_segmentation( obs[modality], id_to_labels, obs["seg_semantic"], info["seg_semantic"], id=True) elif "bbox" in modality: obs[modality] = self._remap_bounding_box_semantic_ids(obs[modality]) return obs, info def _remap_semantic_segmentation(self, img, id_to_labels): """ Remap the semantic segmentation image to the class IDs defined in semantic_class_name_to_id(). Also, correct the id_to_labels input with the labels from semantic_class_name_to_id() and return it. Args: img (np.ndarray): Semantic segmentation image to remap id_to_labels (dict): Dictionary of semantic IDs to class labels Returns: np.ndarray: Remapped semantic segmentation image dict: Corrected id_to_labels dictionary """ # Preprocess id_to_labels to feed into the remapper replicator_mapping = {} for key, val in id_to_labels.items(): key = int(key) replicator_mapping[key] = val["class"].lower() if "," in replicator_mapping[key]: # If there are multiple class names, grab the one that is a registered system # This happens with MacroVisual particles, e.g. {"11": {"class": "breakfast_table,stain"}} categories = [cat for cat in replicator_mapping[key].split(",") if cat in REGISTERED_SYSTEMS] assert len(categories) == 1, "There should be exactly one category that belongs to REGISTERED_SYSTEMS" replicator_mapping[key] = categories[0] assert replicator_mapping[key] in semantic_class_id_to_name().values(), f"Class {val['class']} does not exist in the semantic class name to id mapping!" assert set(np.unique(img)).issubset(set(replicator_mapping.keys())), "Semantic segmentation image does not match the original id_to_labels mapping." return VisionSensor.SEMANTIC_REMAPPER.remap(replicator_mapping, semantic_class_id_to_name(), img) def _remap_instance_segmentation(self, img, id_to_labels, semantic_img, semantic_labels, id=False): """ Remap the instance segmentation image to our own instance IDs. Also, correct the id_to_labels input with our new labels and return it. Args: img (np.ndarray): Instance segmentation image to remap id_to_labels (dict): Dictionary of instance IDs to class labels semantic_img (np.ndarray): Semantic segmentation image to use for instance registry semantic_labels (dict): Dictionary of semantic IDs to class labels id (bool): Whether to remap for instance ID segmentation Returns: np.ndarray: Remapped instance segmentation image dict: Corrected id_to_labels dictionary """ # Sometimes 0 and 1 show up in the image, but they are not in the id_to_labels mapping id_to_labels.update({"0": "BACKGROUND"}) if not id: id_to_labels.update({"1": "UNLABELLED"}) # Preprocess id_to_labels and update instance registry replicator_mapping = {} for key, value in id_to_labels.items(): key = int(key) if value in ["BACKGROUND", "UNLABELLED"]: value = value.lower() else: assert "/" in value, f"Instance segmentation (ID) label {value} is not a valid prim path!" prim_name = value.split("/")[-1] # Hacky way to get the particles of MacroVisual/PhysicalParticleSystem # Remap instance segmentation and instance segmentation ID labels to system name if "Particle" in prim_name: category_name = prim_name.split("Particle")[0] assert category_name in REGISTERED_SYSTEMS, f"System name {category_name} is not in the registered systems!" value = category_name else: # Remap instance segmentation labels to object name if not id: # value is the prim path of the object if value == "/World/groundPlane": value = "groundPlane" else: obj = og.sim.scene.object_registry("prim_path", value) # Remap instance segmentation labels from prim path to object name assert obj is not None, f"Object with prim path {value} cannot be found in objct registry!" value = obj.name # Keep the instance segmentation ID labels intact (prim paths of visual meshes) else: pass self._register_instance(value, id=id) replicator_mapping[key] = value # Handle the cases for MicroPhysicalParticleSystem (FluidSystem, GranularSystem). # They show up in the image, but not in the info (id_to_labels). # We identify these values, find the corresponding semantic label (system name), and add the mapping. for key, img_idx in zip(*np.unique(img, return_index=True)): if str(key) not in id_to_labels: semantic_label = semantic_img.flatten()[img_idx] assert semantic_label in semantic_labels, f"Semantic map value {semantic_label} is not in the semantic labels!" category_name = semantic_labels[semantic_label] if category_name in REGISTERED_SYSTEMS: value = category_name self._register_instance(value, id=id) # If the category name is not in the registered systems, # which happens because replicator sometimes returns segmentation map and id_to_labels that are not in sync, # we will label this as "unlabelled" for now else: value = "unlabelled" replicator_mapping[key] = value registry = VisionSensor.INSTANCE_ID_REGISTRY if id else VisionSensor.INSTANCE_REGISTRY remapper = VisionSensor.INSTANCE_ID_REMAPPER if id else VisionSensor.INSTANCE_REMAPPER assert set(np.unique(img)).issubset(set(replicator_mapping.keys())), "Instance segmentation image does not match the original id_to_labels mapping." return remapper.remap(replicator_mapping, registry, img) def _register_instance(self, instance_name, id=False): registry = VisionSensor.INSTANCE_ID_REGISTRY if id else VisionSensor.INSTANCE_REGISTRY if instance_name not in registry.values(): registry[len(registry)] = instance_name def _remap_bounding_box_semantic_ids(self, bboxes): """ Remap the semantic IDs of the bounding boxes to our own semantic IDs. Args: bboxes (list of dict): List of bounding boxes to remap Returns: list of dict: Remapped list of bounding boxes """ for bbox in bboxes: bbox["semanticId"] = VisionSensor.SEMANTIC_REMAPPER.remap_bbox(bbox["semanticId"]) return bboxes def add_modality(self, modality): # Check if we already have this modality (if so, no need to initialize it explicitly) should_initialize = modality not in self._modalities # Run super super().add_modality(modality=modality) # We also need to initialize this new modality if should_initialize: self.initialize_sensors(names=modality) def remove_modality(self, modality): # Check if we don't have this modality (if not, no need to remove it explicitly) should_remove = modality in self._modalities # Run super super().remove_modality(modality=modality) if should_remove: self._remove_modality_from_backend(modality=modality) def _add_modality_to_backend(self, modality): """ Helper function to add specified modality @modality to the omniverse Replicator backend so that its data is generated during get_obs() Args: modality (str): Name of the modality to add to the Replicator backend """ if self._annotators.get(modality, None) is None: self._annotators[modality] = lazy.omni.replicator.core.AnnotatorRegistry.get_annotator(self._RAW_SENSOR_TYPES[modality]) self._annotators[modality].attach([self._render_product]) def _remove_modality_from_backend(self, modality): """ Helper function to remove specified modality @modality from the omniverse Replicator backend so that its data is no longer generated during get_obs() Args: modality (str): Name of the modality to remove from the Replicator backend """ if self._annotators.get(modality, None) is not None: self._annotators[modality].detach([self._render_product]) self._annotators[modality] = None def remove(self): # Remove from global sensors dictionary self.SENSORS.pop(self._prim_path) # Remove viewport self._viewport.destroy() # Run super super().remove() @property def camera_parameters(self): """ Returns a dictionary of keyword-mapped relevant intrinsic and extrinsic camera parameters for this vision sensor. The returned dictionary includes the following keys and their corresponding data types: - "cameraAperture": np.ndarray (float32) - Camera aperture dimensions. - "cameraApertureOffset": np.ndarray (float32) - Offset of the camera aperture. - "cameraFisheyeLensP": np.ndarray (float32) - Fisheye lens P parameter. - "cameraFisheyeLensS": np.ndarray (float32) - Fisheye lens S parameter. - "cameraFisheyeMaxFOV": float - Maximum field of view for fisheye lens. - "cameraFisheyeNominalHeight": int - Nominal height for fisheye lens. - "cameraFisheyeNominalWidth": int - Nominal width for fisheye lens. - "cameraFisheyeOpticalCentre": np.ndarray (float32) - Optical center for fisheye lens. - "cameraFisheyePolynomial": np.ndarray (float32) - Polynomial parameters for fisheye lens distortion. - "cameraFocalLength": float - Focal length of the camera. - "cameraFocusDistance": float - Focus distance of the camera. - "cameraFStop": float - F-stop value of the camera. - "cameraModel": str - Camera model identifier. - "cameraNearFar": np.ndarray (float32) - Near and far plane distances. - "cameraProjection": np.ndarray (float32) - Camera projection matrix. - "cameraViewTransform": np.ndarray (float32) - Camera view transformation matrix. - "metersPerSceneUnit": float - Scale factor from scene units to meters. - "renderProductResolution": np.ndarray (int32) - Resolution of the rendered product. Returns: dict: Keyword-mapped relevant intrinsic and extrinsic camera parameters for this vision sensor. """ # Add the camera params modality if it doesn't already exist if "camera_params" not in self._annotators: self.initialize_sensors(names="camera_params") # Requires 3 render updates for camera params annotator to decome active for _ in range(3): render() # Grab and return the parameters return self._annotators["camera_params"].get_data() @property def viewer_visibility(self): """ Returns: bool: Whether the viewer is visible or not """ return self._viewport.visible @viewer_visibility.setter def viewer_visibility(self, visible): """ Sets whether the viewer should be visible or not in the Omni UI Args: visible (bool): Whether the viewer should be visible or not """ self._viewport.visible = visible # Requires 1 render update to propagate changes render() @property def image_height(self): """ Returns: int: Image height of this sensor, in pixels """ return self._viewport.viewport_api.get_texture_resolution()[1] @image_height.setter def image_height(self, height): """ Sets the image height @height for this sensor Args: height (int): Image height of this sensor, in pixels """ width, _ = self._viewport.viewport_api.get_texture_resolution() self._viewport.viewport_api.set_texture_resolution((width, height)) # Requires 3 updates to propagate changes for i in range(3): render() @property def image_width(self): """ Returns: int: Image width of this sensor, in pixels """ return self._viewport.viewport_api.get_texture_resolution()[0] @image_width.setter def image_width(self, width): """ Sets the image width @width for this sensor Args: width (int): Image width of this sensor, in pixels """ _, height = self._viewport.viewport_api.get_texture_resolution() self._viewport.viewport_api.set_texture_resolution((width, height)) # Requires 3 updates to propagate changes for i in range(3): render() @property def clipping_range(self): """ Returns: 2-tuple: [min, max] value of the sensor's clipping range, in meters """ return np.array(self.get_attribute("clippingRange")) @clipping_range.setter def clipping_range(self, limits): """ Sets the clipping range @limits for this sensor Args: limits (2-tuple): [min, max] value of the sensor's clipping range, in meters """ self.set_attribute(attr="clippingRange", val=lazy.pxr.Gf.Vec2f(*limits)) # In order for sensor changes to propagate, we must toggle its visibility self.visible = False # A single update step has to happen here before we toggle visibility for changes to propagate render() self.visible = True @property def horizontal_aperture(self): """ Returns: float: horizontal aperture of this sensor, in mm """ return self.get_attribute("horizontalAperture") @horizontal_aperture.setter def horizontal_aperture(self, length): """ Sets the focal length @length for this sensor Args: length (float): horizontal aperture of this sensor, in meters """ self.set_attribute("horizontalAperture", length) @property def focal_length(self): """ Returns: float: focal length of this sensor, in mm """ return self.get_attribute("focalLength") @focal_length.setter def focal_length(self, length): """ Sets the focal length @length for this sensor Args: length (float): focal length of this sensor, in mm """ self.set_attribute("focalLength", length) @property def intrinsic_matrix(self): """ Returns: n-array: (3, 3) camera intrinsic matrix. Transforming point p (x,y,z) in the camera frame via K * p will produce p' (x', y', w) - the point in the image plane. To get pixel coordiantes, divide x' and y' by w """ projection_matrix = self.camera_parameters["cameraProjection"] projection_matrix = np.array(projection_matrix).reshape(4, 4) fx = projection_matrix[0, 0] fy = projection_matrix[1, 1] cx = projection_matrix[0, 2] cy = projection_matrix[1, 2] s = projection_matrix[0, 1] # Skew factor intrinsic_matrix = np.array([[fx, s, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]]) return intrinsic_matrix @property def _obs_space_mapping(self): # Generate the complex space types for special modalities: # {"bbox_2d_tight", "bbox_2d_loose", "bbox_3d"} bbox_3d_space = gym.spaces.Sequence(space=gym.spaces.Tuple(( gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=np.uint32), # semanticId gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # x_min gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # y_min gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # z_min gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # x_max gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # y_max gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # z_max gym.spaces.Box(low=-np.inf, high=np.inf, shape=(4, 4), dtype=np.float32), # transform gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32), # occlusion ratio ))) bbox_2d_space = gym.spaces.Sequence(space=gym.spaces.Tuple(( gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=np.uint32), # semanticId gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # x_min gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # y_min gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # x_max gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # y_max gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32), # occlusion ratio ))) obs_space_mapping = dict( rgb=((self.image_height, self.image_width, 4), 0, 255, np.uint8), depth=((self.image_height, self.image_width), 0.0, np.inf, np.float32), depth_linear=((self.image_height, self.image_width), 0.0, np.inf, np.float32), normal=((self.image_height, self.image_width, 4), -1.0, 1.0, np.float32), seg_semantic=((self.image_height, self.image_width), 0, MAX_CLASS_COUNT, np.uint32), seg_instance=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, np.uint32), seg_instance_id=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, np.uint32), flow=((self.image_height, self.image_width, 4), -np.inf, np.inf, np.float32), bbox_2d_tight=bbox_2d_space, bbox_2d_loose=bbox_2d_space, bbox_3d=bbox_3d_space, ) return obs_space_mapping @classmethod def clear(cls): """ Clears all cached sensors that have been generated. Should be used when the simulator is completely reset; i.e.: all objects on the stage are destroyed """ for sensor in cls.SENSORS.values(): # Destroy any sensor that is not attached to the main viewport window if sensor._viewport.name != "Viewport": sensor._viewport.destroy() # Render to update render() cls.SENSORS = dict() cls.KNOWN_SEMANTIC_IDS = set() cls.KEY_ARRAY = None cls.INSTANCE_REGISTRY = {0: "background", 1: "unlabelled"} cls.INSTANCE_ID_REGISTRY = {0: "background"} @classproperty def all_modalities(cls): return {modality for modality in cls.ALL_MODALITIES if modality != "camera_params"} @classproperty def no_noise_modalities(cls): # bounding boxes and camera state should not have noise return {"bbox_2d_tight", "bbox_2d_loose", "bbox_3d"}
StanfordVL/OmniGibson/omnigibson/envs/__init__.py
from omnigibson.envs.env_base import Environment from omnigibson.envs.env_wrapper import EnvironmentWrapper, create_wrapper, REGISTERED_ENV_WRAPPERS
StanfordVL/OmniGibson/omnigibson/envs/env_base.py
import gym import numpy as np from copy import deepcopy import omnigibson as og from omnigibson.objects import REGISTERED_OBJECTS from omnigibson.robots import REGISTERED_ROBOTS from omnigibson.scene_graphs.graph_builder import SceneGraphBuilder from omnigibson.simulator import launch_simulator from omnigibson.tasks import REGISTERED_TASKS from omnigibson.scenes import REGISTERED_SCENES from omnigibson.sensors import create_sensor from omnigibson.utils.gym_utils import GymObservable, recursively_generate_flat_dict, recursively_generate_compatible_dict from omnigibson.utils.config_utils import parse_config from omnigibson.utils.ui_utils import create_module_logger from omnigibson.utils.python_utils import assert_valid_key, merge_nested_dicts, create_class_from_registry_and_config,\ Recreatable from omnigibson.macros import gm # Create module logger log = create_module_logger(module_name=__name__) class Environment(gym.Env, GymObservable, Recreatable): """ Core environment class that handles loading scene, robot(s), and task, following OpenAI Gym interface. """ def __init__(self, configs): """ Args: configs (str or dict or list of str or dict): config_file path(s) or raw config dictionaries. If multiple configs are specified, they will be merged sequentially in the order specified. This allows procedural generation of a "full" config from small sub-configs. For valid keys, please see @default_config below """ # Call super first super().__init__() # Launch Isaac Sim launch_simulator() # Initialize other placeholders that will be filled in later self._task = None self._external_sensors = None self._loaded = None self._current_episode = 0 # Variables reset at the beginning of each episode self._current_step = 0 # Convert config file(s) into a single parsed dict configs = configs if isinstance(configs, list) or isinstance(configs, tuple) else [configs] # Initial default config self.config = self.default_config # Merge in specified configs for config in configs: merge_nested_dicts(base_dict=self.config, extra_dict=parse_config(config), inplace=True) # Store settings and other initialized values self._automatic_reset = self.env_config["automatic_reset"] self._flatten_action_space = self.env_config["flatten_action_space"] self._flatten_obs_space = self.env_config["flatten_obs_space"] self.physics_frequency = self.env_config["physics_frequency"] self.action_frequency = self.env_config["action_frequency"] self.device = self.env_config["device"] self._initial_pos_z_offset = self.env_config["initial_pos_z_offset"] # how high to offset object placement to account for one action step of dropping # Create the scene graph builder self._scene_graph_builder = None if "scene_graph" in self.config and self.config["scene_graph"] is not None: self._scene_graph_builder = SceneGraphBuilder(**self.config["scene_graph"]) # Load this environment self.load() def reload(self, configs, overwrite_old=True): """ Reload using another set of config file(s). This allows one to change the configuration and hot-reload the environment on the fly. Args: configs (dict or str or list of dict or list of str): config_file dict(s) or path(s). If multiple configs are specified, they will be merged sequentially in the order specified. This allows procedural generation of a "full" config from small sub-configs. overwrite_old (bool): If True, will overwrite the internal self.config with @configs. Otherwise, will merge in the new config(s) into the pre-existing one. Setting this to False allows for minor modifications to be made without having to specify entire configs during each reload. """ # Convert config file(s) into a single parsed dict configs = [configs] if isinstance(configs, dict) or isinstance(configs, str) else configs # Initial default config new_config = self.default_config # Merge in specified configs for config in configs: merge_nested_dicts(base_dict=new_config, extra_dict=parse_config(config), inplace=True) # Either merge in or overwrite the old config if overwrite_old: self.config = new_config else: merge_nested_dicts(base_dict=self.config, extra_dict=new_config, inplace=True) # Load this environment again self.load() def reload_model(self, scene_model): """ Reload another scene model. This allows one to change the scene on the fly. Args: scene_model (str): new scene model to load (eg.: Rs_int) """ self.scene_config["model"] = scene_model self.load() def _load_variables(self): """ Load variables from config """ # Store additional variables after config has been loaded fully self._initial_pos_z_offset = self.env_config["initial_pos_z_offset"] # Reset bookkeeping variables self._reset_variables() self._current_episode = 0 # Manually set this to 0 since resetting actually increments this # - Potentially overwrite the USD entry for the scene if none is specified and we're online sampling - # Make sure the requested scene is valid scene_type = self.scene_config["type"] assert_valid_key(key=scene_type, valid_keys=REGISTERED_SCENES, name="scene type") # Verify scene and task configs are valid for the given task type REGISTERED_TASKS[self.task_config["type"]].verify_scene_and_task_config( scene_cfg=self.scene_config, task_cfg=self.task_config, ) # - Additionally run some sanity checks on these values - # Check to make sure our z offset is valid -- check that the distance travelled over 1 action timestep is # less than the offset we set (dist = 0.5 * gravity * (t^2)) drop_distance = 0.5 * 9.8 * ((1. / self.action_frequency) ** 2) assert drop_distance < self._initial_pos_z_offset, "initial_pos_z_offset is too small for collision checking" def _load_task(self, task_config=None): """ Load task Args: task_confg (None or dict): If specified, custom task configuration to use. Otherwise, will use self.task_config. Note that if a custom task configuration is specified, the internal task config will be updated as well """ # Update internal config if specified if task_config is not None: # Copy task config, in case self.task_config and task_config are the same! task_config = deepcopy(task_config) self.task_config.clear() self.task_config.update(task_config) # Sanity check task to make sure it's valid task_type = self.task_config["type"] assert_valid_key(key=task_type, valid_keys=REGISTERED_TASKS, name="task type") # Grab the kwargs relevant for the specific task and create the task self._task = create_class_from_registry_and_config( cls_name=self.task_config["type"], cls_registry=REGISTERED_TASKS, cfg=self.task_config, cls_type_descriptor="task", ) assert og.sim.is_stopped(), "Simulator must be stopped before loading tasks!" # Load task. Should load additional task-relevant objects and configure the scene into its default initial state self._task.load(env=self) assert og.sim.is_stopped(), "Simulator must be stopped after loading tasks!" def _load_scene(self): """ Load the scene and robot specified in the config file. """ assert og.sim.is_stopped(), "Simulator must be stopped before loading scene!" # Set the simulator settings # NOTE: This must be done BEFORE the scene is loaded, or else all vision sensors can't retrieve observations og.sim.set_simulation_dt(physics_dt=(1. / self.physics_frequency), rendering_dt=(1. / self.action_frequency)) # Create the scene from our scene config scene = create_class_from_registry_and_config( cls_name=self.scene_config["type"], cls_registry=REGISTERED_SCENES, cfg=self.scene_config, cls_type_descriptor="scene", ) og.sim.import_scene(scene) # Set the rendering settings if gm.RENDER_VIEWER_CAMERA: og.sim.viewer_width = self.render_config["viewer_width"] og.sim.viewer_height = self.render_config["viewer_height"] og.sim.device = self.device assert og.sim.is_stopped(), "Simulator must be stopped after loading scene!" def _load_robots(self): """ Load robots into the scene """ # Only actually load robots if no robot has been imported from the scene loading directly yet if len(self.scene.robots) == 0: assert og.sim.is_stopped(), "Simulator must be stopped before loading robots!" # Iterate over all robots to generate in the robot config for i, robot_config in enumerate(self.robots_config): # Add a name for the robot if necessary if "name" not in robot_config: robot_config["name"] = f"robot{i}" position, orientation = robot_config.pop("position", None), robot_config.pop("orientation", None) # Make sure robot exists, grab its corresponding kwargs, and create / import the robot robot = create_class_from_registry_and_config( cls_name=robot_config["type"], cls_registry=REGISTERED_ROBOTS, cfg=robot_config, cls_type_descriptor="robot", ) # Import the robot into the simulator og.sim.import_object(robot) robot.set_position_orientation(position=position, orientation=orientation) if len(self.robots_config) > 0: # Auto-initialize all robots og.sim.play() self.scene.reset() self.scene.update_initial_state() og.sim.stop() assert og.sim.is_stopped(), "Simulator must be stopped after loading robots!" def _load_objects(self): """ Load any additional custom objects into the scene """ assert og.sim.is_stopped(), "Simulator must be stopped before loading objects!" for i, obj_config in enumerate(self.objects_config): # Add a name for the object if necessary if "name" not in obj_config: obj_config["name"] = f"obj{i}" # Pop the desired position and orientation position, orientation = obj_config.pop("position", None), obj_config.pop("orientation", None) # Make sure robot exists, grab its corresponding kwargs, and create / import the robot obj = create_class_from_registry_and_config( cls_name=obj_config["type"], cls_registry=REGISTERED_OBJECTS, cfg=obj_config, cls_type_descriptor="object", ) # Import the robot into the simulator and set the pose og.sim.import_object(obj) obj.set_position_orientation(position=position, orientation=orientation) if len(self.objects_config) > 0: # Auto-initialize all objects og.sim.play() self.scene.reset() self.scene.update_initial_state() og.sim.stop() assert og.sim.is_stopped(), "Simulator must be stopped after loading objects!" def _load_external_sensors(self): """ Load any additional custom external sensors into the scene """ assert og.sim.is_stopped(), "Simulator must be stopped before loading external sensors!" sensors_config = self.env_config["external_sensors"] if sensors_config is not None: self._external_sensors = dict() for i, sensor_config in enumerate(sensors_config): # Add a name for the object if necessary if "name" not in sensor_config: sensor_config["name"] = f"external_sensor{i}" # Determine prim path if not specified if "prim_path" not in sensor_config: sensor_config["prim_path"] = f"/World/{sensor_config['name']}" # Pop the desired position and orientation local_position, local_orientation = sensor_config.pop("local_position", None), sensor_config.pop("local_orientation", None) # Make sure sensor exists, grab its corresponding kwargs, and create the sensor sensor = create_sensor(**sensor_config) # Load an initialize this sensor sensor.load() sensor.initialize() sensor.set_local_pose(local_position, local_orientation) self._external_sensors[sensor.name] = sensor assert og.sim.is_stopped(), "Simulator must be stopped after loading external sensors!" def _load_observation_space(self): # Grab robot(s) and task obs spaces obs_space = dict() for robot in self.robots: # Load the observation space for the robot obs_space[robot.name] = robot.load_observation_space() # Also load the task obs space obs_space["task"] = self._task.load_observation_space() # Also load any external sensors if self._external_sensors is not None: external_obs_space = dict() for sensor_name, sensor in self._external_sensors.items(): # Load the sensor observation space external_obs_space[sensor_name] = sensor.load_observation_space() obs_space["external"] = gym.spaces.Dict(external_obs_space) return obs_space def load_observation_space(self): # Call super first obs_space = super().load_observation_space() # If we want to flatten it, modify the observation space by recursively searching through all if self._flatten_obs_space: self.observation_space = gym.spaces.Dict(recursively_generate_flat_dict(dic=obs_space)) return self.observation_space def _load_action_space(self): """ Load action space for each robot """ action_space = gym.spaces.Dict({robot.name: robot.action_space for robot in self.robots}) # Convert into flattened 1D Box space if requested if self._flatten_action_space: lows = [] highs = [] for space in action_space.values(): assert isinstance(space, gym.spaces.Box), \ "Can only flatten action space where all individual spaces are gym.space.Box instances!" assert len(space.shape) == 1, \ "Can only flatten action space where all individual spaces are 1D instances!" lows.append(space.low) highs.append(space.high) action_space = gym.spaces.Box(np.concatenate(lows), np.concatenate(highs), dtype=np.float32) # Store action space self.action_space = action_space def load(self): """ Load the scene and robot specified in the config file. """ # This environment is not loaded self._loaded = False # Load config variables self._load_variables() # Load the scene, robots, and task self._load_scene() self._load_robots() self._load_objects() self._load_task() self._load_external_sensors() og.sim.play() self.reset() # Load the obs / action spaces self.load_observation_space() self._load_action_space() # Start the scene graph builder if self._scene_graph_builder: self._scene_graph_builder.start(self.scene) # Denote that the scene is loaded self._loaded = True def update_task(self, task_config): """ Updates the internal task using @task_config. NOTE: This will internally reset the environment as well! Args: task_config (dict): Task configuration for updating the new task """ # Make sure sim is playing assert og.sim.is_playing(), "Update task should occur while sim is playing!" # Denote scene as not loaded yet self._loaded = False og.sim.stop() self._load_task(task_config=task_config) og.sim.play() self.reset() # Load obs / action spaces self.load_observation_space() self._load_action_space() # Scene is now loaded again self._loaded = True def close(self): """ Clean up the environment and shut down the simulation. """ og.shutdown() def get_obs(self): """ Get the current environment observation. Returns: 2-tuple: dict: Keyword-mapped observations, which are possibly nested dict: Additional information about the observations """ obs = dict() info = dict() # Grab all observations from each robot for robot in self.robots: obs[robot.name], info[robot.name] = robot.get_obs() # Add task observations obs["task"] = self._task.get_obs(env=self) # Add external sensor observations if they exist if self._external_sensors is not None: external_obs = dict() external_info = dict() for sensor_name, sensor in self._external_sensors.items(): external_obs[sensor_name], external_info[sensor_name] = sensor.get_obs() obs["external"] = external_obs info["external"] = external_info # Possibly flatten obs if requested if self._flatten_obs_space: obs = recursively_generate_flat_dict(dic=obs) return obs, info def get_scene_graph(self): """ Get the current scene graph. Returns: SceneGraph: Current scene graph """ assert self._scene_graph_builder is not None, "Scene graph builder must be specified in config!" return self._scene_graph_builder.get_scene_graph() def _populate_info(self, info): """ Populate info dictionary with any useful information. Args: info (dict): Information dictionary to populate Returns: dict: Information dictionary with added info """ info["episode_length"] = self._current_step if self._scene_graph_builder is not None: info["scene_graph"] = self.get_scene_graph() def step(self, action): """ Apply robot's action and return the next state, reward, done and info, following OpenAI Gym's convention Args: action (gym.spaces.Dict or dict or np.array): robot actions. If a dict is specified, each entry should map robot name to corresponding action. If a np.array, it should be the flattened, concatenated set of actions Returns: 4-tuple: - dict: state, i.e. next observation - float: reward, i.e. reward at this current timestep - bool: done, i.e. whether this episode is terminated - dict: info, i.e. dictionary with any useful information """ try: # If the action is not a dictionary, convert into a dictionary if not isinstance(action, dict) and not isinstance(action, gym.spaces.Dict): action_dict = dict() idx = 0 for robot in self.robots: action_dim = robot.action_dim action_dict[robot.name] = action[idx: idx + action_dim] idx += action_dim else: # Our inputted action is the action dictionary action_dict = action # Iterate over all robots and apply actions for robot in self.robots: robot.apply_action(action_dict[robot.name]) # Run simulation step og.sim.step() # Grab observations obs, obs_info = self.get_obs() # Step the scene graph builder if necessary if self._scene_graph_builder is not None: self._scene_graph_builder.step(self.scene) # Grab reward, done, and info, and populate with internal info reward, done, info = self.task.step(self, action) self._populate_info(info) info["obs_info"] = obs_info if done and self._automatic_reset: # Add lost observation to our information dict, and reset info["last_observation"] = obs obs = self.reset() # Increment step self._current_step += 1 return obs, reward, done, info except: raise ValueError(f"Failed to execute environment step {self._current_step} in episode {self._current_episode}") def _reset_variables(self): """ Reset bookkeeping variables for the next new episode. """ self._current_episode += 1 self._current_step = 0 # TODO: Match super class signature? def reset(self): """ Reset episode. """ # Reset the task self.task.reset(self) # Reset internal variables self._reset_variables() # Run a single simulator step to make sure we can grab updated observations og.sim.step() # Grab and return observations obs, _ = self.get_obs() if self._loaded: # Sanity check to make sure received observations match expected observation space check_obs = recursively_generate_compatible_dict(dic=obs) if not self.observation_space.contains(check_obs): exp_obs = dict() for key, value in recursively_generate_flat_dict(dic=self.observation_space).items(): exp_obs[key] = ("obs_space", key, value.dtype, value.shape) real_obs = dict() for key, value in recursively_generate_flat_dict(dic=check_obs).items(): if isinstance(value, np.ndarray): real_obs[key] = ("obs", key, value.dtype, value.shape) else: real_obs[key] = ("obs", key, type(value), "()") exp_keys = set(exp_obs.keys()) real_keys = set(real_obs.keys()) shared_keys = exp_keys.intersection(real_keys) missing_keys = exp_keys - real_keys extra_keys = real_keys - exp_keys if missing_keys: log.error("MISSING OBSERVATION KEYS:") log.error(missing_keys) if extra_keys: log.error("EXTRA OBSERVATION KEYS:") log.error(extra_keys) mismatched_keys = [] for k in shared_keys: if exp_obs[k][2:] != real_obs[k][2:]: # Compare dtypes and shapes mismatched_keys.append(k) log.error(f"MISMATCHED OBSERVATION FOR KEY '{k}':") log.error(f"Expected: {exp_obs[k]}") log.error(f"Received: {real_obs[k]}") raise ValueError("Observation space does not match returned observations!") return obs @property def episode_steps(self): """ Returns: int: Current number of steps in episode """ return self._current_step @property def initial_pos_z_offset(self): """ Returns: float: how high to offset object placement to test valid pose & account for one action step of dropping """ return self._initial_pos_z_offset @property def task(self): """ Returns: BaseTask: Active task instance """ return self._task @property def scene(self): """ Returns: Scene: Active scene in this environment """ return og.sim.scene @property def robots(self): """ Returns: list of BaseRobot: Robots in the current scene """ return self.scene.robots @property def external_sensors(self): """ Returns: None or dict: If self.env_config["external_sensors"] is specified, returns the dict mapping sensor name to instantiated sensor. Otherwise, returns None """ return self._external_sensors @property def env_config(self): """ Returns: dict: Environment-specific configuration kwargs """ return self.config["env"] @property def render_config(self): """ Returns: dict: Render-specific configuration kwargs """ return self.config["render"] @property def scene_config(self): """ Returns: dict: Scene-specific configuration kwargs """ return self.config["scene"] @property def robots_config(self): """ Returns: dict: Robot-specific configuration kwargs """ return self.config["robots"] @property def objects_config(self): """ Returns: dict: Object-specific configuration kwargs """ return self.config["objects"] @property def task_config(self): """ Returns: dict: Task-specific configuration kwargs """ return self.config["task"] @property def wrapper_config(self): """ Returns: dict: Wrapper-specific configuration kwargs """ return self.config["wrapper"] @property def default_config(self): """ Returns: dict: Default configuration for this environment. May not be fully specified (i.e.: still requires @config to be specified during environment creation) """ return { # Environment kwargs "env": { "action_frequency": 30, "physics_frequency": 120, "device": None, "automatic_reset": False, "flatten_action_space": False, "flatten_obs_space": False, "initial_pos_z_offset": 0.1, "external_sensors": None, }, # Rendering kwargs "render": { "viewer_width": 1280, "viewer_height": 720, }, # Scene kwargs "scene": { # Traversibility map kwargs "waypoint_resolution": 0.2, "num_waypoints": 10, "trav_map_resolution": 0.1, "default_erosion_radius": 0.0, "trav_map_with_objects": True, "scene_instance": None, "scene_file": None, }, # Robot kwargs "robots": [], # no robots by default # Object kwargs "objects": [], # no objects by default # Task kwargs "task": { "type": "DummyTask", }, # Wrapper kwargs "wrapper": { "type": None, }, }
StanfordVL/OmniGibson/omnigibson/envs/env_wrapper.py
from omnigibson.utils.python_utils import Wrapper from omnigibson.utils.python_utils import Registerable, classproperty, create_class_from_registry_and_config from omnigibson.utils.ui_utils import create_module_logger from copy import deepcopy # Global dicts that will contain mappings REGISTERED_ENV_WRAPPERS = dict() # Create module logger log = create_module_logger(module_name=__name__) def create_wrapper(env): """ Wraps environment @env with wrapper defined by env.wrapper_config """ wrapper_cfg = deepcopy(env.wrapper_config) wrapper_type = wrapper_cfg.pop("type") wrapper_cfg["env"] = env return create_class_from_registry_and_config( cls_name=wrapper_type, cls_registry=REGISTERED_ENV_WRAPPERS, cfg=wrapper_cfg, cls_type_descriptor="wrapper", ) class EnvironmentWrapper(Wrapper, Registerable): """ Base class for all environment wrappers in OmniGibson. In general, reset(), step(), and observation_spec() should be overwritten Args: env (OmniGibsonEnv): The environment to wrap. """ def __init__(self, env): self.env = env # Run super super().__init__(obj=env) def step(self, action): """ By default, run the normal environment step() function Args: action (np.array): action to take in environment Returns: 4-tuple: - (dict) observations from the environment - (float) reward from the environment - (bool) whether the current episode is completed or not - (dict) misc information """ return self.env.step(action) def reset(self): """ By default, run the normal environment reset() function Returns: dict: Environment observation space after reset occurs """ return self.env.reset() def observation_spec(self): """ By default, grabs the normal environment observation_spec Returns: dict: Observations from the environment """ return self.env.observation_spec() @classproperty def _do_not_register_classes(cls): # Don't register this class since it's an abstract template classes = super()._do_not_register_classes classes.add("EnvironmentWrapper") return classes @classproperty def _cls_registry(cls): # Global robot registry global REGISTERED_ENV_WRAPPERS return REGISTERED_ENV_WRAPPERS