# ======================================================================
# File: StanfordVL/OmniGibson/omnigibson/controllers/osc_controller.py
# ======================================================================
import numpy as np
from numba import jit
import omnigibson.utils.transform_utils as T
from omnigibson.controllers import ControlType, ManipulationController
from omnigibson.utils.control_utils import orientation_error
from omnigibson.utils.processing_utils import MovingAverageFilter
from omnigibson.utils.python_utils import nums2array, assert_valid_key
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Different modes
OSC_MODE_COMMAND_DIMS = {
"absolute_pose": 6, # 6DOF (x,y,z,ax,ay,az) control of pose, whether both position and orientation is given in absolute coordinates
"pose_absolute_ori": 6, # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates
"pose_delta_ori": 6, # 6DOF (dx,dy,dz,dax,day,daz) control over pose
"position_fixed_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands being kept as fixed initial absolute orientation
"position_compliant_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands automatically being sent as 0s (so can drift over time)
}
OSC_MODES = set(OSC_MODE_COMMAND_DIMS.keys())
class OperationalSpaceController(ManipulationController):
"""
Controller class to convert (delta or absolute) EEF commands into joint efforts using Operational Space Control
This controller expects 6DOF delta commands (dx, dy, dz, dax, day, daz), where the delta orientation
commands are in axis-angle form, and outputs low-level torque commands.
Gains may also be considered part of the action space. In this case, the action space would be:
(
dx, dy, dz, dax, day, daz <-- 6DOF delta eef commands
[, kpx, kpy, kpz, kpax, kpay, kpaz] <-- kp gains
[, drx, dry, drz, drax, dray, draz] <-- damping ratio gains
[, kpnx, kpny, kpnz, kpnax, kpnay, kpnaz] <-- kp null gains
)
Note that in this case, we ASSUME that the inputted gains are normalized to be in the range [-1, 1], and will
be mapped appropriately to their respective ranges, as defined by the corresponding @kp_limits /
@damping_ratio_limits / @kp_null_limits arguments
Alternatively, parameters (in this case, kp or damping_ratio) can either be set during initialization or provided
from an external source; if the latter, the control_dict should include the respective parameter(s) as
a part of its keys
Each controller step consists of the following:
1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
2. Run OSC to back out joint efforts for a desired task frame command
3. Clips the resulting command by the motor (effort) limits
"""
def __init__(
self,
task_name,
control_freq,
reset_joint_pos,
control_limits,
dof_idx,
command_input_limits="default",
command_output_limits=((-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), (0.2, 0.2, 0.2, 0.5, 0.5, 0.5)),
kp=150.0,
kp_limits=(10.0, 300.),
damping_ratio=1.0,
damping_ratio_limits=(0.0, 2.0),
kp_null=10.0,
kp_null_limits=(0.0, 50.0),
mode="pose_delta_ori",
decouple_pos_ori=False,
workspace_pose_limiter=None,
):
"""
Args:
task_name (str): name assigned to this task frame for computing OSC control. During control calculations,
the inputted control_dict should include entries named <@task_name>_pos_relative and
<@task_name>_quat_relative. See self._command_to_control() for what these values should entail.
control_freq (int): controller loop frequency
reset_joint_pos (Array[float]): reset joint positions, used as part of the nullspace controller in OSC.
Note that this should correspond to ALL the joints; the exact indices will be extracted via @dof_idx
control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
control signal. Should specify per-dof type limits, i.e.:
"position": [[min], [max]]
"velocity": [[min], [max]]
"effort": [[min], [max]]
"has_limit": [...bool...]
Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
controller-relevant values during control computations
command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
If None, no clipping will be used. If "default", range will be set to (-1, 1)
command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max scaled command. If both this value and @command_input_limits are not None,
then all inputted command values will be scaled from the input range to the output range.
If either is None, no scaling will be used. If "default", then this range will automatically be set
to the @control_limits entry corresponding to self.control_type
kp (None, int, float, or array): Gain values to apply to 6DOF error.
If None, will be variable (part of action space)
kp_limits (2-array): (min, max) values of kp
damping_ratio (None, int, float, or array): Damping ratio to apply to 6DOF error controller gain
If None, will be variable (part of action space)
damping_ratio_limits (2-array): (min, max) values of damping ratio
kp_null (None, int, float, or array): Gain applied when calculating null torques
If None, will be variable (part of action space)
kp_null_limits (2-array): (min, max) values of kp_null
mode (str): mode to use when interpreting pose commands. In all but the absolute mode, position commands
are 3DOF delta (dx,dy,dz) cartesian values, relative to the robot base frame. Valid options are:
- "absolute_pose": 6DOF (x,y,z,ax,ay,az) control over pose,
where both position and orientation are given in absolute coordinates
- "pose_absolute_ori": 6DOF (dx,dy,dz,ax,ay,az) control over pose,
where the orientation is given in absolute axis-angle coordinates
- "pose_delta_ori": 6DOF (dx,dy,dz,dax,day,daz) control over pose
- "position_fixed_ori": 3DOF (dx,dy,dz) control over position,
with orientation commands being kept as fixed initial absolute orientation
- "position_compliant_ori": 3DOF (dx,dy,dz) control over position,
with orientation commands automatically being sent as 0s (so can drift over time)
decouple_pos_ori (bool): Whether to decouple position and orientation control or not
workspace_pose_limiter (None or function): if specified, callback method that should clip absolute
target (x,y,z) cartesian position and absolute quaternion orientation (x,y,z,w) to a specific workspace
range (i.e.: this can be unique to each robot, and implemented by each embodiment).
Function signature should be:
def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: Dict[str, Any]) -> Tuple[Array[float], Array[float]]
where target_pos is (x,y,z) cartesian position values, target_quat is (x,y,z,w) quaternion orientation
values, and the returned tuple is the processed (pos, quat) command.
"""
# Store arguments
control_dim = len(dof_idx)
# Store gains
self.kp = nums2array(nums=kp, dim=6, dtype=np.float32) if kp is not None else None
self.damping_ratio = damping_ratio
self.kp_null = nums2array(nums=kp_null, dim=control_dim, dtype=np.float32) if kp_null is not None else None
self.kd_null = 2 * np.sqrt(self.kp_null) if kp_null is not None else None # critically damped
self.kp_limits = np.array(kp_limits, dtype=np.float32)
self.damping_ratio_limits = np.array(damping_ratio_limits, dtype=np.float32)
self.kp_null_limits = np.array(kp_null_limits, dtype=np.float32)
# Store settings for whether we're learning gains or not
self.variable_kp = self.kp is None
self.variable_damping_ratio = self.damping_ratio is None
self.variable_kp_null = self.kp_null is None
# TODO: Add support for variable gains -- for now, just raise an error
assert not any((self.variable_kp, self.variable_damping_ratio, self.variable_kp_null)), \
"Variable gains with OSC is not supported yet!"
# If the mode is set as absolute orientation and using the default config,
# change input and output limits accordingly.
# By default, the input limits are set as (-1, 1), so we modify this to have a correct range.
# The output orientation limits are also set to values assuming delta commands, so those are updated too
assert_valid_key(key=mode, valid_keys=OSC_MODES, name="OSC mode")
self.mode = mode
if self.mode == "pose_absolute_ori":
if command_input_limits is not None:
if command_input_limits == "default":
command_input_limits = [
[-1.0, -1.0, -1.0, -np.pi, -np.pi, -np.pi],
[1.0, 1.0, 1.0, np.pi, np.pi, np.pi],
]
else:
command_input_limits[0][3:] = -np.pi
command_input_limits[1][3:] = np.pi
if command_output_limits is not None:
if command_output_limits == "default":
command_output_limits = [
[-1.0, -1.0, -1.0, -np.pi, -np.pi, -np.pi],
[1.0, 1.0, 1.0, np.pi, np.pi, np.pi],
]
else:
command_output_limits[0][3:] = -np.pi
command_output_limits[1][3:] = np.pi
is_input_limits_numeric = not (command_input_limits is None or isinstance(command_input_limits, str))
is_output_limits_numeric = not (command_output_limits is None or isinstance(command_output_limits, str))
command_input_limits = [nums2array(lim, dim=6, dtype=np.float32) for lim in command_input_limits] if is_input_limits_numeric else command_input_limits
command_output_limits = [nums2array(lim, dim=6, dtype=np.float32) for lim in command_output_limits] if is_output_limits_numeric else command_output_limits
# Modify input / output scaling based on whether we expect gains to be part of the action space
self._command_dim = OSC_MODE_COMMAND_DIMS[self.mode]
for variable_gain, gain_limits, dim in zip(
(self.variable_kp, self.variable_damping_ratio, self.variable_kp_null),
(self.kp_limits, self.damping_ratio_limits, self.kp_null_limits),
(6, 6, control_dim),
):
if variable_gain:
# Add this to input / output limits
if is_input_limits_numeric:
command_input_limits = [np.concatenate([lim, nums2array(nums=val, dim=dim, dtype=np.float32)]) for lim, val in zip(command_input_limits, (-1, 1))]
if is_output_limits_numeric:
command_output_limits = [np.concatenate([lim, nums2array(nums=val, dim=dim, dtype=np.float32)]) for lim, val in zip(command_output_limits, gain_limits)]
# Update command dim
self._command_dim += dim
# Other values
self.decouple_pos_ori = decouple_pos_ori
self.workspace_pose_limiter = workspace_pose_limiter
self.task_name = task_name
self.reset_joint_pos = reset_joint_pos[dof_idx].astype(np.float32)
# Other variables that will be filled in at runtime
self._fixed_quat_target = None
# Run super init
super().__init__(
control_freq=control_freq,
control_limits=control_limits,
dof_idx=dof_idx,
command_input_limits=command_input_limits,
command_output_limits=command_output_limits,
)
def reset(self):
# Call super first
super().reset()
# Clear internal variables
self._fixed_quat_target = None
self._clear_variable_gains()
def _load_state(self, state):
# Run super first
super()._load_state(state=state)
# If self._goal is populated, then set fixed_quat_target as well if the mode uses it
if self.mode == "position_fixed_ori" and self._goal is not None:
self._fixed_quat_target = self._goal["target_quat"]
def _clear_variable_gains(self):
"""
Helper function to clear any gains that are variable and considered part of actions
"""
if self.variable_kp:
self.kp = None
if self.variable_damping_ratio:
self.damping_ratio = None
if self.variable_kp_null:
self.kp_null = None
self.kd_null = None
def _update_variable_gains(self, gains):
"""
Helper function to update any gains that are variable and considered part of actions
Args:
gains (n-array): array where n dim is parsed based on which gains are being learned
"""
idx = 0
if self.variable_kp:
self.kp = gains[:, idx:idx + 6].astype(np.float32)
idx += 6
if self.variable_damping_ratio:
self.damping_ratio = gains[:, idx:idx + 6].astype(np.float32)
idx += 6
if self.variable_kp_null:
self.kp_null = gains[:, idx:idx + self.control_dim].astype(np.float32)
self.kd_null = 2 * np.sqrt(self.kp_null) # critically damped
idx += self.control_dim
def _update_goal(self, command, control_dict):
"""
Updates the internal goal (ee pos and ee ori mat) based on the inputted delta command
Args:
command (n-array): Preprocessed command
control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
states necessary for controller computation. Must include the following keys:
joint_position: Array of current joint positions
<@self.task_name>_pos_relative: (x,y,z) relative cartesian position of the desired task frame to
control, computed in its local frame (e.g.: robot base frame)
<@self.task_name>_quat_relative: (x,y,z,w) relative quaternion orientation of the desired task
frame to control, computed in its local frame (e.g.: robot base frame)
<@self.task_name>_lin_vel_relative: (x,y,z) relative linear velocity of the desired task frame to
control, computed in its local frame (e.g.: robot base frame)
<@self.task_name>_ang_vel_relative: (ax, ay, az) relative angular velocity of the desired task
frame to control, computed in its local frame (e.g.: robot base frame)
"""
# Grab important info from control dict
pos_relative = np.array(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = np.array(control_dict[f"{self.task_name}_quat_relative"])
# Convert position command to absolute values if needed
if self.mode == "absolute_pose":
target_pos = command[:3]
else:
dpos = command[:3]
target_pos = pos_relative + dpos
# Compute orientation
if self.mode == "position_fixed_ori":
# We need to grab the current robot orientation as the commanded orientation if there is none saved
if self._fixed_quat_target is None:
self._fixed_quat_target = quat_relative.astype(np.float32) \
if (self._goal is None) else self._goal["target_quat"]
target_quat = self._fixed_quat_target
elif self.mode == "position_compliant_ori":
# Target quat is simply the current robot orientation
target_quat = quat_relative
elif self.mode == "pose_absolute_ori" or self.mode == "absolute_pose":
# Received "delta" ori is in fact the desired absolute orientation
target_quat = T.axisangle2quat(command[3:6])
else: # pose_delta_ori control
# Grab dori and compute target ori
dori = T.quat2mat(T.axisangle2quat(command[3:6]))
target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative))
# Possibly limit to workspace if specified
if self.workspace_pose_limiter is not None:
target_pos, target_quat = self.workspace_pose_limiter(target_pos, target_quat, control_dict)
gains = None # TODO! command[OSC_MODE_COMMAND_DIMS[self.mode]:]
if gains is not None:
self._update_variable_gains(gains=gains)
# Set goals and return
return dict(
target_pos=target_pos.astype(np.float32),
target_ori_mat=T.quat2mat(target_quat).astype(np.float32),
)
def compute_control(self, goal_dict, control_dict):
"""
Computes low-level torque controls using internal eef goal pos / ori.
Args:
goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
goals necessary for controller computation. Must include the following keys:
target_pos: robot-frame (x,y,z) desired end effector position
target_ori_mat: robot-frame desired end effector orientation, expressed as a (3, 3) rotation matrix
control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
states necessary for controller computation. Must include the following keys:
joint_position: Array of current joint positions
joint_velocity: Array of current joint velocities
mass_matrix: (N_dof, N_dof) Current mass matrix
<@self.task_name>_jacobian_relative: (6, N_dof) Current jacobian matrix for desired task frame
<@self.task_name>_pos_relative: (x,y,z) relative cartesian position of the desired task frame to
control, computed in its local frame (e.g.: robot base frame)
<@self.task_name>_quat_relative: (x,y,z,w) relative quaternion orientation of the desired task
frame to control, computed in its local frame (e.g.: robot base frame)
<@self.task_name>_lin_vel_relative: (x,y,z) relative linear velocity of the desired task frame to
control, computed in its local frame (e.g.: robot base frame)
<@self.task_name>_ang_vel_relative: (ax, ay, az) relative angular velocity of the desired task
frame to control, computed in its local frame (e.g.: robot base frame)
Returns:
n-array: low-level effort control actions, NOT post-processed
"""
# TODO: Update to possibly grab parameters from dict
# For now, always use internal values
kp = self.kp
damping_ratio = self.damping_ratio
kd = 2 * np.sqrt(kp) * damping_ratio
# Extract relevant values from the control dict
dof_idxs_mat = tuple(np.meshgrid(self.dof_idx, self.dof_idx))
q = control_dict["joint_position"][self.dof_idx]
qd = control_dict["joint_velocity"][self.dof_idx]
mm = control_dict["mass_matrix"][dof_idxs_mat]
j_eef = control_dict[f"{self.task_name}_jacobian_relative"][:, self.dof_idx]
ee_pos = control_dict[f"{self.task_name}_pos_relative"]
ee_quat = control_dict[f"{self.task_name}_quat_relative"]
ee_vel = np.concatenate([control_dict[f"{self.task_name}_lin_vel_relative"], control_dict[f"{self.task_name}_ang_vel_relative"]])
base_lin_vel = control_dict["root_rel_lin_vel"]
base_ang_vel = control_dict["root_rel_ang_vel"]
# Calculate torques
u = _compute_osc_torques(
q=q,
qd=qd,
mm=mm,
j_eef=j_eef,
ee_pos=ee_pos.astype(np.float32),
ee_mat=T.quat2mat(ee_quat).astype(np.float32),
ee_vel=ee_vel.astype(np.float32),
goal_pos=goal_dict["target_pos"],
goal_ori_mat=goal_dict["target_ori_mat"],
kp=kp,
kd=kd,
kp_null=self.kp_null,
kd_null=self.kd_null,
rest_qpos=self.reset_joint_pos,
control_dim=self.control_dim,
decouple_pos_ori=self.decouple_pos_ori,
base_lin_vel=base_lin_vel.astype(np.float32),
base_ang_vel=base_ang_vel.astype(np.float32),
).flatten()
# Apply gravity compensation from the control dict
u += control_dict["gravity_force"][self.dof_idx] + control_dict["cc_force"][self.dof_idx]
# Return the control torques
return u
def compute_no_op_goal(self, control_dict):
# No-op is maintaining current pose
target_pos = np.array(control_dict[f"{self.task_name}_pos_relative"])
target_quat = np.array(control_dict[f"{self.task_name}_quat_relative"])
# Convert quat into eef ori mat
return dict(
target_pos=target_pos.astype(np.float32),
target_ori_mat=T.quat2mat(target_quat).astype(np.float32),
)
def _get_goal_shapes(self):
return dict(
target_pos=(3,),
target_ori_mat=(3, 3),
)
@property
def control_type(self):
return ControlType.EFFORT
@property
def command_dim(self):
return self._command_dim
# Use numba since faster
@jit(nopython=True)
def _compute_osc_torques(
q,
qd,
mm,
j_eef,
ee_pos,
ee_mat,
ee_vel,
goal_pos,
goal_ori_mat,
kp,
kd,
kp_null,
kd_null,
rest_qpos,
control_dim,
decouple_pos_ori,
base_lin_vel,
base_ang_vel,
):
# Compute the inverse
mm_inv = np.linalg.inv(mm)
# Calculate error
pos_err = goal_pos - ee_pos
ori_err = orientation_error(goal_ori_mat, ee_mat).astype(np.float32)
err = np.concatenate((pos_err, ori_err))
# Vel target is the base velocity as experienced by the end effector
# For angular velocity, this is just the base angular velocity
# For linear velocity, this is the base linear velocity PLUS the net linear velocity induced
# at the end effector by the base angular velocity
lin_vel_err = base_lin_vel + np.cross(base_ang_vel, ee_pos)
vel_err = np.concatenate((lin_vel_err, base_ang_vel)) - ee_vel
# Determine desired wrench
err = np.expand_dims(kp * err + kd * vel_err, axis=-1)
m_eef_inv = j_eef @ mm_inv @ j_eef.T
m_eef = np.linalg.inv(m_eef_inv)
if decouple_pos_ori:
# # More efficient, but numba doesn't support 3D tensor operations yet
# j_eef_batch = j_eef.reshape(2, 3, -1)
# m_eef_pose_inv = np.matmul(np.matmul(j_eef_batch, np.expand_dims(mm_inv, axis=0)), np.transpose(j_eef_batch, (0, 2, 1)))
# m_eef_pose = np.linalg.inv(m_eef_pose_inv) # Shape (2, 3, 3)
# wrench = np.matmul(m_eef_pose, err.reshape(2, 3, 1)).flatten()
m_eef_pos_inv = j_eef[:3, :] @ mm_inv @ j_eef[:3, :].T
m_eef_ori_inv = j_eef[3:, :] @ mm_inv @ j_eef[3:, :].T
m_eef_pos = np.linalg.inv(m_eef_pos_inv)
m_eef_ori = np.linalg.inv(m_eef_ori_inv)
wrench_pos = m_eef_pos @ err[:3, :]
wrench_ori = m_eef_ori @ err[3:, :]
wrench = np.concatenate((wrench_pos, wrench_ori))
else:
wrench = m_eef @ err
# Compute OSC torques
u = j_eef.T @ wrench
# Nullspace control torques `u_null` prevents large changes in joint configuration
# They are added into the nullspace of OSC so that the end effector orientation remains constant
# roboticsproceedings.org/rss07/p31.pdf
if rest_qpos is not None:
j_eef_inv = m_eef @ j_eef @ mm_inv
u_null = kd_null * -qd + kp_null * ((rest_qpos - q + np.pi) % (2 * np.pi) - np.pi)
u_null = mm @ np.expand_dims(u_null, axis=-1).astype(np.float32)
u += (np.eye(control_dim, dtype=np.float32) - j_eef.T @ j_eef_inv) @ u_null
return u
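# ----------------------------------------------------------------------
# Illustrative sketch (not part of the repo): the core operational-space
# math used by _compute_osc_torques above, rewritten standalone on a toy
# 7-DOF system. The Jacobian, mass matrix, gains, and errors below are
# made-up assumptions, not values from OmniGibson.
import numpy as np

def toy_osc_torques(q, qd, M, J, pose_err, vel_err,
                    kp=150.0, damping_ratio=1.0, kp_null=10.0, rest_q=None):
    n = len(q)
    kd = 2.0 * np.sqrt(kp) * damping_ratio           # critically damped when ratio == 1
    M_inv = np.linalg.inv(M)
    m_eef = np.linalg.inv(J @ M_inv @ J.T)           # task-space inertia (6, 6)
    wrench = m_eef @ (kp * pose_err + kd * vel_err)  # desired 6D wrench
    tau = J.T @ wrench                               # map wrench to joint torques
    if rest_q is not None:
        # Nullspace torques pull the joints toward a rest posture, projected
        # so they do not disturb the task-space wrench (cf. the code above)
        kd_null = 2.0 * np.sqrt(kp_null)
        j_eef_inv = m_eef @ J @ M_inv
        u_null = M @ (kp_null * (rest_q - q) - kd_null * qd)
        tau = tau + (np.eye(n) - J.T @ j_eef_inv) @ u_null
    return tau

rng = np.random.default_rng(0)
n = 7                                                # e.g. a 7-DOF arm
J = rng.standard_normal((6, n))                      # toy end-effector Jacobian
A = rng.standard_normal((n, n))
M = A @ A.T + n * np.eye(n)                          # symmetric positive-definite mass matrix
pose_err = np.array([0.05, 0.0, -0.02, 0.0, 0.1, 0.0])
tau = toy_osc_torques(np.zeros(n), np.zeros(n), M, J, pose_err, np.zeros(6), rest_q=np.zeros(n))
print(tau.shape)                                     # (7,)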
# ======================================================================
# File: StanfordVL/OmniGibson/omnigibson/controllers/null_joint_controller.py
# ======================================================================
import numpy as np
from omnigibson.controllers import JointController
class NullJointController(JointController):
"""
Dummy Controller class for a null-type of joint control (i.e.: no control or constant pass-through control).
This class has a zero-size command space, and returns an empty array for control if dof_idx is None,
else constant values as specified by @default_command (zeros if not specified)
"""
def __init__(
self,
control_freq,
motor_type,
control_limits,
dof_idx,
command_input_limits="default",
command_output_limits="default",
default_command=None,
kp=None,
damping_ratio=None,
use_impedances=False,
):
"""
Args:
control_freq (int): controller loop frequency
motor_type (str): type of motor being controlled, one of {position, velocity, effort}
control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
control signal. Should specify per-dof type limits, i.e.:
"position": [[min], [max]]
"velocity": [[min], [max]]
"effort": [[min], [max]]
"has_limit": [...bool...]
Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
controller-relevant values during control computations
command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
If None, no clipping will be used. If "default", range will be set to (-1, 1)
command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max scaled command. If both this value and @command_input_limits are not None,
then all inputted command values will be scaled from the input range to the output range.
If either is None, no scaling will be used. If "default", then this range will automatically be set
to the @control_limits entry corresponding to self.control_type
default_command (None or n-array): if specified, should be the same length as @dof_idx, specifying
the default control for this controller to output
kp (None or float): If @motor_type is "position" or "velocity" and @use_impedances=True, this is the
proportional gain applied to the joint controller. If None, a default value will be used.
damping_ratio (None or float): If @motor_type is "position" and @use_impedances=True, this is the
damping ratio applied to the joint controller. If None, a default value will be used.
use_impedances (bool): If True, will use impedances via the mass matrix to modify the desired efforts
applied
"""
# Store values
self._default_command = np.zeros(len(dof_idx)) if default_command is None else np.array(default_command)
# Run super init
super().__init__(
control_freq=control_freq,
motor_type=motor_type,
control_limits=control_limits,
dof_idx=dof_idx,
command_input_limits=command_input_limits,
command_output_limits=command_output_limits,
kp=kp,
damping_ratio=damping_ratio,
use_impedances=use_impedances,
use_delta_commands=False,
)
def compute_no_op_goal(self, control_dict):
# Set the goal to be internal stored default value
return dict(target=self._default_command)
def _preprocess_command(self, command):
# Override super and force the processed command to be internal stored default value
return np.array(self._default_command)
def update_default_goal(self, target):
"""
Updates the internal default command value.
Args:
target (n-array): New default command values to set for this controller.
Should be of dimension @command_dim
"""
assert len(target) == self.control_dim, \
f"Default control must be length: {self.control_dim}, got length: {len(target)}"
self._default_command = np.array(target)
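# ----------------------------------------------------------------------
# Illustrative sketch (not part of the repo): a minimal stand-in for the
# pass-through behavior above, showing how the incoming command is ignored
# in favor of the stored default. The class and values are assumptions.
import numpy as np

class ToyNullController:
    def __init__(self, default_command):
        self._default_command = np.array(default_command)

    def preprocess_command(self, command):
        # Like NullJointController._preprocess_command: drop the incoming
        # command entirely and return the stored default
        return np.array(self._default_command)

    def update_default_goal(self, target):
        assert len(target) == len(self._default_command)
        self._default_command = np.array(target)

c = ToyNullController(np.zeros(3))
print(c.preprocess_command(np.ones(3)))   # [0. 0. 0.] -- input is ignored
c.update_default_goal(np.array([0.1, 0.2, 0.3]))
print(c.preprocess_command(np.ones(3)))   # [0.1 0.2 0.3]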
# ======================================================================
# File: StanfordVL/OmniGibson/omnigibson/controllers/__init__.py
# ======================================================================
from omnigibson.controllers.controller_base import (
REGISTERED_CONTROLLERS,
REGISTERED_LOCOMOTION_CONTROLLERS,
REGISTERED_MANIPULATION_CONTROLLERS,
IsGraspingState,
ControlType,
LocomotionController,
ManipulationController,
GripperController,
)
from omnigibson.controllers.dd_controller import DifferentialDriveController
from omnigibson.controllers.osc_controller import OperationalSpaceController
from omnigibson.controllers.ik_controller import InverseKinematicsController
from omnigibson.controllers.joint_controller import JointController
from omnigibson.controllers.multi_finger_gripper_controller import MultiFingerGripperController
from omnigibson.controllers.null_joint_controller import NullJointController
from omnigibson.utils.python_utils import assert_valid_key
def create_controller(name, **kwargs):
"""
Creates a controller of type @name with corresponding necessary keyword arguments @kwargs
Args:
name (str): type of controller to use (e.g. JointController, InverseKinematicsController, etc.)
**kwargs: Any relevant keyword arguments to pass to the controller
Returns:
Controller: created controller
"""
assert_valid_key(key=name, valid_keys=REGISTERED_CONTROLLERS, name="controller")
controller_cls = REGISTERED_CONTROLLERS[name]
return controller_cls(**kwargs)
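# ----------------------------------------------------------------------
# Hedged usage sketch (not part of the repo): constructing a controller via
# the factory. The control_limits values are made up, and the JointController
# kwargs are assumptions inferred from the NullJointController signature
# shown earlier in this dump; the actual JointController may differ.
import numpy as np

n_dof = 7
control_limits = {
    "position": [-np.pi * np.ones(n_dof), np.pi * np.ones(n_dof)],
    "velocity": [-2.0 * np.ones(n_dof), 2.0 * np.ones(n_dof)],
    "effort": [-50.0 * np.ones(n_dof), 50.0 * np.ones(n_dof)],
    "has_limit": [True] * n_dof,
}
controller = create_controller(
    name="JointController",           # any key in REGISTERED_CONTROLLERS
    control_freq=60,
    motor_type="position",
    control_limits=control_limits,
    dof_idx=np.arange(n_dof),
    command_input_limits="default",   # commands expected in (-1, 1)
    use_delta_commands=False,
)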
# ======================================================================
# File: StanfordVL/OmniGibson/omnigibson/controllers/dd_controller.py
# ======================================================================
import numpy as np
from omnigibson.controllers import ControlType, LocomotionController
class DifferentialDriveController(LocomotionController):
"""
Differential drive (DD) controller for controlling two independently controlled wheeled joints.
Each controller step consists of the following:
1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
2. Convert desired (lin_vel, ang_vel) command into (left, right) wheel joint velocity control signals
3. Clips the resulting command by the joint velocity limits
"""
def __init__(
self,
wheel_radius,
wheel_axle_length,
control_freq,
control_limits,
dof_idx,
command_input_limits="default",
command_output_limits="default",
):
"""
Args:
wheel_radius (float): radius of the wheels (both assumed to be same radius)
wheel_axle_length (float): perpendicular distance between the two wheels
control_freq (int): controller loop frequency
control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
control signal. Should specify per-dof type limits, i.e.:
"position": [[min], [max]]
"velocity": [[min], [max]]
"effort": [[min], [max]]
"has_limit": [...bool...]
Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
controller-relevant values during control computations
command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
If None, no clipping will be used. If "default", range will be set to (-1, 1)
command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max scaled command. If both this value and @command_input_limits are not None,
then all inputted command values will be scaled from the input range to the output range.
If either is None, no scaling will be used. If "default", then this range will automatically be set
to the maximum linear and angular velocities calculated from @wheel_radius, @wheel_axle_length, and
@control_limits velocity limits entry
"""
# Store internal variables
self._wheel_radius = wheel_radius
self._wheel_axle_halflength = wheel_axle_length / 2.0
# If we're using default command output limits, map this to maximum linear / angular velocities
if command_output_limits == "default":
min_vels = control_limits["velocity"][0][dof_idx]
assert (
min_vels[0] == min_vels[1]
), "Differential drive requires both wheel joints to have same min velocities!"
max_vels = control_limits["velocity"][1][dof_idx]
assert (
max_vels[0] == max_vels[1]
), "Differential drive requires both wheel joints to have same max velocities!"
assert abs(min_vels[0]) == abs(
max_vels[0]
), "Differential drive requires both wheel joints to have same min and max absolute velocities!"
max_lin_vel = max_vels[0] * wheel_radius
max_ang_vel = max_lin_vel * 2.0 / wheel_axle_length
command_output_limits = ((-max_lin_vel, -max_ang_vel), (max_lin_vel, max_ang_vel))
# Run super init
super().__init__(
control_freq=control_freq,
control_limits=control_limits,
dof_idx=dof_idx,
command_input_limits=command_input_limits,
command_output_limits=command_output_limits,
)
def _update_goal(self, command, control_dict):
# Directly store command as the velocity goal
return dict(vel=command)
def compute_control(self, goal_dict, control_dict):
"""
Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal.
This processes converts the desired (lin_vel, ang_vel) command into (left, right) wheel joint velocity control
signals.
Args:
goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
goals necessary for controller computation. Must include the following keys:
vel: desired (lin_vel, ang_vel) of the controlled body
control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
states necessary for controller computation. Must include the following keys:
Returns:
Array[float]: outputted (non-clipped!) velocity control signal to deploy
to the [left, right] wheel joints
"""
lin_vel, ang_vel = goal_dict["vel"]
# Convert to wheel velocities
left_wheel_joint_vel = (lin_vel - ang_vel * self._wheel_axle_halflength) / self._wheel_radius
right_wheel_joint_vel = (lin_vel + ang_vel * self._wheel_axle_halflength) / self._wheel_radius
# Return desired velocities
return np.array([left_wheel_joint_vel, right_wheel_joint_vel])
def compute_no_op_goal(self, control_dict):
# This is zero-vector, since we want zero linear / angular velocity
return dict(vel=np.zeros(2))
def _get_goal_shapes(self):
# Add (2, )-array representing linear, angular velocity
return dict(vel=(2,))
@property
def control_type(self):
return ControlType.VELOCITY
@property
def command_dim(self):
# [lin_vel, ang_vel]
return 2
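# ----------------------------------------------------------------------
# Illustrative sketch (not part of the repo): the wheel-velocity map in
# compute_control above, checked numerically with made-up wheel geometry.
import numpy as np

wheel_radius = 0.1        # m (assumed)
wheel_axle_length = 0.4   # m (assumed)
half_axle = wheel_axle_length / 2.0

def wheel_vels(lin_vel, ang_vel):
    # Standard differential-drive inverse kinematics, as above
    left = (lin_vel - ang_vel * half_axle) / wheel_radius
    right = (lin_vel + ang_vel * half_axle) / wheel_radius
    return np.array([left, right])

print(wheel_vels(0.5, 0.0))   # [5. 5.]  -- pure forward motion, both wheels at v / r
print(wheel_vels(0.0, 1.0))   # [-2. 2.] -- pure rotation in place, wheels counter-rotate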
# ======================================================================
# File: StanfordVL/OmniGibson/omnigibson/controllers/multi_finger_gripper_controller.py
# ======================================================================
import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.controllers import IsGraspingState, ControlType, GripperController
from omnigibson.utils.python_utils import assert_valid_key
VALID_MODES = {
"binary",
"smooth",
"independent",
}
# Create settings for this module
m = create_module_macros(module_path=__file__)
# is_grasping heuristics parameters
m.POS_TOLERANCE = 0.002 # arbitrary heuristic
m.VEL_TOLERANCE = 0.01 # arbitrary heuristic
class MultiFingerGripperController(GripperController):
"""
Controller class for multi finger gripper control. This either interprets an input as a binary
command (open / close), continuous command (open / close with scaled velocities), or per-joint continuous command
Each controller step consists of the following:
1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
2a. Convert command into gripper joint control signals
2b. Clips the resulting control by the motor limits
"""
def __init__(
self,
control_freq,
motor_type,
control_limits,
dof_idx,
command_input_limits="default",
command_output_limits="default",
inverted=False,
mode="binary",
open_qpos=None,
closed_qpos=None,
limit_tolerance=0.001,
):
"""
Args:
control_freq (int): controller loop frequency
motor_type (str): type of motor being controlled, one of {position, velocity, effort}
control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
control signal. Should specify per-dof type limits, i.e.:
"position": [[min], [max]]
"velocity": [[min], [max]]
"effort": [[min], [max]]
"has_limit": [...bool...]
Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
controller-relevant values during control computations
command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
If None, no clipping will be used. If "default", range will be set to (-1, 1)
command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max scaled command. If both this value and @command_input_limits are not None,
then all inputted command values will be scaled from the input range to the output range.
If either is None, no scaling will be used. If "default", then this range will automatically be set
to the @control_limits entry corresponding to self.control_type
inverted (bool): whether or not the command direction (grasp is negative) and the control direction are
inverted, e.g. to grasp you need to move the joint in the positive direction.
mode (str): mode for this controller. Valid options are:
"binary": 1D command, if preprocessed value > 0 is interpreted as an max open
(send max pos / vel / tor signal), otherwise send max close control signals
"smooth": 1D command, sends symmetric signal to both finger joints equal to the preprocessed commands
"independent": 2D command, sends independent signals to each finger joint equal to the preprocessed command
open_qpos (None or Array[float]): If specified, the joint positions representing a fully-opened gripper.
This allows representing the open state as a partially opened gripper, rather than the fully
opened gripper. If None, will simply use the native joint limits of the gripper joints. Only relevant
if using @mode=binary and @motor_type=position
closed_qpos (None or Array[float]): If specified, the joint positions representing a fully-closed gripper.
This allows representing the closed state as a partially closed gripper, rather than the fully
closed gripper. If None, will simply use the native joint limits of the gripper joints. Only relevant
if using @mode=binary and @motor_type=position
limit_tolerance (float): sets the tolerance from the joint limit ends, below which controls will be zeroed
out if the control is using velocity or torque control
"""
# Store arguments
assert_valid_key(key=motor_type.lower(), valid_keys=ControlType.VALID_TYPES_STR, name="motor_type")
self._motor_type = motor_type.lower()
assert_valid_key(key=mode, valid_keys=VALID_MODES, name="mode for multi finger gripper")
self._inverted = inverted
self._mode = mode
self._limit_tolerance = limit_tolerance
self._open_qpos = open_qpos if open_qpos is None else np.array(open_qpos)
self._closed_qpos = closed_qpos if closed_qpos is None else np.array(closed_qpos)
# Create other args to be filled in at runtime
self._is_grasping = IsGraspingState.FALSE
# If we're using binary signal, we override the command output limits
if mode == "binary":
command_output_limits = (-1.0, 1.0)
# Run super init
super().__init__(
control_freq=control_freq,
control_limits=control_limits,
dof_idx=dof_idx,
command_input_limits=command_input_limits,
command_output_limits=command_output_limits,
)
def reset(self):
# Call super first
super().reset()
# reset grasping state
self._is_grasping = IsGraspingState.FALSE
def _preprocess_command(self, command):
# We extend this method to make sure command is always 2D
if self._mode != "independent":
command = (
np.array([command] * self.command_dim)
if type(command) in {int, float}
else np.array([command[0]] * self.command_dim)
)
# Flip the command if the direction is inverted.
if self._inverted:
command = self._command_input_limits[1] - (command - self._command_input_limits[0])
# Return from super method
return super()._preprocess_command(command=command)
def _update_goal(self, command, control_dict):
# Directly store command as the goal
return dict(target=command)
def compute_control(self, goal_dict, control_dict):
"""
Converts the (already preprocessed) inputted @command into deployable (non-clipped!) gripper
joint control signal
Args:
goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
goals necessary for controller computation. Must include the following keys:
target: desired gripper target
control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
states necessary for controller computation. Must include the following keys:
joint_position: Array of current joint positions
joint_velocity: Array of current joint velocities
Returns:
Array[float]: outputted (non-clipped!) control signal to deploy
"""
target = goal_dict["target"]
joint_pos = control_dict["joint_position"][self.dof_idx]
# Choose what to do based on control mode
if self._mode == "binary":
# Use max control signal
if target[0] >= 0.0:
u = self._control_limits[ControlType.get_type(self._motor_type)][1][self.dof_idx] \
if self._open_qpos is None else self._open_qpos
else:
u = self._control_limits[ControlType.get_type(self._motor_type)][0][self.dof_idx] \
if self._closed_qpos is None else self._closed_qpos
else:
# Use continuous signal
u = target
# If we're near the joint limits and we're using velocity / effort control, we zero out the action
if self._motor_type in {"velocity", "effort"}:
violate_upper_limit = (
joint_pos > self._control_limits[ControlType.POSITION][1][self.dof_idx] - self._limit_tolerance
)
violate_lower_limit = (
joint_pos < self._control_limits[ControlType.POSITION][0][self.dof_idx] + self._limit_tolerance
)
violation = np.logical_or(violate_upper_limit * (u > 0), violate_lower_limit * (u < 0))
u *= ~violation
# Update whether we're grasping or not
self._update_grasping_state(control_dict=control_dict)
# Return control
return u
def _update_grasping_state(self, control_dict):
"""
Updates internal inferred grasping state of the gripper being controlled by this gripper controller
Args:
control_dict (dict): dictionary that should include any relevant keyword-mapped
states necessary for controller computation. Must include the following keys:
joint_position: Array of current joint positions
joint_velocity: Array of current joint velocities
"""
# Calculate grasping state based on mode of this controller
# Independent mode of MultiFingerGripperController does not have any good heuristics to determine is_grasping
if self._mode == "independent":
is_grasping = IsGraspingState.UNKNOWN
# No control has been issued before -- we assume not grasping
elif self._control is None:
is_grasping = IsGraspingState.FALSE
else:
assert np.all(
self._control == self._control[0]
), f"MultiFingerGripperController has different values in the command for non-independent mode: {self._control}"
assert m.POS_TOLERANCE > self._limit_tolerance, (
"Joint position tolerance for is_grasping heuristics checking is smaller than or equal to the "
"gripper controller's tolerance of zero-ing out velocities, which makes the heuristics invalid."
)
finger_pos = control_dict["joint_position"][self.dof_idx]
# For joint position control, if the desired positions are the same as the current positions, is_grasping unknown
if (
self._motor_type == "position"
and np.mean(np.abs(finger_pos - self._control)) < m.POS_TOLERANCE
):
is_grasping = IsGraspingState.UNKNOWN
# For joint velocity / effort control, if the desired velocities / efforts are zeros, is_grasping unknown
elif (
self._motor_type in {"velocity", "effort"}
and np.mean(np.abs(self._control)) < m.VEL_TOLERANCE
):
is_grasping = IsGraspingState.UNKNOWN
# Otherwise, the last control signal intends to "move" the gripper
else:
finger_vel = control_dict["joint_velocity"][self.dof_idx]
min_pos = self._control_limits[ControlType.POSITION][0][self.dof_idx]
max_pos = self._control_limits[ControlType.POSITION][1][self.dof_idx]
# Make sure we don't have any invalid values (i.e.: fingers should be within the limits)
assert np.all(
(min_pos <= finger_pos) * (finger_pos <= max_pos)
), f"Got invalid finger joint positions when checking for grasp! " \
f"min: {min_pos}, max: {max_pos}, finger_pos: {finger_pos}"
# Check distance from both ends of the joint limits
dist_from_lower_limit = finger_pos - min_pos
dist_from_upper_limit = max_pos - finger_pos
# If the joint positions are not near the joint limits with some tolerance (m.POS_TOLERANCE)
valid_grasp_pos = (
np.mean(dist_from_lower_limit) > m.POS_TOLERANCE
and np.mean(dist_from_upper_limit) > m.POS_TOLERANCE
)
# And the joint velocities are close to zero with some tolerance (m.VEL_TOLERANCE)
valid_grasp_vel = np.all(np.abs(finger_vel) < m.VEL_TOLERANCE)
# Then the gripper is grasping something, which stops the gripper from reaching its desired state
is_grasping = (
IsGraspingState.TRUE if valid_grasp_pos and valid_grasp_vel else IsGraspingState.FALSE
)
# Store calculated state
self._is_grasping = is_grasping
def compute_no_op_goal(self, control_dict):
# Just use a zero vector
return dict(target=np.zeros(self.command_dim))
def _get_goal_shapes(self):
return dict(target=(self.command_dim,))
def is_grasping(self):
# Return cached value
return self._is_grasping
@property
def control_type(self):
return ControlType.get_type(type_str=self._motor_type)
@property
def command_dim(self):
return len(self.dof_idx) if self._mode == "independent" else 1
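# ----------------------------------------------------------------------
# Illustrative sketch (not part of the repo): the position/velocity grasp
# heuristic from _update_grasping_state above as a standalone function.
# The tolerances mirror m.POS_TOLERANCE / m.VEL_TOLERANCE; the joint values
# below are made up.
import numpy as np

POS_TOL = 0.002
VEL_TOL = 0.01

def heuristic_is_grasping(finger_pos, finger_vel, min_pos, max_pos):
    # Fingers stalled away from both joint limits => likely pinching an object
    away_from_limits = (
        np.mean(finger_pos - min_pos) > POS_TOL
        and np.mean(max_pos - finger_pos) > POS_TOL
    )
    stalled = np.all(np.abs(finger_vel) < VEL_TOL)
    return away_from_limits and stalled

min_pos, max_pos = np.zeros(2), np.array([0.04, 0.04])
print(heuristic_is_grasping(np.array([0.01, 0.011]), np.zeros(2), min_pos, max_pos))  # True
print(heuristic_is_grasping(np.array([0.0, 0.0]), np.zeros(2), min_pos, max_pos))     # False: at limit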
# ======================================================================
# File: StanfordVL/OmniGibson/omnigibson/controllers/controller_base.py
# ======================================================================
from collections.abc import Iterable
from enum import IntEnum
import numpy as np
from omnigibson.utils.python_utils import classproperty, assert_valid_key, Serializable, Registerable, Recreatable
# Global dicts that will contain mappings
REGISTERED_CONTROLLERS = dict()
REGISTERED_LOCOMOTION_CONTROLLERS = dict()
REGISTERED_MANIPULATION_CONTROLLERS = dict()
REGISTERED_GRIPPER_CONTROLLERS = dict()
def register_locomotion_controller(cls):
if cls.__name__ not in REGISTERED_LOCOMOTION_CONTROLLERS:
REGISTERED_LOCOMOTION_CONTROLLERS[cls.__name__] = cls
def register_manipulation_controller(cls):
if cls.__name__ not in REGISTERED_MANIPULATION_CONTROLLERS:
REGISTERED_MANIPULATION_CONTROLLERS[cls.__name__] = cls
def register_gripper_controller(cls):
if cls.__name__ not in REGISTERED_GRIPPER_CONTROLLERS:
REGISTERED_GRIPPER_CONTROLLERS[cls.__name__] = cls
class IsGraspingState(IntEnum):
TRUE = 1
UNKNOWN = 0
FALSE = -1
# Define macros
class ControlType:
NONE = -1
POSITION = 0
VELOCITY = 1
EFFORT = 2
_MAPPING = {
"none": NONE,
"position": POSITION,
"velocity": VELOCITY,
"effort": EFFORT,
}
VALID_TYPES = set(_MAPPING.values())
VALID_TYPES_STR = set(_MAPPING.keys())
@classmethod
def get_type(cls, type_str):
"""
Args:
type_str (str): One of "position", "velocity", or "effort" (any case), and maps it
to the corresponding type
Returns:
ControlType: control type corresponding to the associated string
"""
assert_valid_key(key=type_str.lower(), valid_keys=cls._MAPPING, name="control type")
return cls._MAPPING[type_str.lower()]
class BaseController(Serializable, Registerable, Recreatable):
"""
An abstract class with interface for mapping specific types of commands to deployable control signals.
"""
def __init__(
self,
control_freq,
control_limits,
dof_idx,
command_input_limits="default",
command_output_limits="default",
):
"""
Args:
control_freq (int): controller loop frequency
control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
control signal. Should specify per-dof type limits, i.e.:
"position": [[min], [max]]
"velocity": [[min], [max]]
"effort": [[min], [max]]
"has_limit": [...bool...]
Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
controller-relevant values during control computations
command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
If None, no clipping will be used. If "default", range will be set to (-1, 1)
command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max scaled command. If both this value and @command_input_limits are not None,
then all inputted command values will be scaled from the input range to the output range.
If either is None, no scaling will be used. If "default", then this range will automatically be set
to the @control_limits entry corresponding to self.control_type
"""
# Store arguments
self._control_freq = control_freq
self._control_limits = {}
for motor_type in {"position", "velocity", "effort"}:
if motor_type not in control_limits:
continue
self._control_limits[ControlType.get_type(motor_type)] = [
np.array(control_limits[motor_type][0]),
np.array(control_limits[motor_type][1]),
]
assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist."
self._dof_has_limits = control_limits["has_limit"]
self._dof_idx = np.array(dof_idx, dtype=int)
# Generate goal information
self._goal_shapes = self._get_goal_shapes()
self._goal_dim = int(np.sum([np.prod(shape) for shape in self._goal_shapes.values()]))
# Initialize some other variables that will be filled in during runtime
self._control = None
self._goal = None
self._command_scale_factor = None
self._command_output_transform = None
self._command_input_transform = None
# Standardize command input / output limits to be (min_array, max_array)
command_input_limits = (-1.0, 1.0) if command_input_limits == "default" else command_input_limits
command_output_limits = (
(
np.array(self._control_limits[self.control_type][0])[self.dof_idx],
np.array(self._control_limits[self.control_type][1])[self.dof_idx],
)
if command_output_limits == "default"
else command_output_limits
)
self._command_input_limits = (
None
if command_input_limits is None
else (
self.nums2array(command_input_limits[0], self.command_dim),
self.nums2array(command_input_limits[1], self.command_dim),
)
)
self._command_output_limits = (
None
if command_output_limits is None
else (
self.nums2array(command_output_limits[0], self.command_dim),
self.nums2array(command_output_limits[1], self.command_dim),
)
)
def _preprocess_command(self, command):
"""
Clips + scales inputted @command according to self.command_input_limits and self.command_output_limits.
If self.command_input_limits is None, then no clipping will occur. If either self.command_input_limits
or self.command_output_limits is None, then no scaling will occur.
Args:
command (Array[float] or float): Inputted command vector
Returns:
Array[float]: Processed command vector
"""
# Make sure command is a np.array
command = np.array([command]) if type(command) in {int, float} else np.array(command)
# We only clip and / or scale if self.command_input_limits exists
if self._command_input_limits is not None:
# Clip
command = command.clip(*self._command_input_limits)
if self._command_output_limits is not None:
# If we haven't calculated how to scale the command, do that now (once)
if self._command_scale_factor is None:
self._command_scale_factor = abs(
self._command_output_limits[1] - self._command_output_limits[0]
) / abs(self._command_input_limits[1] - self._command_input_limits[0])
self._command_output_transform = (
self._command_output_limits[1] + self._command_output_limits[0]
) / 2.0
self._command_input_transform = (self._command_input_limits[1] + self._command_input_limits[0]) / 2.0
# Scale command
command = (
command - self._command_input_transform
) * self._command_scale_factor + self._command_output_transform
# Return processed command
return command
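# Worked numeric example (illustrative addition, not in the original source):
# with command_input_limits = (-1, 1) and command_output_limits = (-0.2, 0.2),
# the scale factor is |0.2 - (-0.2)| / |1 - (-1)| = 0.2 and both midpoints are
# 0, so a command of 0.5 maps to (0.5 - 0) * 0.2 + 0 = 0.1, while a command
# of 2.0 is first clipped to 1.0 and then maps to 0.2.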
def update_goal(self, command, control_dict):
"""
Updates inputted @command internally, writing any necessary internal variables as needed.
Args:
command (Array[float]): inputted command to preprocess and extract relevant goal(s) to store
internally in this controller
control_dict (dict): Current state
"""
# Sanity check the command
assert len(command) == self.command_dim, \
f"Commands must be dimension {self.command_dim}, got dim {len(command)} instead."
# Preprocess and run internal command
self._goal = self._update_goal(command=self._preprocess_command(np.array(command)), control_dict=control_dict)
def _update_goal(self, command, control_dict):
"""
Updates inputted @command internally, writing any necessary internal variables as needed.
Args:
command (Array[float]): inputted (preprocessed!) command and extract relevant goal(s) to store
internally in this controller
control_dict (dict): Current state
Returns:
dict: Keyword-mapped goals to store internally in this controller
"""
raise NotImplementedError
def compute_control(self, goal_dict, control_dict):
"""
Converts the (already preprocessed) inputted @command into deployable (non-clipped!) control signal.
Should be implemented by subclass.
Args:
goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
goals necessary for controller computation
control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
states necessary for controller computation
Returns:
Array[float]: outputted (non-clipped!) control signal to deploy
"""
raise NotImplementedError
def clip_control(self, control):
"""
Clips the inputted @control signal based on @control_limits.
Args:
control (Array[float]): control signal to clip
Returns:
Array[float]: Clipped control signal
"""
clipped_control = control.clip(
self._control_limits[self.control_type][0][self.dof_idx],
self._control_limits[self.control_type][1][self.dof_idx],
)
idx = (
self._dof_has_limits[self.dof_idx]
if self.control_type == ControlType.POSITION
else [True] * self.control_dim
)
control[idx] = clipped_control[idx]
return control
def step(self, control_dict):
"""
Take a controller step.
Args:
control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
states necessary for controller computation
Returns:
Array[float]: numpy array of outputted control signals
"""
# Generate no-op goal if not specified
if self._goal is None:
self._goal = self.compute_no_op_goal(control_dict=control_dict)
# Compute control, then clip and return
control = self.compute_control(goal_dict=self._goal, control_dict=control_dict)
self._control = self.clip_control(control=control)
return self._control
def reset(self):
"""
Resets this controller. Can be extended by subclass
"""
self._goal = None
def compute_no_op_goal(self, control_dict):
"""
Compute no-op goal given the current state @control_dict
Args:
control_dict (dict): Current state
Returns:
dict: Maps relevant goal keys (from self._goal_shapes.keys()) to relevant goal data to be used
in controller computations
"""
raise NotImplementedError
def _dump_state(self):
# Default is just the command
return dict(
goal_is_valid=self._goal is not None,
goal=self._goal,
)
def _load_state(self, state):
# Make sure every entry in goal is a numpy array
# Load goal
self._goal = None if state["goal"] is None else {name: np.array(goal_state) for name, goal_state in state["goal"].items()}
def _serialize(self, state):
# Make sure size of the state is consistent, even if we have no goal
goal_state_flattened = np.concatenate(
[goal_state.flatten() for goal_state in self._goal.values()]
) if state["goal_is_valid"] else np.zeros(self.goal_dim)
return np.concatenate([
[state["goal_is_valid"]],
goal_state_flattened,
])
def _deserialize(self, state):
goal_is_valid = bool(state[0])
if goal_is_valid:
# Un-flatten all the keys
idx = 1
goal = dict()
for key, shape in self._goal_shapes.items():
length = np.prod(shape)
goal[key] = state[idx:idx+length].reshape(shape)
idx += length
else:
goal = None
state_dict = dict(
goal_is_valid=goal_is_valid,
goal=goal,
)
return state_dict, self.goal_dim + 1
def _get_goal_shapes(self):
"""
Returns:
dict: Maps keyword in @self.goal to its corresponding numerical shape. This should be static
and analytically computed prior to any controller steps being taken
"""
raise NotImplementedError
@staticmethod
def nums2array(nums, dim):
"""
Convert input @nums into numpy array of length @dim. If @nums is a single number, broadcasts it to the
corresponding dimension size @dim before converting into a numpy array
Args:
nums (numeric or Iterable): Either single value or array of numbers
dim (int): Size of array to broadcast input to
Returns:
np.array: Array filled with values specified in @nums
"""
# First run sanity check to make sure no strings are being inputted
if isinstance(nums, str):
raise TypeError("Error: Only numeric inputs are supported for this function, nums2array!")
# Check if input is an Iterable, if so, we simply convert the input to np.array and return
# Else, input is a single value, so we map to a numpy array of correct size and return
return np.array(nums) if isinstance(nums, Iterable) else np.ones(dim) * nums
@property
def state_size(self):
# Default is goal dim + 1 (for whether the goal is valid or not)
return self.goal_dim + 1
@property
def goal(self):
"""
Returns:
dict: Current goal for this controller. Maps relevant goal keys to goal values to be
used during controller step computations
"""
return self._goal
@property
def goal_dim(self):
"""
Returns:
int: Expected size of flattened, internal goals
"""
return self._goal_dim
@property
def control(self):
"""
Returns:
n-array: Array of most recent controls deployed by this controller
"""
return self._control
@property
def control_freq(self):
"""
Returns:
float: Control frequency (Hz) of this controller
"""
return self._control_freq
@property
def control_dim(self):
"""
Returns:
int: Expected size of outputted controls
"""
return len(self.dof_idx)
@property
def control_type(self):
"""
Returns:
ControlType: Type of control returned by this controller
"""
raise NotImplementedError
@property
def command_input_limits(self):
"""
Returns:
None or 2-tuple: If specified, returns (min, max) command input limits for this controller, where
@min and @max are numpy float arrays of length self.command_dim. Otherwise, returns None
"""
return self._command_input_limits
@property
def command_output_limits(self):
"""
Returns:
None or 2-tuple: If specified, returns (min, max) command output limits for this controller, where
@min and @max are numpy float arrays of length self.command_dim. Otherwise, returns None
"""
return self._command_output_limits
@property
def command_dim(self):
"""
Returns:
int: Expected size of inputted commands
"""
raise NotImplementedError
@property
def dof_idx(self):
"""
Returns:
Array[int]: DOF indices corresponding to the specific DOFs being controlled by this robot
"""
return np.array(self._dof_idx)
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("BaseController")
return classes
@classproperty
def _cls_registry(cls):
# Global registry
global REGISTERED_CONTROLLERS
return REGISTERED_CONTROLLERS
class LocomotionController(BaseController):
"""
Controller to control locomotion. All implemented controllers that encompass locomotion capabilities should extend
from this class.
"""
def __init_subclass__(cls, **kwargs):
# Register as part of locomotion controllers
super().__init_subclass__(**kwargs)
register_locomotion_controller(cls)
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("LocomotionController")
return classes
class ManipulationController(BaseController):
"""
Controller to control manipulation. All implemented controllers that encompass manipulation capabilities
should extend from this class.
"""
def __init_subclass__(cls, **kwargs):
# Register as part of manipulation controllers
super().__init_subclass__(**kwargs)
register_manipulation_controller(cls)
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("ManipulationController")
return classes
class GripperController(BaseController):
"""
Controller to control a gripper. All implemented controllers that encompass gripper capabilities
should extend from this class.
"""
def __init_subclass__(cls, **kwargs):
# Register as part of gripper controllers
super().__init_subclass__(**kwargs)
register_gripper_controller(cls)
def is_grasping(self):
"""
Checks whether the current state of this gripper being controlled is in a grasping state.
Should be implemented by subclass.
Returns:
IsGraspingState: Grasping state of gripper
"""
raise NotImplementedError()
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("GripperController")
return classes
# file: StanfordVL/OmniGibson/omnigibson/systems/macro_particle_system.py
import os
import matplotlib.pyplot as plt
import trimesh
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm, create_module_macros
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.systems.system_base import BaseSystem, VisualParticleSystem, PhysicalParticleSystem, REGISTERED_SYSTEMS
from omnigibson.utils.constants import PrimType
from omnigibson.utils.python_utils import classproperty, subclass_factory, snake_case_to_camel_case
from omnigibson.utils.sampling_utils import sample_cuboid_on_object_symmetric_bimodal_distribution
import omnigibson.utils.transform_utils as T
from omnigibson.utils.usd_utils import FlatcacheAPI
from omnigibson.prims.geom_prim import VisualGeomPrim, CollisionVisualGeomPrim
import numpy as np
from scipy.spatial.transform import Rotation as R
from omnigibson.utils.ui_utils import create_module_logger, suppress_omni_log
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.MIN_PARTICLE_RADIUS = 0.01 # Minimum particle radius for physical macro particles -- this reduces the chance of omni physx crashing
class MacroParticleSystem(BaseSystem):
"""
Global system for modeling "macro" level particles, e.g.: dirt, dust, etc.
"""
# Template object to use -- class particle object is assumed to be the first and only visual mesh belonging to the
# root link of this template object, which symbolizes a single particle, and will be duplicated to generate the
# particle system. Note that this object is NOT part of the actual particle system itself!
_particle_template = None
# dict, array of particle objects, mapped by their prim names
particles = None
# Counter to increment monotonically as we add more particles
_particle_counter = None
# Color associated with this system (NOTE: external queries should call cls.color)
_color = None
@classmethod
def initialize(cls):
# Run super method first
super().initialize()
# Initialize mutable class variables so they don't automatically get overridden by children classes
cls.particles = dict()
cls._particle_counter = 0
# Create the system prim -- this is merely a scope prim
og.sim.stage.DefinePrim(f"/World/{cls.name}", "Scope")
# Load the particle template, and make it kinematic only because it's not interacting with anything
particle_template = cls._create_particle_template()
og.sim.import_object(obj=particle_template, register=False)
# Make sure template scaling is [1, 1, 1] -- any particle scaling should be done via cls.min/max_scale
assert np.all(particle_template.scale == 1.0)
# Make sure there is no ambiguity about which mesh to use as the particle from this template
assert len(particle_template.links) == 1, "MacroParticleSystem particle template has more than one link"
assert len(particle_template.root_link.visual_meshes) == 1, "MacroParticleSystem particle template has more than one visual mesh"
cls._particle_template = particle_template
# Class particle object is assumed to be the first and only visual mesh belonging to the root link
cls.particle_object.material.shader_force_populate(render=True)
cls.process_particle_object()
@classproperty
def particle_object(cls):
return list(cls._particle_template.root_link.visual_meshes.values())[0]
@classproperty
def particle_idns(cls):
"""
Returns:
set: idn of all the particles across all groups.
"""
return {cls.particle_name2idn(particle_name) for particle_name in cls.particles}
@classproperty
def next_available_particle_idn(cls):
"""
Returns:
int: the next available particle idn across all groups.
"""
return cls._particle_counter
@classmethod
def _create_particle_template(cls):
"""
Creates the particle template to be used for this system.
NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
visual mesh attached to its root link, since this will be the actual visual mesh used
Returns:
EntityPrim: Particle template that will be duplicated when generating future particle groups
"""
raise NotImplementedError()
@classmethod
def remove_all_particles(cls):
# Use list explicitly to prevent mid-loop mutation of dict
for particle_name in tuple(cls.particles.keys()):
cls.remove_particle_by_name(name=particle_name)
@classmethod
def reset(cls):
# Call super first
super().reset()
# Reset the particle counter
cls._particle_counter = 0
@classmethod
def _clear(cls):
# Clear all internal state
og.sim.remove_object(cls._particle_template)
super()._clear()
cls._particle_template = None
cls.particles = None
cls._color = None
@classproperty
def n_particles(cls):
return len(cls.particles)
@classproperty
def material(cls):
return cls.particle_object.material
@classproperty
def particle_name_prefix(cls):
"""
Returns:
str: Naming prefix used for all generated particles. This is coupled with the unique particle ID to generate
the full particle name
"""
return f"{cls.name}Particle"
@classproperty
def state_size(cls):
# In addition to super, we have:
# scale (3*n), and particle counter (1)
return super().state_size + 3 * cls.n_particles + 1
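# For example, a system tracking 4 particles contributes 3 * 4 + 1 = 13 extra floats
# (twelve scale entries plus the particle counter) on top of the base system state.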
@classmethod
def _dump_state(cls):
state = super()._dump_state()
state["scales"] = np.array([particle.scale for particle in cls.particles.values()])
state["particle_counter"] = cls._particle_counter
return state
@classmethod
def _load_state(cls, state):
# Run super first
super()._load_state(state=state)
# Set particle scales
for particle, scale in zip(cls.particles.values(), state["scales"]):
particle.scale = scale
# Set particle counter
cls._particle_counter = state["particle_counter"]
@classmethod
def _serialize(cls, state):
# Run super first
states_flat = super()._serialize(state=state)
# Add particle scales, then the template info
return np.concatenate([
states_flat,
state["scales"].flatten(),
[state["particle_counter"]],
], dtype=float)
@classmethod
def _deserialize(cls, state):
# Run super first
state_dict, idx = super()._deserialize(state=state)
# Infer how many scales we have, then deserialize
n_particles = state_dict["n_particles"]
len_scales = n_particles * 3
state_dict["scales"] = state[idx:idx+len_scales].reshape(-1, 3)
state_dict["particle_counter"] = int(state[idx+len_scales])
return state_dict, idx + len_scales + 1
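# Flattened-state layout sketch for the fields appended by this class (assuming 2 particles;
# values hypothetical): [..super()'s flat state.., sx0, sy0, sz0, sx1, sy1, sz1, particle_counter],
# i.e. 3 * n_particles scale entries followed by a single counter value.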
@classmethod
def process_particle_object(cls):
"""
Perform any necessary processing on the particle object to extract further information.
"""
# Update color if the particle object has any material
color = np.ones(3)
if cls.particle_object.has_material():
if cls.particle_object.material.is_glass:
color = cls.particle_object.material.glass_color
else:
diffuse_texture = cls.particle_object.material.diffuse_texture
color = plt.imread(diffuse_texture).mean(axis=(0, 1)) if diffuse_texture else cls.particle_object.material.diffuse_color_constant
cls._color = color
@classmethod
def add_particle(cls, prim_path, scale, idn=None):
"""
Adds a particle to this system.
Args:
prim_path (str): Absolute path to the newly created particle, minus the name for this particle
scale (3-array): (x,y,z) scale to set for the added particle
idn (None or int): If specified, should be unique identifier to assign to this particle. If not, will
automatically generate a new unique one
Returns:
XFormPrim: Newly created particle instance, which is added internally as well
"""
# Generate the new particle
name = cls.particle_idn2name(idn=cls.next_available_particle_idn if idn is None else idn)
# Make sure name doesn't already exist
assert name not in cls.particles.keys(), f"Cannot create particle with name {name} because it already exists!"
new_particle = cls._load_new_particle(prim_path=f"{prim_path}/{name}", name=name)
# Set the scale and make sure the particle is visible
new_particle.scale *= scale
new_particle.visible = True
# Track this particle as well
cls.particles[new_particle.name] = new_particle
# Increment counter
cls._particle_counter += 1
return new_particle
@classmethod
def remove_particle_by_name(cls, name):
assert name in cls.particles, f"Got invalid name for particle to remove {name}"
particle = cls.particles.pop(name)
og.sim.remove_prim(particle)
@classmethod
def remove_particles(
cls,
idxs,
**kwargs,
):
particle_names = tuple(cls.particles.keys())
for idx in idxs:
cls.remove_particle_by_name(particle_names[idx])
@classmethod
def generate_particles(
cls,
positions,
orientations=None,
scales=None,
**kwargs,
):
# Grab pre-existing tfs
current_positions, current_orientations = cls.get_particles_position_orientation()
# Update the tensors
n_particles = len(positions)
orientations = R.random(num=n_particles).as_quat() if orientations is None else orientations
scales = cls.sample_scales(n=n_particles) if scales is None else scales
positions = np.concatenate([current_positions, positions], axis=0)
orientations = np.concatenate([current_orientations, orientations], axis=0)
# Add particles
for scale in scales:
cls.add_particle(prim_path=f"{cls.prim_path}/particles", scale=scale)
# Set the tfs
cls.set_particles_position_orientation(positions=positions, orientations=orientations)
@classmethod
def _load_new_particle(cls, prim_path, name):
"""
Loads a new particle into the current stage, leveraging @cls.particle_object as a template for the new particle
to load. This function should be implemented by any subclasses.
Args:
prim_path (str): The absolute stage path at which to create the new particle
name (str): The name to assign to this new particle at the path
Returns:
XFormPrim: Loaded particle
"""
raise NotImplementedError()
@classmethod
def particle_name2idn(cls, name):
"""
Args:
name (str): Particle name to grab its corresponding unique id number for
Returns:
int: Unique ID assigned to the particle based on its name
"""
assert cls.particle_name_prefix in name, \
f"Particle name should have '{cls.particle_name_prefix}' in it when checking ID! Got: {name}"
return int(name.split(cls.particle_name_prefix)[-1])
@classmethod
def particle_idn2name(cls, idn):
"""
Args:
idn (int): Unique ID number assigned to the particle to grab the name for
Returns:
str: Particle name corresponding to its unique id number
"""
assert isinstance(idn, int), \
f"Particle idn must be an integer when checking name! Got: {idn}. Type: {type(idn)}"
return f"{cls.particle_name_prefix}{idn}"
@classproperty
def color(cls):
return np.array(cls._color)
class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
"""
Particle system class that procedurally generates individual particles that are not subject to physics
"""
# Maps particle name to dict of {obj, link, face_id}
# NOTE: link will only exist for particles on rigid bodies
# NOTE: face_id will only exist for particles on cloths
_particles_info = None
# Pre-cached information about visual particles so that we have efficient runtime computations
# Maps particle name to local pose matrix for computing global poses for the particle
_particles_local_mat = None
# Maps group name to array of face_ids where particles are located if the group object is a cloth type
# Maps group name to np.array of face IDs (int) that particles are attached to
_cloth_face_ids = None
# Default behavior for this class -- whether to clip generated particles halfway into objects when sampling
# their locations on the surface of the given object
_CLIP_INTO_OBJECTS = False
# Default parameters for sampling particle locations
# See omnigibson/utils/sampling_utils.py for how they are used.
_SAMPLING_AXIS_PROBABILITIES = (0.25, 0.25, 0.5)
_SAMPLING_AABB_OFFSET = 0.01
_SAMPLING_BIMODAL_MEAN_FRACTION = 0.9
_SAMPLING_BIMODAL_STDEV_FRACTION = 0.2
_SAMPLING_MAX_ATTEMPTS = 20
_SAMPLING_HIT_PROPORTION = 0.4
@classmethod
def initialize(cls):
# Run super method first
super().initialize()
# Initialize mutable class variables so they don't automatically get overridden by children classes
cls._particles_info = dict()
cls._particles_local_mat = dict()
cls._cloth_face_ids = dict()
@classmethod
def update(cls):
# Run super first
super().update()
z_extent = cls.particle_object.aabb_extent[2]
# Iterate over all objects, and update all particles belonging to any cloth objects
for name, obj in cls._group_objects.items():
group = cls.get_group_name(obj=obj)
if obj.prim_type == PrimType.CLOTH and cls.num_group_particles(group=group) > 0:
# Update the transforms
cloth = obj.root_link
face_ids = cls._cloth_face_ids[group]
idxs = cloth.faces[face_ids].flatten()
positions = cloth.compute_particle_positions(idxs=idxs).reshape(-1, 3, 3)
normals = cloth.compute_face_normals_from_particle_positions(positions=positions)
# The actual positions we want are the face centroids, or the mean of all the positions
positions = positions.mean(axis=1)
# Orientations are the normals
z_up = np.zeros_like(normals)
z_up[:, 2] = 1.0
orientations = T.axisangle2quat(T.vecs2axisangle(z_up, normals))
if not cls._CLIP_INTO_OBJECTS and z_extent > 0:
z_offsets = np.array([z_extent * particle.scale[2] for particle in cls._group_particles[group].values()]) / 2.0
# Shift the particles halfway up
positions += normals * z_offsets.reshape(-1, 1)
# Set the group particle poses
cls.set_group_particles_position_orientation(group=group, positions=positions, orientations=orientations)
@classmethod
def _load_new_particle(cls, prim_path, name):
# We copy the template prim and generate the new object if the prim doesn't already exist, otherwise we
# reference the pre-existing one
if not lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path):
lazy.omni.kit.commands.execute(
"CopyPrim",
path_from=cls.particle_object.prim_path,
path_to=prim_path,
)
prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path)
lazy.omni.isaac.core.utils.semantics.add_update_semantics(
prim=prim,
semantic_label=cls.name,
type_label="class",
)
return VisualGeomPrim(prim_path=prim_path, name=name)
@classmethod
def _clear(cls):
# Run super method first
super()._clear()
# Clear all groups as well
cls._particles_info = dict()
cls._particles_local_mat = dict()
cls._cloth_face_ids = dict()
@classmethod
def remove_attachment_group(cls, group):
# Call super first
super().remove_attachment_group(group=group)
# If the group is a cloth, also remove the cloth face ids
if group in cls._cloth_face_ids:
cls._cloth_face_ids.pop(group)
return group
@classmethod
def remove_particle_by_name(cls, name):
# Run super first
super().remove_particle_by_name(name=name)
# Remove this particle from its respective group as well
parent_obj = cls._particles_info[name]["obj"]
group = cls.get_group_name(obj=parent_obj)
cls._group_particles[group].pop(name)
cls._particles_local_mat.pop(name)
particle_info = cls._particles_info.pop(name)
if cls._is_cloth_obj(obj=parent_obj):
# Also remove from cloth face ids
face_ids = cls._cloth_face_ids[group]
idx_mapping = {face_id: i for i, face_id in enumerate(face_ids)}
cls._cloth_face_ids[group] = np.delete(face_ids, idx_mapping[particle_info["face_id"]])
@classmethod
def generate_group_particles(
cls,
group,
positions,
orientations=None,
scales=None,
link_prim_paths=None,
):
# Make sure the group exists
cls._validate_group(group=group)
# Standardize orientations and links
obj = cls._group_objects[group]
is_cloth = cls._is_cloth_obj(obj=obj)
# If cloth, run the following sanity checks:
# (1) make sure link prim paths are not specified -- we can ONLY apply particles under the object xform prim
# (2) make sure object prim path exists at /World/<NAME> -- global pose inference assumes this is the case
if is_cloth:
assert link_prim_paths is None, "link_prim_paths should not be specified for cloth object group!"
assert obj.prim.GetParent().GetPath().pathString == "/World", \
"cloth object should exist as direct child of /World prim!"
n_particles = positions.shape[0]
if orientations is None:
orientations = np.zeros((n_particles, 4))
orientations[:, -1] = 1.0
link_prim_paths = [None] * n_particles if is_cloth else link_prim_paths
scales = cls.sample_scales_by_group(group=group, n=n_particles) if scales is None else scales
bbox_extents_local = [(cls.particle_object.aabb_extent * scale).tolist() for scale in scales]
# If we're using flatcache, we need to update the object's pose on the USD manually
if gm.ENABLE_FLATCACHE:
FlatcacheAPI.sync_raw_object_transforms_in_usd(prim=obj)
# Generate particles
z_up = np.zeros((3, 1))
z_up[-1] = 1.0
for position, orientation, scale, bbox_extent_local, link_prim_path in \
zip(positions, orientations, scales, bbox_extents_local, link_prim_paths):
link = None if is_cloth else obj.links[link_prim_path.split("/")[-1]]
# Possibly shift the particle slightly away from the object if we're not clipping into objects
# Note: For particles tied to rigid objects, the given position is on the surface of the object,
# so clipping would move the particle INTO the object surface, whereas for particles tied to cloth objects,
# the given position is at the particle location (i.e.: already clipped), so NO clipping would move the
# particle AWAY from the object surface
if (is_cloth and not cls._CLIP_INTO_OBJECTS) or (not is_cloth and cls._CLIP_INTO_OBJECTS):
# Shift the particle halfway down
base_to_center = bbox_extent_local[2] / 2.0
normal = (T.quat2mat(orientation) @ z_up).flatten()
offset = normal * base_to_center if is_cloth else -normal * base_to_center
position += offset
# Create particle
particle = cls.add_particle(
prim_path=obj.prim_path if is_cloth else link_prim_path,
scale=scale,
)
# Add to group
cls._group_particles[group][particle.name] = particle
cls._particles_info[particle.name] = dict(obj=cls._group_objects[group], link=link)
# Set the pose
cls.set_particle_position_orientation(idx=-1, position=position, orientation=orientation)
@classmethod
def generate_group_particles_on_object(cls, group, max_samples=None, min_samples_for_success=1):
# This function does not support max_samples=None. Must be explicitly specified
assert max_samples is not None, f"max_samples must be specified for {cls.name}'s generate_group_particles_on_object!"
assert max_samples >= min_samples_for_success, "number of particles to sample should be at least the min required for success"
# Make sure the group exists
cls._validate_group(group=group)
# Remove all stale particles
cls.remove_all_group_particles(group=group)
# Generate requested number of particles
obj = cls._group_objects[group]
# Sample scales and corresponding bbox extents
scales = cls.sample_scales_by_group(group=group, n=max_samples)
# For sampling particle positions, we need the global bbox extents, NOT the local extents
# which is what we would get naively if we directly use @scales
avg_scale = np.cbrt(np.prod(obj.scale))
bbox_extents_global = scales * cls.particle_object.aabb_extent.reshape(1, 3) * avg_scale
if obj.prim_type == PrimType.CLOTH:
# Sample locations based on randomly sampled keyfaces
cloth = obj.root_link
n_faces = len(cloth.faces)
face_ids = np.random.choice(n_faces, min(max_samples, n_faces), replace=False)
# Positions are the midpoints of each requested face
normals = cloth.compute_face_normals(face_ids=face_ids)
positions = cloth.compute_particle_positions(idxs=cloth.faces[face_ids].flatten()).reshape(-1, 3, 3).mean(axis=1)
# Orientations are the normals
z_up = np.zeros_like(normals)
z_up[:, 2] = 1.0
orientations = T.axisangle2quat(T.vecs2axisangle(z_up, normals))
link_prim_paths = None
cls._cloth_face_ids[group] = face_ids
else:
# Sample locations for all particles
results = sample_cuboid_on_object_symmetric_bimodal_distribution(
obj=obj,
num_samples=max_samples,
cuboid_dimensions=bbox_extents_global,
bimodal_mean_fraction=cls._SAMPLING_BIMODAL_MEAN_FRACTION,
bimodal_stdev_fraction=cls._SAMPLING_BIMODAL_STDEV_FRACTION,
axis_probabilities=cls._SAMPLING_AXIS_PROBABILITIES,
undo_cuboid_bottom_padding=True,
verify_cuboid_empty=False,
aabb_offset=cls._SAMPLING_AABB_OFFSET,
max_sampling_attempts=cls._SAMPLING_MAX_ATTEMPTS,
refuse_downwards=True,
hit_proportion=cls._SAMPLING_HIT_PROPORTION,
)
# Use sampled points
positions, orientations, particle_scales, link_prim_paths = [], [], [], []
for result, scale in zip(results, scales):
position, normal, quaternion, hit_link, reasons = result
if position is not None:
positions.append(position)
orientations.append(quaternion)
particle_scales.append(scale)
link_prim_paths.append(hit_link)
scales = particle_scales
success = len(positions) >= min_samples_for_success
# If we generated a sufficient number of points, generate them in the simulator
if success:
cls.generate_group_particles(
group=group,
positions=np.array(positions),
orientations=np.array(orientations),
scales=np.array(scales),
link_prim_paths=link_prim_paths,
)
# If we're a cloth, store the face_id as well
if obj.prim_type == PrimType.CLOTH:
for particle_name, face_id in zip(cls._group_particles[group].keys(), cls._cloth_face_ids[group]):
cls._particles_info[particle_name]["face_id"] = int(face_id)
return success
@classmethod
def _compute_batch_particles_position_orientation(cls, particles, local=False):
"""
Computes all @particles' positions and orientations
Args:
particles (Iterable of str): Names of particles to compute batched position orientation for
local (bool): Whether to compute particles' poses in local frame or not
Returns:
2-tuple:
- (n, 3)-array: per-particle (x,y,z) position
- (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
"""
n_particles = len(particles)
if n_particles == 0:
return (np.array([]).reshape(0, 3), np.array([]).reshape(0, 4))
if local:
poses = np.zeros((n_particles, 4, 4))
for i, name in enumerate(particles):
poses[i] = T.pose2mat(cls.particles[name].get_local_pose())
else:
# Iterate over all particles and compute link tfs programmatically, then batch the matrix transform
link_tfs = dict()
link_tfs_batch = np.zeros((n_particles, 4, 4))
particle_local_poses_batch = np.zeros_like(link_tfs_batch)
for i, name in enumerate(particles):
obj = cls._particles_info[name]["obj"]
is_cloth = cls._is_cloth_obj(obj=obj)
if is_cloth:
if obj not in link_tfs:
# We want World --> obj transform, NOT the World --> root_link transform, since these particles
# do NOT exist under a link but rather the object prim itself. So we use XFormPrim to directly
# get the transform, and not obj.get_local_pose() which will give us the local pose of the
# root link!
link_tfs[obj] = T.pose2mat(XFormPrim.get_local_pose(obj))
link = obj
else:
link = cls._particles_info[name]["link"]
if link not in link_tfs:
link_tfs[link] = T.pose2mat(link.get_position_orientation())
link_tfs_batch[i] = link_tfs[link]
particle_local_poses_batch[i] = cls._particles_local_mat[name]
# Compute once
poses = np.matmul(link_tfs_batch, particle_local_poses_batch)
# Decompose back into positions and orientations
return poses[:, :3, 3], T.mat2quat(poses[:, :3, :3])
@classmethod
def get_particles_position_orientation(cls):
return cls._compute_batch_particles_position_orientation(particles=cls.particles, local=False)
@classmethod
def get_particles_local_pose(cls):
return cls._compute_batch_particles_position_orientation(particles=cls.particles, local=True)
@classmethod
def get_group_particles_position_orientation(cls, group):
return cls._compute_batch_particles_position_orientation(particles=cls._group_particles[group], local=False)
@classmethod
def get_group_particles_local_pose(cls, group):
return cls._compute_batch_particles_position_orientation(particles=cls._group_particles[group], local=True)
@classmethod
def get_particle_position_orientation(cls, idx):
name = list(cls.particles.keys())[idx]
# First, get local pose, scale it by the parent link's scale, and then convert into a matrix
# Note that particles_local_mat already takes the parent scale into account when computing the transform!
parent_obj = cls._particles_info[name]["obj"]
is_cloth = cls._is_cloth_obj(obj=parent_obj)
local_mat = cls._particles_local_mat[name]
link_tf = T.pose2mat(XFormPrim.get_local_pose(parent_obj)) if is_cloth else \
T.pose2mat(cls._particles_info[name]["link"].get_position_orientation())
# Multiply the local pose by the link's global transform, then return as pos, quat tuple
return T.mat2pose(link_tf @ local_mat)
@classmethod
def get_particle_local_pose(cls, idx):
name = list(cls.particles.keys())[idx]
return cls.particles[name].get_local_pose()
@classmethod
def _modify_batch_particles_position_orientation(cls, particles, positions=None, orientations=None, local=False):
"""
Modifies all @particles' positions and orientations with @positions and @orientations
Args:
particles (Iterable of str): Names of particles to modify
positions (None or (n, 3)-array): New positions to set for the particles
orientations (None or (n, 4)-array): New orientations to set for the particles
local (bool): Whether to modify particles' poses in local frame or not
Returns:
2-tuple:
- (n, 3)-array: per-particle (x,y,z) position
- (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
"""
n_particles = len(particles)
if n_particles == 0:
return
if positions is None or orientations is None:
pos, ori = cls._compute_batch_particles_position_orientation(particles=particles, local=local)
positions = pos if positions is None else positions
orientations = ori if orientations is None else orientations
lens = np.array([len(particles), len(positions), len(orientations)])
assert lens.min() == lens.max(), "Got mismatched particles, positions, and orientations!"
particle_local_poses_batch = np.zeros((n_particles, 4, 4))
particle_local_poses_batch[:, -1, -1] = 1.0
particle_local_poses_batch[:, :3, 3] = positions
particle_local_poses_batch[:, :3, :3] = T.quat2mat(orientations)
if not local:
# Iterate over all particles and compute link tfs programmatically, then batch the matrix transform
link_tfs = dict()
link_tfs_batch = np.zeros((n_particles, 4, 4))
for i, name in enumerate(particles):
obj = cls._particles_info[name]["obj"]
is_cloth = cls._is_cloth_obj(obj=obj)
if is_cloth:
if obj not in link_tfs:
# We want World --> obj transform, NOT the World --> root_link transform, since these particles
# do NOT exist under a link but rather the object prim itself. So we use XFormPrim to directly
# get the transform, and not obj.get_local_pose() which will give us the local pose of the
# root link!
link_tfs[obj] = T.pose2mat(XFormPrim.get_local_pose(obj))
link_tf = link_tfs[obj]
else:
link = cls._particles_info[name]["link"]
if link not in link_tfs:
link_tfs[link] = T.pose2mat(link.get_position_orientation())
link_tf = link_tfs[link]
link_tfs_batch[i] = link_tf
# particle_local_poses_batch = np.matmul(np.linalg.inv(link_tfs_batch), particle_local_poses_batch)
particle_local_poses_batch = np.linalg.solve(link_tfs_batch, particle_local_poses_batch)
for i, name in enumerate(particles):
cls._modify_particle_local_mat(name=name, mat=particle_local_poses_batch[i], ignore_scale=local)
@classmethod
def set_particles_position_orientation(cls, positions=None, orientations=None):
return cls._modify_batch_particles_position_orientation(particles=cls.particles, positions=positions, orientations=orientations, local=False)
@classmethod
def set_particles_local_pose(cls, positions=None, orientations=None):
return cls._modify_batch_particles_position_orientation(particles=cls.particles, positions=positions, orientations=orientations, local=True)
@classmethod
def set_group_particles_position_orientation(cls, group, positions=None, orientations=None):
return cls._modify_batch_particles_position_orientation(particles=cls._group_particles[group], positions=positions, orientations=orientations, local=False)
@classmethod
def set_group_particles_local_pose(cls, group, positions=None, orientations=None):
return cls._modify_batch_particles_position_orientation(particles=cls._group_particles[group], positions=positions, orientations=orientations, local=True)
@classmethod
def set_particle_position_orientation(cls, idx, position=None, orientation=None):
if position is None or orientation is None:
pos, ori = cls.get_particle_position_orientation(idx=idx)
position = pos if position is None else position
orientation = ori if orientation is None else orientation
name = list(cls.particles.keys())[idx]
global_mat = np.zeros((4, 4))
global_mat[-1, -1] = 1.0
global_mat[:3, 3] = position
global_mat[:3, :3] = T.quat2mat(orientation)
# First, get global pose, scale it by the parent link's scale, and then convert into a matrix
parent_obj = cls._particles_info[name]["obj"]
is_cloth = cls._is_cloth_obj(obj=parent_obj)
link_tf = T.pose2mat(XFormPrim.get_local_pose(parent_obj)) if is_cloth else \
T.pose2mat(cls._particles_info[name]["link"].get_position_orientation())
local_mat = np.linalg.inv(link_tf) @ global_mat
cls._modify_particle_local_mat(name=name, mat=local_mat, ignore_scale=False)
@classmethod
def set_particle_local_pose(cls, idx, position=None, orientation=None):
if position is None or orientation is None:
pos, ori = cls.get_particle_local_pose(idx=idx)
position = pos if position is None else position
orientation = ori if orientation is None else orientation
name = list(cls.particles.keys())[idx]
local_mat = np.zeros((4, 4))
local_mat[-1, -1] = 1.0
local_mat[:3, 3] = position
local_mat[:3, :3] = T.quat2mat(orientation)
cls._modify_particle_local_mat(name=name, mat=local_mat, ignore_scale=True)
@classmethod
def _is_cloth_obj(cls, obj):
"""
Checks whether object @obj is a cloth or not
Args:
obj (BaseObject): Object to check
Returns:
bool: True if the object is cloth type, otherwise False
"""
return obj.prim_type == PrimType.CLOTH
@classmethod
def _compute_particle_local_mat(cls, name, ignore_scale=False):
"""
Computes particle @name's local transform as a homogeneous 4x4 matrix
Args:
name (str): Name of the particle to compute local transform matrix for
ignore_scale (bool): Whether to ignore the parent_link scale when computing the local transform
Returns:
np.array: (4, 4) homogeneous transform matrix
"""
particle = cls.particles[name]
parent_obj = cls._particles_info[name]["obj"]
is_cloth = cls._is_cloth_obj(obj=parent_obj)
scale = np.ones(3) if is_cloth else cls._particles_info[name]["link"].scale
local_pos, local_quat = particle.get_local_pose()
local_pos = local_pos if ignore_scale else local_pos * scale
return T.pose2mat((local_pos, local_quat))
@classmethod
def _modify_particle_local_mat(cls, name, mat, ignore_scale=False):
"""
Sets particle @name's local transform as a homogeneous 4x4 matrix
Args:
name (str): Name of the particle to compute local transform matrix for
mat (n-array): (4, 4) homogeneous transform matrix
ignore_scale (bool): Whether to ignore the parent_link scale when setting the local transform
"""
particle = cls.particles[name]
parent_obj = cls._particles_info[name]["obj"]
is_cloth = cls._is_cloth_obj(obj=parent_obj)
scale = np.ones(3) if is_cloth else cls._particles_info[name]["link"].scale
local_pos, local_quat = T.mat2pose(mat)
local_pos = local_pos if ignore_scale else local_pos / scale
particle.set_local_pose(local_pos, local_quat)
# Store updated value
cls._particles_local_mat[name] = mat
@classmethod
def _sync_particle_groups(
cls,
group_objects,
particle_idns,
particle_attached_references,
):
"""
Synchronizes the particle groups based on the desired group objects @group_objects
Args:
group_objects (list of BaseObject): Desired unique group objects that should be active for
this particle system.
particle_idns (list of list of int): Per-group unique id numbers for the particles assigned to that group.
List should be the same length as @group_objects, with sub-entries corresponding to the desired number of
particles assigned to that group
particle_attached_references (list of list of str or int): Per-group reference info relevant for each
particle. List should be the same length as @group_objects, with sub-entries corresponding to the desired
number of particles assigned to that group. If a given group is a cloth object, the entries should be
integers corresponding to the individual face IDs that each particle is attached to for the group.
Otherwise, the group is assumed to be a rigid object, in which case the entries should be link
names corresponding to the specific links each particle is attached to for each group.
"""
# We have to be careful here -- some particle groups may have been deleted / are mismatched, so we need
# to update accordingly, potentially deleting stale groups and creating new groups as needed
name_to_info_mapping = {obj.name: {
"n_particles": len(p_idns),
"particle_idns": p_idns,
"references": references,
}
for obj, p_idns, references in
zip(group_objects, particle_idns, particle_attached_references)}
current_group_names = cls.groups
desired_group_names = set(obj.name for obj in group_objects)
groups_to_delete = current_group_names - desired_group_names
groups_to_create = desired_group_names - current_group_names
common_groups = current_group_names.intersection(desired_group_names)
# Sanity check the common groups, we will recreate any where there is a mismatch
for name in common_groups:
info = name_to_info_mapping[name]
if cls.num_group_particles(group=name) != info["n_particles"]:
log.debug(f"Got mismatch in particle group {name} when syncing, "
f"deleting and recreating group now.")
# Add this group to both the delete and creation pile
groups_to_delete.add(name)
groups_to_create.add(name)
# Delete any groups we no longer want
for name in groups_to_delete:
cls.remove_attachment_group(group=name)
# Create any groups we don't already have
for name in groups_to_create:
obj = og.sim.scene.object_registry("name", name)
info = name_to_info_mapping[name]
cls.create_attachment_group(obj=obj)
is_cloth = cls._is_cloth_obj(obj=obj)
for particle_idn, reference in zip(info["particle_idns"], info["references"]):
# Reference is either the face ID (int) if cloth group or link name (str) if rigid body group
# Create the necessary particles
# Use scale (1,1,1) since it will get overridden anyways when loading state
particle = cls.add_particle(
prim_path=obj.prim_path if is_cloth else obj.links[reference].prim_path,
scale=np.ones(3),
idn=int(particle_idn),
)
cls._group_particles[name][particle.name] = particle
cls._particles_info[particle.name] = dict(obj=obj)
# Add face_id if is_cloth, otherwise, add link
if is_cloth:
cls._particles_info[particle.name]["face_id"] = int(reference)
else:
cls._particles_info[particle.name]["link"] = obj.links[reference]
# Also store the cloth face IDs as a vector
if is_cloth:
cls._cloth_face_ids[cls.get_group_name(obj)] = \
np.array([cls._particles_info[particle_name]["face_id"] for particle_name in cls._group_particles[name]])
@classmethod
def create(cls, name, create_particle_template, min_scale=None, max_scale=None, scale_relative_to_parent=False, **kwargs):
"""
Utility function to programmatically generate monolithic visual particle system classes.
Note: If using super() calls in any functions, we have to use slightly esoteric syntax in order to
accommodate this procedural method for using super calls
cf. https://stackoverflow.com/questions/22403897/what-does-it-mean-by-the-super-object-returned-is-unbound-in-python
Use: super(cls).__get__(cls).<METHOD_NAME>(<KWARGS>)
Args:
name (str): Name of the visual particles, in snake case.
min_scale (None or 3-array): If specified, sets the minimum bound for the visual particles' relative scale.
Else, defaults to 1
max_scale (None or 3-array): If specified, sets the maximum bound for the visual particles' relative scale.
Else, defaults to 1
scale_relative_to_parent (bool): If True, will scale generated particles relative to the corresponding
group's object
create_particle_template (function): Method for generating the visual particle template that will be duplicated
when generating groups of particles.
Expected signature:
create_particle_template(prim_path: str, name: str) --> EntityPrim
where @prim_path and @name are the parameters to assign to the generated EntityPrim.
NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
visual mesh attached to its root link, since this will be the actual visual mesh used
**kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
the class attribute to modify and the values represent the functions / value to set
(Note: These values should have either @classproperty or @classmethod decorators!)
Returns:
VisualParticleSystem: Generated visual particle system class
"""
# Override the necessary parameters
@classproperty
def cp_register_system(cls):
# We should register this system since it's an "actual" system (not an intermediate class)
return True
@classproperty
def cp_scale_relative_to_parent(cls):
return scale_relative_to_parent
@classmethod
def cm_create_particle_template(cls):
return create_particle_template(prim_path=f"{cls.prim_path}/template", name=f"{cls.name}_template")
# Add to any other params specified
kwargs["_register_system"] = cp_register_system
kwargs["scale_relative_to_parent"] = cp_scale_relative_to_parent
kwargs["_create_particle_template"] = cm_create_particle_template
# Run super
return super().create(name=name, min_scale=min_scale, max_scale=max_scale, **kwargs)
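# Minimal usage sketch for create() (hypothetical "dust" system; the USDObject import,
# asset path, and visual_only flag are assumptions, not guarantees of this module's API):
#
# from omnigibson.objects import USDObject
#
# def create_dust_template(prim_path, name):
#     # Non-articulated, single-link object with one visual mesh, per the NOTE above
#     return USDObject(prim_path=prim_path, name=name, usd_path="/path/to/dust.usd",
#                      visual_only=True)
#
# DustSystem = MacroVisualParticleSystem.create(
#     name="dust",
#     create_particle_template=create_dust_template,
#     scale_relative_to_parent=True,
# )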
@classmethod
def _dump_state(cls):
state = super()._dump_state()
particle_names = list(cls.particles.keys())
# Add in per-group information
groups_dict = dict()
name2idx = {name: idx for idx, name in enumerate(particle_names)}
for group_name, group_particles in cls._group_particles.items():
obj = cls._group_objects[group_name]
is_cloth = cls._is_cloth_obj(obj=obj)
groups_dict[group_name] = dict(
particle_attached_obj_uuid=obj.uuid,
n_particles=cls.num_group_particles(group=group_name),
particle_idns=[cls.particle_name2idn(name=name) for name in group_particles.keys()],
particle_indices=[name2idx[name] for name in group_particles.keys()],
# If the attached object is a cloth, store the face_id, otherwise, store the link name
particle_attached_references=[cls._particles_info[name]["face_id"] for name in group_particles.keys()]
if is_cloth else [cls._particles_info[name]["link"].prim_path.split("/")[-1] for name in group_particles.keys()],
)
state["n_groups"] = len(cls._group_particles)
state["groups"] = groups_dict
return state
@classmethod
def _load_state(cls, state):
"""
Loads the internal state of this system as specified by @state, first synchronizing
particle groups against the dumped group info and pruning any particles whose parent
objects no longer exist in the scene.
Args:
state (dict): Keyword-mapped states of this system to set
"""
# Synchronize particle groups
group_objects = []
particle_idns = []
particle_attached_references = []
indices_to_remove = np.array([], dtype=int)
for info in state["groups"].values():
obj = og.sim.scene.object_registry("uuid", info["particle_attached_obj_uuid"])
# obj will be None if an object with an attachment group is removed between dump_state() and load_state()
if obj is not None:
group_objects.append(obj)
particle_idns.append(info["particle_idns"])
particle_attached_references.append(info["particle_attached_references"])
else:
indices_to_remove = np.append(indices_to_remove, np.array(info["particle_indices"], dtype=int))
cls._sync_particle_groups(
group_objects=group_objects,
particle_idns=particle_idns,
particle_attached_references=particle_attached_references,
)
state["n_particles"] -= len(indices_to_remove)
state["positions"] = np.delete(state["positions"], indices_to_remove, axis=0)
state["orientations"] = np.delete(state["orientations"], indices_to_remove, axis=0)
state["scales"] = np.delete(state["scales"], indices_to_remove, axis=0)
# Run super
super()._load_state(state=state)
@classmethod
def _serialize(cls, state):
# Run super first
state_flat = super()._serialize(state=state)
groups_dict = state["groups"]
state_group_flat = [[state["n_groups"]]]
for group_name, group_dict in groups_dict.items():
obj = cls._group_objects[group_name]
is_cloth = cls._is_cloth_obj(obj=obj)
group_obj_link2id = {link_name: i for i, link_name in enumerate(obj.links.keys())}
state_group_flat += [
[group_dict["particle_attached_obj_uuid"]],
[group_dict["n_particles"]],
group_dict["particle_idns"],
group_dict["particle_indices"],
(group_dict["particle_attached_references"] if is_cloth else
[group_obj_link2id[reference] for reference in group_dict["particle_attached_references"]]),
]
return np.concatenate([*state_group_flat, state_flat]).astype(float)
@classmethod
def _deserialize(cls, state):
# Synchronize the particle groups
n_groups = int(state[0])
groups_dict = dict()
group_objs = []
# Index starts at 1 because index 0 is n_groups
idx = 1
for i in range(n_groups):
obj_uuid, n_particles = int(state[idx]), int(state[idx + 1])
obj = og.sim.scene.object_registry("uuid", obj_uuid)
assert obj is not None, f"Object with UUID {obj_uuid} not found in the scene"
is_cloth = cls._is_cloth_obj(obj=obj)
group_obj_id2link = {i: link_name for i, link_name in enumerate(obj.links.keys())}
group_objs.append(obj)
groups_dict[obj.name] = dict(
particle_attached_obj_uuid=obj_uuid,
n_particles=n_particles,
particle_idns=[int(idn) for idn in state[idx + 2 : idx + 2 + n_particles]], # Idx + 2 because the first two are obj_uuid and n_particles
particle_indices=[int(idn) for idn in state[idx + 2 + n_particles: idx + 2 + n_particles * 2]],
particle_attached_references=[int(idn) for idn in state[idx + 2 + n_particles * 2: idx + 2 + n_particles * 3]]
if is_cloth else [group_obj_id2link[int(idn)] for idn in state[idx + 2 + n_particles * 2: idx + 2 + n_particles * 3]],
)
idx += 2 + n_particles * 3
log.debug(f"Syncing {cls.name} particles with {n_groups} groups..")
cls._sync_particle_groups(
group_objects=group_objs,
particle_idns=[group_info["particle_idns"] for group_info in groups_dict.values()],
particle_attached_references=[group_info["particle_attached_references"] for group_info in groups_dict.values()],
)
# Get super method
state_dict, idx_super = super()._deserialize(state=state[idx:])
state_dict["n_groups"] = n_groups
state_dict["groups"] = groups_dict
return state_dict, idx + idx_super
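# Serialized group-header layout sketch (one group with 2 particles; values hypothetical):
#   [1,              # n_groups
#    obj_uuid, 2,    # per group: attached object uuid, n_particles
#    idn0, idn1,     # particle idns
#    pidx0, pidx1,   # particle indices into the global particle ordering
#    ref0, ref1,     # face ids (cloth) or link ids (rigid)
#    ..super()'s flat state..]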
class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
"""
Particle system class that procedurally generates individual particles that are subject to physics
"""
# Physics rigid body view for keeping track of all particles' state
particles_view = None
# Approximate radius of the macro particle, and distance from particle frame to approximate center
_particle_radius = None
_particle_offset = None
@classmethod
def initialize(cls):
# Run super method first
super().initialize()
# Create the particles head prim -- this is merely a scope prim
og.sim.stage.DefinePrim(f"{cls.prim_path}/particles", "Scope")
# A new view needs to be created every time once sim is playing, so we add a callback now
og.sim.add_callback_on_play(name=f"{cls.name}_particles_view", callback=cls.refresh_particles_view)
# If sim is already playing, refresh particles immediately
if og.sim.is_playing():
cls.refresh_particles_view()
@classmethod
def _load_new_particle(cls, prim_path, name):
# We copy the template prim and generate the new object if the prim doesn't already exist, otherwise we
# reference the pre-existing one
if not lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path):
lazy.omni.kit.commands.execute(
"CopyPrim",
path_from=cls.particle_object.prim_path,
path_to=prim_path,
)
# Apply RigidBodyAPI to it so it is subject to physics
prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path)
lazy.pxr.UsdPhysics.RigidBodyAPI.Apply(prim)
lazy.omni.isaac.core.utils.semantics.add_update_semantics(
prim=prim,
semantic_label=cls.name,
type_label="class",
)
return CollisionVisualGeomPrim(prim_path=prim_path, name=name)
@classmethod
def process_particle_object(cls):
# Run super method
super().process_particle_object()
# Compute particle radius
vertices = np.array(cls.particle_object.get_attribute("points")) * cls.particle_object.scale * cls.max_scale.reshape(1, 3)
particle_offset, particle_radius = trimesh.nsphere.minimum_nsphere(trimesh.Trimesh(vertices=vertices))
if particle_radius < m.MIN_PARTICLE_RADIUS:
ratio = m.MIN_PARTICLE_RADIUS / particle_radius
cls.particle_object.scale *= ratio
particle_offset *= ratio
particle_radius = m.MIN_PARTICLE_RADIUS
cls._particle_offset = particle_offset
cls._particle_radius = particle_radius
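# Clamping sketch: if the fitted minimum bounding sphere had radius 0.004 (below
# m.MIN_PARTICLE_RADIUS = 0.01), both the template scale and the offset are multiplied
# by 0.01 / 0.004 = 2.5, so the effective particle radius lands exactly on the minimum.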
@classmethod
def refresh_particles_view(cls):
"""
Internal helper method to refresh the particles' rigid body view to grab state
Should be called every time sim.play() is called
"""
og.sim.pi.update_simulation(elapsedStep=0, currentTime=og.sim.current_time)
with suppress_omni_log(channels=["omni.physx.tensors.plugin"]):
cls.particles_view = og.sim.physics_sim_view.create_rigid_body_view(pattern=f"{cls.prim_path}/particles/*")
@classmethod
def _clear(cls):
# Run super method first
super()._clear()
# Clear internal variables
cls.particles_view = None
cls._particle_radius = None
cls._particle_offset = None
@classmethod
def remove_particle_by_name(cls, name):
# Run super first
super().remove_particle_by_name(name=name)
# Refresh particles view
cls.refresh_particles_view()
@classmethod
def add_particle(cls, prim_path, scale, idn=None):
# Run super first
particle = super().add_particle(prim_path=prim_path, scale=scale, idn=idn)
# Refresh particles view
cls.refresh_particles_view()
return particle
@classmethod
def get_particles_position_orientation(cls):
# Note: This gets the center of the sphere approximation of the particles, NOT the actual particle frames!
if cls.n_particles > 0:
tfs = cls.particles_view.get_transforms()
pos, ori = tfs[:, :3], tfs[:, 3:]
pos = pos + T.quat2mat(ori) @ cls._particle_offset
else:
pos, ori = np.array([]).reshape(0, 3), np.array([]).reshape(0, 4)
return pos, ori
@classmethod
def get_particles_local_pose(cls):
return cls.get_particles_position_orientation()
@classmethod
def get_particle_position_orientation(cls, idx):
assert cls.n_particles == 0 or idx < cls.n_particles, \
f"Got invalid idx for getting particle pose! N particles: {cls.n_particles}, got idx: {idx}"
positions, orientations = cls.get_particles_position_orientation()
return (positions[idx], orientations[idx]) if cls.n_particles > 0 else (positions, orientations)
@classmethod
def get_particle_local_pose(cls, idx):
return cls.get_particle_position_orientation(idx=idx)
@classmethod
def set_particles_position_orientation(cls, positions=None, orientations=None):
if cls.n_particles == 0:
return
# Note: This sets the center of the sphere approximation of the particles, NOT the actual particle frames!
if positions is None or orientations is None:
pos, ori = cls.get_particles_position_orientation()
orientations = ori if orientations is None else orientations
positions = pos if positions is None else (positions - T.quat2mat(orientations) @ cls._particle_offset)
cls.particles_view.set_transforms(np.concatenate([positions, orientations], axis=1), indices=np.arange(len(positions)))
@classmethod
def set_particles_local_pose(cls, positions=None, orientations=None):
cls.set_particles_position_orientation(positions=positions, orientations=orientations)
@classmethod
def set_particle_position_orientation(cls, idx, position=None, orientation=None):
assert idx < cls.n_particles, \
f"Got invalid idx for setting particle pose! N particles: {cls.n_particles}, got idx: {idx}"
if position is None or orientation is None:
pos, ori = cls.get_particle_position_orientation(idx=idx)
orientation = ori if orientation is None else orientation
position = pos if position is None else (position - T.quat2mat(orientation) @ cls._particle_offset)
cls.particles_view.set_transforms(np.concatenate([position, orientation]).reshape(1, -1), indices=np.array([idx]))
@classmethod
def set_particle_local_pose(cls, idx, position=None, orientation=None):
cls.set_particle_position_orientation(idx=idx, position=position, orientation=orientation)
@classmethod
def get_particles_velocities(cls):
"""
Grab particles' global linear and angular velocities
Returns:
2-tuple:
- (n, 3)-array: per-particle (x, y, z) linear velocities in the world frame
- (n, 3)-array: per-particle (ax, ay, az) angular velocities in the world frame
"""
if cls.n_particles > 0:
vels = cls.particles_view.get_velocities()
lin_vel, ang_vel = vels[:, :3], vels[:, 3:]
else:
lin_vel, ang_vel = np.array([]).reshape(0, 3), np.array([]).reshape(0, 3)
return lin_vel, ang_vel
@classmethod
def get_particle_velocities(cls, idx):
"""
Grab particle @idx's global linear and angular velocities
Returns:
2-tuple:
- 3-array: particle (x, y, z) linear velocity in the world frame
- 3-array: particle (ax, ay, az) angular velocity in the world frame
"""
assert cls.n_particles == 0 or idx < cls.n_particles, \
f"Got invalid idx for getting particle velocity! N particles: {cls.n_particles}, got idx: {idx}"
lin_vel, ang_vel = cls.get_particles_velocities()
return (lin_vel[idx], ang_vel[idx]) if cls.n_particles > 0 else (lin_vel, ang_vel)
@classmethod
def set_particles_velocities(cls, lin_vels=None, ang_vels=None):
if cls.n_particles == 0:
return
if lin_vels is None or ang_vels is None:
l_vels, a_vels = cls.get_particles_velocities()
lin_vels = l_vels if lin_vels is None else lin_vels
ang_vels = a_vels if ang_vels is None else ang_vels
cls.particles_view.set_velocities(np.concatenate([lin_vels, ang_vels], axis=1), indices=np.arange(len(lin_vels)))
@classmethod
def set_particle_velocities(cls, idx, lin_vel=None, ang_vel=None):
assert idx < cls.n_particles, \
f"Got invalid idx for setting particle velocity! N particles: {cls.n_particles}, got idx: {idx}"
if lin_vel is None or ang_vel is None:
l_vel, a_vel = cls.get_particles_velocities()
lin_vel = l_vel if lin_vel is None else lin_vel
ang_vel = a_vel if ang_vel is None else ang_vel
cls.particles_view.set_velocities(np.concatenate([lin_vel, ang_vel]).reshape(1, -1), indices=np.array([idx]))
@classproperty
def particle_radius(cls):
return cls._particle_radius
@classproperty
def particle_contact_radius(cls):
# This is simply the normal radius
return cls.particle_radius
@classmethod
def generate_particles(
cls,
positions,
orientations=None,
velocities=None,
angular_velocities=None,
scales=None,
**kwargs,
):
"""
Generates new particles
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be sampled randomly
velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
angular_velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (ax,ay,az)
angular velocities. If not specified, all will be set to 0
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (cls.min_scale, cls.max_scale)
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""
# Call super first
super().generate_particles(
positions=positions,
orientations=orientations,
scales=scales,
**kwargs,
)
# Grab pre-existing vels -- note that this already includes the newly included particles, so we will only
# keep the first (N - n_new) values
current_lin_vels, current_ang_vels = cls.get_particles_velocities()
# Update the tensors
n_particles = len(positions)
velocities = np.zeros((n_particles, 3)) if velocities is None else velocities
angular_velocities = np.zeros_like(velocities) if angular_velocities is None else angular_velocities
velocities = np.concatenate([current_lin_vels[:-n_particles], velocities], axis=0)
angular_velocities = np.concatenate([current_ang_vels[:-n_particles], angular_velocities], axis=0)
# Set the vels
cls.set_particles_velocities(lin_vels=velocities, ang_vels=angular_velocities)
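# Usage sketch (system and coordinates are hypothetical):
#   positions = np.array([[0.0, 0.0, 1.0],
#                         [0.05, 0.0, 1.0]])
#   SomePhysicalSystem.generate_particles(positions=positions)
# Orientations are then sampled randomly, scales drawn from (min_scale, max_scale),
# and both velocity tensors default to zeros for the newly added particles.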
@classmethod
def create(cls, name, create_particle_template, particle_density, scale=None, **kwargs):
"""
Utility function to programmatically generate monolithic physical particle system classes.
Note: If using super() calls in any functions, we have to use slightly esoteric syntax in order to
accommodate this procedural method for using super calls
cf. https://stackoverflow.com/questions/22403897/what-does-it-mean-by-the-super-object-returned-is-unbound-in-python
Use: super(cls).__get__(cls).<METHOD_NAME>(<KWARGS>)
Args:
name (str): Name of the macro physical particles, in snake case.
particle_density (float): Particle density for the generated system
create_particle_template (function): Method for generating the visual particle template that will be duplicated
when generating groups of particles.
Expected signature:
create_particle_template(prim_path: str, name: str) --> EntityPrim
where @prim_path and @name are the parameters to assign to the generated EntityPrim.
NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
visual mesh attached to its root link, since this will be the actual mesh used for duplication
scale (None or 3-array): If specified, sets the scaling factor for the particles' relative scale.
Else, defaults to 1
**kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
the class attribute to modify and the values represent the functions / value to set
(Note: These values should have either @classproperty or @classmethod decorators!)
Returns:
PhysicalParticleSystem: Generated physical particle system class
"""
# Override the necessary parameters
@classproperty
def cp_register_system(cls):
# We should register this system since it's an "actual" system (not an intermediate class)
return True
@classproperty
def cp_particle_density(cls):
return particle_density
@classmethod
def cm_create_particle_template(cls):
return create_particle_template(prim_path=f"{cls.prim_path}/template", name=f"{cls.name}_template")
# Add to any other params specified
kwargs["_register_system"] = cp_register_system
kwargs["particle_density"] = cp_particle_density
kwargs["_create_particle_template"] = cm_create_particle_template
# Run super
return super().create(name=name, min_scale=scale, max_scale=scale, **kwargs)
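# Minimal usage sketch (hypothetical "gravel" system; create_gravel_template mirrors the
# visual-system example above and the density value is an assumption):
#
# GravelSystem = MacroPhysicalParticleSystem.create(
#     name="gravel",
#     create_particle_template=create_gravel_template,
#     particle_density=2000.0,
#     scale=np.array([0.5, 0.5, 0.5]),
# )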
@classmethod
def _sync_particles(cls, n_particles):
"""
Synchronizes the number of particles seen in the scene with @n_particles
Args:
n_particles (int): Desired number of particles to force simulator to have
"""
# Get the difference between current and desired particles
n_particles_to_generate = n_particles - cls.n_particles
# If positive, add particles
if n_particles_to_generate > 0:
for i in range(n_particles_to_generate):
# Min scale == max scale, so no need for sampling
cls.add_particle(prim_path=f"{cls.prim_path}/particles", scale=cls.max_scale)
else:
# Remove excess particles
cls.remove_particles(idxs=np.arange(-n_particles_to_generate))
@classproperty
def state_size(cls):
# In addition to super, we have:
# velocities (6*n)
return super().state_size + 6 * cls.n_particles
@classmethod
def _dump_state(cls):
state = super()._dump_state()
# Store all particles' velocities as well
state["lin_velocities"], state["ang_velocities"] = cls.get_particles_velocities()
return state
@classmethod
def _load_state(cls, state):
# Sync the number of particles first
cls._sync_particles(n_particles=state["n_particles"])
super()._load_state(state=state)
# Make sure view is refreshed
cls.refresh_particles_view()
# Make sure we update all the velocities
cls.set_particles_velocities(state["lin_velocities"], state["ang_velocities"])
@classmethod
def _serialize(cls, state):
# Run super first
state_flat = super()._serialize(state=state)
# Add velocities
return np.concatenate([state_flat, state["lin_velocities"].flatten(), state["ang_velocities"].flatten()], dtype=float)
@classmethod
def _deserialize(cls, state):
# Sync the number of particles first
cls._sync_particles(n_particles=int(state[0]))
# Run super first
state_dict, idx = super()._deserialize(state=state)
# Deserialize velocities
len_velocities = 3 * state_dict["n_particles"]
for vel in ("lin_velocities", "ang_velocities"):
state_dict[vel] = state[idx:idx+len_velocities].reshape(-1, 3)
idx += len_velocities
return state_dict, idx
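# A minimal worked sketch of the velocity-extended layout above (assuming the
# parent class serializes the plain BaseSystem layout of 7 + 7n floats): for
# n = 2 particles, _serialize produces 21 base floats followed by
# lin_velocities (6 floats) and ang_velocities (6 floats), so _deserialize
# recovers them as:
# lin = state[21:27].reshape(-1, 3)
# ang = state[27:33].reshape(-1, 3)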
| 68,091 | Python | 43.650492 | 163 | 0.62885 |
StanfordVL/OmniGibson/omnigibson/systems/system_base.py | import os
import json
import numpy as np
import omnigibson as og
from omnigibson.macros import gm, create_module_macros
from omnigibson.utils.asset_utils import get_all_system_categories
from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function
from omnigibson.utils.python_utils import classproperty, assert_valid_key, get_uuid, camel_case_to_snake_case, \
snake_case_to_camel_case, subclass_factory, SerializableNonInstance, UniquelyNamedNonInstance
from omnigibson.utils.registry_utils import SerializableRegistry
from omnigibson.utils.sampling_utils import sample_cuboid_on_object_full_grid_topdown
from omnigibson.utils.ui_utils import create_module_logger
import omnigibson.lazy as lazy
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Parameters used if scaling particles relative to its parent object's scale
m.BBOX_LOWER_LIMIT_FRACTION_OF_AABB = 0.06
m.BBOX_LOWER_LIMIT_MIN = 0.002
m.BBOX_LOWER_LIMIT_MAX = 0.02
m.BBOX_UPPER_LIMIT_FRACTION_OF_AABB = 0.1
m.BBOX_UPPER_LIMIT_MIN = 0.01
m.BBOX_UPPER_LIMIT_MAX = 0.1
_CALLBACKS_ON_SYSTEM_INIT = dict()
_CALLBACKS_ON_SYSTEM_CLEAR = dict()
# Modifiers denoting a semantic difference in the system
SYSTEM_PREFIXES = {"diced", "cooked", "melted"}
class BaseSystem(SerializableNonInstance, UniquelyNamedNonInstance):
"""
Base class for all systems. These are non-instance objects that should be used globally for a given environment.
This is useful for items in a scene that are non-discrete / cannot be distinguished into individual instances,
e.g.: water, particles, etc. While we keep the python convention of the system class name being camel case
(e.g. StrawberrySmoothie), we adopt the snake case for the system registry to unify with the category of BaseObject.
For example, get_system("strawberry_smoothie") will return the StrawberrySmoothie class.
"""
# Scaling factor to sample from when generating a new particle
min_scale = None # (x,y,z) scaling
max_scale = None # (x,y,z) scaling
# Whether this system has been initialized or not
initialized = False
# Internal variables used for bookkeeping
_uuid = None
_snake_case_name = None
def __init_subclass__(cls, **kwargs):
# While class names are camel case, we convert them to snake case to be consistent with object categories.
name = camel_case_to_snake_case(cls.__name__)
# Make sure modifier prefixes keep their double-underscore separator (e.g. diced__apple)
for prefix in SYSTEM_PREFIXES:
name = name.replace(f"{prefix}_", f"{prefix}__")
cls._snake_case_name = name
cls.min_scale = np.ones(3)
cls.max_scale = np.ones(3)
# Run super init
super().__init_subclass__(**kwargs)
# Register this system if requested
if cls._register_system:
global REGISTERED_SYSTEMS, UUID_TO_SYSTEMS
REGISTERED_SYSTEMS[cls._snake_case_name] = cls
cls._uuid = get_uuid(cls._snake_case_name)
UUID_TO_SYSTEMS[cls._uuid] = cls
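# A minimal sketch of the name mangling above (hypothetical values; assumes
# camel_case_to_snake_case splits on capital letters):
# camel_case_to_snake_case("StrawberrySmoothie") # -> "strawberry_smoothie"
# camel_case_to_snake_case("DicedApple") # -> "diced_apple", after which the
# # prefix replacement yields the registry key "diced__apple"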
@classproperty
def name(cls):
# Class name is the unique name assigned
return cls._snake_case_name
@classproperty
def uuid(cls):
return cls._uuid
@classproperty
def prim_path(cls):
"""
Returns:
str: Path to this system's prim in the scene stage
"""
return f"/World/{cls.name}"
@classproperty
def n_particles(cls):
"""
Returns:
int: Number of particles belonging to this system
"""
raise NotImplementedError()
@classproperty
def material(cls):
"""
Returns:
None or MaterialPrim: Material belonging to this system, if there is any
"""
return None
@classproperty
def _register_system(cls):
"""
Returns:
bool: True if this system should be registered (i.e.: it is not an intermediate class but a "final" subclass
representing a system we'd actually like to use, e.g.: water, dust, etc.). Should be set by the subclass
"""
# We assume we aren't registering by default
return False
@classproperty
def _store_local_poses(cls):
"""
Returns:
bool: Whether to store local particle poses or not when state is saved. Default is False
"""
return False
@classmethod
def initialize(cls):
"""
Initializes this system
"""
global _CALLBACKS_ON_SYSTEM_INIT
assert not cls.initialized, f"Already initialized system {cls.name}!"
og.sim.stage.DefinePrim(cls.prim_path, "Scope")
cls.initialized = True
# Add to registry
SYSTEM_REGISTRY.add(obj=cls)
# Avoid circular import
if og.sim.is_playing():
from omnigibson.transition_rules import TransitionRuleAPI
TransitionRuleAPI.refresh_all_rules()
# Run any callbacks
for callback in _CALLBACKS_ON_SYSTEM_INIT.values():
callback(cls)
@classmethod
def update(cls):
"""
Executes any necessary system updates, once per og.sim._non_physics_step
"""
# Default is no-op
pass
@classmethod
def remove_all_particles(cls):
"""
Removes all particles and deletes them from the simulator
"""
raise NotImplementedError()
@classmethod
def remove_particles(
cls,
idxs,
**kwargs,
):
"""
Removes pre-existing particles
Args:
idxs (np.array): (n_particles,) shaped array specifying IDs of particles to delete
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""
raise NotImplementedError()
@classmethod
def generate_particles(
cls,
positions,
orientations=None,
scales=None,
**kwargs,
):
"""
Generates new particles
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (cls.min_scale, cls.max_scale)
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""
raise NotImplementedError()
@classmethod
def clear(cls):
"""
Clears this system, so that it may possibly be re-initialized. Useful when, e.g., loading a new
scene during the same sim instance
"""
if cls.initialized:
cls._clear()
@classmethod
def _clear(cls):
global SYSTEM_REGISTRY, _CALLBACKS_ON_SYSTEM_CLEAR
# Run any callbacks
for callback in _CALLBACKS_ON_SYSTEM_CLEAR.values():
callback(cls)
cls.reset()
lazy.omni.isaac.core.utils.prims.delete_prim(cls.prim_path)
cls.initialized = False
# Remove from active registry
SYSTEM_REGISTRY.remove(obj=cls)
# Avoid circular import
if og.sim.is_playing():
from omnigibson.transition_rules import TransitionRuleAPI
TransitionRuleAPI.refresh_all_rules()
@classmethod
def reset(cls):
"""
Reset this system
"""
cls.remove_all_particles()
@classmethod
def create(cls, name, min_scale=None, max_scale=None, **kwargs):
"""
Helper function to programmatically generate systems
Args:
name (str): Name of the visual particles, in snake case.
min_scale (None or 3-array): If specified, sets the minimum bound for particles' relative scale.
Else, defaults to 1
max_scale (None or 3-array): If specified, sets the maximum bound for particles' relative scale.
Else, defaults to 1
**kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
the class attribute to modify and the values represent the functions / value to set
(Note: These values should have either @classproperty or @classmethod decorators!)
Returns:
BaseSystem: Generated system class given input arguments
"""
@classmethod
def cm_initialize(cls):
# Potentially override the min / max scales
if min_scale is not None:
cls.min_scale = np.array(min_scale)
if max_scale is not None:
cls.max_scale = np.array(max_scale)
# Run super (we have to use a bit of esoteric syntax in order to accommodate this procedural method for
# using super calls -- cf. https://stackoverflow.com/questions/22403897/what-does-it-mean-by-the-super-object-returned-is-unbound-in-python)
super(cls).__get__(cls).initialize()
kwargs["initialize"] = cm_initialize
# Create and return the class
return subclass_factory(name=snake_case_to_camel_case(name), base_classes=cls, **kwargs)
@classmethod
def get_active_systems(cls):
"""
Returns:
dict: Mapping from system name to system for all systems that are subclasses of this system AND active (initialized)
"""
return {system.name: system for system in SYSTEM_REGISTRY.objects if issubclass(system, cls)}
@classmethod
def sample_scales(cls, n):
"""
Samples scales uniformly based on @cls.min_scale and @cls.max_scale
Args:
n (int): Number of scales to sample
Returns:
(n, 3) array: Array of sampled scales
"""
return np.random.uniform(cls.min_scale, cls.max_scale, (n, 3))
@classmethod
def get_particles_position_orientation(cls):
"""
Computes all particles' positions and orientations that belong to this system in the world frame
Note: This is more optimized than doing a for loop with self.get_particle_position_orientation()
Returns:
2-tuple:
- (n, 3)-array: per-particle (x,y,z) position
- (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError()
@classmethod
def get_particle_position_orientation(cls, idx):
"""
Compute particle's position and orientation. This automatically takes into account the relative
pose w.r.t. its parent link and the global pose of that parent link.
Args:
idx (int): Index of the particle to compute position and orientation for. Note: this is
equivalent to grabbing the corresponding idx'th entry from @get_particles_position_orientation()
Returns:
2-tuple:
- 3-array: (x,y,z) position
- 4-array: (x,y,z,w) quaternion orientation
"""
raise NotImplementedError()
@classmethod
def set_particles_position_orientation(cls, positions=None, orientations=None):
"""
Sets all particles' positions and orientations that belong to this system in the world frame
Note: This is more optimized than doing a for loop with self.set_particle_position_orientation()
Args:
positions (n-array): (n, 3) per-particle (x,y,z) position
orientations (n-array): (n, 4) per-particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError()
@classmethod
def set_particle_position_orientation(cls, idx, position=None, orientation=None):
"""
Sets particle's position and orientation. This automatically takes into account the relative
pose w.r.t. its parent link and the global pose of that parent link.
Args:
idx (int): Index of the particle to set position and orientation for. Note: this is
equivalent to setting the corresponding idx'th entry from @set_particles_position_orientation()
position (3-array): particle (x,y,z) position
orientation (4-array): particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError()
@classmethod
def get_particles_local_pose(cls):
"""
Computes all particles' positions and orientations that belong to this system in the particles' parent frames
Returns:
2-tuple:
- (n, 3)-array: per-particle (x,y,z) position
- (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError()
@classmethod
def get_particle_local_pose(cls, idx):
"""
Compute particle's position and orientation in the particle's parent frame
Args:
idx (int): Index of the particle to compute position and orientation for. Note: this is
equivalent to grabbing the corresponding idx'th entry from @get_particles_local_pose()
Returns:
2-tuple:
- 3-array: (x,y,z) position
- 4-array: (x,y,z,w) quaternion orientation
"""
raise NotImplementedError()
@classmethod
def set_particles_local_pose(cls, positions=None, orientations=None):
"""
Sets all particles' positions and orientations that belong to this system in the particles' parent frames
Args:
positions (n-array): (n, 3) per-particle (x,y,z) position
orientations (n-array): (n, 4) per-particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError()
@classmethod
def set_particle_local_pose(cls, idx, position=None, orientation=None):
"""
Sets particle's position and orientation in the particle's parent frame
Args:
idx (int): Index of the particle to set position and orientation for. Note: this is
equivalent to setting the corresponding idx'th entry from @set_particles_local_pose()
position (3-array): particle (x,y,z) position
orientation (4-array): particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError()
def __init__(self):
raise ValueError("System classes should not be created!")
@classproperty
def state_size(cls):
# We have n_particles (1), min / max scale (3*2), each particle pose (7*n)
return 7 + 7 * cls.n_particles
@classmethod
def _dump_state(cls):
positions, orientations = cls.get_particles_local_pose() if \
cls._store_local_poses else cls.get_particles_position_orientation()
return dict(
n_particles=cls.n_particles,
min_scale=cls.min_scale,
max_scale=cls.max_scale,
positions=positions,
orientations=orientations,
)
@classmethod
def _load_state(cls, state):
# Sanity check loading particles
assert cls.n_particles == state["n_particles"], f"Inconsistent number of particles found when loading " \
f"particles state! Current number: {cls.n_particles}, " \
f"loaded number: {state['n_particles']}"
# Load scale
cls.min_scale = state["min_scale"]
cls.max_scale = state["max_scale"]
# Load the poses
setter = cls.set_particles_local_pose if cls._store_local_poses else cls.set_particles_position_orientation
setter(positions=state["positions"], orientations=state["orientations"])
@classmethod
def _serialize(cls, state):
# Array is n_particles, then min_scale and max_scale, then poses for all particles
return np.concatenate([
[state["n_particles"]],
state["min_scale"],
state["max_scale"],
state["positions"].flatten(),
state["orientations"].flatten(),
], dtype=float)
@classmethod
def _deserialize(cls, state):
# First index is number of particles, then min_scale and max_scale, then the individual particle poses
state_dict = dict()
n_particles = int(state[0])
len_positions = n_particles * 3
len_orientations = n_particles * 4
state_dict["n_particles"] = n_particles
state_dict["min_scale"] = state[1:4]
state_dict["max_scale"] = state[4:7]
state_dict["positions"] = state[7:7+len_positions].reshape(-1, 3)
state_dict["orientations"] = state[7+len_positions:7+len_positions+len_orientations].reshape(-1, 4)
return state_dict, 7 + len_positions + len_orientations
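# A minimal sketch of the flat layout handled by _serialize / _deserialize
# above: for n particles the array is
# [n, min_scale (3), max_scale (3), positions (3n), orientations (4n)],
# so e.g. n = 1 gives 1 + 3 + 3 + 3 + 4 = 14 floats, matching state_size = 7 + 7n.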
# Global dict that contains mappings of all the systems
REGISTERED_SYSTEMS = dict()
UUID_TO_SYSTEMS = dict()
# Serializable registry of systems that are active on the stage (initialized)
SYSTEM_REGISTRY = SerializableRegistry(
name="system_registry",
class_types=BaseSystem,
default_key="name",
unique_keys=["name", "prim_path", "uuid"],
)
class VisualParticleSystem(BaseSystem):
"""
Particle system class for generating particles not subject to physics, and are attached to individual objects
"""
# Maps group name to the particles associated with it
# This is an ordered dict of ordered dict (nested ordered dict maps particle names to particle instance)
_group_particles = None
# Maps group name to the parent object (the object with particles attached to it) of the group
_group_objects = None
# Maps group name to tuple (min_scale, max_scale) to apply to sampled particles for that group
_group_scales = None
@classmethod
def initialize(cls):
# Run super method first
super().initialize()
# Initialize mutable class variables so they don't automatically get overridden by children classes
cls._group_particles = dict()
cls._group_objects = dict()
cls._group_scales = dict()
@classproperty
def particle_object(cls):
"""
Returns:
XFormPrim: Particle object to be used as a template for duplication
"""
raise NotImplementedError()
@classproperty
def groups(cls):
"""
Returns:
set of str: Current attachment particle group names
"""
return set(cls._group_particles.keys())
@classproperty
def _store_local_poses(cls):
# Store local poses since particles are attached to moving bodies
return True
@classproperty
def scale_relative_to_parent(cls):
"""
Returns:
bool: Whether or not particles should be scaled relative to the group's parent object. NOTE: If True,
this will OVERRIDE cls.min_scale and cls.max_scale when sampling particles!
"""
return False
@classproperty
def state_size(cls):
# Get super size first
state_size = super().state_size
# Additionally, we have n_groups (1), with m_particles for each group (n), attached_obj_uuids (n), and
# particle ids, particle indices, and corresponding link info for each particle (m * 3)
return state_size + 1 + 2 * len(cls._group_particles) + \
sum(3 * cls.num_group_particles(group) for group in cls.groups)
@classmethod
def _clear(cls):
super()._clear()
# Clear all groups as well
cls._group_particles = dict()
cls._group_objects = dict()
cls._group_scales = dict()
@classmethod
def remove_all_group_particles(cls, group):
"""
Removes all particles assigned to attachment group @group, both from the simulator as well as internally
Args:
group (str): Name of the attachment group to remove all particles from
"""
# Make sure the group exists
cls._validate_group(group=group)
# Remove all particles from the group
for particle_name in tuple(cls._group_particles[group].keys()):
cls.remove_particle_by_name(name=particle_name)
@classmethod
def num_group_particles(cls, group):
"""
Gets the number of particles for the given group in the simulator
Args:
group (str): Name of the attachment group to count particles for
Returns:
int: Number of particles allocated to this group in the scene. Note that if @group does not
exist, this will return 0
"""
# Make sure the group exists
cls._validate_group(group=group)
return len(cls._group_particles[group])
@classmethod
def get_group_name(cls, obj):
"""
Grabs the corresponding group name for object @obj
Args:
obj (BaseObject): Object for which its procedurally generated particle attachment name should be grabbed
Returns:
str: Name of the attachment group to use when executing commands from this class on
that specific attachment group
"""
return obj.name
@classmethod
def create_attachment_group(cls, obj):
"""
Creates an attachment group internally for object @obj. Note that this does NOT automatically generate particles
for this object (should call generate_group_particles(...) ).
Args:
obj (BaseObject): Object for which a new particle attachment group will be created for
Returns:
str: Name of the attachment group to use when executing commands from this class on
that specific attachment group
"""
group = cls.get_group_name(obj=obj)
# This should only happen once for a single attachment group, so we explicitly check to make sure the group
# doesn't already exist
assert group not in cls.groups, \
f"Cannot create new attachment group because group with name {group} already exists!"
# Create the group
cls._group_particles[group] = dict()
cls._group_objects[group] = obj
# Compute the group scale if we're scaling relative to parent
if cls.scale_relative_to_parent:
cls._group_scales[group] = cls._compute_relative_group_scales(group=group)
return group
@classmethod
def remove_attachment_group(cls, group):
"""
Removes attachment group @group internally. Note that this will automatically remove any particles
currently assigned to that group
Args:
group (str): Name of the attachment group to remove
Returns:
str: Name of the attachment group that was removed
"""
# Make sure the group exists
cls._validate_group(group=group)
# Remove all particles from the group
cls.remove_all_group_particles(group=group)
# Remove the actual groups
cls._group_particles.pop(group)
cls._group_objects.pop(group)
if cls.scale_relative_to_parent:
cls._group_scales.pop(group)
return group
@classmethod
def _compute_relative_group_scales(cls, group):
"""
Computes relative particle scaling for group @group required when @cls.scale_relative_to_parent is True
Args:
group (str): Specific group for which to compute the relative particle scaling
Returns:
2-tuple:
- 3-array: min scaling factor
- 3-array: max scaling factor
"""
# First set the bbox ranges -- depends on the object's bounding box
obj = cls._group_objects[group]
median_aabb_dim = np.median(obj.aabb_extent)
# Compute lower and upper limits to bbox
bbox_lower_limit_from_aabb = m.BBOX_LOWER_LIMIT_FRACTION_OF_AABB * median_aabb_dim
bbox_lower_limit = np.clip(
bbox_lower_limit_from_aabb,
m.BBOX_LOWER_LIMIT_MIN,
m.BBOX_LOWER_LIMIT_MAX,
)
bbox_upper_limit_from_aabb = m.BBOX_UPPER_LIMIT_FRACTION_OF_AABB * median_aabb_dim
bbox_upper_limit = np.clip(
bbox_upper_limit_from_aabb,
m.BBOX_UPPER_LIMIT_MIN,
m.BBOX_UPPER_LIMIT_MAX,
)
# Convert these into scaling factors for the x and y axes for our particle object
particle_bbox = cls.particle_object.aabb_extent
minimum = np.array([bbox_lower_limit / particle_bbox[0], bbox_lower_limit / particle_bbox[1], 1.0])
maximum = np.array([bbox_upper_limit / particle_bbox[0], bbox_upper_limit / particle_bbox[1], 1.0])
return minimum, maximum
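# Worked example with hypothetical numbers: for a parent object whose
# median_aabb_dim is 0.3, the limits above evaluate to
# bbox_lower_limit = clip(0.06 * 0.3, 0.002, 0.02) = 0.018
# bbox_upper_limit = clip(0.1 * 0.3, 0.01, 0.1) = 0.03
# so a particle template with a 0.01 x 0.01 (x, y) bbox would be scaled between
# (1.8, 1.8, 1.0) and (3.0, 3.0, 1.0).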
@classmethod
def sample_scales_by_group(cls, group, n):
"""
Samples @n particle scales for group @group.
Args:
group (str): Specific group for which to sample scales
n (int): Number of scales to sample
Returns:
(n, 3) array: Array of sampled scales
"""
# Make sure the group exists
cls._validate_group(group=group)
# Sample based on whether we're scaling relative to parent or not
scales = np.random.uniform(*cls._group_scales[group], (n, 3)) if cls.scale_relative_to_parent else cls.sample_scales(n=n)
# Since the particles will be placed under the object, it will be affected/stretched by obj.scale. In order to
# preserve the absolute size of the particles, we need to scale the particle by obj.scale in some way. However,
# since the particles have a relative rotation w.r.t. the object, the scales between the two don't align. As a
# heuristic, we divide by avg_scale, the cube root of the product of the object's scales along the 3 axes.
obj = cls._group_objects[group]
avg_scale = np.cbrt(np.prod(obj.scale))
return scales / avg_scale
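# Worked example with a hypothetical parent scale: for obj.scale = (2, 2, 2),
# avg_scale = cbrt(2 * 2 * 2) = 2, so sampled scales are halved to roughly
# cancel the parent's stretching of its child prims.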
@classmethod
def generate_particles(
cls,
positions,
orientations=None,
scales=None,
**kwargs,
):
# Should not be called, since particles must be tied to a group!
raise ValueError("Cannot call generate_particles for a VisualParticleSystem! "
"Call generate_group_particles() instead.")
@classmethod
def generate_group_particles(
cls,
group,
positions,
orientations=None,
scales=None,
link_prim_paths=None,
):
"""
Generates new particle objects within group @group at the specified pose (@positions, @orientations) with
corresponding scales @scales.
NOTE: Assumes positions are the exact contact point on @group object's surface. If cls._CLIP_INTO_OBJECTS
is not True, then the positions will be offset away from the object by half of its bbox
Args:
group (str): Name of the attachment group to generate particles within
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scaling in its
local frame. If not specified, all will be randomly sampled based on @cls.min_scale and @cls.max_scale
link_prim_paths (None or list of str): Determines which link each generated particle will
be attached to. If not specified, all will be attached to the group object's prim, NOT a link
"""
raise NotImplementedError
@classmethod
def generate_group_particles_on_object(cls, group, max_samples=None, min_samples_for_success=1):
"""
Generates up to @max_samples new particle objects and samples their locations on the surface of the group's
parent object. Note that if any particles are in the group already, they will be removed
Args:
group (str): Name of the attachment group whose parent object's surface will be sampled
max_samples (None or int): If specified, maximum number of particles to sample
min_samples_for_success (int): Minimum number of particles required to be sampled successfully in order
for this generation process to be considered successful
Returns:
bool: True if enough particles were generated successfully (number of successfully sampled points >=
min_samples_for_success), otherwise False
"""
raise NotImplementedError
@classmethod
def get_group_particles_position_orientation(cls, group):
"""
Computes all particles' positions and orientations that belong to @group
Note: This is more optimized than doing a for loop with self.get_particle_position_orientation()
Args:
group (str): Group name whose particle positions and orientations should be computed
Returns:
2-tuple:
- (n, 3)-array: per-particle (x,y,z) position
- (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError
@classmethod
def set_group_particles_position_orientation(cls, group, positions=None, orientations=None):
"""
Sets all particles' positions and orientations that belong to @group
Note: This is more optimized than doing a for loop with self.set_particle_position_orientation()
Args:
group (str): Group name whose particle positions and orientations should be set
positions (n-array): (n, 3) per-particle (x,y,z) position
orientations (n-array): (n, 4) per-particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError
@classmethod
def get_group_particles_local_pose(cls, group):
"""
Computes all particles' positions and orientations that belong to @group in the particles' parent frame
Args:
group (str): Group name whose particle positions and orientations should be computed
Returns:
2-tuple:
- (n, 3)-array: per-particle (x,y,z) position
- (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError
@classmethod
def set_group_particles_local_pose(cls, group, positions=None, orientations=None):
"""
Sets all particles' positions and orientations that belong to @group in the particles' parent frame
Args:
group (str): Group name whose particle positions and orientations should be set
positions (n-array): (n, 3) per-particle (x,y,z) position
orientations (n-array): (n, 4) per-particle (x,y,z,w) quaternion orientation
"""
raise NotImplementedError
@classmethod
def _validate_group(cls, group):
"""
Checks if particle attachment group @group exists. (If not, can create the group via create_attachment_group).
This will raise a ValueError if it doesn't exist.
Args:
group (str): Name of the group to check for
"""
if group not in cls.groups:
raise ValueError(f"Particle attachment group {group} does not exist!")
class PhysicalParticleSystem(BaseSystem):
"""
System whose generated particles are subject to physics
"""
@classmethod
def initialize(cls):
# Run super first
super().initialize()
# Make sure min and max scale are identical
assert np.all(cls.min_scale == cls.max_scale), \
"Min and max scale should be identical for PhysicalParticleSystem!"
@classproperty
def particle_density(cls):
"""
Returns:
float: The per-particle density, in kg / m^3
"""
raise NotImplementedError()
@classproperty
def particle_radius(cls):
"""
Returns:
float: Radius for the particles to be generated, for the purpose of sampling
"""
raise NotImplementedError()
@classproperty
def particle_contact_radius(cls):
"""
Returns:
float: Contact radius for the particles to be generated, for the purpose of estimating contacts
"""
raise NotImplementedError()
@classproperty
def particle_particle_rest_distance(cls):
"""
Returns:
float: The minimum distance between individual particles at rest
"""
return cls.particle_radius * 2.0
@classmethod
def check_in_contact(cls, positions):
"""
Checks whether each particle specified by @positions is in contact with any rigid body.
NOTE: This is a rough proxy for contact, given @positions. Should not be taken as ground truth.
This is because for efficiency and underlying physics reasons, it's easier to treat particles as spheres
for fast checking. For particles directly spawned from Omniverse's underlying ParticleSystem API, it is a
rough proxy semantically, though it is accurate in sim-physics since all spawned particles interact as spheres.
For particles spawned manually as rigid bodies, it is a rough proxy both semantically and physically, as the
object physically interacts with its non-uniform geometry.
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
Returns:
n-array: (n_particles,) boolean array, True if in contact, otherwise False
"""
in_contact = np.zeros(len(positions), dtype=bool)
for idx, pos in enumerate(positions):
# TODO: Maybe multiply particle contact radius * 2?
in_contact[idx] = og.sim.psqi.overlap_sphere_any(cls.particle_contact_radius, pos)
return in_contact
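# A minimal usage sketch (hypothetical system and positions; assumes an active
# PhysicalParticleSystem subclass):
# system = get_system("water")
# candidates = np.array([[0.0, 0.0, 0.05], [0.0, 0.0, 2.0]])
# free = candidates[~system.check_in_contact(candidates)] # keep contact-free points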
@classmethod
def generate_particles_from_link(
cls,
obj,
link,
use_visual_meshes=True,
mesh_name_prefixes=None,
check_contact=True,
sampling_distance=None,
max_samples=None,
**kwargs,
):
"""
Generates new particles, sampled within the volume enclosed by @link's meshes on @obj, and registers them
internally. This will also optionally check for collision with other rigid objects before spawning in
individual particles
Args:
obj (EntityPrim): Object whose @link's visual meshes will be converted into sampled particles
link (RigidPrim): @obj's link whose visual meshes will be converted into sampled particles
use_visual_meshes (bool): Whether to use visual meshes of the link to generate particles
mesh_name_prefixes (None or str): If specified, specifies the substring that must exist in @link's
mesh names in order for that mesh to be included in the particle generator function.
If None, no filtering will be used.
check_contact (bool): If True, will only spawn in particles that do not collide with other rigid bodies
sampling_distance (None or float): If specified, sets the distance between sampled particles. If None,
a simulator autocomputed value will be used
max_samples (None or int): If specified, maximum number of particles to sample
**kwargs (dict): Any additional keyword-mapped arguments required by subclass implementation
"""
# Run sanity checks
assert cls.initialized, "Must initialize system before generating particle instancers!"
# Generate a checker function to see if particles are within the link's volumes
check_in_volume, _ = generate_points_in_volume_checker_function(
obj=obj,
volume_link=link,
use_visual_meshes=use_visual_meshes,
mesh_name_prefixes=mesh_name_prefixes,
)
# Grab the link's AABB (or fallback to obj AABB if link does not have a valid AABB),
# and generate a grid of points based on the sampling distance
try:
low, high = link.visual_aabb
extent = link.visual_aabb_extent
except ValueError:
low, high = obj.aabb
extent = obj.aabb_extent
# We sample each axis range shrunk inward by the particle radius on both ends
sampling_distance = 2 * cls.particle_radius if sampling_distance is None else sampling_distance
n_particles_per_axis = (extent / sampling_distance).astype(int)
assert np.all(n_particles_per_axis), f"link {link.name} is too small to sample any particle of radius {cls.particle_radius}."
# 1e-10 is added because the extent might be an exact multiple of particle radius
arrs = [np.arange(l + cls.particle_radius, h - cls.particle_radius + 1e-10, cls.particle_particle_rest_distance)
for l, h, n in zip(low, high, n_particles_per_axis)]
# Generate 3D-rectangular grid of points
particle_positions = np.stack([arr.flatten() for arr in np.meshgrid(*arrs)]).T
# Check which points are inside the volume and only keep those
particle_positions = particle_positions[np.where(check_in_volume(particle_positions))[0]]
# Also prune any that in contact with anything if requested
if check_contact:
particle_positions = particle_positions[np.where(cls.check_in_contact(particle_positions) == 0)[0]]
# Also potentially sub-sample if we're past our limit
if max_samples is not None and len(particle_positions) > max_samples:
particle_positions = particle_positions[
np.random.choice(len(particle_positions), size=(int(max_samples),), replace=False)]
return cls.generate_particles(
positions=particle_positions,
**kwargs,
)
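# Worked example of the grid construction above (hypothetical numbers): with
# particle_radius = 0.005 and a link extent of 0.1 along one axis, the rest
# distance is 0.01 and np.arange(l + 0.005, h - 0.005 + 1e-10, 0.01) yields 10
# candidate coordinates on that axis; the full 3D meshgrid is then pruned by
# the volume checker and, if @check_contact is True, the contact checker.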
@classmethod
def generate_particles_on_object(
cls,
obj,
sampling_distance=None,
max_samples=None,
min_samples_for_success=1,
**kwargs,
):
"""
Generates new particle objects and samples their locations on the top surface of object @obj
Args:
obj (BaseObject): Object on which to generate a particle instancer with sampled particles on the object's
top surface
sampling_distance (None or float): If specified, sets the distance between sampled particles. If None,
a simulator autocomputed value will be used
max_samples (None or int): If specified, maximum number of particles to sample
min_samples_for_success (int): Minimum number of particles required to be sampled successfully in order
for this generation process to be considered successful
**kwargs (dict): Any additional keyword-mapped arguments required by subclass implementation
Returns:
bool: True if enough particles were generated successfully (number of successfully sampled points >=
min_samples_for_success), otherwise False
"""
assert max_samples is None or max_samples >= min_samples_for_success, \
"number of particles to sample should exceed the min for success"
# We densely sample a grid of points by ray-casting from top to bottom to find the valid positions
radius = cls.particle_radius
results = sample_cuboid_on_object_full_grid_topdown(
obj,
# the grid is fully dense - particles are sitting next to each other
ray_spacing=radius * 2 if sampling_distance is None else sampling_distance,
# assume the particles are extremely small - sample cuboids of size 0 for better performance
cuboid_dimensions=np.zeros(3),
# raycast start inside the aabb in x-y plane and outside the aabb in the z-axis
aabb_offset=np.array([-radius, -radius, radius]),
# bottom padding should be the same as the particle radius
cuboid_bottom_padding=radius,
# undo_cuboid_bottom_padding should be False - the sampled positions are above the surface by its radius
undo_cuboid_bottom_padding=False,
)
particle_positions = np.array([result[0] for result in results if result[0] is not None])
# Also potentially sub-sample if we're past our limit
if max_samples is not None and len(particle_positions) > max_samples:
particle_positions = particle_positions[
np.random.choice(len(particle_positions), size=(max_samples,), replace=False)]
n_particles = len(particle_positions)
success = n_particles >= min_samples_for_success
# If we generated a sufficient number of points, generate them in the simulator
if success:
cls.generate_particles(
positions=particle_positions,
**kwargs,
)
return success
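# A minimal usage sketch (hypothetical system and object handle):
# salt = get_system("salt") # assumed granular system in the dataset
# if not salt.generate_particles_on_object(obj=table, max_samples=200):
# log.warning("Could not sample enough particles on the object's surface")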
def _create_system_from_metadata(system_name):
"""
Internal helper function to programmatically create a system from dataset metadata
NOTE: This only creates the system, and does NOT initialize the system
Args:
system_name (str): Name of the system to create, e.g.: "water", "stain", etc.
Returns:
BaseSystem: Created system class
"""
# Avoid circular imports
from omnigibson import systems
# Search for the appropriate system, if not found, fallback
# TODO: Once dataset is fully constructed, DON'T fallback, and assert False instead
all_systems = set(get_all_system_categories())
if system_name not in all_systems:
# Use default config -- assume @system_name is a fluid that uses the same params as water
return systems.FluidSystem.create(
name=system_name.replace("-", "_"),
particle_contact_offset=0.012,
particle_density=500.0,
is_viscous=False,
material_mtl_name="DeepWater",
)
else:
"""
This is not defined yet, but one proposal:
Metadata = .json dict, with format:
{
"type": one of {"visual", "fluid", "granular"},
}
if visual, include:
"relative_particle_scaling" : ...,
if visual or granular, also includes:
--> note: create_particle_template should be deterministic, configured via:
lambda prim_path, name: og.objects.DatasetObject(
prim_path=prim_path,
name=name,
usd_path=os.path.join(gm.DATASET_PATH, "systems", system_name, f"{system_name}.usd"),
category=system_name,
visible=False,
fixed_base=False,
visual_only=True,
include_default_states=False,
abilities={},
)
if fluid / granular, also include:
"particle_contact_offset": ...,
"particle_density": ...,
if fluid, also include:
"is_viscous": bool
"material_mtl_name": ..., # Base material config to use
"customize_particle_kwargs": { # Maps property/ies from @MaterialPrim to value to set
"opacity_constant": ...,
"albedo_add": ...,
"diffuse_color_constant": ...,
...,
}
--> This will be programmatically constructed into a function:
def _customize_particle_material(mat: MaterialPrim): --> None
for attr, val in metadata["customize_particle_kwargs"].items():
mat.__setattr__(attr, val)
Then, compile the necessary kwargs and generate the requested system
"""
# Parse information
system_dir = os.path.join(gm.DATASET_PATH, "systems", system_name)
with open(os.path.join(system_dir, "metadata.json"), "r") as f:
metadata = json.load(f)
system_type = metadata["type"]
system_kwargs = dict(name=system_name)
particle_assets = set(os.listdir(system_dir))
particle_assets.remove("metadata.json")
has_asset = len(particle_assets) > 0
if has_asset:
model = sorted(particle_assets)[0]
asset_path = os.path.join(system_dir, model, "usd", f"{model}.usd")
else:
asset_path = None
if not has_asset:
if system_type == "macro_visual_particle":
# Fallback to stain asset
asset_path = os.path.join(gm.DATASET_PATH, "systems", "stain", "ahkjul", "usd", "ahkjul.usd")
has_asset = True
if has_asset:
def generate_particle_template_fcn():
return lambda prim_path, name: \
og.objects.USDObject(
prim_path=prim_path,
name=name,
usd_path=asset_path,
encrypted=True,
category=system_name,
visible=False,
fixed_base=True,
visual_only=True,
kinematic_only=True,
include_default_states=False,
abilities={},
)
else:
def generate_particle_template_fcn():
return lambda prim_path, name: \
og.objects.PrimitiveObject(
prim_path=prim_path,
name=name,
primitive_type="Sphere",
category=system_name,
radius=0.015,
visible=False,
fixed_base=True,
visual_only=True,
kinematic_only=True,
include_default_states=False,
abilities={},
)
def generate_customize_particle_material_fcn(mat_kwargs):
def customize_mat(mat):
for attr, val in mat_kwargs.items():
setattr(mat, attr, np.array(val) if isinstance(val, list) else val)
return customize_mat
if system_type == "macro_visual_particle":
system_kwargs["create_particle_template"] = generate_particle_template_fcn()
system_kwargs["scale_relative_to_parent"] = metadata["relative_particle_scaling"]
elif system_type == "granular" or system_type == "macro_physical_particle":
system_kwargs["create_particle_template"] = generate_particle_template_fcn()
system_kwargs["particle_density"] = metadata["particle_density"]
elif system_type == "fluid":
system_kwargs["particle_contact_offset"] = metadata["particle_contact_offset"]
system_kwargs["particle_density"] = metadata["particle_density"]
system_kwargs["is_viscous"] = metadata["is_viscous"]
system_kwargs["material_mtl_name"] = metadata["material_mtl_name"]
system_kwargs["customize_particle_material"] = \
generate_customize_particle_material_fcn(mat_kwargs=metadata["customize_material_kwargs"])
else:
raise ValueError(f"{system_name} system's type {system_type} is invalid! Must be one of "
f"{{ 'macro_visual_particle', 'macro_physical_particle', 'granular', or 'fluid' }}")
# Generate the requested system
system_cls = "".join([st.capitalize() for st in system_type.split("_")])
return getattr(systems, f"{system_cls}System").create(**system_kwargs)
def import_og_systems():
system_dir = os.path.join(gm.DATASET_PATH, "systems")
if os.path.exists(system_dir):
system_names = os.listdir(system_dir)
for system_name in system_names:
if system_name not in REGISTERED_SYSTEMS:
_create_system_from_metadata(system_name=system_name)
def is_system_active(system_name):
if system_name not in REGISTERED_SYSTEMS:
return False
# assert system_name in REGISTERED_SYSTEMS, f"System {system_name} not in REGISTERED_SYSTEMS."
system = REGISTERED_SYSTEMS[system_name]
return system.initialized
def is_visual_particle_system(system_name):
assert system_name in REGISTERED_SYSTEMS, f"System {system_name} not in REGISTERED_SYSTEMS."
system = REGISTERED_SYSTEMS[system_name]
return issubclass(system, VisualParticleSystem)
def is_physical_particle_system(system_name):
assert system_name in REGISTERED_SYSTEMS, f"System {system_name} not in REGISTERED_SYSTEMS."
system = REGISTERED_SYSTEMS[system_name]
return issubclass(system, PhysicalParticleSystem)
def is_fluid_system(system_name):
assert system_name in REGISTERED_SYSTEMS, f"System {system_name} not in REGISTERED_SYSTEMS."
system = REGISTERED_SYSTEMS[system_name]
# Avoid circular imports
from omnigibson.systems.micro_particle_system import FluidSystem
return issubclass(system, FluidSystem)
def get_system(system_name, force_active=True):
# Make sure scene exists
assert og.sim.scene is not None, "Cannot get systems until scene is imported!"
# If system_name is not in REGISTERED_SYSTEMS, create from metadata
system = REGISTERED_SYSTEMS[system_name] if system_name in REGISTERED_SYSTEMS \
else _create_system_from_metadata(system_name=system_name)
if not system.initialized and force_active:
system.initialize()
return system
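# A minimal usage sketch (assumes the dataset registers a "water" fluid system):
# water = get_system("water") # initializes the system if not already active
# assert is_system_active("water") and is_fluid_system("water")
# stain = get_system("stain", force_active=False) # fetch without initializing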
def clear_all_systems():
global _CALLBACKS_ON_SYSTEM_INIT, _CALLBACKS_ON_SYSTEM_CLEAR
_CALLBACKS_ON_SYSTEM_INIT = dict()
_CALLBACKS_ON_SYSTEM_CLEAR = dict()
for system in SYSTEM_REGISTRY.objects:
system.clear()
def add_callback_on_system_init(name, callback):
global _CALLBACKS_ON_SYSTEM_INIT
_CALLBACKS_ON_SYSTEM_INIT[name] = callback
def add_callback_on_system_clear(name, callback):
global _CALLBACKS_ON_SYSTEM_CLEAR
_CALLBACKS_ON_SYSTEM_CLEAR[name] = callback
def remove_callback_on_system_init(name):
global _CALLBACKS_ON_SYSTEM_INIT
_CALLBACKS_ON_SYSTEM_INIT.pop(name)
def remove_callback_on_system_clear(name):
global _CALLBACKS_ON_SYSTEM_CLEAR
_CALLBACKS_ON_SYSTEM_CLEAR.pop(name)
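# A minimal sketch of the callback hooks above (hypothetical callback name):
# def _announce(system):
# log.info(f"System initialized: {system.name}")
# add_callback_on_system_init("announce", _announce)
# remove_callback_on_system_init("announce") # unregister when no longer needed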
| 51,397 | Python | 39.092044 | 151 | 0.623694 |
StanfordVL/OmniGibson/omnigibson/systems/__init__.py | from omnigibson.systems.system_base import get_system, is_system_active, is_visual_particle_system, \
is_physical_particle_system, SYSTEM_REGISTRY, add_callback_on_system_init, add_callback_on_system_clear, \
remove_callback_on_system_init, remove_callback_on_system_clear, import_og_systems
from omnigibson.systems.micro_particle_system import *
from omnigibson.systems.macro_particle_system import *
# Import all OG systems from dataset
import_og_systems()
| 468 | Python | 51.111105 | 110 | 0.792735 |
StanfordVL/OmniGibson/omnigibson/systems/micro_particle_system.py | import uuid
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm, create_module_macros
from omnigibson.prims.prim_base import BasePrim
from omnigibson.prims.geom_prim import VisualGeomPrim
from omnigibson.prims.material_prim import MaterialPrim
from omnigibson.systems.system_base import BaseSystem, PhysicalParticleSystem, REGISTERED_SYSTEMS
from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function
from omnigibson.utils.python_utils import classproperty, assert_valid_key, subclass_factory, snake_case_to_camel_case
from omnigibson.utils.sampling_utils import sample_cuboid_on_object_full_grid_topdown
from omnigibson.utils.usd_utils import mesh_prim_to_trimesh_mesh, PoseAPI
from omnigibson.utils.physx_utils import create_physx_particle_system, create_physx_particleset_pointinstancer
from omnigibson.utils.ui_utils import disclaimer, create_module_logger
from pathlib import Path
import os
import tempfile
import datetime
import trimesh
import pymeshlab
import numpy as np
from collections import defaultdict
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# TODO: Tune these default values!
# TODO (eric): figure out whether one offset can fit all
m.MAX_CLOTH_PARTICLES = 20000 # Comes from a limitation in physx - do not increase
m.CLOTH_PARTICLE_CONTACT_OFFSET = 0.0075
m.CLOTH_REMESHING_ERROR_THRESHOLD = 0.05
m.CLOTH_STRETCH_STIFFNESS = 10000.0
m.CLOTH_BEND_STIFFNESS = 200.0
m.CLOTH_SHEAR_STIFFNESS = 100.0
m.CLOTH_DAMPING = 0.2
m.CLOTH_FRICTION = 0.4
m.CLOTH_DRAG = 0.001
m.CLOTH_LIFT = 0.003
m.MIN_PARTICLE_CONTACT_OFFSET = 0.005 # Minimum particle contact offset for physical micro particles
m.FLUID_PARTICLE_PARTICLE_DISTANCE_SCALE = 0.8 # How much overlap expected between fluid particles at rest
m.MICRO_PARTICLE_SYSTEM_MAX_VELOCITY = None # If set, the maximum particle velocity for micro particle systems
def set_carb_settings_for_fluid_isosurface():
"""
Sets relevant rendering settings in the carb settings in order to use isosurface effectively
"""
# Settings for Isosurface
isregistry = lazy.carb.settings.acquire_settings_interface()
# disable grid and lights
dOptions = isregistry.get_as_int("persistent/app/viewport/displayOptions")
dOptions &= ~(1 << 6 | 1 << 8)
isregistry.set_int("persistent/app/viewport/displayOptions", dOptions)
isregistry.set_bool(lazy.omni.physx.bindings._physx.SETTING_UPDATE_TO_USD, True)
isregistry.set_int(lazy.omni.physx.bindings._physx.SETTING_NUM_THREADS, 8)
isregistry.set_bool(lazy.omni.physx.bindings._physx.SETTING_UPDATE_VELOCITIES_TO_USD, True)
isregistry.set_bool(lazy.omni.physx.bindings._physx.SETTING_UPDATE_PARTICLES_TO_USD, True) # TODO: Why does setting this value --> True result in no isosurface being rendered?
isregistry.set_int("persistent/simulation/minFrameRate", 60)
isregistry.set_bool("rtx-defaults/pathtracing/lightcache/cached/enabled", False)
isregistry.set_bool("rtx-defaults/pathtracing/cached/enabled", False)
isregistry.set_int("rtx-defaults/pathtracing/fireflyFilter/maxIntensityPerSample", 10000)
isregistry.set_int("rtx-defaults/pathtracing/fireflyFilter/maxIntensityPerSampleDiffuse", 50000)
isregistry.set_float("rtx-defaults/pathtracing/optixDenoiser/blendFactor", 0.09)
isregistry.set_int("rtx-defaults/pathtracing/aa/op", 2)
isregistry.set_int("rtx-defaults/pathtracing/maxBounces", 32)
isregistry.set_int("rtx-defaults/pathtracing/maxSpecularAndTransmissionBounces", 16)
isregistry.set_int("rtx-defaults/post/dlss/execMode", 1)
isregistry.set_int("rtx-defaults/translucency/maxRefractionBounces", 12)
class PhysxParticleInstancer(BasePrim):
"""
Simple class that wraps the raw omniverse point instancer prim and provides convenience functions for
particle access
"""
def __init__(self, prim_path, name, idn):
"""
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
idn (int): Unique identification number to assign to this particle instancer. This is used to
deterministically reproduce individual particle instancer states dynamically, even if we
delete / add additional ones at runtime during simulation.
"""
# Store inputs
self._idn = idn
# Run super method directly
super().__init__(prim_path=prim_path, name=name)
self._parent_prim = BasePrim(prim_path=self.prim.GetParent().GetPath().pathString, name=f"{name}_parent")
def _load(self):
# We raise an error, this should NOT be created from scratch
raise NotImplementedError("PhysxPointInstancer should NOT be loaded via this class! Should be created before.")
def remove(self):
super().remove()
self._parent_prim.remove()
def add_particles(
self,
positions,
velocities=None,
orientations=None,
scales=None,
prototype_indices=None,
):
"""
Adds particles to this particle instancer.
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be scale [1, 1, 1] by default
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will use all 0s (i.e.: the first prototype created)
"""
n_new_particles = len(positions)
velocities = np.zeros((n_new_particles, 3)) if velocities is None else velocities
if orientations is None:
orientations = np.zeros((n_new_particles, 4))
orientations[:, -1] = 1.0
scales = np.ones((n_new_particles, 3)) if scales is None else scales
prototype_indices = np.zeros(n_new_particles, dtype=int) if prototype_indices is None else prototype_indices
self.particle_positions = np.vstack([self.particle_positions, positions])
self.particle_velocities = np.vstack([self.particle_velocities, velocities])
self.particle_orientations = np.vstack([self.particle_orientations, orientations])
self.particle_scales = np.vstack([self.particle_scales, scales])
self.particle_prototype_ids = np.hstack([self.particle_prototype_ids, prototype_indices])
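# A minimal usage sketch (hypothetical instancer handle): append five particles
# at the origin, relying on the defaults above (zero velocity, identity
# orientation, unit scale, prototype 0):
# instancer.add_particles(positions=np.zeros((5, 3)))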
def remove_particles(self, idxs):
"""
Remove particles from this instancer, specified by their indices @idxs in the data array
Args:
idxs (list or np.array of int): IDs corresponding to the indices of specific particles to remove from this
instancer
"""
if len(idxs) > 0:
# Remove all requested indices and write to all the internal data arrays
self.particle_positions = np.delete(self.particle_positions, idxs, axis=0)
self.particle_velocities = np.delete(self.particle_velocities, idxs, axis=0)
self.particle_orientations = np.delete(self.particle_orientations, idxs, axis=0)
self.particle_scales = np.delete(self.particle_scales, idxs, axis=0)
self.particle_prototype_ids = np.delete(self.particle_prototype_ids, idxs, axis=0)
def remove_all_particles(self):
self.remove_particles(idxs=np.arange(self.n_particles))
@property
def n_particles(self):
"""
Returns:
int: Number of particles owned by this instancer
"""
return len(self.particle_positions)
@property
def idn(self):
"""
Returns:
int: Identification number of this particle instancer
"""
return self._idn
@property
def particle_group(self):
"""
Returns:
int: Particle group this instancer belongs to
"""
return self.get_attribute(attr="physxParticle:particleGroup")
@particle_group.setter
def particle_group(self, group):
"""
Args:
group (int): Particle group this instancer belongs to
"""
self.set_attribute(attr="physxParticle:particleGroup", val=group)
@property
def particle_positions(self):
"""
Returns:
np.array: (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
return np.array(self.get_attribute(attr="positions"))
@particle_positions.setter
def particle_positions(self, pos):
"""
Set the particle positions for this instancer
Args:
pos (np.array): (N, 3) numpy array, where each of the N particles' desired positions are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
self.set_attribute(attr="positions", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(pos.astype(float)))
@property
def particle_orientations(self):
"""
Returns:
np.array: (N, 4) numpy array, where each of the N particles' orientations are expressed in (x,y,z,w)
quaternion coordinates relative to this instancer's parent prim
"""
return np.array(self.get_attribute(attr="orientations"))
@particle_orientations.setter
def particle_orientations(self, quat):
"""
Set the particle orientations for this instancer
Args:
quat (np.array): (N, 4) numpy array, where each of the N particles' desired orientations are expressed in (x,y,z,w)
quaternion coordinates relative to this instancer's parent prim
"""
assert quat.shape[0] == self.n_particles, \
f"Got mismatch in particle setting size: {quat.shape[0]}, vs. number of particles {self.n_particles}!"
# If the number of particles is nonzero, swap w position, since Quath takes (w,x,y,z)
quat = quat.astype(float)
if self.n_particles > 0:
quat = quat[:, [3, 0, 1, 2]]
self.set_attribute(attr="orientations", val=lazy.pxr.Vt.QuathArray.FromNumpy(quat))
@property
def particle_velocities(self):
"""
Returns:
np.array: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
return np.array(self.get_attribute(attr="velocities"))
@particle_velocities.setter
def particle_velocities(self, vel):
"""
Set the particle velocities for this instancer
Args:
vel (np.array): (N, 3) numpy array, where each of the N particles' desired velocities are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
assert vel.shape[0] == self.n_particles, \
f"Got mismatch in particle setting size: {vel.shape[0]}, vs. number of particles {self.n_particles}!"
self.set_attribute(attr="velocities", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(vel.astype(float)))
@property
def particle_scales(self):
"""
Returns:
np.array: (N, 3) numpy array, where each of the N particles' scales are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
return np.array(self.get_attribute(attr="scales"))
@particle_scales.setter
def particle_scales(self, scales):
"""
Set the particle scales for this instancer
Args:
scales (np.array): (N, 3) numpy array, where each of the N particles' desired scales are expressed in (x,y,z)
cartesian coordinates relative to this instancer's parent prim
"""
assert scales.shape[0] == self.n_particles, \
f"Got mismatch in particle setting size: {scales.shape[0]}, vs. number of particles {self.n_particles}!"
self.set_attribute(attr="scales", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(scales.astype(float)))
@property
def particle_prototype_ids(self):
"""
Returns:
np.array: (N,) numpy array, where each of the N particles' prototype_id (i.e.: which prototype is being used
for that particle)
"""
return np.array(self.get_attribute(attr="protoIndices"))
@particle_prototype_ids.setter
def particle_prototype_ids(self, prototype_ids):
"""
Set the particle prototype_ids for this instancer
Args:
prototype_ids (np.array): (N,) numpy array, where each of the N particles' desired prototype_id
(i.e.: which prototype is being used for that particle)
"""
assert prototype_ids.shape[0] == self.n_particles, \
f"Got mismatch in particle setting size: {prototype_ids.shape[0]}, vs. number of particles {self.n_particles}!"
self.set_attribute(attr="protoIndices", val=prototype_ids.astype(np.int32))
@property
def state_size(self):
# idn (1), particle_group (1), n_particles (1), and the corresponding states for each particle
# N * (pos (3) + vel (3) + orn (4) + scale (3) + prototype_id (1))
return 3 + self.n_particles * 14
def _dump_state(self):
return dict(
idn=self._idn,
particle_group=self.particle_group,
n_particles=self.n_particles,
particle_positions=self.particle_positions,
particle_velocities=self.particle_velocities,
particle_orientations=self.particle_orientations,
particle_scales=self.particle_scales,
particle_prototype_ids=self.particle_prototype_ids,
)
def _load_state(self, state):
# Sanity check the identification number and particle group
assert self._idn == state["idn"], f"Got mismatch in identification number for this particle instancer when " \
f"loading state! Should be: {self._idn}, got: {state['idn']}."
assert self.particle_group == state["particle_group"], f"Got mismatch in particle group for this particle " \
f"instancer when loading state! Should be: {self.particle_group}, got: {state['particle_group']}."
# Set values appropriately
keys = ("particle_positions", "particle_velocities", "particle_orientations", "particle_scales", "particle_prototype_ids")
for key in keys:
# Make sure the loaded state is a numpy array; it could have been accidentally cast into a list during
# JSON-serialization
val = np.array(state[key]) if not isinstance(state[key], np.ndarray) else state[key]
setattr(self, key, val)
def _serialize(self, state):
# Compress into a 1D array
return np.concatenate([
[state["idn"], state["particle_group"], state["n_particles"]],
state["particle_positions"].reshape(-1),
state["particle_velocities"].reshape(-1),
state["particle_orientations"].reshape(-1),
state["particle_scales"].reshape(-1),
state["particle_prototype_ids"],
]).astype(float)
def _deserialize(self, state):
# Sanity check the identification number
assert self._idn == state[0], f"Got mismatch in identification number for this particle instancer when " \
f"deserializing state! Should be: {self._idn}, got: {state[0]}."
assert self.particle_group == state[1], f"Got mismatch in particle group for this particle " \
f"instancer when deserializing state! Should be: {self.particle_group}, got: {state[1]}."
# De-compress from 1D array
n_particles = int(state[2])
state_dict = dict(
idn=int(state[0]),
particle_group=int(state[1]),
n_particles=n_particles,
)
# Process remaining keys and reshape automatically
keys = ("particle_positions", "particle_velocities", "particle_orientations", "particle_scales", "particle_prototype_ids")
sizes = ((n_particles, 3), (n_particles, 3), (n_particles, 4), (n_particles, 3), (n_particles,))
idx = 3
for key, size in zip(keys, sizes):
            length = np.prod(size)
state_dict[key] = state[idx: idx + length].reshape(size)
idx += length
return state_dict, idx
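# Illustrative sketch (not part of the original file): for an instancer with N particles, the flat
# vector built by _serialize() above (and consumed by _deserialize()) is laid out as
#
#   [idn, particle_group, n_particles,      # 3 header entries
#    N * 3 positions, N * 3 velocities,     # flattened per-particle arrays
#    N * 4 orientations, N * 3 scales,
#    N prototype ids]
#
# matching state_size = 3 + N * 14. A round-trip check on an initialized instancer `inst` would
# look roughly like:
#
#   flat = inst.serialize(inst.dump_state(serialized=False))
#   assert flat.shape[0] == inst.state_size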
class MicroParticleSystem(BaseSystem):
"""
Global system for modeling "micro" level particles, e.g.: water, seeds, cloth. This system leverages
Omniverse's native physx particle systems
"""
# Particle system prim in the scene, should be generated at runtime
system_prim = None
# Material -- MaterialPrim associated with this particle system
_material = None
# Color of the generated material. Default is white [1.0, 1.0, 1.0]
# (NOTE: external queries should call cls.color)
_color = np.array([1.0, 1.0, 1.0])
@classmethod
def initialize(cls):
# Run super first
super().initialize()
# Run sanity checks
if not gm.USE_GPU_DYNAMICS:
raise ValueError(f"Failed to initialize {cls.name} system. Please set gm.USE_GPU_DYNAMICS to be True.")
# Make sure flatcache is not being used OR isosurface is enabled -- otherwise, raise an error, since
# non-isosurface particles don't get rendered properly when flatcache is enabled
assert cls.use_isosurface or not gm.ENABLE_FLATCACHE, \
f"Cannot use flatcache with MicroParticleSystem {cls.name} when no isosurface is used!"
cls.system_prim = cls._create_particle_system()
# Get material
material = cls._get_particle_material_template()
# Load the material if it's newly created and has never been loaded before
if not material.loaded:
material.load()
material.add_user(cls)
cls._material = material
# Bind the material to the particle system (for isosurface) and the prototypes (for non-isosurface)
cls._material.bind(cls.system_prim_path)
# Also apply physics to this material
lazy.omni.physx.scripts.particleUtils.add_pbd_particle_material(og.sim.stage, cls.mat_path, **cls._pbd_material_kwargs)
# Force populate inputs and outputs of the shader
cls._material.shader_force_populate()
# Potentially modify the material
cls._customize_particle_material()
@classmethod
def _clear(cls):
cls._material.remove_user(cls)
super()._clear()
cls.system_prim = None
cls._material = None
cls._color = np.array([1.0, 1.0, 1.0])
@classproperty
def particle_radius(cls):
# Magic number from omni tutorials
# See https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html#offset-autocomputation
# Also https://nvidia-omniverse.github.io/PhysX/physx/5.1.3/docs/ParticleSystem.html#particle-system-configuration
return 0.99 * cls.particle_contact_offset
@classproperty
def color(cls):
"""
Returns:
None or 3-array: If @cls._material exists, this will be its corresponding RGB color. Otherwise,
will return None
"""
return cls._color
@classproperty
def material(cls):
return cls._material
@classproperty
def mat_path(cls):
"""
Returns:
str: Path to this system's material in the scene stage
"""
return f"{cls.prim_path}/material"
@classproperty
def mat_name(cls):
"""
Returns:
str: Name of this system's material
"""
return f"{cls.name}:material"
@classproperty
def _pbd_material_kwargs(cls):
"""
Returns:
dict: Any PBD material kwargs to pass to the PBD material method particleUtils.add_pbd_particle_material
used to define physical properties associated with this particle system
"""
# Default is empty dictionary
return dict()
@classmethod
def _get_particle_material_template(cls):
"""
Creates the particle material template to be used for this particle system. Prim path does not matter,
as it will be overridden internally such that it is a child prim of this particle system's prim.
NOTE: This material is a template because it is loading an Omni material preset. It can then be customized (in
addition to modifying its physical material properties) via @_customize_particle_material
Returns:
MaterialPrim: The material to apply to all particles
"""
# Default is PBR material
return MaterialPrim.get_material(
prim_path=cls.mat_path,
name=cls.mat_name,
load_config={
"mdl_name": f"OmniPBR.mdl",
"mtl_name": f"OmniPBR",
}
)
@classmethod
def _customize_particle_material(cls):
"""
Modifies this particle system's particle material once it is loaded. Default is a no-op
"""
pass
@classproperty
def system_prim_path(cls):
return f"{cls.prim_path}/system"
@classproperty
def visual_only(cls):
"""
Returns:
bool: Whether this particle system should be visual-only, i.e.: not subject to collisions and physics. If True,
the generated particles will not move or collide
"""
return False
@classproperty
def particle_contact_offset(cls):
"""
Returns:
float: Contact offset value to use for this particle system.
See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics.html?highlight=isosurface#particle-system-configuration
for more information
"""
raise NotImplementedError()
@classproperty
def use_smoothing(cls):
"""
Returns:
bool: Whether to use smoothing or not for this particle system.
See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics.html?highlight=isosurface#smoothing
for more information
"""
return False
@classproperty
def use_anisotropy(cls):
"""
Returns:
bool: Whether to use anisotropy or not for this particle system.
See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics.html?highlight=isosurface#anisotropy
for more information
"""
return False
@classproperty
def use_isosurface(cls):
"""
Returns:
bool: Whether to use isosurface or not for this particle system.
See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics.html?highlight=isosurface#isosurface
for more information
"""
return False
@classmethod
def _create_particle_system(cls):
"""
Creates the single, global particle system. This should only be ever called once, and during initialize()
Returns:
Usd.Prim: Particle system prim created
"""
return create_physx_particle_system(
prim_path=cls.system_prim_path,
physics_scene_path=og.sim.get_physics_context().get_current_physics_scene_prim().GetPrimPath().pathString,
particle_contact_offset=cls.particle_contact_offset,
visual_only=cls.visual_only,
smoothing=cls.use_smoothing and gm.ENABLE_HQ_RENDERING,
anisotropy=cls.use_anisotropy and gm.ENABLE_HQ_RENDERING,
isosurface=cls.use_isosurface and gm.ENABLE_HQ_RENDERING,
).GetPrim()
class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
"""
Global system for modeling physical "micro" level particles, e.g.: water, seeds, rice, etc. This system leverages
Omniverse's native physx particle systems
"""
# Particle prototypes -- will be list of mesh prims to use as particle prototypes for this system
particle_prototypes = None
# Particle instancers -- maps name to particle instancer prims (dict)
particle_instancers = None
@classproperty
def n_particles(cls):
return sum([instancer.n_particles for instancer in cls.particle_instancers.values()])
@classproperty
def n_instancers(cls):
"""
Returns:
            int: Number of active particle instancers in this system
"""
return len(cls.particle_instancers)
@classproperty
def instancer_idns(cls):
"""
Returns:
            list of int: Unique identification numbers of all active particle instancers in this system
"""
return [inst.idn for inst in cls.particle_instancers.values()]
@classproperty
def self_collision(cls):
"""
Returns:
bool: Whether this system's particle should have self collisions enabled or not
"""
# Default is True
return True
@classmethod
def _sync_particle_prototype_ids(cls):
"""
Synchronizes the particle prototype IDs across all particle instancers when sim is stopped.
Omniverse has a bug where all particle positions, orientations, velocities, and scales are correctly reset
when sim is stopped, but not the prototype IDs. This function is a workaround for that.
"""
if cls.initialized:
for instancer in cls.particle_instancers.values():
instancer.particle_prototype_ids = np.zeros(instancer.n_particles, dtype=np.int32)
@classmethod
def initialize(cls):
# Create prototype before running super!
cls.particle_prototypes = cls._create_particle_prototypes()
# Run super
super().initialize()
# Potentially set system prim's max velocity value
if m.MICRO_PARTICLE_SYSTEM_MAX_VELOCITY is not None:
cls.system_prim.GetProperty("maxVelocity").Set(m.MICRO_PARTICLE_SYSTEM_MAX_VELOCITY)
# Initialize class variables that are mutable so they don't get overridden by children classes
cls.particle_instancers = dict()
# TODO: remove this hack once omniverse fixes the issue (now we assume prototype IDs are all 0 always)
og.sim.add_callback_on_stop(name=f"{cls.name}_sync_particle_prototype_ids", callback=cls._sync_particle_prototype_ids)
@classmethod
def _clear(cls):
for prototype in cls.particle_prototypes:
og.sim.remove_prim(prototype)
super()._clear()
cls.particle_prototypes = None
cls.particle_instancers = None
@classproperty
def next_available_instancer_idn(cls):
"""
        Returns:
            int: The next available unique identification number to assign to a new particle
                instancer, based on the idns of the currently active instancers
"""
if cls.n_instancers == 0:
return cls.default_instancer_idn
else:
for idn in range(max(cls.instancer_idns) + 2):
if idn not in cls.instancer_idns:
return idn
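    # Illustrative note (not part of the original file): the loop above fills the lowest gap in the
    # set of active idns. E.g., with instancer_idns == [0, 1, 3], range(max + 2) == range(5) visits
    # 0, 1, 2, ... and returns the first unused idn, 2; with [0, 1, 2], the "+ 2" guarantees 3 is
    # still visited, so a free idn is always found.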
@classproperty
def default_instancer_idn(cls):
return 0
@classproperty
def state_size(cls):
        # We store the number of particle instancers (1); the idn, particle group, and particle
        # count for each instancer (3n); and the corresponding state of each instancer (X)
return 1 + 3 * len(cls.particle_instancers) + sum(inst.state_size for inst in cls.particle_instancers.values())
@classproperty
def default_particle_instancer(cls):
"""
Returns:
PhysxParticleInstancer: Default particle instancer for this particle system
"""
# Default instancer is the 0th ID instancer
name = cls.particle_instancer_idn_to_name(idn=cls.default_instancer_idn)
# NOTE: Cannot use dict.get() call for some reason; it messes up IDE introspection
return cls.particle_instancers[name] if name in cls.particle_instancers \
else cls.generate_particle_instancer(n_particles=0, idn=cls.default_instancer_idn)
@classproperty
def particle_contact_radius(cls):
# This is simply the contact offset
return cls.particle_contact_offset
@classproperty
def is_fluid(cls):
"""
Returns:
bool: Whether this system is modeling fluid or not
"""
raise NotImplementedError()
@classmethod
def _create_particle_prototypes(cls):
"""
Creates any relevant particle prototypes to be used by this particle system.
Returns:
list of VisualGeomPrim: Visual mesh prim(s) to use as this system's particle prototype(s)
"""
raise NotImplementedError()
@classmethod
def remove_particles(
cls,
idxs,
instancer_idn=None,
):
"""
Removes pre-existing particles from instancer @instancer_idn
Args:
idxs (np.array): (n_particles,) shaped array specifying IDs of particles to delete
instancer_idn (None or int): Unique identification number of the particle instancer to delete the particles
from. If None, this system will delete particles from the default particle instancer
"""
        # Fetch the default particle instancer if no idn is specified, otherwise look up the pre-existing one
        inst = cls.default_particle_instancer if instancer_idn is None else \
            cls.particle_instancers.get(cls.particle_instancer_idn_to_name(idn=instancer_idn), None)
        assert inst is not None, f"No instancer with ID {instancer_idn} exists!"
inst.remove_particles(idxs=idxs)
@classmethod
def generate_particles(
cls,
positions,
instancer_idn=None,
particle_group=0,
velocities=None,
orientations=None,
scales=None,
prototype_indices=None,
):
"""
Generates new particles, either as part of a pre-existing instancer corresponding to @instancer_idn or as part
of a newly generated instancer.
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
instancer_idn (None or int): Unique identification number of the particle instancer to assign the generated
particles to. This is used to deterministically reproduce individual particle instancer states
dynamically, even if we delete / add additional ones at runtime during simulation. If there is no
active instancer that matches the requested idn, a new one will be created.
If None, this system will add particles to the default particle instancer
particle_group (int): ID for this particle set. Particles from different groups will automatically collide
with each other. Particles in the same group will have collision behavior dictated by
@cls.self_collision
velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (cls.min_scale, cls.max_scale)
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will randomly sample from all available prototypes
Returns:
PhysxParticleInstancer: Particle instancer that includes the generated particles
"""
# Create a new particle instancer if a new idn is requested, otherwise use the pre-existing one
inst = cls.default_particle_instancer if instancer_idn is None else \
cls.particle_instancers.get(cls.particle_instancer_idn_to_name(idn=instancer_idn), None)
n_particles = len(positions)
if prototype_indices is not None:
prototype_indices = np.ones(n_particles, dtype=int) * prototype_indices if \
isinstance(prototype_indices, int) else np.array(prototype_indices, dtype=int)
else:
prototype_indices = np.random.choice(np.arange(len(cls.particle_prototypes)), size=(n_particles,))
if inst is None:
inst = cls.generate_particle_instancer(
idn=instancer_idn,
particle_group=particle_group,
n_particles=len(positions),
positions=positions,
velocities=velocities,
orientations=orientations,
scales=scales,
prototype_indices=prototype_indices,
)
else:
inst.add_particles(
positions=positions,
velocities=velocities,
orientations=orientations,
scales=scales,
prototype_indices=prototype_indices,
)
# Update semantics
lazy.omni.isaac.core.utils.semantics.add_update_semantics(
prim=lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path=cls.prim_path),
semantic_label=cls.name,
type_label="class",
)
return inst
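    # Illustrative usage sketch (not part of the original file), assuming a registered subclass of
    # this system (here hypothetically named WaterSystem) and a running simulator:
    #
    #   positions = np.random.uniform(low=(0.0, 0.0, 1.0), high=(0.1, 0.1, 1.2), size=(50, 3))
    #   inst = WaterSystem.generate_particles(positions=positions)  # default instancer
    #   assert inst.n_particles >= 50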
@classmethod
def generate_particle_instancer(
cls,
n_particles,
idn=None,
particle_group=0,
positions=None,
velocities=None,
orientations=None,
scales=None,
prototype_indices=None,
):
"""
Generates a new particle instancer with unique identification number @idn, and registers it internally
Args:
n_particles (int): Number of particles to generate for this instancer
idn (None or int): Unique identification number to assign to this particle instancer. This is used to
deterministically reproduce individual particle instancer states dynamically, even if we
delete / add additional ones at runtime during simulation. If None, this system will generate a unique
identifier automatically.
particle_group (int): ID for this particle set. Particles from different groups will automatically collide
with each other. Particles in the same group will have collision behavior dictated by
@cls.self_collision
positions (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
If not specified, will be set to the origin by default
velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (cls.min_scale, cls.max_scale)
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will use all 0s (i.e.: the first prototype created)
Returns:
PhysxParticleInstancer: Generated particle instancer
"""
# Run sanity checks
assert cls.initialized, "Must initialize system before generating particle instancers!"
# Multiple particle instancers is NOT supported currently, since there is no clear use case for multiple
        assert cls.n_instancers == 0, f"Cannot create multiple instancers for the same system! " \
            f"There are already {cls.n_instancers} pre-existing instancers."
# Automatically generate an identification number for this instancer if none is specified
if idn is None:
idn = cls.next_available_instancer_idn
assert idn not in cls.instancer_idns, f"instancer idn {idn} already exists."
# Generate standardized prim path for this instancer
name = cls.particle_instancer_idn_to_name(idn=idn)
# Create the instancer
instance = create_physx_particleset_pointinstancer(
name=name,
particle_system_path=cls.prim_path,
physx_particle_system_path=cls.system_prim_path,
particle_group=particle_group,
positions=np.zeros((n_particles, 3)) if positions is None else positions,
self_collision=cls.self_collision,
fluid=cls.is_fluid,
particle_mass=None,
particle_density=cls.particle_density,
orientations=orientations,
velocities=velocities,
angular_velocities=None,
scales=cls.sample_scales(n=n_particles) if scales is None else scales,
prototype_prim_paths=[pp.prim_path for pp in cls.particle_prototypes],
prototype_indices=prototype_indices,
enabled=not cls.visual_only,
)
# Create the instancer object that wraps the raw prim
instancer = PhysxParticleInstancer(
prim_path=instance.GetPrimPath().pathString,
name=name,
idn=idn,
)
instancer.initialize()
cls.particle_instancers[name] = instancer
return instancer
@classmethod
def generate_particles_from_link(
cls,
obj,
link,
use_visual_meshes=True,
mesh_name_prefixes=None,
check_contact=True,
instancer_idn=None,
particle_group=0,
sampling_distance=None,
max_samples=None,
prototype_indices=None,
):
"""
Generates a new particle instancer with unique identification number @idn, with particles sampled from the mesh
located at @mesh_prim_path, and registers it internally. This will also check for collision with other rigid
objects before spawning in individual particles
Args:
obj (EntityPrim): Object whose @link's visual meshes will be converted into sampled particles
link (RigidPrim): @obj's link whose visual meshes will be converted into sampled particles
use_visual_meshes (bool): Whether to use visual meshes of the link to generate particles
mesh_name_prefixes (None or str): If specified, specifies the substring that must exist in @link's
mesh names in order for that mesh to be included in the particle generator function.
If None, no filtering will be used.
check_contact (bool): If True, will only spawn in particles that do not collide with other rigid bodies
instancer_idn (None or int): Unique identification number of the particle instancer to assign the generated
particles to. This is used to deterministically reproduce individual particle instancer states
dynamically, even if we delete / add additional ones at runtime during simulation. If there is no
active instancer that matches the requested idn, a new one will be created.
If None, this system will add particles to the default particle instancer
particle_group (int): ID for this particle set. Particles from different groups will automatically collide
with each other. Particles in the same group will have collision behavior dictated by
@cls.self_collision.
Only used if a new particle instancer is created!
sampling_distance (None or float): If specified, sets the distance between sampled particles. If None,
a simulator autocomputed value will be used
max_samples (None or int): If specified, maximum number of particles to sample
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will randomly sample from all available prototypes
"""
return super().generate_particles_from_link(
obj=obj,
link=link,
use_visual_meshes=use_visual_meshes,
mesh_name_prefixes=mesh_name_prefixes,
check_contact=check_contact,
instancer_idn=instancer_idn,
particle_group=particle_group,
sampling_distance=sampling_distance,
max_samples=max_samples,
prototype_indices=prototype_indices,
)
@classmethod
def generate_particles_on_object(
cls,
obj,
instancer_idn=None,
particle_group=0,
sampling_distance=None,
max_samples=None,
min_samples_for_success=1,
prototype_indices=None,
):
"""
        Generates new particles and samples their locations on the top surface of object @obj
Args:
obj (BaseObject): Object on which to generate a particle instancer with sampled particles on the object's
top surface
instancer_idn (None or int): Unique identification number of the particle instancer to assign the generated
particles to. This is used to deterministically reproduce individual particle instancer states
dynamically, even if we delete / add additional ones at runtime during simulation. If there is no
active instancer that matches the requested idn, a new one will be created.
If None, this system will add particles to the default particle instancer
particle_group (int): ID for this particle set. Particles from different groups will automatically collide.
Only used if a new particle instancer is created!
sampling_distance (None or float): If specified, sets the distance between sampled particles. If None,
a simulator autocomputed value will be used
max_samples (None or int): If specified, maximum number of particles to sample
min_samples_for_success (int): Minimum number of particles required to be sampled successfully in order
for this generation process to be considered successful
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will randomly sample from all available prototypes
Returns:
bool: True if enough particles were generated successfully (number of successfully sampled points >=
min_samples_for_success), otherwise False
"""
return super().generate_particles_on_object(
obj=obj,
instancer_idn=instancer_idn,
particle_group=particle_group,
sampling_distance=sampling_distance,
max_samples=max_samples,
min_samples_for_success=min_samples_for_success,
prototype_indices=prototype_indices,
)
@classmethod
def remove_particle_instancer(cls, name):
"""
Removes particle instancer with name @name from this system.
Args:
name (str): Particle instancer name to remove. If it does not exist, then an error will be raised
"""
# Make sure the instancer actually exists
assert_valid_key(key=name, valid_keys=cls.particle_instancers, name="particle instancer")
# Remove instancer from our tracking and delete its prim
instancer = cls.particle_instancers.pop(name)
og.sim.remove_prim(instancer)
@classmethod
def particle_instancer_name_to_idn(cls, name):
"""
Args:
name (str): Particle instancer name
Returns:
int: Particle instancer identification number
"""
return int(name.split(f"{cls.name}Instancer")[-1])
@classmethod
def particle_instancer_idn_to_name(cls, idn):
"""
Args:
            idn (int): Particle instancer identification number
Returns:
str: Name of the particle instancer auto-generated from its unique identification number
"""
return f"{cls.name}Instancer{idn}"
@classmethod
def get_particles_position_orientation(cls):
return cls.default_particle_instancer.particle_positions, cls.default_particle_instancer.particle_orientations
@classmethod
def get_particles_local_pose(cls):
return cls.get_particles_position_orientation()
@classmethod
def get_particle_position_orientation(cls, idx):
pos, ori = cls.get_particles_position_orientation()
return pos[idx], ori[idx]
@classmethod
def get_particle_local_pose(cls, idx):
return cls.get_particle_position_orientation(idx=idx)
@classmethod
def set_particles_position_orientation(cls, positions=None, orientations=None):
if positions is not None:
cls.default_particle_instancer.particle_positions = positions
if orientations is not None:
cls.default_particle_instancer.particle_orientations = orientations
@classmethod
def set_particles_local_pose(cls, positions=None, orientations=None):
cls.set_particles_position_orientation(positions=positions, orientations=orientations)
@classmethod
def set_particle_position_orientation(cls, idx, position=None, orientation=None):
if position is not None:
positions = cls.default_particle_instancer.particle_positions
positions[idx] = position
cls.default_particle_instancer.particle_positions = positions
if orientation is not None:
orientations = cls.default_particle_instancer.particle_orientations
orientations[idx] = orientation
cls.default_particle_instancer.particle_orientations = orientations
@classmethod
def set_particle_local_pose(cls, idx, position=None, orientation=None):
cls.set_particle_position_orientation(idx=idx, position=position, orientation=orientation)
@classmethod
def _sync_particle_instancers(cls, idns, particle_groups, particle_counts):
"""
Synchronizes the particle instancers based on desired identification numbers @idns
Args:
idns (list of int): Desired unique instancers that should be active for this particle system
particle_groups (list of int): Desired particle groups that each instancer should be. Length of this
list should be the same length as @idns
particle_counts (list of int): Desired particle counts that should exist per instancer. Length of this
list should be the same length as @idns
"""
# We have to be careful here -- some particle instancers may have been deleted / are mismatched, so we need
# to update accordingly, potentially deleting stale instancers and creating new instancers as needed
idn_to_info_mapping = {idn: {"group": group, "count": count}
for idn, group, count in zip(idns, particle_groups, particle_counts)}
current_instancer_names = set(cls.particle_instancers.keys())
desired_instancer_names = set(cls.particle_instancer_idn_to_name(idn=idn) for idn in idns)
instancers_to_delete = current_instancer_names - desired_instancer_names
instancers_to_create = desired_instancer_names - current_instancer_names
common_instancers = current_instancer_names.intersection(desired_instancer_names)
# Sanity check the common instancers, we will recreate any where there is a mismatch
for name in common_instancers:
idn = cls.particle_instancer_name_to_idn(name=name)
info = idn_to_info_mapping[idn]
instancer = cls.particle_instancers[name]
if instancer.particle_group != info["group"]:
instancer.particle_group = info["group"]
count_diff = info["count"] - instancer.n_particles
if count_diff > 0:
# We need to add more particles to this group
instancer.add_particles(positions=np.zeros((count_diff, 3)))
elif count_diff < 0:
# We need to remove particles from this group
instancer.remove_particles(idxs=np.arange(-count_diff))
# Delete any instancers we no longer want
for name in instancers_to_delete:
cls.remove_particle_instancer(name=name)
# Create any instancers we don't already have
for name in instancers_to_create:
idn = cls.particle_instancer_name_to_idn(name=name)
info = idn_to_info_mapping[idn]
cls.generate_particle_instancer(idn=idn, particle_group=info["group"], n_particles=info["count"])
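    # Illustrative sketch (not part of the original file): given current instancers
    # {idn 0 with 10 particles, idn 2 with 5}, a call like
    #
    #   cls._sync_particle_instancers(idns=[0, 1], particle_groups=[0, 0], particle_counts=[8, 4])
    #
    # trims instancer 0 to 8 particles (remove_particles on 2 idxs), deletes instancer 2, and
    # creates a fresh instancer 1 with 4 particles at the origin.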
@classmethod
def _dump_state(cls):
return dict(
n_instancers=cls.n_instancers,
instancer_idns=cls.instancer_idns,
instancer_particle_groups=[inst.particle_group for inst in cls.particle_instancers.values()],
instancer_particle_counts=[inst.n_particles for inst in cls.particle_instancers.values()],
particle_states=dict(((name, inst.dump_state(serialized=False))
for name, inst in cls.particle_instancers.items())),
)
@classmethod
def _load_state(cls, state):
# Synchronize the particle instancers
cls._sync_particle_instancers(
idns=state["instancer_idns"],
particle_groups=state["instancer_particle_groups"],
particle_counts=state["instancer_particle_counts"],
)
# Iterate over all particle states and load their respective states
for name, inst_state in state["particle_states"].items():
cls.particle_instancers[name].load_state(inst_state, serialized=False)
@classmethod
def _serialize(cls, state):
# Array is number of particle instancers, then the corresponding states for each particle instancer
return np.concatenate([
[state["n_instancers"]],
state["instancer_idns"],
state["instancer_particle_groups"],
state["instancer_particle_counts"],
*[cls.particle_instancers[name].serialize(inst_state)
for name, inst_state in state["particle_states"].items()],
]).astype(float)
@classmethod
def _deserialize(cls, state):
# Synchronize the particle instancers
n_instancers = int(state[0])
instancer_info = dict()
idx = 1
for info_name in ("instancer_idns", "instancer_particle_groups", "instancer_particle_counts"):
instancer_info[info_name] = state[idx: idx + n_instancers].astype(int).tolist()
idx += n_instancers
# Syncing is needed so that each particle instancer can further deserialize its own state
log.debug(f"Syncing {cls.name} particles with {n_instancers} instancers..")
cls._sync_particle_instancers(
idns=instancer_info["instancer_idns"],
particle_groups=instancer_info["instancer_particle_groups"],
particle_counts=instancer_info["instancer_particle_counts"],
)
# Procedurally deserialize the particle states
particle_states = dict()
for idn in instancer_info["instancer_idns"]:
name = cls.particle_instancer_idn_to_name(idn=idn)
state_size = cls.particle_instancers[name].state_size
particle_states[name] = cls.particle_instancers[name].deserialize(state[idx: idx + state_size])
idx += state_size
return dict(
n_instancers=n_instancers,
**instancer_info,
particle_states=particle_states,
), idx
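    # Illustrative sketch (not part of the original file): the flat vector consumed here (and built
    # by _serialize() above) is laid out as
    #
    #   [n_instancers,
    #    idn_0, ..., idn_{n-1},
    #    group_0, ..., group_{n-1},
    #    count_0, ..., count_{n-1},
    #    <instancer 0 state>, ..., <instancer n-1 state>]
    #
    # so the header consumes 1 + 3 * n_instancers entries, matching state_size above.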
@classmethod
def remove_all_particles(cls):
cls._sync_particle_instancers(idns=[], particle_groups=[], particle_counts=[])
@classmethod
def create(
cls,
name,
particle_density,
min_scale=None,
max_scale=None,
**kwargs,
):
"""
        Utility function to programmatically generate monolithic particle system classes.
Args:
name (str): Name of the system, in snake case.
particle_density (float): Particle density for the generated system
            min_scale (None or 3-array): If specified, sets the minimum bound for particles' relative scale.
Else, defaults to 1
max_scale (None or 3-array): If specified, sets the maximum bound for particles' relative scale.
Else, defaults to 1
**kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
the class attribute to modify and the values represent the functions / value to set
(Note: These values should have either @classproperty or @classmethod decorators!)
Returns:
MicroPhysicalParticleSystem: Generated system class
"""
# Override the necessary parameters
@classproperty
def cp_register_system(cls):
# We should register this system since it's an "actual" system (not an intermediate class)
return True
@classproperty
def cp_particle_density(cls):
return particle_density
# Add to any other params specified
kwargs["_register_system"] = cp_register_system
kwargs["particle_density"] = cp_particle_density
# Run super
return super().create(name=name, min_scale=min_scale, max_scale=max_scale, **kwargs)
class FluidSystem(MicroPhysicalParticleSystem):
"""
Particle system class simulating fluids, leveraging isosurface feature in omniverse to render nice PBR fluid
texture. Individual particles are composed of spheres.
"""
@classmethod
def initialize(cls):
# Run super first
super().initialize()
# Bind the material to the particle system (for isosurface) and the prototypes (for non-isosurface)
cls._material.bind(cls.system_prim_path)
for prototype in cls.particle_prototypes:
cls._material.bind(prototype.prim_path)
# Apply the physical material preset based on whether or not this fluid is viscous
apply_mat_physics = lazy.omni.physx.scripts.particleUtils.AddPBDMaterialViscous if cls.is_viscous else lazy.omni.physx.scripts.particleUtils.AddPBDMaterialWater
apply_mat_physics(p=cls._material.prim)
# Compute the overall color of the fluid system
base_color_weight = cls._material.diffuse_reflection_weight
transmission_weight = cls._material.enable_specular_transmission * cls._material.specular_transmission_weight
total_weight = base_color_weight + transmission_weight
if total_weight == 0.0:
# If the fluid doesn't have any color, we add a "blue" tint by default
color = np.array([0.0, 0.0, 1.0])
else:
base_color_weight /= total_weight
transmission_weight /= total_weight
# Weighted sum of base color and transmission color
color = base_color_weight * cls._material.diffuse_reflection_color + \
transmission_weight * (0.5 * cls._material.specular_transmission_color + \
0.5 * cls._material.specular_transmission_scattering_color)
cls._color = color
# Set custom isosurface rendering settings if we are using high-quality rendering
if gm.ENABLE_HQ_RENDERING:
set_carb_settings_for_fluid_isosurface()
# We also modify the grid smoothing radius to avoid "blobby" appearances
cls.system_prim.GetAttribute("physxParticleIsosurface:gridSmoothingRadius").Set(0.0001)
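    # Illustrative note (not part of the original file): the color blend above is a convex
    # combination. E.g., with diffuse_reflection_weight = 0.6 and an enabled specular transmission
    # weight of 0.2, total_weight = 0.8, so the fluid color works out to
    #
    #   color = 0.75 * diffuse_reflection_color
    #           + 0.25 * 0.5 * (specular_transmission_color + specular_transmission_scattering_color)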
@classproperty
def is_fluid(cls):
return True
@classproperty
def use_isosurface(cls):
return True
@classproperty
def is_viscous(cls):
"""
Returns:
bool: True if this material is viscous or not. Default is False
"""
raise NotImplementedError()
@classproperty
def particle_radius(cls):
# Magic number from omni tutorials
# See https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html#offset-autocomputation
return 0.99 * 0.6 * cls.particle_contact_offset
@classproperty
def particle_particle_rest_distance(cls):
# Magic number, based on intuition from https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/physics-particles.html#particle-particle-interaction
return cls.particle_radius * 2.0 * m.FLUID_PARTICLE_PARTICLE_DISTANCE_SCALE
@classproperty
def _material_mtl_name(cls):
"""
Returns:
None or str: Material mdl preset name to use for generating this fluid material. NOTE: Should be an
                entry from OmniSurfacePresets.mdl, minus the "OmniSurface_" string. If None is specified, will default
to the generic OmniSurface material
"""
return None
@classmethod
def _create_particle_prototypes(cls):
# Simulate particles with simple spheres
prototype = lazy.pxr.UsdGeom.Sphere.Define(og.sim.stage, f"{cls.prim_path}/prototype0")
prototype.CreateRadiusAttr().Set(cls.particle_radius)
prototype = VisualGeomPrim(prim_path=prototype.GetPath().pathString, name=prototype.GetPath().pathString)
prototype.visible = False
lazy.omni.isaac.core.utils.semantics.add_update_semantics(
prim=prototype.prim,
semantic_label=cls.name,
type_label="class",
)
return [prototype]
@classmethod
def _get_particle_material_template(cls):
# We use a template from OmniPresets if @_material_mtl_name is specified, else the default OmniSurface
return MaterialPrim.get_material(
prim_path=cls.mat_path,
name=cls.mat_name,
load_config={
"mdl_name": f"OmniSurface{'' if cls._material_mtl_name is None else 'Presets'}.mdl",
"mtl_name": f"OmniSurface{'' if cls._material_mtl_name is None else ('_' + cls._material_mtl_name)}"
}
)
@classmethod
def create(
cls,
name,
particle_contact_offset,
particle_density,
is_viscous=False,
material_mtl_name=None,
customize_particle_material=None,
**kwargs,
):
"""
Utility function to programmatically generate monolithic fluid system classes.
Args:
name (str): Name of the system
particle_contact_offset (float): Contact offset for the generated system
particle_density (float): Particle density for the generated system
is_viscous (bool): Whether or not the generated fluid system should be viscous
material_mtl_name (None or str): Material mdl preset name to use for generating this fluid material.
NOTE: Should be an entry from OmniSurfacePresets.mdl, minus the "OmniSurface_" string.
                If None is specified, will default to the generic OmniSurface material
customize_particle_material (None or function): Method for customizing the particle material for the fluid
after it has been loaded. Default is None, which will produce a no-op.
If specified, expected signature:
_customize_particle_material(mat: MaterialPrim) --> None
where @MaterialPrim is the material to modify in-place
**kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
the class attribute to modify and the values represent the functions / value to set
(Note: These values should have either @classproperty or @classmethod decorators!)
Returns:
FluidSystem: Generated system class
"""
@classproperty
def cp_particle_contact_offset(cls):
return particle_contact_offset
@classproperty
def cp_material_mtl_name(cls):
return material_mtl_name
@classproperty
def cp_is_viscous(cls):
return is_viscous
@classmethod
def cm_customize_particle_material(cls):
if customize_particle_material is not None:
customize_particle_material(mat=cls._material)
# Add to any other params specified
kwargs["particle_contact_offset"] = cp_particle_contact_offset
kwargs["_material_mtl_name"] = cp_material_mtl_name
kwargs["is_viscous"] = cp_is_viscous
kwargs["_customize_particle_material"] = cm_customize_particle_material
# Create and return the class
return super().create(
name=name,
particle_density=particle_density,
**kwargs,
)
def customize_particle_material_factory(attr, value):
def func(mat):
setattr(mat, attr, np.array(value))
return func
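# Illustrative usage sketch (not part of the original file): the factory above pairs with
# FluidSystem.create()'s customize_particle_material hook. The system name, preset name, and
# attribute values below are hypothetical examples:
#
#   StrawberrySmoothieSystem = FluidSystem.create(
#       name="strawberry_smoothie",
#       particle_contact_offset=0.008,
#       particle_density=1000.0,
#       is_viscous=True,
#       material_mtl_name="SkimMilk",
#       customize_particle_material=customize_particle_material_factory(
#           attr="diffuse_reflection_color", value=[1.0, 0.64, 0.64]),
#   )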
class GranularSystem(MicroPhysicalParticleSystem):
"""
Particle system class simulating granular materials. Individual particles are composed of custom USD objects.
"""
# Cached particle contact offset determined from loaded prototype
_particle_contact_offset = None
_particle_template = None
@classproperty
def self_collision(cls):
# Don't self-collide to improve physics stability
# For whatever reason, granular (non-fluid) particles tend to explode when sampling Filled states, and it seems
# the only way to avoid this unstable behavior is to disable self-collisions. This actually enables the granular
# particles to converge to zero velocity.
return False
@classmethod
def _clear(cls):
og.sim.remove_object(cls._particle_template)
super()._clear()
cls._particle_template = None
cls._particle_contact_offset = None
@classproperty
def particle_contact_offset(cls):
return cls._particle_contact_offset
@classproperty
def is_fluid(cls):
return False
@classmethod
def _create_particle_prototypes(cls):
# Load the particle template
particle_template = cls._create_particle_template()
og.sim.import_object(obj=particle_template, register=False)
cls._particle_template = particle_template
# Make sure there is no ambiguity about which mesh to use as the particle from this template
assert len(particle_template.links) == 1, "GranularSystem particle template has more than one link"
assert len(particle_template.root_link.visual_meshes) == 1, "GranularSystem particle template has more than one visual mesh"
# Make sure template scaling is [1, 1, 1] -- any particle scaling should be done via cls.min/max_scale
        assert np.all(particle_template.scale == 1.0), "GranularSystem particle template must have unit scale"
# The prototype is assumed to be the first and only visual mesh belonging to the root link
visual_geom = list(particle_template.root_link.visual_meshes.values())[0]
# Copy it to the standardized prim path
prototype_path = f"{cls.prim_path}/prototype0"
lazy.omni.kit.commands.execute("CopyPrim", path_from=visual_geom.prim_path, path_to=prototype_path)
# Wrap it with VisualGeomPrim with the correct scale
prototype = VisualGeomPrim(prim_path=prototype_path, name=prototype_path)
prototype.scale *= cls.max_scale
prototype.visible = False
lazy.omni.isaac.core.utils.semantics.add_update_semantics(
prim=prototype.prim,
semantic_label=cls.name,
type_label="class",
)
# Store the contact offset based on a minimum sphere
# Threshold the lower-bound to avoid super small particles
vertices = np.array(prototype.get_attribute("points")) * prototype.scale
_, particle_contact_offset = trimesh.nsphere.minimum_nsphere(trimesh.Trimesh(vertices=vertices))
if particle_contact_offset < m.MIN_PARTICLE_CONTACT_OFFSET:
prototype.scale *= m.MIN_PARTICLE_CONTACT_OFFSET / particle_contact_offset
particle_contact_offset = m.MIN_PARTICLE_CONTACT_OFFSET
cls._particle_contact_offset = particle_contact_offset
return [prototype]
@classmethod
def _create_particle_template(cls):
"""
Creates the particle template to be used for this system.
NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
visual mesh attached to its root link, since this will be the actual visual mesh used
Returns:
EntityPrim: Particle template that will be duplicated when generating future particle groups
"""
raise NotImplementedError()
@classmethod
def create(
cls,
name,
particle_density,
create_particle_template,
scale=None,
**kwargs,
):
"""
        Utility function to programmatically generate monolithic granular system classes.
Args:
name (str): Name of the system
particle_density (float): Particle density for the generated system
create_particle_template (function): Method for generating the visual particle template that will be duplicated
when generating groups of particles.
Expected signature:
create_particle_template(prim_path: str, name: str) --> EntityPrim
where @prim_path and @name are the parameters to assign to the generated EntityPrim.
NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
visual mesh attached to its root link, since this will be the actual visual mesh used
scale (None or 3-array): If specified, sets the scaling factor for the particles' relative scale.
Else, defaults to 1
**kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
the class attribute to modify and the values represent the functions / value to set
(Note: These values should have either @classproperty or @classmethod decorators!)
Returns:
GranularSystem: Generated granular system class
"""
@classmethod
def cm_create_particle_template(cls):
return create_particle_template(prim_path=f"{cls.prim_path}/template", name=f"{cls.name}_template")
# Add to any other params specified
kwargs["_create_particle_template"] = cm_create_particle_template
# Create and return the class
return super().create(
name=name,
particle_density=particle_density,
min_scale=scale,
max_scale=scale,
**kwargs,
)
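# Illustrative usage sketch (not part of the original file); the template loader below is a
# hypothetical callable matching the expected create_particle_template signature:
#
#   def create_rice_template(prim_path, name):
#       # Return a non-articulated, single-link object with exactly one visual mesh
#       return DatasetObject(prim_path=prim_path, name=name, category="grain_of_rice", visual_only=True)
#
#   RiceSystem = GranularSystem.create(
#       name="rice",
#       particle_density=1550.0,
#       create_particle_template=create_rice_template,
#   )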
class Cloth(MicroParticleSystem):
"""
Particle system class to simulate cloth.
"""
@classmethod
def remove_all_particles(cls):
# Override base method since there are no particles to be deleted
pass
@classmethod
def clothify_mesh_prim(cls, mesh_prim, remesh=True, particle_distance=None):
"""
Clothifies @mesh_prim by applying the appropriate Cloth API, optionally re-meshing the mesh so that the
resulting generated particles are roughly @particle_distance apart from each other.
Args:
mesh_prim (Usd.Prim): Mesh prim to clothify
remesh (bool): If True, will remesh the input mesh before converting it into a cloth
particle_distance (None or float): If set and @remesh is True, specifies the absolute target distance
between generated cloth particles. If None, a value is automatically chosen such that the generated
cloth particles are roughly touching each other, given cls.particle_contact_offset and
@mesh_prim's scale
"""
has_uv_mapping = mesh_prim.GetAttribute("primvars:st").Get() is not None
if not remesh:
# We always load into trimesh to remove redundant particles (since natively omni redundantly represents
# the number of vertices as 6x the total unique number of vertices)
tm = mesh_prim_to_trimesh_mesh(mesh_prim=mesh_prim, include_normals=True, include_texcoord=True, world_frame=False)
texcoord = np.array(mesh_prim.GetAttribute("primvars:st").Get()) if has_uv_mapping else None
else:
# We will remesh in pymeshlab, but it doesn't allow programmatic construction of a mesh with texcoords so
# we convert our mesh into a trimesh mesh, then export it to a temp file, then load it into pymeshlab
scaled_world_transform = PoseAPI.get_world_pose_with_scale(mesh_prim.GetPath().pathString)
# Convert to trimesh mesh (in world frame)
tm = mesh_prim_to_trimesh_mesh(mesh_prim=mesh_prim, include_normals=True, include_texcoord=True, world_frame=True)
# Tmp file written to: {tmp_dir}/{tmp_fname}/{tmp_fname}.obj
tmp_name = str(uuid.uuid4())
tmp_dir = os.path.join(tempfile.gettempdir(), tmp_name)
tmp_fpath = os.path.join(tmp_dir, f"{tmp_name}.obj")
Path(tmp_dir).mkdir(parents=True, exist_ok=True)
tm.export(tmp_fpath)
# Start with the default particle distance
particle_distance = cls.particle_contact_offset * 2 / 1.5 if particle_distance is None else particle_distance
# Repetitively re-mesh at lower resolution until we have a mesh that has less than MAX_CLOTH_PARTICLES vertices
for _ in range(10):
ms = pymeshlab.MeshSet()
ms.load_new_mesh(tmp_fpath)
# Re-mesh based on @particle_distance - distance chosen such that at rest particles should be just touching
# each other. The 1.5 magic number comes from the particle cloth demo from omni
# Note that this means that the particles will overlap with each other, since at dist = 2 * contact_offset
# the particles are just touching each other at rest
avg_edge_percentage_mismatch = 1.0
# Loop re-meshing until average edge percentage is within error threshold or we reach the max number of tries
for _ in range(5):
if avg_edge_percentage_mismatch <= m.CLOTH_REMESHING_ERROR_THRESHOLD:
break
ms.meshing_isotropic_explicit_remeshing(iterations=5, adaptive=True, targetlen=pymeshlab.AbsoluteValue(particle_distance))
avg_edge_percentage_mismatch = abs(1.0 - particle_distance / ms.get_geometric_measures()["avg_edge_length"])
else:
# Terminate anyways, but don't fail
log.warn("The generated cloth may not have evenly distributed particles.")
# Check if we have too many vertices
cm = ms.current_mesh()
if cm.vertex_number() > m.MAX_CLOTH_PARTICLES:
# We have too many vertices, so we will re-mesh again
                particle_distance *= np.sqrt(2)  # increase spacing by sqrt(2) to roughly halve the vertex count
                log.warning(f"Too many vertices ({cm.vertex_number()})! Re-meshing with particle distance {particle_distance}...")
else:
break
else:
raise ValueError(f"Could not remesh with less than MAX_CLOTH_PARTICLES ({m.MAX_CLOTH_PARTICLES}) vertices!")
# Re-write data to @mesh_prim
new_faces = cm.face_matrix()
new_vertices = cm.vertex_matrix()
new_normals = cm.vertex_normal_matrix()
texcoord = np.array(cm.wedge_tex_coord_matrix()) if has_uv_mapping else None
tm = trimesh.Trimesh(
vertices=new_vertices,
faces=new_faces,
vertex_normals=new_normals,
)
# Apply the inverse of the world transform to get the mesh back into its local frame
tm.apply_transform(np.linalg.inv(scaled_world_transform))
# Update the mesh prim
face_vertex_counts = np.array([len(face) for face in tm.faces], dtype=int)
mesh_prim.GetAttribute("faceVertexCounts").Set(face_vertex_counts)
mesh_prim.GetAttribute("points").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm.vertices))
mesh_prim.GetAttribute("faceVertexIndices").Set(tm.faces.flatten())
mesh_prim.GetAttribute("normals").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm.vertex_normals))
if has_uv_mapping:
mesh_prim.GetAttribute("primvars:st").Set(lazy.pxr.Vt.Vec2fArray.FromNumpy(texcoord))
# Convert into particle cloth
lazy.omni.physx.scripts.particleUtils.add_physx_particle_cloth(
stage=og.sim.stage,
path=mesh_prim.GetPath(),
dynamic_mesh_path=None,
particle_system_path=cls.system_prim_path,
spring_stretch_stiffness=m.CLOTH_STRETCH_STIFFNESS,
spring_bend_stiffness=m.CLOTH_BEND_STIFFNESS,
spring_shear_stiffness=m.CLOTH_SHEAR_STIFFNESS,
spring_damping=m.CLOTH_DAMPING,
self_collision=True,
self_collision_filter=True,
)
# Disable welding because it can potentially make thin objects non-manifold
auto_particle_cloth_api = lazy.pxr.PhysxSchema.PhysxAutoParticleClothAPI(mesh_prim)
auto_particle_cloth_api.GetDisableMeshWeldingAttr().Set(True)
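    # Illustrative note (not part of the original file): with the default
    # particle_distance = particle_contact_offset * 2 / 1.5, neighboring cloth particles at rest sit
    # closer than 2 * contact_offset and therefore overlap slightly. E.g., for
    # CLOTH_PARTICLE_CONTACT_OFFSET = 0.005 the target edge length is ~0.0067 m, and each sqrt(2)
    # increase of the target length roughly halves the remeshed vertex count.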
@classproperty
def _pbd_material_kwargs(cls):
return dict(
friction=m.CLOTH_FRICTION,
drag=m.CLOTH_DRAG,
lift=m.CLOTH_LIFT,
)
@classproperty
def _register_system(cls):
# We should register this system since it's an "actual" system (not an intermediate class)
return True
@classproperty
def particle_contact_offset(cls):
return m.CLOTH_PARTICLE_CONTACT_OFFSET
@classproperty
def state_size(cls):
# Default is no state
return 0
@classmethod
def _dump_state(cls):
# Empty by default
return dict()
@classmethod
def _load_state(cls, state):
# Nothing by default
pass
@classmethod
def _serialize(cls, state):
# Nothing by default
return np.array([], dtype=float)
@classmethod
def _deserialize(cls, state):
# Nothing by default
return dict(), 0
StanfordVL/OmniGibson/omnigibson/action_primitives/action_primitive_set_base.py
import inspect
from abc import ABCMeta, abstractmethod
from enum import IntEnum
from typing import List
from future.utils import with_metaclass
from omnigibson import Environment
from omnigibson.robots import BaseRobot
from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
from omnigibson.tasks.task_base import BaseTask
REGISTERED_PRIMITIVE_SETS = {}
class ActionPrimitiveError(ValueError):
class Reason(IntEnum):
# A primitive could not be executed because a precondition was not satisfied, e.g. PLACE was called without an
# object currently in hand.
PRE_CONDITION_ERROR = 0
# A sampling error occurred: e.g. a position to place an object could not be found, or the robot could not
# find a pose near the object to navigate to.
SAMPLING_ERROR = 1
# The planning for a primitive failed possibly due to not being able to find a path.
PLANNING_ERROR = 2
# The planning for a primitive was successfully completed, but an error occurred during execution.
EXECUTION_ERROR = 3
# The execution of the primitive happened correctly, but while checking post-conditions, an error was found.
POST_CONDITION_ERROR = 4
def __init__(self, reason: Reason, message, metadata=None):
self.reason = reason
self.metadata = metadata if metadata is not None else {}
super().__init__(f"{reason.name}: {message}. Additional info: {metadata}")
class ActionPrimitiveErrorGroup(ValueError):
def __init__(self, exceptions: List[ActionPrimitiveError]) -> None:
self._exceptions = tuple(exceptions)
submessages = [f"Attempt {i}: {e}" for i, e in enumerate(exceptions)]
submessages = "\n\n".join(submessages)
message = "An error occurred during each attempt of this action.\n\n" + submessages
super().__init__(message)
@property
def exceptions(self):
return self._exceptions
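# Illustrative usage sketch (not part of the original file): primitives raise ActionPrimitiveError
# once per attempt and bundle the failures, so callers can inspect each one. The `controller`,
# `primitive`, `obj`, and `env` names below are hypothetical:
#
#   try:
#       for action in controller.apply_ref(primitive, obj):
#           env.step(action)
#   except ActionPrimitiveErrorGroup as e:
#       for err in e.exceptions:
#           print(err.reason.name, err.metadata)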
class BaseActionPrimitiveSet(with_metaclass(ABCMeta, object)):
def __init_subclass__(cls, **kwargs):
"""
Registers all subclasses as part of this registry. This is useful to decouple internal codebase from external
user additions. This way, users can add their custom primitive set by simply extending this class,
and it will automatically be registered internally. This allows users to then specify their primitive set
directly in string-from in e.g., their config files, without having to manually set the str-to-class mapping
in our code.
"""
if not inspect.isabstract(cls):
REGISTERED_PRIMITIVE_SETS[cls.__name__] = cls
def __init__(self, env):
        self.env: Environment = env
@property
def robot(self):
# Currently returns the first robot in the environment, but can be scaled to multiple robots
# by creating multiple action generators and passing in a robot index etc.
return self.env.robots[0]
@abstractmethod
def get_action_space(self):
"""Get the higher-level action space as an OpenAI Gym Space object."""
pass
@abstractmethod
def apply(self, action):
"""
Apply a primitive action.
Given a higher-level action in the same format as the action space (e.g. as a number),
generates a sequence of lower level actions (or raise ActionPrimitiveError). The action
will get resolved and passed into apply_ref.
"""
pass
@abstractmethod
def apply_ref(self, action, *args):
"""
Apply a primitive action by reference.
Given a higher-level action from the corresponding action set enum and any necessary arguments,
generates a sequence of lower level actions (or raise ActionPrimitiveError)
"""
pass
StanfordVL/OmniGibson/omnigibson/action_primitives/symbolic_semantic_action_primitives.py
"""
WARNING!
A set of action primitives that work without executing low-level physics but instead teleporting
objects directly into their post-condition states. Useful for learning high-level methods.
"""
from aenum import IntEnum, auto
import numpy as np
from omnigibson.robots.robot_base import BaseRobot
from omnigibson.systems.system_base import REGISTERED_SYSTEMS
from omnigibson.transition_rules import REGISTERED_RULES, TransitionRuleAPI
from omnigibson import object_states
from omnigibson.action_primitives.action_primitive_set_base import ActionPrimitiveError, ActionPrimitiveErrorGroup
from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives
from omnigibson.objects import DatasetObject
class SymbolicSemanticActionPrimitiveSet(IntEnum):
_init_ = 'value __doc__'
GRASP = auto(), "Grasp an object"
PLACE_ON_TOP = auto(), "Place the currently grasped object on top of another object"
PLACE_INSIDE = auto(), "Place the currently grasped object inside another object"
OPEN = auto(), "Open an object"
CLOSE = auto(), "Close an object"
TOGGLE_ON = auto(), "Toggle an object on"
TOGGLE_OFF = auto(), "Toggle an object off"
SOAK_UNDER = auto(), "Soak the currently grasped object under a fluid source."
SOAK_INSIDE = auto(), "Soak the currently grasped object inside the fluid within a container."
WIPE = auto(), "Wipe the given object with the currently grasped object."
CUT = auto(), "Cut (slice or dice) the given object with the currently grasped object."
PLACE_NEAR_HEATING_ELEMENT = auto(), "Place the currently grasped object near the heating element of another object."
NAVIGATE_TO = auto(), "Navigate to an object"
RELEASE = auto(), "Release an object, letting it fall to the ground. You can then grasp it again, as a way of reorienting your grasp of the object."
class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
def __init__(self, env):
super().__init__(env)
self.controller_functions = {
SymbolicSemanticActionPrimitiveSet.GRASP: self._grasp,
SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top,
SymbolicSemanticActionPrimitiveSet.PLACE_INSIDE: self._place_inside,
SymbolicSemanticActionPrimitiveSet.OPEN: self._open,
SymbolicSemanticActionPrimitiveSet.CLOSE: self._close,
SymbolicSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on,
SymbolicSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off,
SymbolicSemanticActionPrimitiveSet.SOAK_UNDER: self._soak_under,
SymbolicSemanticActionPrimitiveSet.SOAK_INSIDE: self._soak_inside,
SymbolicSemanticActionPrimitiveSet.WIPE: self._wipe,
SymbolicSemanticActionPrimitiveSet.CUT: self._cut,
SymbolicSemanticActionPrimitiveSet.PLACE_NEAR_HEATING_ELEMENT: self._place_near_heating_element,
SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO: self._navigate_to_obj,
SymbolicSemanticActionPrimitiveSet.RELEASE: self._release,
}
def apply_ref(self, prim, *args, attempts=3):
"""
Yields action for robot to execute the primitive with the given arguments.
Args:
prim (SymbolicSemanticActionPrimitiveSet): Primitive to execute
args: Arguments for the primitive
attempts (int): Number of attempts to make before raising an error
Returns:
            np.array or None: Action array for one step for the robot to execute the primitive, or None if the primitive completed
Raises:
ActionPrimitiveError: If primitive fails to execute
"""
assert attempts > 0, "Must make at least one attempt"
ctrl = self.controller_functions[prim]
if any(isinstance(arg, BaseRobot) for arg in args):
raise ActionPrimitiveErrorGroup([
ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"Cannot call a symbolic semantic action primitive with a robot as an argument."
)
])
errors = []
for _ in range(attempts):
# Attempt
success = False
try:
yield from ctrl(*args)
success = True
except ActionPrimitiveError as e:
errors.append(e)
try:
# Settle before returning.
yield from self._settle_robot()
except ActionPrimitiveError:
pass
# Stop on success
if success:
return
raise ActionPrimitiveErrorGroup(errors)
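    # Illustrative usage sketch (not part of the original file), assuming an OmniGibson `env` with
    # a robot and a graspable `apple` object already loaded:
    #
    #   primitives = SymbolicSemanticActionPrimitives(env)
    #   for action in primitives.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
    #       env.step(action)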
def _open_or_close(self, obj, should_open):
if self._get_obj_in_hand():
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"Cannot open or close an object while holding an object",
{"object in hand": self._get_obj_in_hand().name},
)
if object_states.Open not in obj.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is not openable.",
{"target object": obj.name}
)
# Don't do anything if the object is already closed and we're trying to close.
if should_open == obj.states[object_states.Open].get_value():
return
# Set the value
obj.states[object_states.Open].set_value(should_open)
# Settle
yield from self._settle_robot()
if obj.states[object_states.Open].get_value() != should_open:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
"The object did not open or close as expected. Maybe try again",
{"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()},
)
def _grasp(self, obj: DatasetObject):
"""
Yields action for the robot to navigate to object if needed, then to grasp it
Args:
            obj (DatasetObject): Object for robot to grasp
Returns:
np.array or None: Action array for one step for the robot to grasp or None if grasp completed
"""
# Don't do anything if the object is already grasped.
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is not None:
if obj_in_hand == obj:
return
else:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"Cannot grasp when your hand is already full",
{"target object": obj.name, "object currently in hand": obj_in_hand.name},
)
# Get close
# yield from self._navigate_if_needed(obj)
# Perform forced assisted grasp
obj.set_position(self.robot.get_eef_position(self.arm))
self.robot._establish_grasp(self.arm, (obj, obj.root_link), obj.get_position())
# Execute for a moment
yield from self._settle_robot()
# Verify
if self._get_obj_in_hand() is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
"Grasp completed, but no object detected in hand after executing grasp",
{"target object": obj.name},
)
if self._get_obj_in_hand() != obj:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
"An unexpected object was detected in hand after executing grasp. Consider releasing it",
{"expected object": obj.name, "actual object": self._get_obj_in_hand().name},
)
def _release(self):
if not self._get_obj_in_hand():
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"Cannot release an object if you're not already holding an object",
)
self.robot.release_grasp_immediately()
yield from self._settle_robot()
def _toggle(self, obj, value):
if self._get_obj_in_hand():
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"Cannot toggle an object while holding an object",
{"object in hand": self._get_obj_in_hand()},
)
if object_states.ToggledOn not in obj.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is not toggleable.",
{"target object": obj.name}
)
if obj.states[object_states.ToggledOn].get_value() == value:
return
# Call the setter
obj.states[object_states.ToggledOn].set_value(value)
# Yield some actions
yield from self._settle_robot()
# Check that it actually happened
if obj.states[object_states.ToggledOn].get_value() != value:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
"The object did not toggle as expected - maybe try again",
{"target object": obj.name, "is it currently toggled on": obj.states[object_states.ToggledOn].get_value()}
)
def _place_with_predicate(self, obj, predicate, near_poses=None, near_poses_threshold=None):
"""
Yields action for the robot to navigate to the object if needed, then to place it
        Args:
            obj (StatefulObject): Object for robot to place the object in its hand on
            predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
            near_poses (Iterable of arrays or None): Optional positions the placement location should be near
            near_poses_threshold (float or None): Maximum allowed distance from near_poses for a valid placement
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
"""
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping an object first to place it somewhere."
)
# Find a spot to put it
obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj, near_poses=near_poses, near_poses_threshold=near_poses_threshold)
# Get close, release the object.
# yield from self._navigate_if_needed(obj, pose_on_obj=obj_pose)
yield from self._release()
# Actually move the object to the spot and step a bit to settle it.
obj_in_hand.set_position_orientation(*obj_pose)
yield from self._settle_robot()
if not obj_in_hand.states[predicate].get_value(obj):
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"Failed to place object at the desired place (probably dropped). The object was still released, so you need to grasp it again to continue",
{"dropped object": obj_in_hand.name, "target object": obj.name}
)
def _soak_under(self, obj):
# Check that our current object is a particle remover
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a soakable object first."
)
# Check that the target object is a particle source
if object_states.ParticleSource not in obj.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is not a particle source, so you can not soak anything under it.",
{"target object": obj.name}
)
# Check if the target object has any particles in it
producing_systems = {ps for ps in REGISTERED_SYSTEMS.values() if obj.states[object_states.ParticleSource].check_conditions_for_system(ps)}
if not producing_systems:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object currently is not producing any particles - try toggling it on.",
{"target object": obj.name}
)
# Check that the current object can remove those particles
if object_states.Saturated not in obj_in_hand.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The currently grasped object cannot soak particles.",
{"object in hand": obj_in_hand.name}
)
supported_systems = {
x for x in producing_systems if obj_in_hand.states[object_states.ParticleRemover].supports_system(x)
}
if not supported_systems:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object only contains particles that this object cannot soak.",
{
"target object": obj.name,
"cleaning tool": obj_in_hand.name,
"particles the target object is producing": sorted(x.name for x in producing_systems),
"particles the grasped object can remove": sorted([x for x in obj_in_hand.states[object_states.ParticleRemover].conditions.keys()])
}
)
currently_removable_systems = {
x for x in supported_systems if obj_in_hand.states[object_states.ParticleRemover].check_conditions_for_system(x)
}
if not currently_removable_systems:
# TODO: This needs to be far more descriptive.
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is covered by some particles that this object can normally soak, but needs to be in a different state to do so (e.g. toggled on, soaked by another fluid first, etc.).",
{
"target object": obj.name,
"cleaning tool": obj_in_hand.name,
"particles the target object is producing": sorted(x.name for x in producing_systems),
}
)
# If so, remove the particles.
for system in currently_removable_systems:
obj_in_hand.states[object_states.Saturated].set_value(system, True)
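    # The checks above narrow the candidate systems in three stages, each a subset
    # of the previous one: systems the source is producing -> systems the grasped
    # remover supports -> systems whose removal conditions are currently met.
    # For example (hypothetical), a toggled-on sink producing "water" with a sponge
    # in hand passes all three stages, ending with Saturated(water) set on the sponge.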
def _soak_inside(self, obj):
# Check that our current object is a particle remover
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a soakable object first."
)
# Check that the target object is fillable
if object_states.Contains not in obj.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is not fillable by particles, so you can not soak anything in it.",
{"target object": obj.name}
)
# Check if the target object has any particles in it
contained_systems = {ps for ps in REGISTERED_SYSTEMS.values() if obj.states[object_states.Contains].get_value(ps.states)}
if not contained_systems:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object currently does not contain any particles.",
{"target object": obj.name}
)
# Check that the current object can remove those particles
if object_states.Saturated not in obj_in_hand.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The currently grasped object cannot soak particles.",
{"object in hand": obj_in_hand.name}
)
supported_systems = {
x for x in contained_systems if obj_in_hand.states[object_states.ParticleRemover].supports_system(x)
}
if not supported_systems:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object only contains particles that this object cannot soak.",
{
"target object": obj.name,
"cleaning tool": obj_in_hand.name,
"particles the target object contains": sorted(x.name for x in contained_systems),
"particles the grasped object can remove": sorted([x for x in obj_in_hand.states[object_states.ParticleRemover].conditions.keys()])
}
)
currently_removable_systems = {
x for x in supported_systems if obj_in_hand.states[object_states.ParticleRemover].check_conditions_for_system(x)
}
if not currently_removable_systems:
# TODO: This needs to be far more descriptive.
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is covered by some particles that this object can normally soak, but needs to be in a different state to do so (e.g. toggled on, soaked by another fluid first, etc.).",
{
"target object": obj.name,
"cleaning tool": obj_in_hand.name,
"particles the target object contains": sorted(x.name for x in contained_systems),
}
)
# If so, remove the particles.
for system in currently_removable_systems:
obj_in_hand.states[object_states.Saturated].set_value(system, True)
def _wipe(self, obj):
# Check that our current object is a particle remover
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a wiping tool (particle remover) first to wipe an object."
)
# Check that the target object is coverable
if object_states.Covered not in obj.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is not coverable by any particles, so there is no need to wipe it.",
{"target object": obj.name}
)
# Check if the target object has any particles on it
covering_systems = {ps for ps in REGISTERED_SYSTEMS.values() if obj.states[object_states.Covered].get_value(ps.states)}
if not covering_systems:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is not covered by any particles.",
{"target object": obj.name}
)
# Check that the current object can remove those particles
if object_states.ParticleRemover not in obj_in_hand.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The currently grasped object is not a particle remover.",
{"object in hand": obj_in_hand.name}
)
supported_systems = {
x for x in covering_systems if obj_in_hand.states[object_states.ParticleRemover].supports_system(x)
}
if not supported_systems:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is covered only by particles that this cleaning tool cannot remove.",
{
"target object": obj.name,
"cleaning tool": obj_in_hand.name,
"particles the target object is covered by": sorted(x.name for x in covering_systems),
"particles the grasped object can remove": sorted([x for x in obj_in_hand.states[object_states.ParticleRemover].conditions.keys()])
}
)
currently_removable_systems = {
x for x in supported_systems if obj_in_hand.states[object_states.ParticleRemover].check_conditions_for_system(x)
}
if not currently_removable_systems:
# TODO: This needs to be far more descriptive.
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is covered by some particles that this cleaning tool can normally remove, but needs to be in a different state to do so (e.g. toggled on, soaked by another fluid first, etc.).",
{
"target object": obj.name,
"cleaning tool": obj_in_hand.name,
"particles the target object is covered by": sorted(x.name for x in covering_systems),
}
)
# If so, remove the particles.
for system in currently_removable_systems:
obj_in_hand.states[object_states.Covered].set_value(system, False)
def _cut(self, obj):
# Check that our current object is a slicer
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a cutting tool first to slice an object."
)
if "slicer" not in obj_in_hand._abilities:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The current object is not a cutting tool.",
{"object in hand": obj_in_hand.name}
)
# Check that the target object is sliceable
if "sliceable" not in obj._abilities and "diceable" not in obj._abilities:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The target object is not sliceable or diceable.",
{"target object": obj.name}
)
# Get close
# yield from self._navigate_if_needed(obj)
# TODO: Do some more validation
added_obj_attrs = []
removed_objs = []
output = REGISTERED_RULES["SlicingRule"].transition({"sliceable": [obj]})
added_obj_attrs += output.add
removed_objs += output.remove
TransitionRuleAPI.execute_transition(added_obj_attrs=added_obj_attrs, removed_objs=removed_objs)
yield from self._settle_robot()
def _place_near_heating_element(self, heat_source_obj):
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping an object first to place it somewhere."
)
if object_states.HeatSourceOrSink not in heat_source_obj.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "The target object is not a heat source or sink.", {"target object": heat_source_obj.name}
)
if heat_source_obj.states[object_states.HeatSourceOrSink].requires_inside:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"The heat source object has no explicit heating element, it just requires the cookable object to be placed inside it.",
{"target object": heat_source_obj.name}
)
# Get the position of the heat source on the thing we're placing near
heating_element_positions = np.array([link.get_position() for link in heat_source_obj.states[object_states.HeatSourceOrSink].links.values()])
heating_distance_threshold = heat_source_obj.states[object_states.HeatSourceOrSink].distance_threshold
# Call place-with-predicate
yield from self._place_with_predicate(heat_source_obj, object_states.OnTop, near_poses=heating_element_positions, near_poses_threshold=heating_distance_threshold)
def _wait_for_cooked(self, obj):
# Check that the current object is cookable
if object_states.Cooked not in obj.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "Target object is not cookable.",
{"target object": obj.name}
)
# Keep waiting as long as the thing is warming up.
prev_temp = obj.states[object_states.Temperature].get_value()
while not obj.states[object_states.Cooked].get_value():
            # Pass some time
            for _ in range(10):
                yield self._postprocess_action(self._empty_action())
            # Check that we are still heating up
            new_temp = obj.states[object_states.Temperature].get_value()
            if new_temp - prev_temp < 1e-2:
                raise ActionPrimitiveError(
                    ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                    "Target object is not currently heating up.",
                    {"target object": obj.name}
                )
            prev_temp = new_temp
def _navigate_to_pose(self, pose_2d):
"""
Yields the action to navigate robot to the specified 2d pose
Args:
pose_2d (Iterable): (x, y, yaw) 2d pose
Returns:
np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
robot_pose = self._get_robot_pose_from_2d_pose(pose_2d)
self.robot.set_position_orientation(*robot_pose)
yield from self._settle_robot()
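    # Illustrative sketch of the 2D -> full pose conversion this relies on
    # (hypothetical; `_get_robot_pose_from_2d_pose` is inherited from the parent
    # class and is assumed to build a yaw-only orientation at a fixed base height):
    #
    #     pos = np.array([x, y, z_offset])
    #     orn = T.euler2quat([0, 0, yaw])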
| 26,260 | Python | 45.64476 | 212 | 0.614966 |
StanfordVL/OmniGibson/omnigibson/action_primitives/starter_semantic_action_primitives.py | """
WARNING!
The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example.
It currently only works with Fetch and Tiago with their JointControllers set to delta mode.
See provided tiago_primitives.yaml config file for an example. See examples/action_primitives for
runnable examples.
"""
from functools import cached_property
import inspect
import logging
import random
from aenum import IntEnum, auto
from math import ceil
import cv2
from matplotlib import pyplot as plt
import gym
import numpy as np
from scipy.spatial.transform import Rotation, Slerp
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson import object_states
from omnigibson.action_primitives.action_primitive_set_base import ActionPrimitiveError, ActionPrimitiveErrorGroup, BaseActionPrimitiveSet
from omnigibson.controllers import JointController, DifferentialDriveController
from omnigibson.macros import create_module_macros
from omnigibson.utils.object_state_utils import sample_cuboid_for_predicate
from omnigibson.objects.object_base import BaseObject
from omnigibson.robots import BaseRobot, Fetch, Tiago
from omnigibson.tasks.behavior_task import BehaviorTask
from omnigibson.utils.motion_planning_utils import (
plan_base_motion,
plan_arm_motion,
plan_arm_motion_ik,
set_base_and_detect_collision,
detect_robot_collision_in_sim
)
import omnigibson.utils.transform_utils as T
from omnigibson.utils.control_utils import IKSolver
from omnigibson.utils.grasping_planning_utils import (
get_grasp_poses_for_object_sticky,
get_grasp_position_for_open
)
from omnigibson.controllers.controller_base import ControlType
from omnigibson.utils.control_utils import FKSolver
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.objects.usd_object import USDObject
m = create_module_macros(module_path=__file__)
m.DEFAULT_BODY_OFFSET_FROM_FLOOR = 0.05
m.KP_LIN_VEL = 0.3
m.KP_ANGLE_VEL = 0.2
m.MAX_STEPS_FOR_SETTLING = 500
m.MAX_CARTESIAN_HAND_STEP = 0.002
m.MAX_STEPS_FOR_HAND_MOVE_JOINT = 500
m.MAX_STEPS_FOR_HAND_MOVE_IK = 1000
m.MAX_STEPS_FOR_GRASP_OR_RELEASE = 250
m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION = 500
m.MAX_ATTEMPTS_FOR_OPEN_CLOSE = 20
m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE = 20
m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 200
m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM = 60
m.PREDICATE_SAMPLING_Z_OFFSET = 0.02
m.GRASP_APPROACH_DISTANCE = 0.2
m.OPEN_GRASP_APPROACH_DISTANCE = 0.4
m.DEFAULT_DIST_THRESHOLD = 0.05
m.DEFAULT_ANGLE_THRESHOLD = 0.05
m.LOW_PRECISION_DIST_THRESHOLD = 0.1
m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2
m.TIAGO_TORSO_FIXED = False
m.JOINT_POS_DIFF_THRESHOLD = 0.005
m.JOINT_CONTROL_MIN_ACTION = 0.0
m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = np.deg2rad(45)
log = create_module_logger(module_name=__name__)
def indented_print(msg, *args, **kwargs):
log.debug(" " * len(inspect.stack()) + str(msg), *args, **kwargs)
class RobotCopy:
"""A data structure for storing information about a robot copy, used for collision checking in planning."""
def __init__(self):
self.prims = {}
self.meshes = {}
self.relative_poses = {}
self.links_relative_poses = {}
self.reset_pose = {
"original": ([0, 0, -5.0], [0, 0, 0, 1]),
"simplified": ([5, 0, -5.0], [0, 0, 0, 1]),
}
class PlanningContext(object):
"""
A context manager that sets up a robot copy for collision checking in planning.
"""
def __init__(self, robot, robot_copy, robot_copy_type="original"):
self.robot = robot
self.robot_copy = robot_copy
self.robot_copy_type = robot_copy_type if robot_copy_type in robot_copy.prims.keys() else "original"
self.disabled_collision_pairs_dict = {}
def __enter__(self):
self._assemble_robot_copy()
self._construct_disabled_collision_pairs()
return self
def __exit__(self, *args):
self._set_prim_pose(self.robot_copy.prims[self.robot_copy_type], self.robot_copy.reset_pose[self.robot_copy_type])
def _assemble_robot_copy(self):
if m.TIAGO_TORSO_FIXED:
fk_descriptor = "left_fixed"
else:
fk_descriptor = "combined" if "combined" in self.robot.robot_arm_descriptor_yamls else self.robot.default_arm
self.fk_solver = FKSolver(
robot_description_path=self.robot.robot_arm_descriptor_yamls[fk_descriptor],
robot_urdf_path=self.robot.urdf_path,
)
# TODO: Remove the need for this after refactoring the FK / descriptors / etc.
arm_links = self.robot.manipulation_link_names
if m.TIAGO_TORSO_FIXED:
            assert self.robot.default_arm == "left", "Fixed torso mode only supports left arm!"
joint_control_idx = self.robot.arm_control_idx["left"]
joint_pos = np.array(self.robot.get_joint_positions()[joint_control_idx])
else:
joint_combined_idx = np.concatenate([self.robot.trunk_control_idx, self.robot.arm_control_idx[fk_descriptor]])
joint_pos = np.array(self.robot.get_joint_positions()[joint_combined_idx])
link_poses = self.fk_solver.get_link_poses(joint_pos, arm_links)
# Set position of robot copy root prim
self._set_prim_pose(self.robot_copy.prims[self.robot_copy_type], self.robot.get_position_orientation())
# Assemble robot meshes
for link_name, meshes in self.robot_copy.meshes[self.robot_copy_type].items():
for mesh_name, copy_mesh in meshes.items():
# Skip grasping frame (this is necessary for Tiago, but should be cleaned up in the future)
if "grasping_frame" in link_name:
continue
# Set poses of meshes relative to the robot to construct the robot
link_pose = link_poses[link_name] if link_name in arm_links else self.robot_copy.links_relative_poses[self.robot_copy_type][link_name]
mesh_copy_pose = T.pose_transform(*link_pose, *self.robot_copy.relative_poses[self.robot_copy_type][link_name][mesh_name])
self._set_prim_pose(copy_mesh, mesh_copy_pose)
def _set_prim_pose(self, prim, pose):
translation = lazy.pxr.Gf.Vec3d(*np.array(pose[0], dtype=float))
prim.GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(pose[1], dtype=float)[[3, 0, 1, 2]]
prim.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
def _construct_disabled_collision_pairs(self):
robot_meshes_copy = self.robot_copy.meshes[self.robot_copy_type]
# Filter out collision pairs of meshes part of the same link
for meshes in robot_meshes_copy.values():
for mesh in meshes.values():
                self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] = [other_mesh.GetPrimPath().pathString for other_mesh in meshes.values()]
# Filter out all self-collisions
if self.robot_copy_type == "simplified":
all_meshes = [mesh.GetPrimPath().pathString for link in robot_meshes_copy.keys() for mesh in robot_meshes_copy[link].values()]
for link in robot_meshes_copy.keys():
for mesh in robot_meshes_copy[link].values():
self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += all_meshes
# Filter out collision pairs of meshes part of disabled collision pairs
else:
for pair in self.robot.disabled_collision_pairs:
link_1 = pair[0]
link_2 = pair[1]
if link_1 in robot_meshes_copy.keys() and link_2 in robot_meshes_copy.keys():
for mesh in robot_meshes_copy[link_1].values():
                        self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += [other_mesh.GetPrimPath().pathString for other_mesh in robot_meshes_copy[link_2].values()]
for mesh in robot_meshes_copy[link_2].values():
                        self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += [other_mesh.GetPrimPath().pathString for other_mesh in robot_meshes_copy[link_1].values()]
# Filter out colliders all robot copy meshes should ignore
disabled_colliders = []
# Disable original robot colliders so copy can't collide with it
disabled_colliders += [link.prim_path for link in self.robot.links.values()]
filter_categories = ["floors"]
for obj in og.sim.scene.objects:
if obj.category in filter_categories:
disabled_colliders += [link.prim_path for link in obj.links.values()]
# Disable object in hand
obj_in_hand = self.robot._ag_obj_in_hand[self.robot.default_arm]
if obj_in_hand is not None:
disabled_colliders += [link.prim_path for link in obj_in_hand.links.values()]
for colliders in self.disabled_collision_pairs_dict.values():
colliders += disabled_colliders
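# Typical usage of PlanningContext (mirroring the motion planners further below):
#
#     with PlanningContext(robot, robot_copy, "original") as context:
#         plan = plan_arm_motion(robot=robot, end_conf=joint_pos, context=context)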
class StarterSemanticActionPrimitiveSet(IntEnum):
_init_ = 'value __doc__'
GRASP = auto(), "Grasp an object"
PLACE_ON_TOP = auto(), "Place the currently grasped object on top of another object"
PLACE_INSIDE = auto(), "Place the currently grasped object inside another object"
OPEN = auto(), "Open an object"
CLOSE = auto(), "Close an object"
NAVIGATE_TO = auto(), "Navigate to an object (mostly for debugging purposes - other primitives also navigate first)"
RELEASE = auto(), "Release an object, letting it fall to the ground. You can then grasp it again, as a way of reorienting your grasp of the object."
TOGGLE_ON = auto(), "Toggle an object on"
TOGGLE_OFF = auto(), "Toggle an object off"
class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
def __init__(self, env, add_context=False, enable_head_tracking=True, always_track_eef=False, task_relevant_objects_only=False):
"""
Initializes a StarterSemanticActionPrimitives generator.
Args:
env (Environment): The environment that the primitives will run on.
add_context (bool): Whether to add text context to the return value. Defaults to False.
enable_head_tracking (bool): Whether to enable head tracking. Defaults to True.
always_track_eef (bool, optional): Whether to always track the end effector, as opposed
to switching between target object and end effector based on context. Defaults to False.
task_relevant_objects_only (bool): Whether to only consider objects relevant to the task
when computing the action space. Defaults to False.
"""
log.warning(
"The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. "
"It currently only works with Fetch and Tiago with their JointControllers set to delta mode."
)
super().__init__(env)
self.controller_functions = {
StarterSemanticActionPrimitiveSet.GRASP: self._grasp,
StarterSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top,
StarterSemanticActionPrimitiveSet.PLACE_INSIDE: self._place_inside,
StarterSemanticActionPrimitiveSet.OPEN: self._open,
StarterSemanticActionPrimitiveSet.CLOSE: self._close,
StarterSemanticActionPrimitiveSet.NAVIGATE_TO: self._navigate_to_obj,
StarterSemanticActionPrimitiveSet.RELEASE: self._execute_release,
StarterSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on,
StarterSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off,
}
# Validate the robot
assert isinstance(self.robot, (Fetch, Tiago)), "StarterSemanticActionPrimitives only works with Fetch and Tiago."
assert isinstance(self.robot.controllers["base"], (JointController, DifferentialDriveController)), \
"StarterSemanticActionPrimitives only works with a JointController or DifferentialDriveController at the robot base."
self._base_controller_is_joint = isinstance(self.robot.controllers["base"], JointController)
if self._base_controller_is_joint:
assert self.robot.controllers["base"].control_type == ControlType.VELOCITY, \
"StarterSemanticActionPrimitives only works with a base JointController with velocity mode."
assert not self.robot.controllers["base"].use_delta_commands, \
"StarterSemanticActionPrimitives only works with a base JointController with absolute mode."
assert self.robot.controllers["base"].command_dim == 3, \
"StarterSemanticActionPrimitives only works with a base JointController with 3 dof (x, y, theta)."
self.arm = self.robot.default_arm
self.robot_model = self.robot.model_name
self.robot_base_mass = self.robot._links["base_link"].mass
self.add_context = add_context
self._task_relevant_objects_only = task_relevant_objects_only
self._enable_head_tracking = enable_head_tracking
self._always_track_eef = always_track_eef
self._tracking_object = None
self.robot_copy = self._load_robot_copy()
def _postprocess_action(self, action):
"""Postprocesses action by applying head tracking and adding context if necessary."""
if self._enable_head_tracking:
action = self._overwrite_head_action(action)
if not self.add_context:
return action
stack = inspect.stack()
action_type = "manip:"
context_function = stack[1].function
for frame_info in stack[1:]:
function_name = frame_info.function
# TODO: Make this stop at apply_ref
            if function_name in ["_grasp", "_place_on_top", "_place_inside", "_open_or_close"]:
break
if "nav" in function_name:
action_type = "nav"
context = action_type + context_function
return action, context
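    # For example (illustrative), with add_context=True an action yielded from
    # within _navigate_to_pose would come back as a tuple like
    # (action, "nav_navigate_to_pose"), while one yielded from a grasp would look
    # like (action, "manip:_move_hand_direct_joint").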
def _load_robot_copy(self):
"""Loads a copy of the robot that can be manipulated into arbitrary configurations for collision checking in planning."""
robot_copy = RobotCopy()
robots_to_copy = {
"original": {
"robot": self.robot,
"copy_path": "/World/robot_copy"
}
}
if hasattr(self.robot, 'simplified_mesh_usd_path'):
simplified_robot = {
"robot": USDObject("simplified_copy", self.robot.simplified_mesh_usd_path),
"copy_path": "/World/simplified_robot_copy"
}
robots_to_copy['simplified'] = simplified_robot
for robot_type, rc in robots_to_copy.items():
copy_robot = None
copy_robot_meshes = {}
copy_robot_meshes_relative_poses = {}
copy_robot_links_relative_poses = {}
# Create prim under which robot meshes are nested and set position
lazy.omni.usd.commands.CreatePrimCommand("Xform", rc['copy_path']).do()
copy_robot = lazy.omni.isaac.core.utils.prims.get_prim_at_path(rc['copy_path'])
reset_pose = robot_copy.reset_pose[robot_type]
translation = lazy.pxr.Gf.Vec3d(*np.array(reset_pose[0], dtype=float))
copy_robot.GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(reset_pose[1], dtype=float)[[3, 0, 1, 2]]
copy_robot.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
robot_to_copy = None
if robot_type == "simplified":
robot_to_copy = rc['robot']
og.sim.import_object(robot_to_copy)
else:
robot_to_copy = rc['robot']
# Copy robot meshes
for link in robot_to_copy.links.values():
link_name = link.prim_path.split("/")[-1]
for mesh_name, mesh in link.collision_meshes.items():
split_path = mesh.prim_path.split("/")
# Do not copy grasping frame (this is necessary for Tiago, but should be cleaned up in the future)
if "grasping_frame" in link_name:
continue
copy_mesh_path = rc['copy_path'] + "/" + link_name
copy_mesh_path += f"_{split_path[-1]}" if split_path[-1] != "collisions" else ""
lazy.omni.usd.commands.CopyPrimCommand(mesh.prim_path, path_to=copy_mesh_path).do()
copy_mesh = lazy.omni.isaac.core.utils.prims.get_prim_at_path(copy_mesh_path)
relative_pose = T.relative_pose_transform(*mesh.get_position_orientation(), *link.get_position_orientation())
relative_pose = (relative_pose[0], np.array([0, 0, 0, 1]))
if link_name not in copy_robot_meshes.keys():
copy_robot_meshes[link_name] = {mesh_name: copy_mesh}
copy_robot_meshes_relative_poses[link_name] = {mesh_name: relative_pose}
else:
copy_robot_meshes[link_name][mesh_name] = copy_mesh
copy_robot_meshes_relative_poses[link_name][mesh_name] = relative_pose
copy_robot_links_relative_poses[link_name] = T.relative_pose_transform(*link.get_position_orientation(), *self.robot.get_position_orientation())
if robot_type == "simplified":
og.sim.remove_object(robot_to_copy)
robot_copy.prims[robot_type] = copy_robot
robot_copy.meshes[robot_type] = copy_robot_meshes
robot_copy.relative_poses[robot_type] = copy_robot_meshes_relative_poses
robot_copy.links_relative_poses[robot_type] = copy_robot_links_relative_poses
og.sim.step()
return robot_copy
def get_action_space(self):
# TODO: Figure out how to implement what happens when the set of objects in scene changes.
if self._task_relevant_objects_only:
assert isinstance(self.env.task, BehaviorTask), "Activity relevant objects can only be used for BEHAVIOR tasks"
self.addressable_objects = sorted(set(self.env.task.object_scope.values()), key=lambda obj: obj.name)
else:
self.addressable_objects = sorted(set(self.env.scene.objects_by_name.values()), key=lambda obj: obj.name)
# Filter out the robots.
self.addressable_objects = [obj for obj in self.addressable_objects if not isinstance(obj, BaseRobot)]
self.num_objects = len(self.addressable_objects)
        return gym.spaces.Tuple(
            [gym.spaces.Discrete(len(StarterSemanticActionPrimitiveSet)), gym.spaces.Discrete(self.num_objects)]
        )
def get_action_from_primitive_and_object(self, primitive: StarterSemanticActionPrimitiveSet, obj: BaseObject):
assert obj in self.addressable_objects
primitive_int = int(primitive)
return primitive_int, self.addressable_objects.index(obj)
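    # Example (hypothetical): driving the primitives through the discrete action
    # interface instead of calling apply_ref directly. get_action_space() must be
    # called first so that self.addressable_objects is populated.
    #
    #     primitives.get_action_space()
    #     action = primitives.get_action_from_primitive_and_object(
    #         StarterSemanticActionPrimitiveSet.NAVIGATE_TO, obj)
    #     for lower_level_action in primitives.apply(action):
    #         env.step(lower_level_action)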
def _get_obj_in_hand(self):
"""
Get object in the robot's hand
Returns:
StatefulObject or None: Object if robot is holding something or None if it is not
"""
obj_in_hand = self.robot._ag_obj_in_hand[self.arm] # TODO(MP): Expose this interface.
return obj_in_hand
def apply(self, action):
# Decompose the tuple
action_idx, obj_idx = action
# Find the target object.
target_obj = self.addressable_objects[obj_idx]
# Find the appropriate action generator.
action = StarterSemanticActionPrimitiveSet(action_idx)
return self.apply_ref(action, target_obj)
def apply_ref(self, prim, *args, attempts=3):
"""
Yields action for robot to execute the primitive with the given arguments.
Args:
prim (StarterSemanticActionPrimitiveSet): Primitive to execute
args: Arguments for the primitive
attempts (int): Number of attempts to make before raising an error
Yields:
            np.array or None: Action array for one step for the robot to execute the primitive, or None if the primitive completed
Raises:
ActionPrimitiveError: If primitive fails to execute
"""
assert attempts > 0, "Must make at least one attempt"
ctrl = self.controller_functions[prim]
errors = []
for _ in range(attempts):
# Attempt
success = False
try:
yield from ctrl(*args)
success = True
except ActionPrimitiveError as e:
errors.append(e)
try:
# If we're not holding anything, release the hand so it doesn't stick to anything else.
if not self._get_obj_in_hand():
yield from self._execute_release()
except ActionPrimitiveError:
pass
try:
# Make sure we retract the arm after every step
yield from self._reset_hand()
except ActionPrimitiveError:
pass
try:
# Settle before returning.
yield from self._settle_robot()
except ActionPrimitiveError:
pass
# Stop on success
if success:
return
raise ActionPrimitiveErrorGroup(errors)
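    # Example (hypothetical usage sketch; assumes `env` wraps a Fetch or Tiago
    # robot and `obj` is an object in its scene):
    #
    #     primitives = StarterSemanticActionPrimitives(env)
    #     for action in primitives.apply_ref(
    #             StarterSemanticActionPrimitiveSet.GRASP, obj, attempts=5):
    #         env.step(action)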
def _open(self, obj):
yield from self._open_or_close(obj, True)
def _close(self, obj):
yield from self._open_or_close(obj, False)
def _open_or_close(self, obj, should_open):
# Update the tracking to track the eef.
self._tracking_object = self.robot
if self._get_obj_in_hand():
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"Cannot open or close an object while holding an object",
{"object in hand": self._get_obj_in_hand().name},
)
# Open the hand first
yield from self._execute_release()
for _ in range(m.MAX_ATTEMPTS_FOR_OPEN_CLOSE):
try:
# TODO: This needs to be fixed. Many assumptions (None relevant joint, 3 waypoints, etc.)
if should_open:
grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None)
else:
grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None, num_waypoints=3)
if grasp_data is None:
# We were trying to do something but didn't have the data.
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.SAMPLING_ERROR,
"Could not sample grasp position for target object",
{"target object": obj.name},
)
relevant_joint, grasp_pose, target_poses, object_direction, grasp_required, pos_change = grasp_data
if abs(pos_change) < 0.1:
indented_print("Yaw change is small and done,", pos_change)
return
# Prepare data for the approach later.
approach_pos = grasp_pose[0] + object_direction * m.OPEN_GRASP_APPROACH_DISTANCE
approach_pose = (approach_pos, grasp_pose[1])
# If the grasp pose is too far, navigate
yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose)
yield from self._move_hand(grasp_pose, stop_if_stuck=True)
# We can pre-grasp in sticky grasping mode only for opening
if should_open:
yield from self._execute_grasp()
# Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm.
# It's okay if we can't go all the way because we run into the object.
yield from self._navigate_if_needed(obj, pose_on_obj=approach_pose)
if should_open:
yield from self._move_hand_linearly_cartesian(approach_pose, ignore_failure=False, stop_on_contact=True, stop_if_stuck=True)
else:
yield from self._move_hand_linearly_cartesian(approach_pose, ignore_failure=False, stop_if_stuck=True)
# Step once to update
empty_action = self._empty_action()
yield self._postprocess_action(empty_action)
for i, target_pose in enumerate(target_poses):
yield from self._move_hand_linearly_cartesian(target_pose, ignore_failure=False, stop_if_stuck=True)
# Moving to target pose often fails. This might leave the robot's motors with torques that
# try to get to a far-away position thus applying large torques, but unable to move due to
# the sticky grasp joint. Thus if we release the joint, the robot might suddenly launch in an
# arbitrary direction. To avoid this, we command the hand to apply torques with its current
# position as its target. This prevents the hand from jerking into some other position when we do a release.
yield from self._move_hand_linearly_cartesian(
self.robot.eef_links[self.arm].get_position_orientation(),
ignore_failure=True,
stop_if_stuck=True
)
if should_open:
yield from self._execute_release()
yield from self._move_base_backward()
except ActionPrimitiveError as e:
indented_print(e)
if should_open:
yield from self._execute_release()
yield from self._move_base_backward()
                else:
                    yield from self._move_hand_backward()
            # Stop attempting once the object reaches the desired state.
            if obj.states[object_states.Open].get_value() == should_open:
                break
if obj.states[object_states.Open].get_value() != should_open:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
"Despite executing the planned trajectory, the object did not open or close as expected. Maybe try again",
{"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()},
)
# TODO: Figure out how to generalize out of this "backing out" behavior.
def _move_base_backward(self, steps=5, speed=0.2):
"""
Yields action for the robot to move base so the eef is in the target pose using the planner
Args:
steps (int): steps to move base
speed (float): base speed
Returns:
np.array or None: Action array for one step for the robot to move base or None if its at the target pose
"""
for _ in range(steps):
action = self._empty_action()
action[self.robot.controller_action_idx["gripper_{}".format(self.arm)]] = 1.0
action[self.robot.base_control_idx[0]] = -speed
yield self._postprocess_action(action)
def _move_hand_backward(self, steps=5, speed=0.2):
"""
        Yields action for the robot to move its hand backwards.
        Args:
            steps (int): steps to move eef
            speed (float): eef speed
        Returns:
            np.array or None: Action array for one step for the robot to move its hand, or None if it is at the target pose
"""
for _ in range(steps):
action = self._empty_action()
action[self.robot.controller_action_idx["gripper_{}".format(self.arm)]] = 1.0
action[self.robot.controller_action_idx["arm_{}".format(self.arm)][0]] = -speed
yield self._postprocess_action(action)
def _move_hand_upward(self, steps=5, speed=0.1):
"""
Yields action for the robot to move hand upward.
Args:
steps (int): steps to move eef
speed (float): eef speed
Returns:
            np.array or None: Action array for one step for the robot to move its hand, or None if it is at the target pose
"""
# TODO: Combine these movement functions.
for _ in range(steps):
action = self._empty_action()
action[self.robot.controller_action_idx["gripper_{}".format(self.arm)]] = 1.0
action[self.robot.controller_action_idx["arm_{}".format(self.arm)][2]] = speed
yield self._postprocess_action(action)
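    # A possible consolidation of the two hand-motion helpers above, addressing the
    # TODO (sketch only; the axis index and fixed gripper command are assumptions
    # based on the action layout used in this file):
    #
    #     def _move_eef_along_axis(self, axis, steps=5, speed=0.1):
    #         for _ in range(steps):
    #             action = self._empty_action()
    #             action[self.robot.controller_action_idx[f"gripper_{self.arm}"]] = 1.0
    #             action[self.robot.controller_action_idx[f"arm_{self.arm}"][axis]] = speed
    #             yield self._postprocess_action(action)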
def _grasp(self, obj):
"""
Yields action for the robot to navigate to object if needed, then to grasp it
Args:
            obj (StatefulObject): Object for robot to grasp
Returns:
np.array or None: Action array for one step for the robot to grasp or None if grasp completed
"""
# Update the tracking to track the object.
self._tracking_object = obj
# Don't do anything if the object is already grasped.
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is not None:
if obj_in_hand == obj:
return
else:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
"Cannot grasp when your hand is already full",
{"target object": obj.name, "object currently in hand": obj_in_hand.name},
)
# Open the hand first
yield from self._execute_release()
# Allow grasping from suboptimal extents if we've tried enough times.
grasp_poses = get_grasp_poses_for_object_sticky(obj)
grasp_pose, object_direction = random.choice(grasp_poses)
# Prepare data for the approach later.
approach_pos = grasp_pose[0] + object_direction * m.GRASP_APPROACH_DISTANCE
approach_pose = (approach_pos, grasp_pose[1])
# If the grasp pose is too far, navigate.
yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose)
yield from self._move_hand(grasp_pose)
# We can pre-grasp in sticky grasping mode.
yield from self._execute_grasp()
# Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm.
# It's okay if we can't go all the way because we run into the object.
indented_print("Performing grasp approach")
yield from self._move_hand_linearly_cartesian(approach_pose, stop_on_contact=True)
# Step once to update
empty_action = self._empty_action()
yield self._postprocess_action(empty_action)
if self._get_obj_in_hand() is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
"Grasp completed, but no object detected in hand after executing grasp",
{"target object": obj.name},
)
yield from self._reset_hand()
if self._get_obj_in_hand() != obj:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
"An unexpected object was detected in hand after executing grasp. Consider releasing it",
{"expected object": obj.name, "actual object": self._get_obj_in_hand().name},
)
def _place_on_top(self, obj):
"""
Yields action for the robot to navigate to the object if needed, then to place an object on it
Args:
obj (StatefulObject): Object for robot to place the object in its hand on
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
"""
yield from self._place_with_predicate(obj, object_states.OnTop)
def _place_inside(self, obj):
"""
Yields action for the robot to navigate to the object if needed, then to place an object in it
Args:
obj (StatefulObject): Object for robot to place the object in its hand on
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
"""
yield from self._place_with_predicate(obj, object_states.Inside)
def _toggle_on(self, obj):
yield from self._toggle(obj, True)
def _toggle_off(self, obj):
yield from self._toggle(obj, False)
def _toggle(self, obj, value):
if obj.states[object_states.ToggledOn].get_value() == value:
return
# Put the hand in the toggle marker.
toggle_state = obj.states[object_states.ToggledOn]
toggle_position = toggle_state.get_link_position()
yield from self._navigate_if_needed(obj, toggle_position)
hand_orientation = self.robot.eef_links[self.arm].get_orientation() # Just keep the current hand orientation.
desired_hand_pose = (toggle_position, hand_orientation)
yield from self._move_hand(desired_hand_pose)
if obj.states[object_states.ToggledOn].get_value() != value:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
"The object did not toggle as expected - maybe try again",
{"target object": obj.name, "is it currently toggled on": obj.states[object_states.ToggledOn].get_value()}
)
def _place_with_predicate(self, obj, predicate):
"""
Yields action for the robot to navigate to the object if needed, then to place it
Args:
obj (StatefulObject): Object for robot to place the object in its hand on
predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
Returns:
np.array or None: Action array for one step for the robot to place or None if place completed
"""
# Update the tracking to track the object.
self._tracking_object = obj
obj_in_hand = self._get_obj_in_hand()
if obj_in_hand is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping an object first to place it somewhere."
)
# Sample location to place object
obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj)
hand_pose = self._get_hand_pose_for_object_pose(obj_pose)
yield from self._navigate_if_needed(obj, pose_on_obj=hand_pose)
yield from self._move_hand(hand_pose)
yield from self._execute_release()
if self._get_obj_in_hand() is not None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"Could not release object - the object is still in your hand",
{"object": self._get_obj_in_hand().name}
)
if not obj_in_hand.states[predicate].get_value(obj):
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"Failed to place object at the desired place (probably dropped). The object was still released, so you need to grasp it again to continue",
{"dropped object": obj_in_hand.name, "target object": obj.name}
)
yield from self._move_hand_upward()
def _convert_cartesian_to_joint_space(self, target_pose):
"""
Gets joint positions for the arm so eef is at the target pose
Args:
target_pose (Iterable of array): Position and orientation arrays in an iterable for pose for the eef
        Returns:
            np.array: Joint positions for the arm so the eef reaches the target pose
        Raises:
            ActionPrimitiveError: If the target pose cannot be reached
"""
relative_target_pose = self._get_pose_in_robot_frame(target_pose)
joint_pos = self._ik_solver_cartesian_to_joint_space(relative_target_pose)
if joint_pos is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PLANNING_ERROR,
"Could not find joint positions for target pose. You cannot reach it. Try again for a new pose"
)
return joint_pos
def _target_in_reach_of_robot(self, target_pose):
"""
Determines whether the eef for the robot can reach the target pose in the world frame
Args:
target_pose (Iterable of array): Position and orientation arrays in an iterable for the pose for the eef
Returns:
bool: Whether eef can reach the target pose
"""
relative_target_pose = self._get_pose_in_robot_frame(target_pose)
return self._target_in_reach_of_robot_relative(relative_target_pose)
def _target_in_reach_of_robot_relative(self, relative_target_pose):
"""
Determines whether eef for the robot can reach the target pose where the target pose is in the robot frame
Args:
            relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for the pose in the robot frame
        Returns:
            bool: Whether the eef can reach the target pose
"""
return self._ik_solver_cartesian_to_joint_space(relative_target_pose) is not None
@cached_property
def _manipulation_control_idx(self):
"""The appropriate manipulation control idx for the current settings."""
if isinstance(self.robot, Tiago):
if m.TIAGO_TORSO_FIXED:
assert self.arm == "left", "Fixed torso mode only supports left arm!"
return self.robot.arm_control_idx["left"]
else:
return np.concatenate([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]])
# Otherwise just return the default arm control idx
return np.concatenate([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]])
@cached_property
def _manipulation_descriptor_path(self):
"""The appropriate manipulation descriptor for the current settings."""
if isinstance(self.robot, Tiago) and m.TIAGO_TORSO_FIXED:
assert self.arm == "left", "Fixed torso mode only supports left arm!"
return self.robot.robot_arm_descriptor_yamls["left_fixed"]
# Otherwise just return the default arm control idx
return self.robot.robot_arm_descriptor_yamls[self.arm]
def _ik_solver_cartesian_to_joint_space(self, relative_target_pose):
"""
Get joint positions for the arm so eef is at the target pose where the target pose is in the robot frame
Args:
relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for pose in the robot frame
        Returns:
            np.array or None: Joint positions to reach the target pose, or None if the target pose is unreachable
"""
ik_solver = IKSolver(
robot_description_path=self._manipulation_descriptor_path,
robot_urdf_path=self.robot.urdf_path,
reset_joint_pos=self.robot.reset_joint_pos[self._manipulation_control_idx],
eef_name=self.robot.eef_link_names[self.arm],
)
# Grab the joint positions in order to reach the desired pose target
joint_pos = ik_solver.solve(
target_pos=relative_target_pose[0],
target_quat=relative_target_pose[1],
max_iterations=100,
)
return joint_pos
def _move_hand(self, target_pose, stop_if_stuck=False):
"""
Yields action for the robot to move hand so the eef is in the target pose using the planner
Args:
target_pose (Iterable of array): Position and orientation arrays in an iterable for pose
Returns:
            np.array or None: Action array for one step for the robot to move its hand, or None if it is at the target pose
"""
yield from self._settle_robot()
controller_config = self.robot._controller_config["arm_" + self.arm]
if controller_config["name"] == "InverseKinematicsController":
target_pose_relative = self._get_pose_in_robot_frame(target_pose)
yield from self._move_hand_ik(target_pose_relative, stop_if_stuck=stop_if_stuck)
else:
joint_pos = self._convert_cartesian_to_joint_space(target_pose)
yield from self._move_hand_joint(joint_pos)
def _move_hand_joint(self, joint_pos):
"""
Yields action for the robot to move arm to reach the specified joint positions using the planner
Args:
joint_pos (np.array): Joint positions for the arm
Returns:
            np.array or None: Action array for one step for the robot to move its arm, or None if it is at the joint positions
"""
with PlanningContext(self.robot, self.robot_copy, "original") as context:
plan = plan_arm_motion(
robot=self.robot,
end_conf=joint_pos,
context=context,
torso_fixed=m.TIAGO_TORSO_FIXED,
)
# plan = self._add_linearly_interpolated_waypoints(plan, 0.1)
if plan is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PLANNING_ERROR,
"There is no accessible path from where you are to the desired joint position. Try again"
)
# Follow the plan to navigate.
indented_print("Plan has %d steps", len(plan))
        for i, waypoint_joint_pos in enumerate(plan):
            indented_print("Executing grasp plan step %d/%d", i + 1, len(plan))
            yield from self._move_hand_direct_joint(waypoint_joint_pos, ignore_failure=True)
def _move_hand_ik(self, eef_pose, stop_if_stuck=False):
"""
Yields action for the robot to move arm to reach the specified eef positions using the planner
Args:
eef_pose (np.array): End Effector pose for the arm
Returns:
            np.array or None: Action array for one step for the robot to move its arm, or None if it is at the joint positions
"""
eef_pos = eef_pose[0]
eef_ori = T.quat2axisangle(eef_pose[1])
end_conf = np.append(eef_pos, eef_ori)
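        # The IK planner works over a 6D end-effector configuration:
        # (x, y, z) position followed by a 3D axis-angle orientation.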
with PlanningContext(self.robot, self.robot_copy, "original") as context:
plan = plan_arm_motion_ik(
robot=self.robot,
end_conf=end_conf,
context=context,
torso_fixed=m.TIAGO_TORSO_FIXED,
)
# plan = self._add_linearly_interpolated_waypoints(plan, 0.1)
if plan is None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PLANNING_ERROR,
"There is no accessible path from where you are to the desired joint position. Try again"
)
# Follow the plan to navigate.
indented_print("Plan has %d steps", len(plan))
for i, target_pose in enumerate(plan):
target_pos = target_pose[:3]
target_quat = T.axisangle2quat(target_pose[3:])
indented_print("Executing grasp plan step %d/%d", i + 1, len(plan))
yield from self._move_hand_direct_ik((target_pos, target_quat), ignore_failure=True, in_world_frame=False, stop_if_stuck=stop_if_stuck)
def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist):
"""
Adds waypoints to the plan so the distance between values in the plan never exceeds the max_inter_dist.
Args:
plan (Array of arrays): Planned path
max_inter_dist (float): Maximum distance between values in the plan
Returns:
Array of arrays: Planned path with additional waypoints
"""
plan = np.array(plan)
interpolated_plan = []
for i in range(len(plan) - 1):
            max_diff = np.max(np.abs(plan[i+1] - plan[i]))
num_intervals = ceil(max_diff / max_inter_dist)
interpolated_plan += np.linspace(plan[i], plan[i+1], num_intervals, endpoint=False).tolist()
interpolated_plan.append(plan[-1].tolist())
return interpolated_plan
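    # Worked example (illustrative): with max_inter_dist=0.1, the 1-DOF plan
    # [[0.0], [0.25]] has max_diff=0.25, so ceil(0.25 / 0.1) = 3 intervals and the
    # interpolated plan becomes [0.0], [0.0833...], [0.1666...], [0.25].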
def _move_hand_direct_joint(self, joint_pos, stop_on_contact=False, ignore_failure=False):
"""
Yields action for the robot to move its arm to reach the specified joint positions by directly actuating with no planner
Args:
joint_pos (np.array): Array of joint positions for the arm
            stop_on_contact (boolean): Determines whether to stop moving once an object is hit
            ignore_failure (boolean): Determines whether to throw an error for not reaching the final joint positions
        Returns:
            np.array or None: Action array for one step for the robot to move its arm, or None if it is at the joint positions
"""
controller_name = f"arm_{self.arm}"
use_delta = self.robot._controllers[controller_name].use_delta_commands
action = self._empty_action()
controller_name = "arm_{}".format(self.arm)
action[self.robot.controller_action_idx[controller_name]] = joint_pos
prev_eef_pos = np.zeros(3)
for _ in range(m.MAX_STEPS_FOR_HAND_MOVE_JOINT):
current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx]
diff_joint_pos = np.array(current_joint_pos) - np.array(joint_pos)
if np.max(np.abs(diff_joint_pos)) < m.JOINT_POS_DIFF_THRESHOLD:
return
if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
return
if np.max(np.abs(self.robot.get_eef_position(self.arm) - prev_eef_pos)) < 0.0001:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
f"Hand got stuck during execution."
)
if use_delta:
# Convert actions to delta.
action[self.robot.controller_action_idx[controller_name]] = diff_joint_pos
prev_eef_pos = self.robot.get_eef_position(self.arm)
yield self._postprocess_action(action)
if not ignore_failure:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"Your hand was obstructed from moving to the desired joint position"
)
def _move_hand_direct_ik(self, target_pose, stop_on_contact=False, ignore_failure=False, pos_thresh=0.04, ori_thresh=0.4, in_world_frame=True, stop_if_stuck=False):
"""
Moves the hand to a target pose using inverse kinematics.
Args:
target_pose (tuple): A tuple of two elements, representing the target pose of the hand as a position and a quaternion.
stop_on_contact (bool, optional): Whether to stop the movement if the hand collides with an object. Defaults to False.
ignore_failure (bool, optional): Whether to raise an exception if the movement fails. Defaults to False.
pos_thresh (float, optional): The position threshold for considering the target pose reached. Defaults to 0.04.
ori_thresh (float, optional): The orientation threshold for considering the target pose reached. Defaults to 0.4.
in_world_frame (bool, optional): Whether the target pose is given in the world frame. Defaults to True.
stop_if_stuck (bool, optional): Whether to stop the movement if the hand is stuck. Defaults to False.
Yields:
numpy.ndarray: The action to be executed by the robot controller.
Raises:
ActionPrimitiveError: If the movement fails and ignore_failure is False.
"""
# make sure controller is InverseKinematicsController and in expected mode
controller_config = self.robot._controller_config["arm_" + self.arm]
assert controller_config["name"] == "InverseKinematicsController", "Controller must be InverseKinematicsController"
assert controller_config["mode"] == "pose_absolute_ori", "Controller must be in pose_absolute_ori mode"
if in_world_frame:
target_pose = self._get_pose_in_robot_frame(target_pose)
target_pos = target_pose[0]
target_orn = target_pose[1]
target_orn_axisangle = T.quat2axisangle(target_pose[1])
action = self._empty_action()
control_idx = self.robot.controller_action_idx["arm_" + self.arm]
prev_pos = prev_orn = None
for i in range(m.MAX_STEPS_FOR_HAND_MOVE_IK):
current_pose = self._get_pose_in_robot_frame((self.robot.get_eef_position(), self.robot.get_eef_orientation()))
current_pos = current_pose[0]
current_orn = current_pose[1]
delta_pos = target_pos - current_pos
target_pos_diff = np.linalg.norm(delta_pos)
target_orn_diff = (Rotation.from_quat(target_orn) * Rotation.from_quat(current_orn).inv()).magnitude()
reached_goal = target_pos_diff < pos_thresh and target_orn_diff < ori_thresh
if reached_goal:
return
if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
return
# if i > 0 and stop_if_stuck and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
if i > 0 and stop_if_stuck:
pos_diff = np.linalg.norm(prev_pos - current_pos)
                orn_diff = (Rotation.from_quat(prev_orn) * Rotation.from_quat(current_orn).inv()).magnitude()
if pos_diff < 0.0003 and orn_diff < 0.01:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
f"Hand is stuck"
)
prev_pos = current_pos
prev_orn = current_orn
action[control_idx] = np.concatenate([delta_pos, target_orn_axisangle])
yield self._postprocess_action(action)
if not ignore_failure:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"Your hand was obstructed from moving to the desired joint position"
)
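# Illustrative sketch (not part of the original file): the reached-goal test
# above measures orientation error as the magnitude of the relative rotation
# between target and current quaternions (xyzw order). Values are hypothetical.
import numpy as np
from scipy.spatial.transform import Rotation
target_q = np.array([0.0, 0.0, 0.0, 1.0])
current_q = Rotation.from_euler("z", 0.2).as_quat()
orn_err = (Rotation.from_quat(target_q) * Rotation.from_quat(current_q).inv()).magnitude()
reached = orn_err < 0.4  # 0.4 rad mirrors the default ori_thresh above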
def _move_hand_linearly_cartesian(self, target_pose, stop_on_contact=False, ignore_failure=False, stop_if_stuck=False):
"""
Yields actions for the robot to move its arm to reach the specified target pose by moving the eef along a line in Cartesian
space from its current pose
Args:
target_pose (Iterable of array): Position and orientation arrays in an iterable for pose
stop_on_contact (boolean): Determines whether to stop the move once an object is hit
ignore_failure (boolean): Determines whether to throw an error for not reaching final joint positions
stop_if_stuck (boolean): Determines whether to stop the move if the hand gets stuck
Returns:
np.array or None: Action array for one step for the robot to move its arm or None if it is at the target pose
"""
# To make sure that this happens in a roughly linear fashion, we will divide the trajectory
# into 1cm-long pieces
start_pos, start_orn = self.robot.eef_links[self.arm].get_position_orientation()
travel_distance = np.linalg.norm(target_pose[0] - start_pos)
num_poses = np.max([2, int(travel_distance / m.MAX_CARTESIAN_HAND_STEP) + 1])
pos_waypoints = np.linspace(start_pos, target_pose[0], num_poses)
# Also interpolate the rotations
combined_rotation = Rotation.from_quat(np.array([start_orn, target_pose[1]]))
slerp = Slerp([0, 1], combined_rotation)
orn_waypoints = slerp(np.linspace(0, 1, num_poses))
quat_waypoints = [x.as_quat() for x in orn_waypoints]
controller_config = self.robot._controller_config["arm_" + self.arm]
if controller_config["name"] == "InverseKinematicsController":
waypoints = list(zip(pos_waypoints, quat_waypoints))
for i, waypoint in enumerate(waypoints):
if i < len(waypoints) - 1:
yield from self._move_hand_direct_ik(waypoint, stop_on_contact=stop_on_contact, ignore_failure=ignore_failure, stop_if_stuck=stop_if_stuck)
else:
yield from self._move_hand_direct_ik(
waypoints[-1],
pos_thresh=0.01, ori_thresh=0.1,
stop_on_contact=stop_on_contact,
ignore_failure=ignore_failure,
stop_if_stuck=stop_if_stuck
)
# Also decide if we can stop early.
current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
pos_diff = np.linalg.norm(np.array(current_pos) - np.array(target_pose[0]))
orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
if pos_diff < 0.005 and orn_diff < np.deg2rad(0.1):
return
if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
return
if not ignore_failure:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"Your hand was obstructed from moving to the desired world position"
)
else:
# Use joint positions
joint_space_data = [self._convert_cartesian_to_joint_space(waypoint) for waypoint in zip(pos_waypoints, quat_waypoints)]
joints = list(self.robot.joints.values())
for joint_pos in joint_space_data:
# Check if the movement can be done roughly linearly.
current_joint_positions = self.robot.get_joint_positions()[self._manipulation_control_idx]
failed_joints = []
for joint_idx, target_joint_pos, current_joint_pos in zip(self._manipulation_control_idx, joint_pos, current_joint_positions):
if np.abs(target_joint_pos - current_joint_pos) > m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION:
failed_joints.append(joints[joint_idx].joint_name)
if failed_joints:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"You cannot reach the target position in a straight line - it requires rotating your arm which might cause collisions. You might need to get closer and retry",
{"failed joints": failed_joints}
)
# Otherwise, move the joint
yield from self._move_hand_direct_joint(joint_pos, stop_on_contact=stop_on_contact, ignore_failure=ignore_failure)
# Also decide if we can stop early.
current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
pos_diff = np.linalg.norm(np.array(current_pos) - np.array(target_pose[0]))
orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
if pos_diff < 0.001 and orn_diff < np.deg2rad(0.1):
return
if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
return
if not ignore_failure:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"Your hand was obstructed from moving to the desired world position"
)
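# Illustrative sketch (not part of the original file): the waypoint
# discretization above pairs np.linspace for positions with scipy's Slerp for
# orientations. The start/end poses and 1cm step below are hypothetical.
import numpy as np
from scipy.spatial.transform import Rotation, Slerp
start_pos, end_pos = np.zeros(3), np.array([0.05, 0.0, 0.0])
start_q, end_q = np.array([0.0, 0.0, 0.0, 1.0]), Rotation.from_euler("z", 0.5).as_quat()
num_poses = max(2, int(np.linalg.norm(end_pos - start_pos) / 0.01) + 1)
pos_waypoints = np.linspace(start_pos, end_pos, num_poses)
slerp = Slerp([0, 1], Rotation.from_quat(np.array([start_q, end_q])))
quat_waypoints = [r.as_quat() for r in slerp(np.linspace(0, 1, num_poses))]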
def _execute_grasp(self):
"""
Yields action for the robot to grasp
Returns:
np.array or None: Action array for one step for the robot to grasp or None if its done grasping
"""
for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
action = self._empty_action()
controller_name = "gripper_{}".format(self.arm)
action[self.robot.controller_action_idx[controller_name]] = -1.0
yield self._postprocess_action(action)
def _execute_release(self):
"""
Yields action for the robot to release its grasp
Returns:
np.array or None: Action array for one step for the robot to release or None if its done releasing
"""
for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
action = self._empty_action()
controller_name = "gripper_{}".format(self.arm)
action[self.robot.controller_action_idx[controller_name]] = 1.0
yield self._postprocess_action(action)
if self._get_obj_in_hand() is not None:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"An object was still detected in your hand after executing release",
{"object in hand": self._get_obj_in_hand().name},
)
def _overwrite_head_action(self, action):
"""
Overwrites camera control actions to track an object of interest.
If self._always_track_eef is true, always tracks the end effector of the robot.
Otherwise, tracks the object of interest or the end effector as specified by the primitive.
Args:
action (array) : action array to overwrite
"""
if self._always_track_eef:
target_obj_pose = (self.robot.get_eef_position(), self.robot.get_eef_orientation())
else:
if self._tracking_object is None:
return action
if self._tracking_object == self.robot:
target_obj_pose = (self.robot.get_eef_position(), self.robot.get_eef_orientation())
else:
target_obj_pose = self._tracking_object.get_position_orientation()
assert self.robot_model == "Tiago", "Tracking object with camera is currently only supported for Tiago"
head_q = self._get_head_goal_q(target_obj_pose)
head_idx = self.robot.controller_action_idx["camera"]
config = self.robot._controller_config["camera"]
assert config["name"] == "JointController", "Camera controller must be JointController"
assert config["motor_type"] == "position", "Camera controller must be in position control mode"
use_delta = config["use_delta_commands"]
if use_delta:
cur_head_q = self.robot.get_joint_positions()[self.robot.camera_control_idx]
head_action = head_q - cur_head_q
else:
head_action = head_q
action[head_idx] = head_action
return action
def _get_head_goal_q(self, target_obj_pose):
"""
Get goal joint positions for the head to look at an object of interest.
If the object cannot be seen, return the current head joint positions.
"""
# get current head joint positions
head1_joint = self.robot.joints["head_1_joint"]
head2_joint = self.robot.joints["head_2_joint"]
head1_joint_limits = [head1_joint.lower_limit, head1_joint.upper_limit]
head2_joint_limits = [head2_joint.lower_limit, head2_joint.upper_limit]
head1_joint_goal = head1_joint.get_state()[0][0]
head2_joint_goal = head2_joint.get_state()[0][0]
# grab robot and object poses
robot_pose = self.robot.get_position_orientation()
# obj_pose = obj.get_position_orientation()
obj_in_base = T.relative_pose_transform(*target_obj_pose, *robot_pose)
# compute angle between base and object in xy plane (parallel to floor)
theta = np.arctan2(obj_in_base[0][1], obj_in_base[0][0])
# if it is possible to get object in view, compute both head joint positions
if head1_joint_limits[0] < theta < head1_joint_limits[1]:
head1_joint_goal = theta
# compute angle between base and object in xz plane (perpendicular to floor)
head2_pose = self.robot.links["head_2_link"].get_position_orientation()
head2_in_base = T.relative_pose_transform(*head2_pose, *robot_pose)
phi = np.arctan2(obj_in_base[0][2] - head2_in_base[0][2], obj_in_base[0][0])
if head2_joint_limits[0] < phi < head2_joint_limits[1]:
head2_joint_goal = phi
# if not possible to look at object, return current head joint positions
else:
default_head_pos = self._get_reset_joint_pos()[self.robot.controller_action_idx["camera"]]
head1_joint_goal = default_head_pos[0]
head2_joint_goal = default_head_pos[1]
return [head1_joint_goal, head2_joint_goal]
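# Illustrative sketch (not part of the original file): the pan/tilt goals above
# are plain atan2 computations in the robot base frame. The positions below are
# hypothetical stand-ins.
import numpy as np
obj_in_base_pos = np.array([1.0, 0.3, 0.4])  # target (x, y, z) in the base frame
head2_in_base_z = 1.1                        # stand-in camera-link height
theta = np.arctan2(obj_in_base_pos[1], obj_in_base_pos[0])                   # pan
phi = np.arctan2(obj_in_base_pos[2] - head2_in_base_z, obj_in_base_pos[0])   # tilt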
def _empty_action(self):
"""
Get a no-op action that allows us to run simulation without changing robot configuration.
Returns:
np.array or None: Action array for one step for the robot to do nothing
"""
action = np.zeros(self.robot.action_dim)
for name, controller in self.robot._controllers.items():
joint_idx = controller.dof_idx
action_idx = self.robot.controller_action_idx[name]
if controller.control_type == ControlType.POSITION and len(joint_idx) == len(action_idx) and not controller.use_delta_commands:
action[action_idx] = self.robot.get_joint_positions()[joint_idx]
elif self.robot._controller_config[name]["name"] == "InverseKinematicsController":
# overwrite the goal orientation, since it is in absolute frame.
assert self.robot._controller_config["arm_" + self.arm]["mode"] == "pose_absolute_ori", "Controller must be in pose_absolute_ori mode"
current_quat = self.robot.get_relative_eef_orientation()
current_ori = T.quat2axisangle(current_quat)
control_idx = self.robot.controller_action_idx["arm_" + self.arm]
action[control_idx[3:]] = current_ori
return action
def _reset_hand(self):
"""
Yields action to move the hand to the position optimal for executing subsequent action primitives
Returns:
np.array or None: Action array for one step for the robot to reset its hand or None if it is done resetting
"""
controller_config = self.robot._controller_config["arm_" + self.arm]
if controller_config["name"] == "InverseKinematicsController":
indented_print("Resetting hand")
reset_eef_pose = self._get_reset_eef_pose()
try:
yield from self._move_hand_ik(reset_eef_pose)
except ActionPrimitiveError:
indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body")
yield from self._move_hand_direct_ik(reset_eef_pose, ignore_failure=True, in_world_frame=False)
else:
indented_print("Resetting hand")
reset_pose = self._get_reset_joint_pos()[self._manipulation_control_idx]
try:
yield from self._move_hand_joint(reset_pose)
except ActionPrimitiveError:
indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body")
yield from self._move_hand_direct_joint(reset_pose, ignore_failure=True)
def _get_reset_eef_pose(self):
# TODO: Add support for Fetch
if self.robot_model == "Tiago":
return np.array([0.28493954, 0.37450749, 1.1512334]), np.array([-0.21533823, 0.05361032, -0.08631776, 0.97123871])
else:
return np.array([ 0.48688125, -0.12507881, 0.97888719]), np.array([ 0.61324748, 0.61305553, -0.35266518, 0.35173529])
def _get_reset_joint_pos(self):
reset_pose_fetch = np.array(
[
0.0,
0.0, # wheels
0.0, # trunk
0.0,
-1.0,
0.0, # head
-1.0,
1.53448,
2.2,
0.0,
1.36904,
1.90996, # arm
0.05,
0.05, # gripper
]
)
reset_pose_tiago = np.array([
-1.78029833e-04,
3.20231302e-05,
-1.85759447e-07,
0.0,
-0.2,
0.0,
0.1,
-6.10000000e-01,
-1.10000000e+00,
0.00000000e+00,
-1.10000000e+00,
1.47000000e+00,
0.00000000e+00,
8.70000000e-01,
2.71000000e+00,
1.50000000e+00,
1.71000000e+00,
-1.50000000e+00,
-1.57000000e+00,
4.50000000e-01,
1.39000000e+00,
0.00000000e+00,
0.00000000e+00,
4.50000000e-02,
4.50000000e-02,
4.50000000e-02,
4.50000000e-02
])
return reset_pose_tiago if self.robot_model == "Tiago" else reset_pose_fetch
def _navigate_to_pose(self, pose_2d):
"""
Yields the action to navigate robot to the specified 2d pose
Args:
pose_2d (Iterable): (x, y, yaw) 2d pose
Returns:
np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
with PlanningContext(self.robot, self.robot_copy, "simplified") as context:
plan = plan_base_motion(
robot=self.robot,
end_conf=pose_2d,
context=context,
)
if plan is None:
# TODO: Would be great to produce a more informative error.
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PLANNING_ERROR,
"Could not make a navigation plan to get to the target position"
)
# self._draw_plan(plan)
# Follow the plan to navigate.
indented_print("Plan has %d steps", len(plan))
for i, pose_2d in enumerate(plan):
indented_print("Executing navigation plan step %d/%d", i + 1, len(plan))
low_precision = True if i < len(plan) - 1 else False
yield from self._navigate_to_pose_direct(pose_2d, low_precision=low_precision)
def _draw_plan(self, plan):
SEARCHED = []
trav_map = self.env.scene._trav_map
for q in plan:
# The below code is useful for plotting the RRT tree.
SEARCHED.append(np.flip(trav_map.world_to_map((q[0], q[1]))))
fig = plt.figure()
plt.imshow(trav_map.floor_map[0])
plt.scatter(*zip(*SEARCHED), 5)
fig.canvas.draw()
# Convert the canvas to image
img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
plt.close(fig)
# Convert to BGR for cv2-based viewing.
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imshow("SceneGraph", img)
cv2.waitKey(1)
def _navigate_if_needed(self, obj, pose_on_obj=None, **kwargs):
"""
Yields action to navigate the robot to be in range of the object if it is not already in range
Args:
obj (StatefulObject): Object for the robot to be in range of
pose_on_obj (Iterable): (pos, quat) Pose
Returns:
np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
if pose_on_obj is not None:
if self._target_in_reach_of_robot(pose_on_obj):
# No need to navigate.
return
elif self._target_in_reach_of_robot(obj.get_position_orientation()):
return
yield from self._navigate_to_obj(obj, pose_on_obj=pose_on_obj, **kwargs)
def _navigate_to_obj(self, obj, pose_on_obj=None, **kwargs):
"""
Yields action to navigate the robot to be in range of the pose
Args:
obj (StatefulObject): object to be in range of
pose_on_obj (Iterable): (pos, quat) pose
Returns:
np.array or None: Action array for one step for the robot to navigate in range or None if it is done navigating
"""
pose = self._sample_pose_near_object(obj, pose_on_obj=pose_on_obj, **kwargs)
yield from self._navigate_to_pose(pose)
def _navigate_to_pose_direct(self, pose_2d, low_precision=False):
"""
Yields action to navigate the robot to the 2d pose without planning
Args:
pose_2d (Iterable): (x, y, yaw) 2d pose
low_precision (bool): Determines whether to navigate to the pose within a large range (low precision) or small range (high precision)
Returns:
np.array or None: Action array for one step for the robot to navigate or None if it is done navigating
"""
dist_threshold = m.LOW_PRECISION_DIST_THRESHOLD if low_precision else m.DEFAULT_DIST_THRESHOLD
angle_threshold = m.LOW_PRECISION_ANGLE_THRESHOLD if low_precision else m.DEFAULT_ANGLE_THRESHOLD
end_pose = self._get_robot_pose_from_2d_pose(pose_2d)
body_target_pose = self._get_pose_in_robot_frame(end_pose)
for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION):
if np.linalg.norm(body_target_pose[0][:2]) < dist_threshold:
break
diff_pos = end_pose[0] - self.robot.get_position()
intermediate_pose = (end_pose[0], T.euler2quat([0, 0, np.arctan2(diff_pos[1], diff_pos[0])]))
body_intermediate_pose = self._get_pose_in_robot_frame(intermediate_pose)
diff_yaw = T.quat2euler(body_intermediate_pose[1])[2]
if abs(diff_yaw) > m.DEFAULT_ANGLE_THRESHOLD:
yield from self._rotate_in_place(intermediate_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD)
else:
action = self._empty_action()
if self._base_controller_is_joint:
direction_vec = body_target_pose[0][:2] / np.linalg.norm(body_target_pose[0][:2]) * m.KP_LIN_VEL
base_action = [direction_vec[0], direction_vec[1], 0.0]
action[self.robot.controller_action_idx["base"]] = base_action
else:
base_action = [m.KP_LIN_VEL, 0.0]
action[self.robot.controller_action_idx["base"]] = base_action
yield self._postprocess_action(action)
body_target_pose = self._get_pose_in_robot_frame(end_pose)
else:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"Could not navigate to the target position",
{"target pose": end_pose},
)
# Rotate in place to final orientation once at location
yield from self._rotate_in_place(end_pose, angle_threshold=angle_threshold)
def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD):
"""
Yields action to rotate the robot to the 2d end pose
Args:
end_pose (Iterable): (x, y, yaw) 2d pose
angle_threshold (float): The angle difference between the robot's current and end pose that determines when the robot is done rotating
Returns:
np.array or None: Action array for one step for the robot to rotate or None if it is done rotating
"""
body_target_pose = self._get_pose_in_robot_frame(end_pose)
diff_yaw = T.quat2euler(body_target_pose[1])[2]
for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION):
if abs(diff_yaw) < angle_threshold:
break
action = self._empty_action()
direction = -1.0 if diff_yaw < 0.0 else 1.0
ang_vel = m.KP_ANGLE_VEL * direction
base_action = [0.0, 0.0, ang_vel] if self._base_controller_is_joint else [0.0, ang_vel]
action[self.robot.controller_action_idx["base"]] = base_action
yield self._postprocess_action(action)
body_target_pose = self._get_pose_in_robot_frame(end_pose)
diff_yaw = T.quat2euler(body_target_pose[1])[2]
else:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.EXECUTION_ERROR,
"Could not rotate in place to the desired orientation",
{"target pose": end_pose},
)
empty_action = self._empty_action()
yield self._postprocess_action(empty_action)
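# Illustrative sketch (not part of the original file): the rotation loop keys
# off the yaw of the target pose expressed in the robot frame and applies a
# fixed-magnitude angular velocity. The quaternion below is hypothetical.
import numpy as np
from scipy.spatial.transform import Rotation
body_target_quat = Rotation.from_euler("z", -0.3).as_quat()  # xyzw
diff_yaw = Rotation.from_quat(body_target_quat).as_euler("xyz")[2]
ang_vel_cmd = 0.2 if diff_yaw >= 0.0 else -0.2  # bang-bang, like KP_ANGLE_VEL above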
def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs):
"""
Returns a 2d pose for the robot within range of the object and where the robot is not in collision with anything
Args:
obj (StatefulObject): Object to sample a 2d pose near
pose_on_obj (Iterable of arrays or None): The pose to sample near
Returns:
2-tuple:
- 3-array: (x,y,z) Position in the world frame
- 4-array: (x,y,z,w) Quaternion orientation in the world frame
"""
with PlanningContext(self.robot, self.robot_copy, "simplified") as context:
for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT):
if pose_on_obj is None:
pos_on_obj = self._sample_position_on_aabb_side(obj)
pose_on_obj = [pos_on_obj, np.array([0, 0, 0, 1])]
distance = np.random.uniform(0.0, 5.0)
yaw = np.random.uniform(-np.pi, np.pi)
avg_arm_workspace_range = np.mean(self.robot.arm_workspace_range[self.arm])
pose_2d = np.array(
[pose_on_obj[0][0] + distance * np.cos(yaw), pose_on_obj[0][1] + distance * np.sin(yaw), yaw + np.pi - avg_arm_workspace_range]
)
# Check room
obj_rooms = obj.in_rooms if obj.in_rooms else [self.env.scene._seg_map.get_room_instance_by_point(pose_on_obj[0][:2])]
if self.env.scene._seg_map.get_room_instance_by_point(pose_2d[:2]) not in obj_rooms:
indented_print("Candidate position is in the wrong room.")
continue
if not self._test_pose(pose_2d, context, pose_on_obj=pose_on_obj, **kwargs):
continue
return pose_2d
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.SAMPLING_ERROR, "Could not find valid position near object.",
{"target object": obj.name, "target pos": obj.get_position(), "pose on target": pose_on_obj}
)
@staticmethod
def _sample_position_on_aabb_side(target_obj):
"""
Returns a position on one of the axis-aligned bounding box (AABB) side faces of the target object.
Args:
target_obj (StatefulObject): Object to sample a position on
Returns:
3-array: (x,y,z) Position in the world frame
"""
aabb_center, aabb_extent = target_obj.aabb_center, target_obj.aabb_extent
# We want to sample only from the side-facing faces.
face_normal_axis = np.random.choice([0, 1])
face_normal_direction = np.random.choice([-1, 1])
face_center = aabb_center + np.eye(3)[face_normal_axis] * aabb_extent * face_normal_direction
face_lateral_axis = 0 if face_normal_axis == 1 else 1
face_lateral_half_extent = np.eye(3)[face_lateral_axis] * aabb_extent / 2
face_vertical_half_extent = np.eye(3)[2] * aabb_extent / 2
face_min = face_center - face_vertical_half_extent - face_lateral_half_extent
face_max = face_center + face_vertical_half_extent + face_lateral_half_extent
return np.random.uniform(face_min, face_max)
# def _sample_pose_in_room(self, room: str):
# """
# Returns a pose for the robot within in the room where the robot is not in collision with anything
# Args:
# room (str): Name of room
# Returns:
# 2-tuple:
# - 3-array: (x,y,z) Position in the world frame
# - 4-array: (x,y,z,w) Quaternion orientation in the world frame
# """
# # TODO(MP): Bias the sampling near the agent.
# for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM):
# _, pos = self.env.scene.get_random_point_by_room_instance(room)
# yaw = np.random.uniform(-np.pi, np.pi)
# pose = (pos[0], pos[1], yaw)
# if self._test_pose(pose):
# return pose
# raise ActionPrimitiveError(
# ActionPrimitiveError.Reason.SAMPLING_ERROR,
# "Could not find valid position in the given room to travel to",
# {"room": room}
# )
def _sample_pose_with_object_and_predicate(self, predicate, held_obj, target_obj, near_poses=None, near_poses_threshold=None):
"""
Returns a pose for the held object relative to the target object that satisfies the predicate
Args:
predicate (object_states.OnTop or object_states.Inside): Relation between held object and the target object
held_obj (StatefulObject): Object held by the robot
target_obj (StatefulObject): Object to sample a pose relative to
near_poses (Iterable of arrays): Poses in the world frame to sample near
near_poses_threshold (float): The distance threshold to check if the sampled pose is near the poses in near_poses
Returns:
2-tuple:
- 3-array: (x,y,z) Position in the world frame
- 4-array: (x,y,z,w) Quaternion orientation in the world frame
"""
pred_map = {object_states.OnTop: "onTop", object_states.Inside: "inside"}
for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE):
_, _, bb_extents, bb_center_in_base = held_obj.get_base_aligned_bbox()
sampling_results = sample_cuboid_for_predicate(pred_map[predicate], target_obj, bb_extents)
if sampling_results[0][0] is None:
continue
sampled_bb_center = sampling_results[0][0] + np.array([0, 0, m.PREDICATE_SAMPLING_Z_OFFSET])
sampled_bb_orn = sampling_results[0][2]
# Get the object pose by subtracting the offset
sampled_obj_pose = T.pose2mat((sampled_bb_center, sampled_bb_orn)) @ T.pose_inv(T.pose2mat((bb_center_in_base, [0, 0, 0, 1])))
# Check that the pose is near one of the poses in the near_poses list if provided.
if near_poses:
# Extract the translation component of the sampled 4x4 object pose
sampled_pos = sampled_obj_pose[:3, 3]
if not np.any(np.linalg.norm(near_poses - sampled_pos, axis=1) < near_poses_threshold):
continue
# Return the pose
return T.mat2pose(sampled_obj_pose)
# If we get here, sampling failed.
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.SAMPLING_ERROR,
"Could not find a position to put this object in the desired relation to the target object",
{"target object": target_obj.name, "object in hand": held_obj.name, "relation": pred_map[predicate]},
)
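# Illustrative sketch (not part of the original file): recovering an object
# pose from a sampled bounding-box pose is 4x4 homogeneous-matrix algebra,
# T_obj = T_bbox @ inv(T_bbox_in_obj). The helper and poses below are
# hypothetical stand-ins for T.pose2mat / T.pose_inv.
import numpy as np
from scipy.spatial.transform import Rotation
def pose2mat(pos, quat_xyzw):
    mat = np.eye(4)
    mat[:3, :3] = Rotation.from_quat(quat_xyzw).as_matrix()
    mat[:3, 3] = pos
    return mat
T_bbox = pose2mat([0.5, 0.0, 0.8], [0.0, 0.0, 0.0, 1.0])
T_bbox_in_obj = pose2mat([0.0, 0.0, 0.05], [0.0, 0.0, 0.0, 1.0])
T_obj = T_bbox @ np.linalg.inv(T_bbox_in_obj)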
# TODO: Why do we need to pass in the context here?
def _test_pose(self, pose_2d, context, pose_on_obj=None):
"""
Determines whether the robot can reach the pose on the object and is not in collision at the specified 2d pose
Args:
pose_2d (Iterable): (x, y, yaw) 2d pose
context (Context): Planning context reference
pose_on_obj (Iterable of arrays): Pose on the object in the world frame
Returns:
bool: True if the robot is in a valid pose, False otherwise
"""
pose = self._get_robot_pose_from_2d_pose(pose_2d)
if pose_on_obj is not None:
relative_pose = T.relative_pose_transform(*pose_on_obj, *pose)
if not self._target_in_reach_of_robot_relative(relative_pose):
return False
if set_base_and_detect_collision(context, pose):
indented_print("Candidate position failed collision test.")
return False
return True
@staticmethod
def _get_robot_pose_from_2d_pose(pose_2d):
"""
Gets 3d pose from 2d pose
Args:
pose_2d (Iterable): (x, y, yaw) 2d pose
Returns:
2-tuple:
- 3-array: (x,y,z) Position in the world frame
- 4-array: (x,y,z,w) Quaternion orientation in the world frame
"""
pos = np.array([pose_2d[0], pose_2d[1], m.DEFAULT_BODY_OFFSET_FROM_FLOOR])
orn = T.euler2quat([0, 0, pose_2d[2]])
return pos, orn
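# Illustrative sketch (not part of the original file): lifting an (x, y, yaw)
# pose to a full SE(3) pose; the 0.05 body z-offset is a hypothetical stand-in
# for m.DEFAULT_BODY_OFFSET_FROM_FLOOR.
import numpy as np
from scipy.spatial.transform import Rotation
x, y, yaw = 1.0, 2.0, np.pi / 4
pos = np.array([x, y, 0.05])
orn = Rotation.from_euler("z", yaw).as_quat()  # xyzw quaternion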
def _get_pose_in_robot_frame(self, pose):
"""
Converts the pose in the world frame to the robot frame
Args:
pose (Iterable of arrays): (pos, quat) pose in the world frame
Returns:
2-tuple:
- 3-array: (x,y,z) Position in the robot frame
- 4-array: (x,y,z,w) Quaternion orientation in the robot frame
"""
body_pose = self.robot.get_position_orientation()
return T.relative_pose_transform(*pose, *body_pose)
def _get_hand_pose_for_object_pose(self, desired_pose):
"""
Gets the pose of the hand for the desired object pose
Args:
desired_pose (Iterable of arrays): Pose of the object in the world frame
Returns:
2-tuple:
- 3-array: (x,y,z) Position of the hand in the world frame
- 4-array: (x,y,z,w) Quaternion orientation of the hand in the world frame
"""
obj_in_hand = self._get_obj_in_hand()
assert obj_in_hand is not None
# Get the object pose & the robot hand pose
obj_in_world = obj_in_hand.get_position_orientation()
hand_in_world = self.robot.eef_links[self.arm].get_position_orientation()
# Get the hand pose relative to the obj pose
hand_in_obj = T.relative_pose_transform(*hand_in_world, *obj_in_world)
# Now apply desired obj pose.
desired_hand_pose = T.pose_transform(*desired_pose, *hand_in_obj)
return desired_hand_pose
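# Illustrative sketch (not part of the original file): the grasp-preserving
# hand target composes the desired object pose with the current hand-in-object
# offset, T_hand_desired = T_obj_desired o T_hand_in_obj. The helper below is a
# hypothetical stand-in for T.pose_transform.
import numpy as np
from scipy.spatial.transform import Rotation
def pose_transform(pos1, quat1, pos0, quat0):
    # Express the local pose (pos0, quat0) in the frame given by (pos1, quat1).
    r1 = Rotation.from_quat(quat1)
    return r1.apply(pos0) + pos1, (r1 * Rotation.from_quat(quat0)).as_quat()
desired_obj = (np.array([0.6, 0.0, 0.9]), np.array([0.0, 0.0, 0.0, 1.0]))
hand_in_obj = (np.array([0.0, 0.0, 0.1]), Rotation.from_euler("x", np.pi).as_quat())
hand_target_pos, hand_target_quat = pose_transform(*desired_obj, *hand_in_obj)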
# Function that is particularly useful for Fetch, where it gives time for the base of robot to settle due to its uneven base.
def _settle_robot(self):
"""
Yields a no op action for a few steps to allow the robot and physics to settle
Returns:
np.array or None: Action array for one step for the robot to do nothing
"""
for _ in range(30):
empty_action = self._empty_action()
yield self._postprocess_action(empty_action)
for _ in range(m.MAX_STEPS_FOR_SETTLING):
if np.linalg.norm(self.robot.get_linear_velocity()) < 0.01:
break
empty_action = self._empty_action()
yield self._postprocess_action(empty_action)
StanfordVL/OmniGibson/omnigibson/prims/entity_prim.py
import numpy as np
import networkx as nx
from functools import cached_property
import omnigibson as og
import omnigibson.lazy as lazy
import omnigibson.utils.transform_utils as T
from omnigibson.prims.cloth_prim import ClothPrim
from omnigibson.prims.joint_prim import JointPrim
from omnigibson.prims.rigid_prim import RigidPrim
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.utils.constants import PrimType, JointType, JointAxis
from omnigibson.utils.ui_utils import suppress_omni_log
from omnigibson.utils.usd_utils import PoseAPI
from omnigibson.macros import gm, create_module_macros
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Default sleep threshold for all objects -- see https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/simulation-control/physics-settings.html?highlight=sleep#sleeping
m.DEFAULT_SLEEP_THRESHOLD = 0.001
class EntityPrim(XFormPrim):
"""
Provides high level functions to deal with an articulation prim and its attributes/ properties. Note that this
type of prim cannot be created from scratch, and assumes there is already a pre-existing prim tree that should
be converted into an articulation!
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that by default, this assumes an articulation already exists (i.e.:
load() will raise NotImplementedError)! Subclasses must implement _load() for this prim to be able to be
dynamically loaded after this class is created.
visual_only (None or bool): If specified (as part of load_config), whether this prim should be
visual-only, i.e.: not subject to collisions or gravity. Default is False.
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Other values that will be filled in at runtime
self._root_link_name = None # Name of the root link
self._n_dof = None
self._links = None
self._joints = None
self._materials = None
self._visual_only = None
self._articulation_tree = None
self._articulation_view_direct = None
# This needs to be initialized to be used for _load() of PrimitiveObject
self._prim_type = load_config["prim_type"] if load_config is not None and "prim_type" in load_config else PrimType.RIGID
assert self._prim_type in iter(PrimType), f"Unknown prim type {self._prim_type}!"
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def _initialize(self):
# Run super method
super()._initialize()
# Set the default sleep threshold
self.sleep_threshold = m.DEFAULT_SLEEP_THRESHOLD
# Force populate inputs and outputs of the shaders of all materials
# We suppress errors from omni.usd if we're using encrypted assets, because we're loading from tmp location,
# not the original location
with suppress_omni_log(channels=["omni.usd"]):
for material in self.materials:
material.shader_force_populate(render=False)
# Initialize all the links
for link in self._links.values():
link.initialize()
# Update joint information
self.update_joints()
def _load(self):
# By default, this prim cannot be instantiated from scratch!
raise NotImplementedError("By default, an entity prim cannot be created from scratch.")
def _post_load(self):
# If this is a cloth, delete the root link and replace it with the single nested mesh
if self._prim_type == PrimType.CLOTH:
# Verify only a single link and a single mesh exists
old_link_prim = None
cloth_mesh_prim = None
for prim in self._prim.GetChildren():
if prim.GetPrimTypeInfo().GetTypeName() == "Xform":
assert old_link_prim is None, "Found multiple XForm links for a Cloth entity prim! Expected: 1"
old_link_prim = prim
for child in prim.GetChildren():
if child.GetPrimTypeInfo().GetTypeName() == "Mesh" and not child.HasAPI(lazy.pxr.UsdPhysics.CollisionAPI):
assert cloth_mesh_prim is None, "Found multiple meshes for a Cloth entity prim! Expected: 1"
cloth_mesh_prim = child
# Move mesh prim one level up via copy, then delete the original link
# NOTE: We copy because we cannot directly move the prim because it is ancestral
# NOTE: We use this specific delete method because alternative methods (eg: "delete_prim") fail because
# the prim is ancestral. Note that because it is non-destructive, the original link prim path is still
# tracked by omni, so we have to utilize a new unique prim path for the copied cloth mesh
# See omni.kit.context_menu module for reference
new_path = f"{self._prim_path}/{old_link_prim.GetName()}_cloth"
lazy.omni.kit.commands.execute("CopyPrim", path_from=cloth_mesh_prim.GetPath(), path_to=new_path)
lazy.omni.kit.commands.execute("DeletePrims", paths=[old_link_prim.GetPath()], destructive=False)
# Setup links info FIRST before running any other post loading behavior
# We pass in scale explicitly so that the generated links can leverage the desired entity scale
self.update_links()
# Optionally set the scale
if "scale" in self._load_config and self._load_config["scale"] is not None:
self.scale = self._load_config["scale"]
# Prepare the articulation view.
if self.n_joints > 0:
# Import now to avoid too-eager load of Omni classes due to inheritance
from omnigibson.utils.deprecated_utils import ArticulationView
self._articulation_view_direct = ArticulationView(f"{self._prim_path}/{self.root_link_name}")
# Set visual only flag
# This automatically handles setting collisions / gravity appropriately per-link
self.visual_only = self._load_config["visual_only"] if \
"visual_only" in self._load_config and self._load_config["visual_only"] is not None else False
if self._prim_type == PrimType.CLOTH:
assert not self._visual_only, "Cloth cannot be visual-only."
assert len(self._links) == 1, f"Cloth entity prim can only have one link; got: {len(self._links)}"
if gm.AG_CLOTH:
self.create_attachment_point_link()
# Globally disable any requested collision links
for link_name in self.disabled_collision_link_names:
self._links[link_name].disable_collisions()
# Disable any requested collision pairs
for a_name, b_name in self.disabled_collision_pairs:
link_a, link_b = self._links[a_name], self._links[b_name]
link_a.add_filtered_collision_pair(prim=link_b)
# Run super
super()._post_load()
# Cache material information
materials = set()
material_paths = set()
for link in self._links.values():
xforms = [link] + list(link.visual_meshes.values()) if self.prim_type == PrimType.RIGID else [link]
for xform in xforms:
if xform.has_material():
mat_path = xform.material.prim_path
if mat_path not in material_paths:
materials.add(xform.material)
material_paths.add(mat_path)
self._materials = materials
def remove(self):
# First remove all joints
if self._joints is not None:
for joint in self._joints.values():
joint.remove()
# Then links
if self._links is not None:
for link in self._links.values():
link.remove()
# Finally, remove this prim
super().remove()
def update_links(self):
"""
Helper function to refresh owned links. Useful for synchronizing internal data if
additional bodies are added manually
"""
# Make sure to clean up all pre-existing names for all links
if self._links is not None:
for link in self._links.values():
link.remove_names()
# We iterate over all children of this object's prim,
# and grab any that are presumed to be rigid bodies (i.e.: other Xforms)
joint_children = set()
links_to_create = {}
for prim in self._prim.GetChildren():
link_cls = None
link_name = prim.GetName()
if self._prim_type == PrimType.RIGID and prim.GetPrimTypeInfo().GetTypeName() == "Xform":
# For rigid body object, process prims that are Xforms (e.g. rigid links)
link_cls = RigidPrim
# Also iterate through all children to infer joints and determine the children of those joints
# We will use this info to infer which link is the base link!
for child_prim in prim.GetChildren():
if "joint" in child_prim.GetPrimTypeInfo().GetTypeName().lower():
# Store the child target of this joint
relationships = {r.GetName(): r for r in child_prim.GetRelationships()}
# Only record if this is NOT a fixed link tying us to the world (i.e.: no target for body0)
if len(relationships["physics:body0"].GetTargets()) > 0:
joint_children.add(relationships["physics:body1"].GetTargets()[0].pathString.split("/")[-1])
elif self._prim_type == PrimType.CLOTH and prim.GetPrimTypeInfo().GetTypeName() == "Mesh":
# For cloth object, process prims that are Meshes
link_cls = ClothPrim
# Keep track of all the links we will create. We can't create that just yet because we need to find
# the base link first.
if link_cls is not None:
links_to_create[link_name] = (link_cls, prim)
# Infer the correct root link name -- this corresponds to whatever link does not have any joint existing
# in the children joints
valid_root_links = list(set(links_to_create.keys()) - joint_children)
assert len(valid_root_links) == 1, f"Only a single root link should have been found for {self.name}, " \
f"but found multiple instead: {valid_root_links}"
self._root_link_name = valid_root_links[0] if len(valid_root_links) == 1 else "base_link"
# Now actually create the links
self._links = dict()
for link_name, (link_cls, prim) in links_to_create.items():
# Fixed child links of kinematic-only objects are not kinematic-only, to avoid the USD error:
# PhysicsUSD: CreateJoint - cannot create a joint between static bodies, joint prim: ...
link_load_config = {
"kinematic_only": self._load_config.get("kinematic_only", False)
if link_name == self._root_link_name else False,
"remesh": self._load_config.get("remesh", True),
}
self._links[link_name] = link_cls(
prim_path=prim.GetPrimPath().__str__(),
name=f"{self._name}:{link_name}",
load_config=link_load_config,
)
def update_joints(self):
"""
Helper function to refresh owned joints. Useful for synchronizing internal data if
additional bodies are added manually
"""
# Make sure to clean up all pre-existing names for all joints
if self._joints is not None:
for joint in self._joints.values():
joint.remove_names()
# Initialize joints dictionary
self._joints = dict()
self.update_handles()
# Handle case separately based on whether we are actually articulated or not
if self._articulation_view and not self.kinematic_only:
self._n_dof = self._articulation_view.num_dof
# Additionally grab DOF info if we have non-fixed joints
if self._n_dof > 0:
for i in range(self._articulation_view._metadata.joint_count):
# Only add the joint if it's not fixed (i.e.: it has DOFs > 0)
if self._articulation_view._metadata.joint_dof_counts[i] > 0:
joint_name = self._articulation_view._metadata.joint_names[i]
joint_dof_offset = self._articulation_view._metadata.joint_dof_offsets[i]
joint_path = self._articulation_view._dof_paths[0][joint_dof_offset]
joint = JointPrim(
prim_path=joint_path,
name=f"{self._name}:joint_{joint_name}",
articulation_view=self._articulation_view_direct,
)
joint.initialize()
self._joints[joint_name] = joint
else:
# TODO: May need to extend to clusters of rigid bodies, that aren't exactly joined
# We assume this object contains a single rigid body
self._n_dof = 0
assert self.n_joints == len(self._joints), \
f"Number of joints inferred from prim tree ({self.n_joints}) does not match number of joints " \
f"found in the articulation view ({len(self._joints)})!"
self._update_joint_limits()
self._compute_articulation_tree()
def _update_joint_limits(self):
"""
Helper function to update internal joint limits for prismatic joints based on the object's scale
"""
# If the scale is [1, 1, 1], we can skip this step
if np.allclose(self.scale, np.ones(3)):
return
prismatic_joints = {j_name: j for j_name, j in self._joints.items() if j.joint_type == JointType.JOINT_PRISMATIC}
# If there are no prismatic joints, we can skip this step
if len(prismatic_joints) == 0:
return
uniform_scale = np.allclose(self.scale, self.scale[0])
for joint_name, joint in prismatic_joints.items():
if uniform_scale:
scale_along_axis = self.scale[0]
else:
assert not self.initialized, \
"Cannot update joint limits for a non-uniformly scaled object when already initialized."
for link in self.links.values():
if joint.body0 == link.prim_path:
# Find the parent link frame orientation in the object frame
_, link_local_orn = link.get_local_pose()
# Find the joint frame orientation in the parent link frame
joint_local_orn = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(joint.get_attribute("physics:localRot0"))[[1, 2, 3, 0]]
# Compute the joint frame orientation in the object frame
joint_orn = T.quat_multiply(quaternion1=joint_local_orn, quaternion0=link_local_orn)
# assert T.check_quat_right_angle(joint_orn), \
# f"Objects that are NOT uniformly scaled requires all joints to have orientations that " \
# f"are factors of 90 degrees! Got orn: {joint_orn} for object {self.name}"
# Find the joint axis unit vector (e.g. [1, 0, 0] for "X", [0, 1, 0] for "Y", etc.)
axis_in_joint_frame = np.zeros(3)
axis_in_joint_frame[JointAxis.index(joint.axis)] = 1.0
# Compute the joint axis unit vector in the object frame
axis_in_obj_frame = T.quat2mat(joint_orn) @ axis_in_joint_frame
# Find the correct scale along the joint axis direction
scale_along_axis = self.scale[np.argmax(np.abs(axis_in_obj_frame))]
joint.lower_limit = joint.lower_limit * scale_along_axis
joint.upper_limit = joint.upper_limit * scale_along_axis
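# Illustrative sketch (not part of the original file): under non-uniform scale,
# a prismatic limit is stretched by the scale component along the joint axis
# expressed in the object frame. All values below are hypothetical.
import numpy as np
from scipy.spatial.transform import Rotation
scale = np.array([1.0, 2.0, 1.0])
joint_orn = Rotation.from_euler("z", np.pi / 2).as_quat()  # joint frame in object frame
axis_in_joint = np.array([1.0, 0.0, 0.0])                  # prismatic axis "X"
axis_in_obj = Rotation.from_quat(joint_orn).apply(axis_in_joint)
scale_along_axis = scale[np.argmax(np.abs(axis_in_obj))]   # -> 2.0 here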
@property
def _articulation_view(self):
if self._articulation_view_direct is None:
return None
# Validate that the articulation view is initialized and that if physics is running, the
# view is valid.
if og.sim.is_playing() and self.initialized:
assert self._articulation_view_direct.is_physics_handle_valid() and \
self._articulation_view_direct._physics_view.check(), \
"Articulation view must be valid if physics is running!"
return self._articulation_view_direct
@property
def prim_type(self):
"""
Returns:
str: Type of this entity prim, one of omnigibson.utils.constants.PrimType
"""
return self._prim_type
@property
def articulated(self):
"""
Returns:
bool: Whether this prim is articulated or not
"""
# Note that this is not equivalent to self.n_joints > 0 because articulation root path is
# overridden by the object classes
return self.articulation_root_path is not None
@property
def articulation_root_path(self):
"""
Returns:
None or str: Absolute USD path to the expected prim that represents the articulation root, if it exists. By default,
this corresponds to self.prim_path
"""
return self._prim_path if self.n_joints > 0 else None
@property
def root_link_name(self):
"""
Returns:
str: Name of this entity's root link
"""
return self._root_link_name
@property
def root_link(self):
"""
Returns:
RigidPrim or ClothPrim: Root link of this object prim
"""
return self._links[self.root_link_name]
@property
def root_prim(self):
"""
Returns:
UsdPrim: Root prim object associated with the root link of this object prim
"""
# The root prim belongs to the link with name root_link_name
return self._links[self.root_link_name].prim
@property
def n_dof(self):
"""
Returns:
int: number of DoFs of the object
"""
return self._n_dof
@property
def n_joints(self):
"""
Returns:
int: Number of joints owned by this articulation
"""
if self.initialized:
num = len(self._joints)
else:
# Manually iterate over all links and check for any joints that are not fixed joints!
num = 0
children = list(self.prim.GetChildren())
while children:
child_prim = children.pop()
children.extend(child_prim.GetChildren())
prim_type = child_prim.GetPrimTypeInfo().GetTypeName().lower()
if "joint" in prim_type and "fixed" not in prim_type:
num += 1
return num
@cached_property
def n_fixed_joints(self):
"""
Returns:
int: Number of fixed joints owned by this articulation
"""
# Manually iterate over all links and check for any joints that are not fixed joints!
num = 0
children = list(self.prim.GetChildren())
while children:
child_prim = children.pop()
children.extend(child_prim.GetChildren())
prim_type = child_prim.GetPrimTypeInfo().GetTypeName().lower()
if "joint" in prim_type and "fixed" in prim_type:
num += 1
return num
@property
def n_links(self):
"""
Returns:
int: Number of links owned by this articulation
"""
return len(list(self._links.keys()))
@property
def joints(self):
"""
Returns:
dict: Dictionary mapping joint names (str) to joint prims (JointPrim) owned by this articulation
"""
return self._joints
@property
def links(self):
"""
Returns:
dict: Dictionary mapping link names (str) to link prims (RigidPrim) owned by this articulation
"""
return self._links
@cached_property
def has_attachment_points(self):
"""
Returns:
bool: Whether this object has any attachment points
"""
children = list(self.prim.GetChildren())
while children:
child_prim = children.pop()
children.extend(child_prim.GetChildren())
if "attachment" in child_prim.GetName():
return True
return False
def _compute_articulation_tree(self):
"""
Get a graph of the articulation tree, where nodes are link names and edges
correspond to joint names, where the joint name is accessible on the `joint_name`
data field of the edge, and the joint type on the `joint_type` field.
"""
G = nx.DiGraph()
rename_later = {}
# Add the links
for link_name, link in self.links.items():
prim_path = link.prim_path
G.add_node(prim_path)
rename_later[prim_path] = link_name
# Add the joints
children = list(self.prim.GetChildren())
while children:
child_prim = children.pop()
children.extend(child_prim.GetChildren())
prim_type = child_prim.GetPrimTypeInfo().GetTypeName()
if "Joint" in prim_type:
# Get body 0
body0_targets = child_prim.GetRelationship("physics:body0").GetTargets()
if not body0_targets:
continue
body0 = str(body0_targets[0])
# Get body 1
body1_targets = child_prim.GetRelationship("physics:body1").GetTargets()
if not body1_targets:
continue
body1 = str(body1_targets[0])
# Assert both bodies in links
if body0 not in G.nodes or body1 not in G.nodes:
continue
# Add the joint
joint_type = JointType.get_type(prim_type.split("Physics")[-1])
G.add_edge(body0, body1, joint_name=child_prim.GetName(), joint_type=joint_type)
# Relabel nodes to use link name instead of prim path
nx.relabel_nodes(G, rename_later, copy=False)
# Assert all nodes have in-degree of 1 except root
in_degrees = {node: G.in_degree(node) for node in G.nodes}
assert in_degrees[self.root_link_name] == 0, "Root link should have in-degree of 0!"
assert all([in_degrees[node] == 1 for node in G.nodes if node != self.root_link_name]), \
"All non-root links should have in-degree of 1!"
self._articulation_tree = G
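# Illustrative sketch (not part of the original file): the articulation tree is
# a plain networkx DiGraph, so kinematic chains can be read back with standard
# graph queries. Link/joint names below are hypothetical.
import networkx as nx
G = nx.DiGraph()
G.add_edge("base_link", "arm_link", joint_name="shoulder_joint", joint_type="RevoluteJoint")
G.add_edge("arm_link", "gripper_link", joint_name="wrist_joint", joint_type="FixedJoint")
chain = nx.shortest_path(G, "base_link", "gripper_link")
joints = [G.edges[u, v]["joint_name"] for u, v in zip(chain[:-1], chain[1:])]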
@property
def articulation_tree(self):
return self._articulation_tree
@property
def materials(self):
"""
Loop through each link and their visual meshes to gather all the materials that belong to this object
Returns:
set of MaterialPrim: a set of MaterialPrim that belongs to this object
"""
return self._materials
@property
def visual_only(self):
"""
Returns:
bool: Whether this entity is visual-only (i.e.: no gravity or collisions applied)
"""
return self._visual_only
@visual_only.setter
def visual_only(self, val):
"""
Sets the visual only state of this entity
Args:
val (bool): Whether this entity should be visual-only (i.e.: no gravity or collisions applied)
"""
# Iterate over all owned links and set their respective visual-only properties accordingly
for link in self._links.values():
link.visual_only = val
# Also set the internal value
self._visual_only = val
def contact_list(self):
"""
Get list of all current contacts with this object prim
Returns:
list of CsRawData: raw contact info for this rigid body
"""
contacts = []
for link in self._links.values():
contacts += link.contact_list()
return contacts
def enable_gravity(self) -> None:
"""
Enables gravity for this entity
"""
for link in self._links.values():
link.enable_gravity()
def disable_gravity(self) -> None:
"""
Disables gravity for this entity
"""
for link in self._links.values():
link.disable_gravity()
def reset(self):
"""
Resets this entity to some default, pre-defined state
"""
# Make sure simulation is playing, otherwise, we cannot reset because physx requires active running
# simulation in order to set joints
assert og.sim.is_playing(), "Simulator must be playing in order to reset controllable object's joints!"
# If this is a cloth, reset the particle positions
if self.prim_type == PrimType.CLOTH:
self.root_link.reset()
# Otherwise, set all joints to have 0 position and 0 velocity if this object has joints
elif self.n_joints > 0:
self.set_joint_positions(positions=np.zeros(self.n_dof), drive=False)
self.set_joint_velocities(velocities=np.zeros(self.n_dof), drive=False)
def set_joint_positions(self, positions, indices=None, normalized=False, drive=False):
"""
Set the joint positions (both actual value and target values) in simulation. Note: only works if the simulator
is actively running!
Args:
positions (np.ndarray): positions to set. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @positions must
be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF positions to set.
Default is None, which assumes that all joints are being set.
normalized (bool): Whether the inputted joint positions should be interpreted as normalized values. Default
is False
drive (bool): Whether the positions being set are values that should be driven naturally by this entity's
motors or manual values to immediately set. Default is False, corresponding to an instantaneous
setting of the positions
"""
# Run sanity checks -- make sure that we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
# Possibly de-normalize the inputs
if normalized:
positions = self._denormalize_positions(positions=positions, indices=indices)
# Set the DOF states
if drive:
self._articulation_view.set_joint_position_targets(positions, joint_indices=indices)
else:
self._articulation_view.set_joint_positions(positions, joint_indices=indices)
PoseAPI.invalidate()
def set_joint_velocities(self, velocities, indices=None, normalized=False, drive=False):
"""
Set the joint velocities (both actual value and target values) in simulation. Note: only works if the simulator
is actively running!
Args:
velocities (np.ndarray): velocities to set. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @velocities must
be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF velocities to set.
Default is None, which assumes that all joints are being set.
normalized (bool): Whether the inputted joint velocities should be interpreted as normalized values. Default
is False
drive (bool): Whether the velocities being set are values that should be driven naturally by this entity's
motors or manual values to immediately set. Default is False, corresponding to an instantaneous
setting of the velocities
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
# Possibly de-normalize the inputs
if normalized:
velocities = self._denormalize_velocities(velocities=velocities, indices=indices)
# Set the DOF states
if drive:
self._articulation_view.set_joint_velocity_targets(velocities, joint_indices=indices)
else:
self._articulation_view.set_joint_velocities(velocities, joint_indices=indices)
def set_joint_efforts(self, efforts, indices=None, normalized=False):
"""
Set the joint efforts (both actual value and target values) in simulation. Note: only works if the simulator
is actively running!
Args:
efforts (np.ndarray): efforts to set. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @efforts must
be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF efforts to set.
Default is None, which assumes that all joints are being set.
normalized (bool): Whether the inputted joint efforts should be interpreted as normalized values. Default
is False
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
# Possibly de-normalize the inputs
if normalized:
efforts = self._denormalize_efforts(efforts=efforts, indices=indices)
# Set the DOF states
self._articulation_view.set_joint_efforts(efforts, joint_indices=indices)
def _normalize_positions(self, positions, indices=None):
"""
Normalizes raw joint positions @positions
Args:
positions (n- or k-array): n-DOF raw positions to normalize, or k (k < n) specific positions to normalize.
In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
positions to normalize. Default is None, which assumes the positions correspond to all DOF being
normalized.
Returns:
n- or k-array: normalized positions in range [-1, 1] for the specified DOFs
"""
low, high = self.joint_lower_limits, self.joint_upper_limits
mean = (low + high) / 2.0
magnitude = (high - low) / 2.0
return (positions - mean) / magnitude if indices is None else (positions - mean[indices]) / magnitude[indices]
def _denormalize_positions(self, positions, indices=None):
"""
De-normalizes joint positions @positions
Args:
positions (n- or k-array): n-DOF normalized positions or k (k < n) specific positions in range [-1, 1]
to de-normalize. In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
positions to de-normalize. Default is None, which assumes the positions correspond to all DOF being
de-normalized.
Returns:
n- or k-array: de-normalized positions for the specified DOFs
"""
low, high = self.joint_lower_limits, self.joint_upper_limits
mean = (low + high) / 2.0
magnitude = (high - low) / 2.0
return positions * magnitude + mean if indices is None else positions * magnitude[indices] + mean[indices]
def _normalize_velocities(self, velocities, indices=None):
"""
Normalizes raw joint velocities @velocities
Args:
velocities (n- or k-array): n-DOF raw velocities to normalize, or k (k < n) specific velocities to normalize.
In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
velocities to normalize. Default is None, which assumes the velocities correspond to all DOF being
normalized.
Returns:
n- or k-array: normalized velocities in range [-1, 1] for the specified DOFs
"""
return velocities / self.max_joint_velocities if indices is None else \
velocities / self.max_joint_velocities[indices]
def _denormalize_velocities(self, velocities, indices=None):
"""
De-normalizes joint velocities @velocities
Args:
velocities (n- or k-array): n-DOF normalized velocities or k (k < n) specific velocities in range [-1, 1]
to de-normalize. In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
velocities to de-normalize. Default is None, which assumes the velocities correspond to all DOF being
de-normalized.
Returns:
n- or k-array: de-normalized velocities for the specified DOFs
"""
return velocities * self.max_joint_velocities if indices is None else \
velocities * self.max_joint_velocities[indices]
def _normalize_efforts(self, efforts, indices=None):
"""
Normalizes raw joint efforts @efforts
Args:
efforts (n- or k-array): n-DOF raw efforts to normalize, or k (k < n) specific efforts to normalize.
In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
efforts to normalize. Default is None, which assumes the efforts correspond to all DOF being
normalized.
Returns:
n- or k-array: normalized efforts in range [-1, 1] for the specified DOFs
"""
return efforts / self.max_joint_efforts if indices is None else efforts / self.max_joint_efforts[indices]
def _denormalize_efforts(self, efforts, indices=None):
"""
De-normalizes joint efforts @efforts
Args:
efforts (n- or k-array): n-DOF normalized efforts or k (k < n) specific efforts in range [-1, 1]
to de-normalize. In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
efforts to de-normalize. Default is None, which assumes the efforts correspond to all DOF being
de-normalized.
Returns:
n- or k-array: de-normalized efforts for the specified DOFs
"""
return efforts * self.max_joint_efforts if indices is None else efforts * self.max_joint_efforts[indices]
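# Illustrative sketch (not part of the original file): position
# (de)normalization maps the joint limits onto [-1, 1] via the limits' midpoint
# and half-range. The limits below are hypothetical.
import numpy as np
low, high = np.array([-1.0, 0.0]), np.array([1.0, 2.0])
mean, magnitude = (low + high) / 2.0, (high - low) / 2.0
q = np.array([0.5, 1.5])
q_norm = (q - mean) / magnitude     # -> [0.5, 0.5]
q_back = q_norm * magnitude + mean  # round-trips back to q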
def update_handles(self):
"""
Updates all internal handles for this prim, in case they change since initialization
"""
assert og.sim.is_playing(), "Simulator must be playing if updating handles!"
# Reinitialize the articulation view
if self._articulation_view_direct is not None:
self._articulation_view_direct.initialize(og.sim.physics_sim_view)
# Update all links and joints as well
for link in self._links.values():
if not link.initialized:
link.initialize()
link.update_handles()
for joint in self._joints.values():
if not joint.initialized:
joint.initialize()
joint.update_handles()
def get_joint_positions(self, normalized=False):
"""
Grabs this entity's joint positions
Args:
normalized (bool): Whether returned values should be normalized to range [-1, 1] based on limits or not.
Returns:
n-array: n-DOF length array of positions
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
joint_positions = self._articulation_view.get_joint_positions().reshape(self.n_dof)
# Possibly normalize values when returning
return self._normalize_positions(positions=joint_positions) if normalized else joint_positions
def get_joint_velocities(self, normalized=False):
"""
Grabs this entity's joint velocities
Args:
normalized (bool): Whether returned values should be normalized to range [-1, 1] based on limits or not.
Returns:
n-array: n-DOF length array of velocities
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
joint_velocities = self._articulation_view.get_joint_velocities().reshape(self.n_dof)
# Possibly normalize values when returning
return self._normalize_velocities(velocities=joint_velocities) if normalized else joint_velocities
def get_joint_efforts(self, normalized=False):
"""
Grabs this entity's joint efforts
Args:
normalized (bool): Whether returned values should be normalized to range [-1, 1] based on limits or not.
Returns:
n-array: n-DOF length array of efforts
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
joint_efforts = self._articulation_view.get_applied_joint_efforts().reshape(self.n_dof)
# Possibly normalize values when returning
return self._normalize_efforts(efforts=joint_efforts) if normalized else joint_efforts
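    # Example (hypothetical; assumes the sim is playing and `robot` is an
    # articulated EntityPrim). Each getter returns an n-DOF array; pass
    # normalized=True to map values into [-1, 1] by the joint limits:
    #
    #   q = robot.get_joint_positions()                       # raw positions
    #   qd = robot.get_joint_velocities(normalized=True)      # in [-1, 1]
    #   tau = robot.get_joint_efforts()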
def set_linear_velocity(self, velocity: np.ndarray):
"""
Sets the linear velocity of the root prim in stage.
Args:
velocity (np.ndarray): linear velocity to set the rigid prim to, in the world frame. Shape (3,).
"""
self.root_link.set_linear_velocity(velocity)
def get_linear_velocity(self):
"""
Gets the linear velocity of the root prim in stage.
Returns:
            velocity (np.ndarray): current linear velocity of the root prim, in the world frame. Shape (3,).
"""
return self.root_link.get_linear_velocity()
def set_angular_velocity(self, velocity):
"""
Sets the angular velocity of the root prim in stage.
Args:
velocity (np.ndarray): angular velocity to set the rigid prim to, in the world frame. Shape (3,).
"""
self.root_link.set_angular_velocity(velocity)
def get_angular_velocity(self):
"""Gets the angular velocity of the root prim in stage.
Returns:
velocity (np.ndarray): angular velocity to set the rigid prim to, in the world frame. Shape (3,).
"""
return self.root_link.get_angular_velocity()
def get_relative_linear_velocity(self):
"""
Returns:
3-array: (x,y,z) Linear velocity of root link in its own frame
"""
return T.quat2mat(self.get_orientation()).T @ self.get_linear_velocity()
def get_relative_angular_velocity(self):
"""
Returns:
3-array: (ax,ay,az) angular velocity of root link in its own frame
"""
return T.quat2mat(self.get_orientation()).T @ self.get_angular_velocity()
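    # Example (hypothetical; `entity` is any EntityPrim). The relative getters
    # rotate the world-frame velocity into the base frame by applying the
    # transpose (i.e. inverse) of the base rotation matrix:
    #
    #   v_world = entity.get_linear_velocity()
    #   R = T.quat2mat(entity.get_orientation())
    #   v_base = R.T @ v_world   # same as entity.get_relative_linear_velocity()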
def set_position_orientation(self, position=None, orientation=None):
# If kinematic only, clear cache for the root link
if self.kinematic_only:
self.root_link.clear_kinematic_only_cache()
# If the simulation isn't running, we should set this prim's XForm (object-level) properties directly
if og.sim.is_stopped():
XFormPrim.set_position_orientation(self, position=position, orientation=orientation)
# Delegate to RigidPrim if we are not articulated
elif self._articulation_view is None:
self.root_link.set_position_orientation(position=position, orientation=orientation)
# Sim is running and articulation view exists, so use that physx API backend
else:
if position is not None:
position = np.asarray(position)[None, :]
if orientation is not None:
orientation = np.asarray(orientation)[None, [3, 0, 1, 2]]
self._articulation_view.set_world_poses(position, orientation)
PoseAPI.invalidate()
def get_position_orientation(self):
# If the simulation isn't running, we should read from this prim's XForm (object-level) properties directly
if og.sim.is_stopped():
return XFormPrim.get_position_orientation(self)
# Delegate to RigidPrim if we are not articulated
elif self._articulation_view is None:
return self.root_link.get_position_orientation()
# Sim is running and articulation view exists, so use that physx API backend
else:
positions, orientations = self._articulation_view.get_world_poses()
return positions[0], orientations[0][[1, 2, 3, 0]]
def set_local_pose(self, position=None, orientation=None):
# If kinematic only, clear cache for the root link
if self.kinematic_only:
self.root_link.clear_kinematic_only_cache()
# If the simulation isn't running, we should set this prim's XForm (object-level) properties directly
if og.sim.is_stopped():
return XFormPrim.set_local_pose(self, position, orientation)
# Delegate to RigidPrim if we are not articulated
elif self._articulation_view is None:
self.root_link.set_local_pose(position=position, orientation=orientation)
# Sim is running and articulation view exists, so use that physx API backend
else:
if position is not None:
position = np.asarray(position)[None, :]
if orientation is not None:
orientation = np.asarray(orientation)[None, [3, 0, 1, 2]]
self._articulation_view.set_local_poses(position, orientation)
PoseAPI.invalidate()
def get_local_pose(self):
# If the simulation isn't running, we should read from this prim's XForm (object-level) properties directly
if og.sim.is_stopped():
return XFormPrim.get_local_pose(self)
# Delegate to RigidPrim if we are not articulated
elif self._articulation_view is None:
return self.root_link.get_local_pose()
# Sim is running and articulation view exists, so use that physx API backend
else:
positions, orientations = self._articulation_view.get_local_poses()
return positions[0], orientations[0][[1, 2, 3, 0]]
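    # Note on the index swaps above: OmniGibson poses use (x, y, z, w)
    # quaternion ordering, while the underlying articulation view expects
    # (w, x, y, z). Writing therefore reorders with [3, 0, 1, 2] and reading
    # reorders back with [1, 2, 3, 0], e.g. (hypothetical values):
    #
    #   quat_xyzw = np.array([0.0, 0.0, 0.0, 1.0])   # identity, OmniGibson order
    #   quat_wxyz = quat_xyzw[[3, 0, 1, 2]]          # -> [1, 0, 0, 0]
    #   assert np.allclose(quat_wxyz[[1, 2, 3, 0]], quat_xyzw)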
# TODO: Is the omni joint damping (used for driving motors) same as dissipative joint damping (what we had in pb)?
@property
def joint_damping(self):
"""
Returns:
n-array: joint damping values for this prim
"""
return np.concatenate([joint.damping for joint in self._joints.values()])
@property
def joint_lower_limits(self):
"""
Returns:
n-array: minimum values for this robot's joints. If joint does not have a range, returns -1000
for that joint
"""
return np.array([joint.lower_limit for joint in self._joints.values()])
@property
def joint_upper_limits(self):
"""
Returns:
n-array: maximum values for this robot's joints. If joint does not have a range, returns 1000
for that joint
"""
return np.array([joint.upper_limit for joint in self._joints.values()])
@property
def joint_range(self):
"""
Returns:
n-array: joint range values for this robot's joints
"""
return self.joint_upper_limits - self.joint_lower_limits
@property
def max_joint_velocities(self):
"""
Returns:
n-array: maximum velocities for this robot's joints
"""
return np.array([joint.max_velocity for joint in self._joints.values()])
@property
def max_joint_efforts(self):
"""
Returns:
n-array: maximum efforts for this robot's joints
"""
return np.array([joint.max_effort for joint in self._joints.values()])
@property
def joint_position_limits(self):
"""
Returns:
2-tuple:
- n-array: min joint position limits, where each is an n-DOF length array
- n-array: max joint position limits, where each is an n-DOF length array
"""
return self.joint_lower_limits, self.joint_upper_limits
@property
def joint_velocity_limits(self):
"""
Returns:
2-tuple:
- n-array: min joint velocity limits, where each is an n-DOF length array
- n-array: max joint velocity limits, where each is an n-DOF length array
"""
return -self.max_joint_velocities, self.max_joint_velocities
@property
def joint_effort_limits(self):
"""
Returns:
2-tuple:
- n-array: min joint effort limits, where each is an n-DOF length array
- n-array: max joint effort limits, where each is an n-DOF length array
"""
return -self.max_joint_efforts, self.max_joint_efforts
@property
def joint_at_limits(self):
"""
Returns:
n-array: n-DOF length array specifying whether joint is at its limit,
with 1.0 --> at limit, otherwise 0.0
"""
return 1.0 * (np.abs(self.get_joint_positions(normalized=True)) > 0.99)
@property
def joint_has_limits(self):
"""
Returns:
n-array: n-DOF length array specifying whether joint has a limit or not
"""
return np.array([j.has_limit for j in self._joints.values()])
@property
def disabled_collision_link_names(self):
"""
Returns:
list of str: List of link names for this entity whose collisions should be globally disabled
"""
return []
@property
def disabled_collision_pairs(self):
"""
Returns:
list of (str, str): List of rigid body collision pairs to disable within this object prim.
Default is an empty list (no pairs)
"""
return []
@property
def scale(self):
# For the EntityPrim (object) level, @self.scale represents the scale with respect to the original scale of
# the link (RigidPrim or ClothPrim), which might not be uniform ([1, 1, 1]) itself.
return self.root_link.scale / self.root_link.original_scale
@scale.setter
def scale(self, scale):
# For the EntityPrim (object) level, @self.scale represents the scale with respect to the original scale of
# the link (RigidPrim or ClothPrim), which might not be uniform ([1, 1, 1]) itself.
# We iterate over all rigid bodies owned by this object prim and set their individual scales
# We do this because omniverse cannot scale orientation of an articulated prim, so we get mesh mismatches as
# they rotate in the world.
for link in self._links.values():
link.scale = scale * link.original_scale
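    # Example (hypothetical; `obj` is an EntityPrim). Scale is expressed
    # relative to each link's original scale, so setting and reading back
    # round-trips even when links were authored with non-unit scales:
    #
    #   obj.scale = np.array([2.0, 2.0, 2.0])
    #   assert np.allclose(obj.scale, [2.0, 2.0, 2.0])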
@property
def solver_position_iteration_count(self):
"""
Returns:
int: How many position iterations to take per physics step by the physx solver
"""
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:solverPositionIterationCount") if \
self.articulated else self.root_link.solver_position_iteration_count
@solver_position_iteration_count.setter
def solver_position_iteration_count(self, count):
"""
Sets how many position iterations to take per physics step by the physx solver
Args:
count (int): How many position iterations to take per physics step by the physx solver
"""
if self.articulated:
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:solverPositionIterationCount", count)
else:
for link in self._links.values():
link.solver_position_iteration_count = count
@property
def solver_velocity_iteration_count(self):
"""
Returns:
int: How many velocity iterations to take per physics step by the physx solver
"""
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:solverVelocityIterationCount") if \
self.articulated else self.root_link.solver_velocity_iteration_count
@solver_velocity_iteration_count.setter
def solver_velocity_iteration_count(self, count):
"""
Sets how many velocity iterations to take per physics step by the physx solver
Args:
count (int): How many velocity iterations to take per physics step by the physx solver
"""
if self.articulated:
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:solverVelocityIterationCount", count)
else:
for link in self._links.values():
link.solver_velocity_iteration_count = count
@property
def stabilization_threshold(self):
"""
Returns:
float: threshold for stabilizing this articulation
"""
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:stabilizationThreshold") if \
self.articulated else self.root_link.stabilization_threshold
@stabilization_threshold.setter
def stabilization_threshold(self, threshold):
"""
Sets threshold for stabilizing this articulation
Args:
threshold (float): Stabilization threshold
"""
if self.articulated:
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:stabilizationThreshold", threshold)
else:
for link in self._links.values():
link.stabilization_threshold = threshold
@property
def is_asleep(self):
"""
Returns:
bool: whether this entity is asleep or not
"""
# If we're kinematic only, immediately return False since it doesn't follow the sleep / wake paradigm
if self.kinematic_only:
return False
else:
return og.sim.psi.is_sleeping(og.sim.stage_id, lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.articulation_root_path)) \
if self.articulated else self.root_link.is_asleep
@property
def sleep_threshold(self):
"""
Returns:
float: threshold for sleeping this articulation
"""
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:sleepThreshold") if \
self.articulated else self.root_link.sleep_threshold
@sleep_threshold.setter
def sleep_threshold(self, threshold):
"""
Sets threshold for sleeping this articulation
Args:
threshold (float): Sleeping threshold
"""
if self.articulated:
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:sleepThreshold", threshold)
else:
for link in self._links.values():
link.sleep_threshold = threshold
@property
def self_collisions(self):
"""
Returns:
bool: Whether self-collisions are enabled for this prim or not
"""
assert self.articulated, "Cannot get self-collision for non-articulated EntityPrim!"
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:enabledSelfCollisions")
@self_collisions.setter
def self_collisions(self, flag):
"""
Sets whether self-collisions are enabled for this prim or not
Args:
flag (bool): Whether self collisions are enabled for this prim or not
"""
assert self.articulated, "Cannot set self-collision for non-articulated EntityPrim!"
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:enabledSelfCollisions", flag)
@property
def kinematic_only(self):
"""
Returns:
bool: Whether this object is a kinematic-only object (otherwise, it is a rigid body). A kinematic-only
object is not subject to simulator dynamics, and remains fixed unless the user explicitly sets the
body's pose / velocities. See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics/rigid-bodies.html?highlight=rigid%20body%20enabled#kinematic-rigid-bodies
for more information
"""
return self.root_link.kinematic_only
@property
def aabb(self):
# If we're a cloth prim type, we compute the bounding box from the limits of the particles. Otherwise, use the
# normal method for computing bounding box
if self._prim_type == PrimType.CLOTH:
particle_contact_offset = self.root_link.cloth_system.particle_contact_offset
particle_positions = self.root_link.compute_particle_positions()
aabb_lo, aabb_hi = np.min(particle_positions, axis=0) - particle_contact_offset, \
np.max(particle_positions, axis=0) + particle_contact_offset
else:
points_world = [link.collision_boundary_points_world for link in self._links.values()]
all_points = np.concatenate([p for p in points_world if p is not None], axis=0)
aabb_lo = np.min(all_points, axis=0)
aabb_hi = np.max(all_points, axis=0)
return aabb_lo, aabb_hi
@property
def aabb_extent(self):
"""
Get this xform's actual bounding box extent
Returns:
3-array: (x,y,z) bounding box
"""
min_corner, max_corner = self.aabb
return max_corner - min_corner
@property
def aabb_center(self):
"""
Get this xform's actual bounding box center
Returns:
3-array: (x,y,z) bounding box center
"""
min_corner, max_corner = self.aabb
return (max_corner + min_corner) / 2.0
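    # Example (hypothetical; `obj` is a loaded EntityPrim). The AABB helpers
    # compose as expected:
    #
    #   lo, hi = obj.aabb
    #   assert np.allclose(obj.aabb_extent, hi - lo)
    #   assert np.allclose(obj.aabb_center, (hi + lo) / 2.0)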
def get_coriolis_and_centrifugal_forces(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
n-array: (N,) shaped per-DOF coriolis and centrifugal forces experienced by the entity, if articulated
"""
assert self.articulated, "Cannot get coriolis and centrifugal forces for non-articulated entity!"
return self._articulation_view.get_coriolis_and_centrifugal_forces(clone=clone).reshape(self.n_dof)
def get_generalized_gravity_forces(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
            n-array: (N,) shaped per-DOF generalized gravity forces, if articulated
"""
assert self.articulated, "Cannot get generalized gravity forces for non-articulated entity!"
return self._articulation_view.get_generalized_gravity_forces(clone=clone).reshape(self.n_dof)
def get_mass_matrix(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
n-array: (N, N) shaped per-DOF mass matrix, if articulated
"""
assert self.articulated, "Cannot get mass matrix for non-articulated entity!"
return self._articulation_view.get_mass_matrices(clone=clone).reshape(self.n_dof, self.n_dof)
def get_jacobian(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
n-array: (N_links - 1 [+ 1], 6, N_dof [+ 6]) shaped per-link jacobian, if articulated. Note that the first
dimension is +1 and the final dimension is +6 if the entity does not have a fixed base
(i.e.: there is an additional "floating" joint tying the robot to the world frame)
"""
assert self.articulated, "Cannot get jacobian for non-articulated entity!"
return self._articulation_view.get_jacobians(clone=clone).squeeze(axis=0)
def get_relative_jacobian(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
n-array: (N_links - 1 [+ 1], 6, N_dof [+ 6]) shaped per-link relative jacobian, if articulated (expressed in
this entity's base frame). Note that the first dimension is +1 and the final dimension is +6 if the
entity does not have a fixed base (i.e.: there is an additional "floating" joint tying the robot to
the world frame)
"""
jac = self.get_jacobian(clone=clone)
ori_t = T.quat2mat(self.get_orientation()).T.astype(np.float32)
tf = np.zeros((1, 6, 6), dtype=np.float32)
tf[:, :3, :3] = ori_t
tf[:, 3:, 3:] = ori_t
return tf @ jac
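    # Example (hypothetical; `robot` is a fixed-base articulated EntityPrim and
    # `link_idx` indexes the end-effector row of the jacobian). A common use of
    # these dynamics quantities is building the task-space (operational-space)
    # inertia for a given link:
    #
    #   M = robot.get_mass_matrix()              # (N, N)
    #   J = robot.get_jacobian()[link_idx]       # (6, N)
    #   M_inv = np.linalg.inv(M)
    #   lambda_inv = J @ M_inv @ J.T             # (6, 6) task-space inertia inverse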
def wake(self):
"""
Enable physics for this articulation
"""
if self.articulated:
prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path)
og.sim.psi.wake_up(og.sim.stage_id, prim_id)
else:
for link in self._links.values():
link.wake()
def sleep(self):
"""
Disable physics for this articulation
"""
if self.articulated:
prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path)
og.sim.psi.put_to_sleep(og.sim.stage_id, prim_id)
else:
for link in self._links.values():
link.sleep()
def keep_still(self):
"""
Zero out all velocities for this prim
"""
self.set_linear_velocity(velocity=np.zeros(3))
self.set_angular_velocity(velocity=np.zeros(3))
for joint in self._joints.values():
joint.keep_still()
# Make sure object is awake
self.wake()
def create_attachment_point_link(self):
"""
Create a collision-free, invisible attachment point link for the cloth object, and create an attachment between
the ClothPrim and this attachment point link (RigidPrim).
        One use case for this is that we can create a fixed joint between this link and the world to enable AG for cloth.
During simulation, this joint will move and match the robot gripper frame, which will then drive the cloth.
"""
assert self._prim_type == PrimType.CLOTH, "create_attachment_point_link should only be called for Cloth"
link_name = "attachment_point"
stage = lazy.omni.isaac.core.utils.stage.get_current_stage()
link_prim = stage.DefinePrim(f"{self._prim_path}/{link_name}", "Xform")
vis_prim = lazy.pxr.UsdGeom.Sphere.Define(stage, f"{self._prim_path}/{link_name}/visuals").GetPrim()
col_prim = lazy.pxr.UsdGeom.Sphere.Define(stage, f"{self._prim_path}/{link_name}/collisions").GetPrim()
# Set the radius to be 0.03m. In theory, we want this radius to be as small as possible. Otherwise, the cloth
# dynamics will be unrealistic. However, in practice, if the radius is too small, the attachment becomes very
# unstable. Empirically 0.03m works reasonably well.
vis_prim.GetAttribute("radius").Set(0.03)
col_prim.GetAttribute("radius").Set(0.03)
# Need to sync the extents
extent = vis_prim.GetAttribute("extent").Get()
extent[0] = lazy.pxr.Gf.Vec3f(-0.03, -0.03, -0.03)
extent[1] = lazy.pxr.Gf.Vec3f(0.03, 0.03, 0.03)
vis_prim.GetAttribute("extent").Set(extent)
col_prim.GetAttribute("extent").Set(extent)
# Add collision API to collision geom
lazy.pxr.UsdPhysics.CollisionAPI.Apply(col_prim)
lazy.pxr.UsdPhysics.MeshCollisionAPI.Apply(col_prim)
lazy.pxr.PhysxSchema.PhysxCollisionAPI.Apply(col_prim)
        # Create an attachment point link
link = RigidPrim(
prim_path=link_prim.GetPrimPath().__str__(),
name=f"{self._name}:{link_name}",
)
link.disable_collisions()
# TODO (eric): Should we disable gravity for this link?
# link.disable_gravity()
link.visible = False
# Set a very small mass
link.mass = 1e-6
link.density = 0.0
self._links[link_name] = link
# Create an attachment between the root link (ClothPrim) and the newly created attachment point link (RigidPrim)
attachment_path = self.root_link.prim.GetPath().AppendElementString("attachment")
lazy.omni.kit.commands.execute("CreatePhysicsAttachment", target_attachment_path=attachment_path,
actor0_path=self.root_link.prim.GetPath(), actor1_path=link.prim.GetPath())
def _dump_state(self):
# We don't call super, instead, this state is simply the root link state and all joint states
state = dict(root_link=self.root_link._dump_state())
joint_state = dict()
for prim_name, prim in self._joints.items():
joint_state[prim_name] = prim._dump_state()
state["joints"] = joint_state
return state
def _load_state(self, state):
# Load base link state and joint states
self.root_link._load_state(state=state["root_link"])
for joint_name, joint_state in state["joints"].items():
self._joints[joint_name]._load_state(state=joint_state)
# Make sure this object is awake
self.wake()
def _serialize(self, state):
        # We serialize by first flattening the root link state and then iterating over all joints and
        # appending them to a flattened array
state_flat = [self.root_link.serialize(state=state["root_link"])]
if self.n_joints > 0:
state_flat.append(
np.concatenate(
[prim.serialize(state=state["joints"][prim_name]) for prim_name, prim in self._joints.items()]
)
)
return np.concatenate(state_flat).astype(float)
def _deserialize(self, state):
# We deserialize by first de-flattening the root link state and then iterating over all joints and
# sequentially grabbing from the flattened state array, incrementing along the way
idx = self.root_link.state_size
state_dict = dict(root_link=self.root_link.deserialize(state=state[:idx]))
joint_state_dict = dict()
for prim_name, prim in self._joints.items():
joint_state_dict[prim_name] = prim.deserialize(state=state[idx:idx+prim.state_size])
idx += prim.state_size
state_dict["joints"] = joint_state_dict
return state_dict, idx
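    # Example (hypothetical; `obj` is an initialized EntityPrim). The public
    # Serializable API built on these hooks supports a full save / restore
    # round trip of the root link pose and all joint states:
    #
    #   flat = obj.dump_state(serialized=True)   # 1D float array
    #   obj.load_state(flat, serialized=True)    # restores root link + joints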
def _create_prim_with_same_kwargs(self, prim_path, name, load_config):
# Subclass must implement this method for duplication functionality
raise NotImplementedError("Subclass must implement _create_prim_with_same_kwargs() to enable duplication "
"functionality for EntityPrim!")
# StanfordVL/OmniGibson/omnigibson/prims/material_prim.py
import numpy as np
import asyncio
import os
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.utils.physx_utils import bind_material
from omnigibson.prims.prim_base import BasePrim
class MaterialPrim(BasePrim):
"""
Provides high level functions to deal with a material prim and its attributes/ properties.
If there is a material prim present at the path, it will use it. Otherwise, a new material prim at
the specified prim path will be created.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
@prim_path -- it will be ignored if it already exists. Subclasses should define the exact keys expected
for their class. For this material prim, the below values can be specified:
mdl_name (None or str): If specified, should be the name of the mdl preset to load (including .mdl).
None results in default, "OmniPBR.mdl"
mtl_name (None or str): If specified, should be the name of the mtl preset to load.
None results in default, "OmniPBR"
"""
# Persistent dictionary of materials, mapped from prim_path to MaterialPrim
MATERIALS = dict()
@classmethod
def get_material(cls, name, prim_path, load_config=None):
"""
Get a material prim from the persistent dictionary of materials, or create a new one if it doesn't exist.
Args:
name (str): Name for the object.
prim_path (str): prim path of the MaterialPrim.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
@prim_path -- it will be ignored if it already exists.
Returns:
MaterialPrim: Material prim at the specified path
"""
# If the material already exists, return it
if prim_path in cls.MATERIALS:
return cls.MATERIALS[prim_path]
# Otherwise, create a new one and return it
new_material = cls(prim_path=prim_path, name=name, load_config=load_config)
cls.MATERIALS[prim_path] = new_material
return new_material
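    # Example (hypothetical name and prim path). get_material() memoizes by
    # prim path, so repeated lookups return the same MaterialPrim instance:
    #
    #   mat = MaterialPrim.get_material(name="my_mat", prim_path="/World/Looks/my_mat")
    #   same = MaterialPrim.get_material(name="my_mat", prim_path="/World/Looks/my_mat")
    #   assert mat is same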
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Other values that will be filled in at runtime
self._shader = None
# Users of this material: should be a set of BaseObject and BaseSystem
self._users = set()
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def _load(self):
# We create a new material at the specified path
mtl_created = []
lazy.omni.kit.commands.execute(
"CreateAndBindMdlMaterialFromLibrary",
mdl_name="OmniPBR.mdl" if self._load_config.get("mdl_name", None) is None else self._load_config["mdl_name"],
mtl_name="OmniPBR" if self._load_config.get("mtl_name", None) is None else self._load_config["mtl_name"],
mtl_created_list=mtl_created,
)
material_path = mtl_created[0]
# Move prim to desired location
lazy.omni.kit.commands.execute("MovePrim", path_from=material_path, path_to=self._prim_path)
# Return generated material
return lazy.omni.isaac.core.utils.prims.get_prim_at_path(self._prim_path)
@classmethod
def clear(cls):
cls.MATERIALS = dict()
@property
def users(self):
"""
        Users of this material: should be a set of BaseObject and BaseSystem
"""
return self._users
def add_user(self, user):
"""
Adds a user to the material. This can be a BaseObject or BaseSystem.
Args:
user (BaseObject or BaseSystem): User to add to the material
"""
self._users.add(user)
def remove_user(self, user):
"""
Removes a user from the material. This can be a BaseObject or BaseSystem.
If there are no users left, the material will be removed.
Args:
user (BaseObject or BaseSystem): User to remove from the material
"""
self._users.remove(user)
if len(self._users) == 0:
self.remove()
def remove(self):
        # Remove from global materials dictionary
self.MATERIALS.pop(self._prim_path)
# Run super
super().remove()
def _post_load(self):
# run super first
super()._post_load()
# Add this material to the list of global materials
self.MATERIALS[self._prim_path] = self
# Generate shader reference
self._shader = lazy.omni.usd.get_shader_from_material(self._prim)
def bind(self, target_prim_path):
"""
Bind this material to an arbitrary prim (usually a visual mesh prim)
Args:
target_prim_path (str): prim path of the Prim to bind to
"""
bind_material(prim_path=target_prim_path, material_path=self.prim_path)
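    # Example (hypothetical prim path; `mat` is a loaded MaterialPrim). Binding
    # applies this material to a visual mesh prim elsewhere on the stage:
    #
    #   mat.bind("/World/my_object/visuals")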
async def _load_mdl_parameters(self, render=True):
"""
Loads MDL parameters internally so they can be accessed by our class instance
Args:
render (bool): If True, takes a rendering step before loading the mdl parameters.
Note that a rendering step is necessary to load these parameters, though if a step has already
occurred externally, no additional rendering step is needed
"""
if render:
og.sim.render()
await lazy.omni.usd.get_context().load_mdl_parameters_for_prim_async(self._shader)
def shader_force_populate(self, render=True):
"""
Force populate inputs and outputs of the shader
Args:
render (bool): If True, takes a rendering step before force populating the inputs and outputs.
Note that a rendering step is necessary to load these I/Os, though if a step has already
occurred externally, no additional rendering step is needed
"""
assert self._shader is not None
asyncio.run(self._load_mdl_parameters(render=render))
def shader_update_asset_paths_with_root_path(self, root_path):
"""
Similar to @shader_update_asset_paths, except in this case, root_path is explicitly provided by the caller.
Args:
root_path (str): root to be pre-appended to the original asset paths
"""
for inp_name in self.shader_input_names_by_type("SdfAssetPath"):
inp = self.get_input(inp_name)
# If the input doesn't have any path, skip
if inp is None:
continue
original_path = inp.path if inp.resolvedPath == "" else inp.resolvedPath
# If the input has an empty path, skip
if original_path == "":
continue
new_path = os.path.join(root_path, original_path)
self.set_input(inp_name, new_path)
def get_input(self, inp):
"""
Grabs the input with corresponding name @inp associated with this material and shader
Args:
inp (str): Name of the shader input whose value will be grabbed
Returns:
any: value of the requested @inp
"""
return self._shader.GetInput(inp).Get()
def set_input(self, inp, val):
"""
Sets the input with corresponding name @inp associated with this material and shader
Args:
inp (str): Name of the shader input whose value will be set
val (any): Value to set for the input. This should be the valid type for that attribute.
"""
# Make sure the input exists first, so we avoid segfaults with "invalid null prim"
assert inp in self.shader_input_names, \
f"Got invalid shader input to set! Current inputs are: {self.shader_input_names}. Got: {inp}"
self._shader.GetInput(inp).Set(val)
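    # Example (hypothetical; `mat` is a loaded OmniPBR MaterialPrim whose
    # shader I/Os have been populated). get_input / set_input are the raw
    # accessors that the typed properties below are built on:
    #
    #   mat.set_input("diffuse_color_constant", lazy.pxr.Gf.Vec3f(1.0, 0.0, 0.0))
    #   color = mat.get_input("diffuse_color_constant")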
@property
def is_glass(self):
"""
Returns:
bool: Whether this material is a glass material or not
"""
return "glass_color" in self.shader_input_names
@property
def shader(self):
"""
Returns:
            UsdShade.Shader: Shader associated with this material
"""
return self._shader
@property
def shader_input_names(self):
"""
Returns:
set: All the shader input names associated with this material
"""
return {inp.GetBaseName() for inp in self._shader.GetInputs()}
def shader_input_names_by_type(self, input_type):
"""
Args:
input_type (str): input type
Returns:
set: All the shader input names associated with this material that match the given input type
"""
return {inp.GetBaseName() for inp in self._shader.GetInputs() if inp.GetTypeName().cppTypeName == input_type}
@property
def diffuse_color_constant(self):
"""
Returns:
3-array: this material's applied (R,G,B) color
"""
return np.array(self.get_input(inp="diffuse_color_constant"))
@diffuse_color_constant.setter
def diffuse_color_constant(self, color):
"""
Args:
color (3-array): this material's applied (R,G,B) color
"""
self.set_input(inp="diffuse_color_constant", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def diffuse_texture(self):
"""
Returns:
str: this material's applied diffuse_texture filepath
"""
return self.get_input(inp="diffuse_texture").resolvedPath
@diffuse_texture.setter
def diffuse_texture(self, fpath):
"""
Args:
            fpath (str): this material's applied diffuse_texture filepath
"""
self.set_input(inp="diffuse_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def albedo_desaturation(self):
"""
Returns:
float: this material's applied albedo_desaturation
"""
return self.get_input(inp="albedo_desaturation")
@albedo_desaturation.setter
def albedo_desaturation(self, desaturation):
"""
Args:
desaturation (float): this material's applied albedo_desaturation
"""
self.set_input(inp="albedo_desaturation", val=desaturation)
@property
def albedo_add(self):
"""
Returns:
float: this material's applied albedo_add
"""
return self.get_input(inp="albedo_add")
@albedo_add.setter
def albedo_add(self, add):
"""
Args:
add (float): this material's applied albedo_add
"""
self.set_input(inp="albedo_add", val=add)
@property
def albedo_brightness(self):
"""
Returns:
float: this material's applied albedo_brightness
"""
return self.get_input(inp="albedo_brightness")
@albedo_brightness.setter
def albedo_brightness(self, brightness):
"""
Args:
brightness (float): this material's applied albedo_brightness
"""
self.set_input(inp="albedo_brightness", val=brightness)
@property
def diffuse_tint(self):
"""
Returns:
3-array: this material's applied (R,G,B) diffuse_tint
"""
return np.array(self.get_input(inp="diffuse_tint"))
@diffuse_tint.setter
def diffuse_tint(self, color):
"""
Args:
color (3-array): this material's applied (R,G,B) diffuse_tint
"""
self.set_input(inp="diffuse_tint", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def reflection_roughness_constant(self):
"""
Returns:
float: this material's applied reflection_roughness_constant
"""
return self.get_input(inp="reflection_roughness_constant")
@reflection_roughness_constant.setter
def reflection_roughness_constant(self, roughness):
"""
Args:
roughness (float): this material's applied reflection_roughness_constant
"""
self.set_input(inp="reflection_roughness_constant", val=roughness)
@property
def reflection_roughness_texture_influence(self):
"""
Returns:
float: this material's applied reflection_roughness_texture_influence
"""
return self.get_input(inp="reflection_roughness_texture_influence")
@reflection_roughness_texture_influence.setter
def reflection_roughness_texture_influence(self, prop):
"""
Args:
prop (float): this material's applied reflection_roughness_texture_influence proportion
"""
self.set_input(inp="reflection_roughness_texture_influence", val=prop)
@property
def reflectionroughness_texture(self):
"""
Returns:
None or str: this material's applied reflectionroughness_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="reflectionroughness_texture")
return None if inp is None else inp.resolvedPath
@reflectionroughness_texture.setter
def reflectionroughness_texture(self, fpath):
"""
Args:
fpath (str): this material's applied reflectionroughness_texture fpath
"""
self.set_input(inp="reflectionroughness_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def metallic_constant(self):
"""
Returns:
float: this material's applied metallic_constant
"""
return self.get_input(inp="metallic_constant")
@metallic_constant.setter
def metallic_constant(self, constant):
"""
Args:
constant (float): this material's applied metallic_constant
"""
self.set_input(inp="metallic_constant", val=constant)
@property
def metallic_texture_influence(self):
"""
Returns:
float: this material's applied metallic_texture_influence
"""
return self.get_input(inp="metallic_texture_influence")
@metallic_texture_influence.setter
def metallic_texture_influence(self, prop):
"""
Args:
prop (float): this material's applied metallic_texture_influence
"""
self.set_input(inp="metallic_texture_influence", val=prop)
@property
def metallic_texture(self):
"""
Returns:
None or str: this material's applied metallic_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="metallic_texture")
return None if inp is None else inp.resolvedPath
@metallic_texture.setter
def metallic_texture(self, fpath):
"""
Args:
fpath (str): this material's applied metallic_texture fpath
"""
self.set_input(inp="metallic_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def specular_level(self):
"""
Returns:
float: this material's applied specular_level
"""
return self.get_input(inp="specular_level")
@specular_level.setter
def specular_level(self, level):
"""
Args:
level (float): this material's applied specular_level
"""
self.set_input(inp="specular_level", val=level)
@property
def enable_ORM_texture(self):
"""
Returns:
bool: this material's applied enable_ORM_texture
"""
return self.get_input(inp="enable_ORM_texture")
@enable_ORM_texture.setter
def enable_ORM_texture(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_ORM_texture
"""
self.set_input(inp="enable_ORM_texture", val=enabled)
@property
def ORM_texture(self):
"""
Returns:
None or str: this material's applied ORM_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="ORM_texture")
return None if inp is None else inp.resolvedPath
@ORM_texture.setter
def ORM_texture(self, fpath):
"""
Args:
fpath (str): this material's applied ORM_texture fpath
"""
self.set_input(inp="ORM_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def ao_to_diffuse(self):
"""
Returns:
float: this material's applied ao_to_diffuse
"""
return self.get_input(inp="ao_to_diffuse")
@ao_to_diffuse.setter
def ao_to_diffuse(self, val):
"""
Args:
val (float): this material's applied ao_to_diffuse
"""
self.set_input(inp="ao_to_diffuse", val=val)
@property
def ao_texture(self):
"""
Returns:
None or str: this material's applied ao_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="ao_texture")
return None if inp is None else inp.resolvedPath
@ao_texture.setter
def ao_texture(self, fpath):
"""
Args:
fpath (str): this material's applied ao_texture fpath
"""
self.set_input(inp="ao_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def enable_emission(self):
"""
Returns:
bool: this material's applied enable_emission
"""
return self.get_input(inp="enable_emission")
@enable_emission.setter
def enable_emission(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_emission
"""
self.set_input(inp="enable_emission", val=enabled)
@property
def emissive_color(self):
"""
Returns:
3-array: this material's applied (R,G,B) emissive_color
"""
return np.array(self.get_input(inp="emissive_color"))
@emissive_color.setter
def emissive_color(self, color):
"""
Args:
color (3-array): this material's applied emissive_color
"""
self.set_input(inp="emissive_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def emissive_color_texture(self):
"""
Returns:
None or str: this material's applied emissive_color_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="emissive_color_texture")
return None if inp is None else inp.resolvedPath
@emissive_color_texture.setter
def emissive_color_texture(self, fpath):
"""
Args:
fpath (str): this material's applied emissive_color_texture fpath
"""
self.set_input(inp="emissive_color_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def emissive_mask_texture(self):
"""
Returns:
None or str: this material's applied emissive_mask_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="emissive_mask_texture")
return None if inp is None else inp.resolvedPath
@emissive_mask_texture.setter
def emissive_mask_texture(self, fpath):
"""
Args:
fpath (str): this material's applied emissive_mask_texture fpath
"""
self.set_input(inp="emissive_mask_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def emissive_intensity(self):
"""
Returns:
float: this material's applied emissive_intensity
"""
return self.get_input(inp="emissive_intensity")
@emissive_intensity.setter
def emissive_intensity(self, intensity):
"""
Args:
intensity (float): this material's applied emissive_intensity
"""
self.set_input(inp="emissive_intensity", val=intensity)
@property
def enable_opacity(self):
"""
Returns:
bool: this material's applied enable_opacity
"""
return self.get_input(inp="enable_opacity")
@enable_opacity.setter
def enable_opacity(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_opacity
"""
self.set_input(inp="enable_opacity", val=enabled)
@property
def enable_opacity_texture(self):
"""
Returns:
bool: this material's applied enable_opacity_texture
"""
return self.get_input(inp="enable_opacity_texture")
@enable_opacity_texture.setter
def enable_opacity_texture(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_opacity_texture
"""
self.set_input(inp="enable_opacity_texture", val=enabled)
@property
def opacity_constant(self):
"""
Returns:
float: this material's applied opacity_constant
"""
return self.get_input(inp="opacity_constant")
@opacity_constant.setter
def opacity_constant(self, constant):
"""
Args:
constant (float): this material's applied opacity_constant
"""
self.set_input(inp="opacity_constant", val=constant)
@property
def opacity_texture(self):
"""
Returns:
None or str: this material's applied opacity_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="opacity_texture")
return None if inp is None else inp.resolvedPath
@opacity_texture.setter
def opacity_texture(self, fpath):
"""
Args:
fpath (str): this material's applied opacity_texture fpath
"""
self.set_input(inp="opacity_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def opacity_mode(self):
"""
Returns:
int: this material's applied opacity_mode
"""
return self.get_input(inp="opacity_mode")
@opacity_mode.setter
def opacity_mode(self, mode):
"""
Args:
mode (int): this material's applied opacity_mode
"""
self.set_input(inp="opacity_mode", val=mode)
@property
def opacity_threshold(self):
"""
Returns:
float: this material's applied opacity_threshold
"""
return self.get_input(inp="opacity_threshold")
@opacity_threshold.setter
def opacity_threshold(self, threshold):
"""
Args:
threshold (float): this material's applied opacity_threshold
"""
self.set_input(inp="opacity_threshold", val=threshold)
@property
def bump_factor(self):
"""
Returns:
float: this material's applied bump_factor
"""
return self.get_input(inp="bump_factor")
@bump_factor.setter
def bump_factor(self, factor):
"""
Args:
factor (float): this material's applied bump_factor
"""
self.set_input(inp="bump_factor", val=factor)
@property
def normalmap_texture(self):
"""
Returns:
None or str: this material's applied normalmap_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="normalmap_texture")
return None if inp is None else inp.resolvedPath
@normalmap_texture.setter
def normalmap_texture(self, fpath):
"""
Args:
fpath (str): this material's applied normalmap_texture fpath
"""
self.set_input(inp="normalmap_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def detail_bump_factor(self):
"""
Returns:
float: this material's applied detail_bump_factor
"""
return self.get_input(inp="detail_bump_factor")
@detail_bump_factor.setter
def detail_bump_factor(self, factor):
"""
Args:
factor (float): this material's applied detail_bump_factor
"""
self.set_input(inp="detail_bump_factor", val=factor)
@property
def detail_normalmap_texture(self):
"""
Returns:
None or str: this material's applied detail_normalmap_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="detail_normalmap_texture")
return None if inp is None else inp.resolvedPath
@detail_normalmap_texture.setter
def detail_normalmap_texture(self, fpath):
"""
Args:
fpath (str): this material's applied detail_normalmap_texture fpath
"""
self.set_input(inp="detail_normalmap_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def flip_tangent_u(self):
"""
Returns:
bool: this material's applied flip_tangent_u
"""
return self.get_input(inp="flip_tangent_u")
@flip_tangent_u.setter
def flip_tangent_u(self, flipped):
"""
Args:
flipped (bool): this material's applied flip_tangent_u
"""
self.set_input(inp="flip_tangent_u", val=flipped)
@property
def flip_tangent_v(self):
"""
Returns:
bool: this material's applied flip_tangent_v
"""
return self.get_input(inp="flip_tangent_v")
@flip_tangent_v.setter
def flip_tangent_v(self, flipped):
"""
Args:
flipped (bool): this material's applied flip_tangent_v
"""
self.set_input(inp="flip_tangent_v", val=flipped)
@property
def project_uvw(self):
"""
Returns:
bool: this material's applied project_uvw
"""
return self.get_input(inp="project_uvw")
@project_uvw.setter
def project_uvw(self, projected):
"""
Args:
projected (bool): this material's applied project_uvw
"""
self.set_input(inp="project_uvw", val=projected)
@property
def world_or_object(self):
"""
Returns:
bool: this material's applied world_or_object
"""
return self.get_input(inp="world_or_object")
@world_or_object.setter
def world_or_object(self, val):
"""
Args:
val (bool): this material's applied world_or_object
"""
self.set_input(inp="world_or_object", val=val)
@property
def uv_space_index(self):
"""
Returns:
int: this material's applied uv_space_index
"""
return self.get_input(inp="uv_space_index")
@uv_space_index.setter
def uv_space_index(self, index):
"""
Args:
index (int): this material's applied uv_space_index
"""
self.set_input(inp="uv_space_index", val=index)
@property
def texture_translate(self):
"""
Returns:
2-array: this material's applied texture_translate
"""
return np.array(self.get_input(inp="texture_translate"))
@texture_translate.setter
def texture_translate(self, translate):
"""
Args:
translate (2-array): this material's applied (x,y) texture_translate
"""
self.set_input(inp="texture_translate", val=lazy.pxr.Gf.Vec2f(*np.array(translate, dtype=float)))
@property
def texture_rotate(self):
"""
Returns:
float: this material's applied texture_rotate
"""
return self.get_input(inp="texture_rotate")
@texture_rotate.setter
def texture_rotate(self, rotate):
"""
Args:
rotate (float): this material's applied texture_rotate
"""
self.set_input(inp="texture_rotate", val=rotate)
@property
def texture_scale(self):
"""
Returns:
2-array: this material's applied texture_scale
"""
return np.array(self.get_input(inp="texture_scale"))
@texture_scale.setter
def texture_scale(self, scale):
"""
Args:
scale (2-array): this material's applied (x,y) texture_scale
"""
self.set_input(inp="texture_scale", val=lazy.pxr.Gf.Vec2f(*np.array(scale, dtype=float)))
@property
def detail_texture_translate(self):
"""
Returns:
2-array: this material's applied detail_texture_translate
"""
return np.array(self.get_input(inp="detail_texture_translate"))
@detail_texture_translate.setter
def detail_texture_translate(self, translate):
"""
Args:
translate (2-array): this material's applied detail_texture_translate
"""
self.set_input(inp="detail_texture_translate", val=lazy.pxr.Gf.Vec2f(*np.array(translate, dtype=float)))
@property
def detail_texture_rotate(self):
"""
Returns:
float: this material's applied detail_texture_rotate
"""
return self.get_input(inp="detail_texture_rotate")
@detail_texture_rotate.setter
def detail_texture_rotate(self, rotate):
"""
Args:
rotate (float): this material's applied detail_texture_rotate
"""
self.set_input(inp="detail_texture_rotate", val=rotate)
@property
def detail_texture_scale(self):
"""
Returns:
2-array: this material's applied detail_texture_scale
"""
return np.array(self.get_input(inp="detail_texture_scale"))
@detail_texture_scale.setter
def detail_texture_scale(self, scale):
"""
Args:
scale (2-array): this material's applied detail_texture_scale
"""
self.set_input(inp="detail_texture_scale", val=lazy.pxr.Gf.Vec2f(*np.array(scale, dtype=float)))
@property
def exclude_from_white_mode(self):
"""
Returns:
bool: this material's applied excludeFromWhiteMode
"""
return self.get_input(inp="excludeFromWhiteMode")
@exclude_from_white_mode.setter
def exclude_from_white_mode(self, exclude):
"""
Args:
exclude (bool): this material's applied excludeFromWhiteMode
"""
self.set_input(inp="excludeFromWhiteMode", val=exclude)
@property
def diffuse_reflection_weight(self):
"""
Returns:
float: this material's applied diffuse_reflection_weight
"""
return self.get_input(inp="diffuse_reflection_weight")
@diffuse_reflection_weight.setter
def diffuse_reflection_weight(self, weight):
"""
Args:
weight (float): this material's applied diffuse_reflection_weight
"""
self.set_input(inp="diffuse_reflection_weight", val=weight)
@property
def enable_specular_transmission(self):
"""
Returns:
bool: this material's applied enable_specular_transmission
"""
return self.get_input(inp="enable_specular_transmission")
@enable_specular_transmission.setter
def enable_specular_transmission(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_specular_transmission
"""
self.set_input(inp="enable_specular_transmission", val=enabled)
@property
def specular_transmission_weight(self):
"""
Returns:
float: this material's applied specular_transmission_weight
"""
return self.get_input(inp="specular_transmission_weight")
@specular_transmission_weight.setter
def specular_transmission_weight(self, weight):
"""
Args:
weight (float): this material's applied specular_transmission_weight
"""
self.set_input(inp="specular_transmission_weight", val=weight)
@property
def diffuse_reflection_color(self):
"""
Returns:
3-array: this material's diffuse_reflection_color in (R,G,B)
"""
return np.array(self.get_input(inp="diffuse_reflection_color"))
@diffuse_reflection_color.setter
def diffuse_reflection_color(self, color):
"""
Args:
color (3-array): this material's diffuse_reflection_color in (R,G,B)
"""
self.set_input(inp="diffuse_reflection_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def specular_reflection_color(self):
"""
Returns:
3-array: this material's specular_reflection_color in (R,G,B)
"""
return np.array(self.get_input(inp="specular_reflection_color"))
@specular_reflection_color.setter
def specular_reflection_color(self, color):
"""
Args:
color (3-array): this material's specular_reflection_color in (R,G,B)
"""
self.set_input(inp="specular_reflection_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def specular_transmission_color(self):
"""
Returns:
3-array: this material's specular_transmission_color in (R,G,B)
"""
return np.array(self.get_input(inp="specular_transmission_color"))
@specular_transmission_color.setter
def specular_transmission_color(self, color):
"""
Args:
color (3-array): this material's specular_transmission_color in (R,G,B)
"""
self.set_input(inp="specular_transmission_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def specular_transmission_scattering_color(self):
"""
Returns:
3-array: this material's specular_transmission_scattering_color in (R,G,B)
"""
return np.array(self.get_input(inp="specular_transmission_scattering_color"))
@specular_transmission_scattering_color.setter
def specular_transmission_scattering_color(self, color):
"""
Args:
color (3-array): this material's specular_transmission_scattering_color in (R,G,B)
"""
self.set_input(inp="specular_transmission_scattering_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def specular_reflection_ior_preset(self):
"""
Returns:
int: this material's specular_reflection_ior_preset (int corresponding to enum)
"""
return self.get_input(inp="specular_reflection_ior_preset")
@specular_reflection_ior_preset.setter
def specular_reflection_ior_preset(self, preset):
"""
Args:
preset (int): this material's specular_reflection_ior_preset (int corresponding to enum)
"""
self.set_input(inp="specular_reflection_ior_preset", val=preset)
@property
def enable_diffuse_transmission(self):
"""
Returns:
float: this material's applied enable_diffuse_transmission
"""
return self.get_input(inp="enable_diffuse_transmission")
@enable_diffuse_transmission.setter
def enable_diffuse_transmission(self, val):
"""
Args:
val (bool): this material's applied enable_diffuse_transmission
"""
self.set_input(inp="enable_diffuse_transmission", val=val)
@property
def glass_color(self):
"""
Returns:
3-array: this material's applied (R,G,B) glass color (only applicable to OmniGlass materials)
"""
assert self.is_glass, f"Tried to query glass_color shader input, " \
f"but material at {self.prim_path} is not an OmniGlass material!"
return np.array(self.get_input(inp="glass_color"))
@glass_color.setter
def glass_color(self, color):
"""
Args:
color (3-array): this material's applied (R,G,B) glass color (only applicable to OmniGlass materials)
"""
assert self.is_glass, f"Tried to set glass_color shader input, " \
f"but material at {self.prim_path} is not an OmniGlass material!"
self.set_input(inp="glass_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
# StanfordVL/OmniGibson/omnigibson/prims/__init__.py
from omnigibson.prims.prim_base import BasePrim
from omnigibson.prims.cloth_prim import ClothPrim
from omnigibson.prims.entity_prim import EntityPrim
from omnigibson.prims.geom_prim import GeomPrim, VisualGeomPrim, CollisionGeomPrim, CollisionVisualGeomPrim
from omnigibson.prims.joint_prim import JointPrim
from omnigibson.prims.rigid_prim import RigidPrim
from omnigibson.prims.xform_prim import XFormPrim
# StanfordVL/OmniGibson/omnigibson/prims/prim_base.py
from abc import ABC, abstractmethod
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.utils.python_utils import Serializable, UniquelyNamed, Recreatable
from omnigibson.utils.sim_utils import check_deletable_prim
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
class BasePrim(Serializable, UniquelyNamed, Recreatable, ABC):
"""
Provides high level functions to deal with a basic prim and its attributes/ properties.
If there is an Xform prim present at the path, it will use it. Otherwise, a new XForm prim at
the specified prim path will be created.
Note: the prim will have "xformOp:orient", "xformOp:translate" and "xformOp:scale" only post init,
unless it is a non-root articulation link.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
@prim_path -- it will be ignored if it already exists. Subclasses should define the exact keys expected
for their class.
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
self._prim_path = prim_path
self._name = name
self._load_config = dict() if load_config is None else load_config
# Other values that will be filled in at runtime
self._applied_visual_material = None
self._loaded = False # Whether this prim exists in the stage or not
self._initialized = False # Whether this prim has its internal handles / info initialized or not (occurs AFTER and INDEPENDENTLY from loading!)
self._prim = None
self._state_size = None
self._n_duplicates = 0 # Simple counter for keeping track of duplicates for unique name indexing
# Run super init
super().__init__()
# Run some post-loading steps if this prim has already been loaded
if lazy.omni.isaac.core.utils.prims.is_prim_path_valid(prim_path=self._prim_path):
log.debug(f"prim {name} already exists, skipping load")
self._prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path=self._prim_path)
self._loaded = True
# Run post load.
self._post_load()
def _initialize(self):
"""
Initializes state of this object and sets up any references necessary post-loading. Should be implemented by
sub-class for extended utility
"""
pass
def initialize(self):
"""
Initializes state of this object and sets up any references necessary post-loading. Subclasses should
implement / extend the _initialize() method.
"""
assert not self._initialized, \
f"Prim {self.name} at prim_path {self._prim_path} can only be initialized once! (It is already initialized)"
self._initialize()
# Cache state size
self._state_size = len(self.dump_state(serialized=True))
self._initialized = True
def load(self):
"""
Load this prim into omniverse, and return loaded prim reference.
Returns:
Usd.Prim: Prim object loaded into the simulator
"""
if self._loaded:
raise ValueError(f"Cannot load prim {self.name} multiple times.")
# Load prim
self._prim = self._load()
self._loaded = True
# Run any post-loading logic
self._post_load()
return self._prim
def _post_load(self):
"""
Any actions that should be taken (e.g.: modifying the object's properties such as scale, visibility, additional
joints, etc.) that should be taken after loading the raw object into omniverse but BEFORE we initialize the
object and grab its handles and internal references. By default, this is a no-op.
"""
pass
def remove(self):
"""
Removes this prim from omniverse stage.
"""
if not self._loaded:
raise ValueError("Cannot remove a prim that was never loaded.")
# Remove prim if it can be deleted
if check_deletable_prim(self.prim_path):
lazy.omni.isaac.core.utils.prims.delete_prim(self.prim_path)
# Also clear the name so we can reuse this later
self.remove_names()
def _load(self):
"""
Loads the raw prim into the simulator. Any post-processing should be done in @self._post_load()
"""
raise NotImplementedError()
@property
def loaded(self):
return self._loaded
@property
def initialized(self):
return self._initialized
@property
def state_size(self):
# This is the cached value
return self._state_size
@property
def prim_path(self):
"""
Returns:
str: prim path in the stage.
"""
return self._prim_path
@property
def name(self):
"""
Returns:
str: unique name assigned to this prim
"""
return self._name
@property
def prim(self):
"""
Returns:
Usd.Prim: USD Prim object that this object holds.
"""
return self._prim
@property
def property_names(self):
"""
Returns:
set of str: Set of property names that this prim has (e.g.: visibility, proxyPrim, etc.)
"""
return set(self._prim.GetPropertyNames())
@property
def visible(self):
"""
Returns:
bool: true if the prim is visible in stage. false otherwise.
"""
return lazy.pxr.UsdGeom.Imageable(self.prim).ComputeVisibility(lazy.pxr.Usd.TimeCode.Default()) != lazy.pxr.UsdGeom.Tokens.invisible
@visible.setter
def visible(self, visible):
"""
Sets the visibility of the prim in stage.
Args:
visible (bool): flag to set the visibility of the usd prim in stage.
"""
imageable = lazy.pxr.UsdGeom.Imageable(self.prim)
if visible:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
return
def is_valid(self):
"""
Returns:
bool: True is the current prim path corresponds to a valid prim in stage. False otherwise.
"""
return lazy.omni.isaac.core.utils.prims.is_prim_path_valid(self.prim_path)
def change_prim_path(self, new_prim_path):
"""
Moves prim from the old path to a new one.
Args:
new_prim_path (str): new path of the prim to be moved to.
"""
lazy.omni.isaac.core.utils.prims.move_prim(path_from=self.prim_path, path_to=new_prim_path)
self._prim_path = new_prim_path
self._prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(self._prim_path)
return
def get_attribute(self, attr):
"""
Get this prim's attribute. Should be a valid attribute under self._prim.GetAttributes()
Returns:
            any: value of the requested @attr
"""
return self._prim.GetAttribute(attr).Get()
def set_attribute(self, attr, val):
"""
Set this prim's attribute. Should be a valid attribute under self._prim.GetAttributes()
Args:
attr (str): Attribute to set
val (any): Value to set for the attribute. This should be the valid type for that attribute.
"""
self._prim.GetAttribute(attr).Set(val)
def get_property(self, prop):
"""
        Gets the value of property @prop
Args:
prop (str): Name of the property to get. See Raw USD Properties in the GUI for examples of property names
Returns:
any: Property value
"""
        return self._prim.GetProperty(prop).Get()
def set_property(self, prop, val):
"""
Sets property @prop with value @val
Args:
prop (str): Name of the property to set. See Raw USD Properties in the GUI for examples of property names
val (any): Value to set for the property. Should be valid for that property
"""
self._prim.GetProperty(prop).Set(val)
def get_custom_data(self):
"""
Get custom data associated with this prim
Returns:
dict: Dictionary of any custom information
"""
return self._prim.GetCustomData()
def _create_prim_with_same_kwargs(self, prim_path, name, load_config):
"""
Generates a new instance of this prim's class with specified @prim_path, @name, and @load_config, but otherwise
all other kwargs should be identical to this instance's values.
Args:
prim_path (str): Absolute path to the newly generated prim
name (str): Name for the newly created prim
load_config (dict): Keyword-mapped kwargs to use to set specific attributes for the created prim's instance
Returns:
BasePrim: Generated prim object (not loaded, and not initialized!)
"""
return self.__class__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def duplicate(self, prim_path):
"""
Duplicates this object, and generates a new instance at @prim_path.
Note that the created object is automatically loaded into the simulator, but is NOT initialized
until a sim step occurs!
Args:
prim_path (str): Absolute path to the newly generated prim
Returns:
BasePrim: Generated prim object
"""
new_prim = self._create_prim_with_same_kwargs(
prim_path=prim_path,
name=f"{self.name}_copy{self._n_duplicates}",
load_config=self._load_config,
)
og.sim.import_object(new_prim, register=False)
# Increment duplicate count
self._n_duplicates += 1
# Set visibility
new_prim.visible = self.visible
return new_prim
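    # A hedged usage sketch for duplicate() (prim paths are hypothetical): the copy is imported into the
    # simulator immediately but, as noted above, is not initialized until a sim step occurs. The internal
    # counter keeps generated names unique:
    #
    #   copy_one = prim.duplicate(prim_path="/World/my_prim_copy0")  # named f"{prim.name}_copy0"
    #   copy_two = prim.duplicate(prim_path="/World/my_prim_copy1")  # counter increments -> "_copy1"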
StanfordVL/OmniGibson/omnigibson/prims/geom_prim.py
from functools import cached_property
import numpy as np
import trimesh
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.utils.usd_utils import PoseAPI, mesh_prim_shape_to_trimesh_mesh
import omnigibson.utils.transform_utils as T
class GeomPrim(XFormPrim):
"""
Provides high level functions to deal with a geom prim and its attributes / properties.
    If there is a geom prim present at the path, it will use it. By default, a geom prim cannot be directly
    created from scratch.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
        load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
            loading this prim at runtime.
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Run super method
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def _load(self):
# This should not be called, because this prim cannot be instantiated from scratch!
raise NotImplementedError("By default, a geom prim cannot be created from scratch.")
def _post_load(self):
# run super first
super()._post_load()
# By default, GeomPrim shows up in the rendering.
self.purpose = "default"
def duplicate(self, prim_path):
# Cannot directly duplicate a mesh prim
raise NotImplementedError("Cannot directly duplicate a geom prim!")
@property
def purpose(self):
"""
Returns:
str: the purpose used for this geom, one of {"default", "render", "proxy", "guide"}
"""
return self.get_attribute("purpose")
@purpose.setter
def purpose(self, purpose):
"""
Sets the purpose of this geom
Args:
purpose (str): the purpose used for this geom, one of {"default", "render", "proxy", "guide"}
"""
self.set_attribute("purpose", purpose)
@property
def color(self):
"""
Returns:
None or 3-array: If set, the default RGB color used for this visual geom
"""
if self.has_material():
return self.material.diffuse_color_constant
else:
color = self.get_attribute("primvars:displayColor")
return None if color is None else np.array(color)[0]
@color.setter
def color(self, rgb):
"""
Sets the RGB color of this visual mesh
Args:
            rgb (3-array): The default RGB color used for this visual geom
"""
if self.has_material():
self.material.diffuse_color_constant = rgb
else:
self.set_attribute("primvars:displayColor", np.array(rgb))
@property
def opacity(self):
"""
Returns:
None or float: If set, the default opacity used for this visual geom
"""
if self.has_material():
return self.material.opacity_constant
else:
opacity = self.get_attribute("primvars:displayOpacity")
return None if opacity is None else np.array(opacity)[0]
@opacity.setter
def opacity(self, opacity):
"""
Sets the opacity of this visual mesh
Args:
            opacity (float): The default opacity used for this visual geom
"""
if self.has_material():
self.material.opacity_constant = opacity
else:
self.set_attribute("primvars:displayOpacity", np.array([opacity]))
@property
def points(self):
"""
Returns:
            np.ndarray: Local positions of all points
"""
mesh = self.prim
mesh_type = mesh.GetPrimTypeInfo().GetTypeName()
if mesh_type == "Mesh":
# If the geom is a mesh we can directly return its points.
return np.array(self.prim.GetAttribute("points").Get())
else:
# Return the vertices of the trimesh
return np.array(mesh_prim_shape_to_trimesh_mesh(mesh).vertices)
@property
def points_in_parent_frame(self):
points = self.points
if points is None:
return None
position, orientation = self.get_local_pose()
scale = self.scale
points_scaled = points * scale
points_rotated = np.dot(T.quat2mat(orientation), points_scaled.T).T
points_transformed = points_rotated + position
return points_transformed
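    # points_in_parent_frame applies the usual scale-then-rotate-then-translate composition: for each local
    # point p, the parent-frame point is R @ (s * p) + t, where (t, R) come from get_local_pose() and s is
    # the per-axis scale. A tiny numeric sanity check (hypothetical values):
    #
    #   p = np.array([1.0, 0.0, 0.0])
    #   s, t = np.array([2.0, 2.0, 2.0]), np.array([0.0, 0.0, 1.0])
    #   R = T.quat2mat(np.array([0.0, 0.0, 0.0, 1.0]))  # identity rotation, (x,y,z,w) convention
    #   assert np.allclose(R @ (p * s) + t, [2.0, 0.0, 1.0])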
@property
def aabb(self):
world_pose_w_scale = PoseAPI.get_world_pose_with_scale(self.prim_path)
# transform self.points into world frame
points = self.points
points_homogeneous = np.hstack((points, np.ones((points.shape[0], 1))))
points_transformed = (points_homogeneous @ world_pose_w_scale.T)[:,:3]
aabb_lo = np.min(points_transformed, axis=0)
aabb_hi = np.max(points_transformed, axis=0)
return aabb_lo, aabb_hi
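    # The AABB above uses homogeneous coordinates so a single matrix multiply handles rotation, translation,
    # and scale at once: augmenting each point to (x, y, z, 1) and right-multiplying by the transpose of the
    # 4x4 scaled world transform is equivalent to M @ p_h per point. A sketch of the same computation for an
    # arbitrary (N, 3) array `points` and 4x4 matrix `M` (assumed row-major, points in rows):
    #
    #   points_h = np.hstack((points, np.ones((len(points), 1))))  # (N, 4)
    #   world_points = (points_h @ M.T)[:, :3]                     # (N, 3)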
@property
def aabb_extent(self):
"""
Bounding box extent of this geom prim
Returns:
3-array: (x,y,z) bounding box
"""
min_corner, max_corner = self.aabb
return max_corner - min_corner
@property
def aabb_center(self):
"""
Bounding box center of this geom prim
Returns:
3-array: (x,y,z) bounding box center
"""
min_corner, max_corner = self.aabb
return (max_corner + min_corner) / 2.0
@cached_property
def extent(self):
"""
Returns:
np.ndarray: The unscaled 3d extent of the mesh in its local frame.
"""
points = self.points
return np.max(points, axis=0) - np.min(points, axis=0)
class CollisionGeomPrim(GeomPrim):
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Store values created at runtime
self._collision_api = None
self._mesh_collision_api = None
self._physx_collision_api = None
self._applied_physics_material = None
# Run super method
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def _post_load(self):
# run super first
super()._post_load()
# By default, CollisionGeomPrim does not show up in the rendering.
self.purpose = "guide"
# Create API references
self._collision_api = lazy.pxr.UsdPhysics.CollisionAPI(self._prim) if \
self._prim.HasAPI(lazy.pxr.UsdPhysics.CollisionAPI) else lazy.pxr.UsdPhysics.CollisionAPI.Apply(self._prim)
self._physx_collision_api = lazy.pxr.PhysxSchema.PhysxCollisionAPI(self._prim) if \
self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxCollisionAPI) else lazy.pxr.PhysxSchema.PhysxCollisionAPI.Apply(self._prim)
# Optionally add mesh collision API if this is a mesh
if self._prim.GetPrimTypeInfo().GetTypeName() == "Mesh":
self._mesh_collision_api = lazy.pxr.UsdPhysics.MeshCollisionAPI(self._prim) if \
self._prim.HasAPI(lazy.pxr.UsdPhysics.MeshCollisionAPI) else lazy.pxr.UsdPhysics.MeshCollisionAPI.Apply(self._prim)
# Set the approximation to be convex hull by default
self.set_collision_approximation(approximation_type="convexHull")
@property
def collision_enabled(self):
"""
Returns:
bool: Whether collisions are enabled for this collision mesh
"""
return self.get_attribute("physics:collisionEnabled")
@collision_enabled.setter
def collision_enabled(self, enabled):
"""
Sets whether collisions are enabled for this mesh
Args:
enabled (bool): Whether collisions should be enabled for this mesh
"""
        # Currently, trying to toggle collisions while the simulator is playing with GPU dynamics enabled
        # results in a crash, so we assert that the sim is stopped here
if self._initialized and gm.USE_GPU_DYNAMICS:
assert og.sim.is_stopped(), "Cannot toggle collisions while using GPU dynamics unless simulator is stopped!"
self.set_attribute("physics:collisionEnabled", enabled)
# TODO: Maybe this should all be added to RigidPrim instead?
def set_contact_offset(self, offset):
"""
Args:
            offset (float): Contact offset of a collision shape. Allowed range [maximum(0, rest_offset), 0].
                Default value is -inf, which means the default is picked by the simulation based on the shape extent.
"""
self._physx_collision_api.GetContactOffsetAttr().Set(offset)
return
def get_contact_offset(self):
"""
Returns:
float: contact offset of the collision shape.
"""
return self._physx_collision_api.GetContactOffsetAttr().Get()
def set_rest_offset(self, offset):
"""
Args:
            offset (float): Rest offset of a collision shape. Allowed range [-max_float, contact_offset].
                Default value is -inf, which means the default is picked by the simulation. For rigid bodies it is zero.
"""
self._physx_collision_api.GetRestOffsetAttr().Set(offset)
return
def get_rest_offset(self):
"""
Returns:
float: rest offset of the collision shape.
"""
return self._physx_collision_api.GetRestOffsetAttr().Get()
def set_torsional_patch_radius(self, radius):
"""
Args:
radius (float): radius of the contact patch used to apply torsional friction. Allowed range [0, max_float].
"""
self._physx_collision_api.GetTorsionalPatchRadiusAttr().Set(radius)
return
def get_torsional_patch_radius(self):
"""
Returns:
float: radius of the contact patch used to apply torsional friction. Allowed range [0, max_float].
"""
return self._physx_collision_api.GetTorsionalPatchRadiusAttr().Get()
def set_min_torsional_patch_radius(self, radius):
"""
Args:
radius (float): minimum radius of the contact patch used to apply torsional friction. Allowed range [0, max_float].
"""
self._physx_collision_api.GetMinTorsionalPatchRadiusAttr().Set(radius)
return
def get_min_torsional_patch_radius(self):
"""
Returns:
float: minimum radius of the contact patch used to apply torsional friction. Allowed range [0, max_float].
"""
return self._physx_collision_api.GetMinTorsionalPatchRadiusAttr().Get()
def set_collision_approximation(self, approximation_type):
"""
Args:
approximation_type (str): approximation used for collision.
Can be one of: {"none", "convexHull", "convexDecomposition", "meshSimplification", "sdf",
"boundingSphere", "boundingCube"}
If None, the approximation will use the underlying triangle mesh.
"""
assert self._mesh_collision_api is not None, "collision_approximation only applicable for meshes!"
assert_valid_key(
key=approximation_type,
valid_keys={"none", "convexHull", "convexDecomposition", "meshSimplification", "sdf", "boundingSphere", "boundingCube"},
name="collision approximation type",
)
# Make sure to add the appropriate API if we're setting certain values
if approximation_type == "convexHull" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxConvexHullCollisionAPI):
lazy.pxr.PhysxSchema.PhysxConvexHullCollisionAPI.Apply(self._prim)
elif approximation_type == "convexDecomposition" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxConvexDecompositionCollisionAPI):
lazy.pxr.PhysxSchema.PhysxConvexDecompositionCollisionAPI.Apply(self._prim)
elif approximation_type == "meshSimplification" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI):
lazy.pxr.PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI.Apply(self._prim)
elif approximation_type == "sdf" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxSDFMeshCollisionAPI):
lazy.pxr.PhysxSchema.PhysxSDFMeshCollisionAPI.Apply(self._prim)
elif approximation_type == "none" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxTriangleMeshCollisionAPI):
lazy.pxr.PhysxSchema.PhysxTriangleMeshCollisionAPI.Apply(self._prim)
if approximation_type == "convexHull":
pch_api = lazy.pxr.PhysxSchema.PhysxConvexHullCollisionAPI(self._prim)
# Also make sure the maximum vertex count is 60 (max number compatible with GPU)
# https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics/rigid-bodies.html#collision-settings
if pch_api.GetHullVertexLimitAttr().Get() is None:
pch_api.CreateHullVertexLimitAttr()
pch_api.GetHullVertexLimitAttr().Set(60)
self._mesh_collision_api.GetApproximationAttr().Set(approximation_type)
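    # A hedged usage sketch for the collision-approximation API above (prim path and name are hypothetical):
    #
    #   mesh = CollisionGeomPrim(prim_path="/World/obj/collisions/mesh_0", name="obj:collision_mesh_0")
    #   mesh.set_collision_approximation("convexDecomposition")  # applies the matching PhysX schema API
    #   assert mesh.get_collision_approximation() == "convexDecomposition"
    #
    # Note that "convexHull" additionally clamps the hull vertex limit to 60 for GPU compatibility, per the
    # Omniverse rigid-body collision settings documentation linked above.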
def get_collision_approximation(self):
"""
Returns:
            str: approximation used for collision, one of: {"none", "convexHull", "convexDecomposition",
                "meshSimplification", "sdf", "boundingSphere", "boundingCube"}
"""
assert self._mesh_collision_api is not None, "collision_approximation only applicable for meshes!"
return self._mesh_collision_api.GetApproximationAttr().Get()
def apply_physics_material(self, physics_material, weaker_than_descendants=False):
"""
Used to apply physics material to the held prim and optionally its descendants.
Args:
            physics_material (PhysicsMaterial): physics material to be applied to the held prim. This is where
                                                you define friction, restitution, etc. Note: if a physics material
                                                is not defined, the defaults will be used from PhysX.
weaker_than_descendants (bool, optional): True if the material shouldn't override the descendants
materials, otherwise False. Defaults to False.
"""
if self._binding_api is None:
if self._prim.HasAPI(lazy.pxr.UsdShade.MaterialBindingAPI):
self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI(self.prim)
else:
self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI.Apply(self.prim)
if weaker_than_descendants:
self._binding_api.Bind(
physics_material.material,
bindingStrength=lazy.pxr.UsdShade.Tokens.weakerThanDescendants,
materialPurpose="physics",
)
else:
self._binding_api.Bind(
physics_material.material,
bindingStrength=lazy.pxr.UsdShade.Tokens.strongerThanDescendants,
materialPurpose="physics",
)
self._applied_physics_material = physics_material
return
def get_applied_physics_material(self):
"""
        Returns the currently applied physics material, whether it was applied via apply_physics_material or
        authored directly in USD.
Returns:
PhysicsMaterial: the current applied physics material.
"""
if self._binding_api is None:
if self._prim.HasAPI(lazy.pxr.UsdShade.MaterialBindingAPI):
self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI(self.prim)
else:
self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI.Apply(self.prim)
if self._applied_physics_material is not None:
return self._applied_physics_material
else:
physics_binding = self._binding_api.GetDirectBinding(materialPurpose="physics")
path = physics_binding.GetMaterialPath()
if path == "":
return None
else:
self._applied_physics_material = lazy.omni.isaac.core.materials.PhysicsMaterial(prim_path=path)
return self._applied_physics_material
class VisualGeomPrim(GeomPrim):
pass
class CollisionVisualGeomPrim(CollisionGeomPrim, VisualGeomPrim):
def _post_load(self):
# run super first
super()._post_load()
# The purpose should be default, not guide as set by CollisionGeomPrim
# this is to make sure the geom is visualized, even though it's also collidable
self.purpose = "default"
StanfordVL/OmniGibson/omnigibson/prims/rigid_prim.py
from functools import cached_property
from scipy.spatial import ConvexHull, QhullError
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm, create_module_macros
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.prims.geom_prim import CollisionGeomPrim, VisualGeomPrim
from omnigibson.utils.constants import GEOM_TYPES
from omnigibson.utils.sim_utils import CsRawData
from omnigibson.utils.usd_utils import PoseAPI, get_mesh_volume_and_com, check_extent_radius_ratio
import omnigibson.utils.transform_utils as T
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEFAULT_CONTACT_OFFSET = 0.001
m.DEFAULT_REST_OFFSET = 0.0
class RigidPrim(XFormPrim):
"""
Provides high level functions to deal with a rigid body prim and its attributes/ properties.
    If there is a prim present at the path, it will use it. Otherwise, a new XForm prim at
    the specified prim path will be created.
    Notes: if the prim does not already have a rigid body API applied to it before it is loaded,
        it will apply it.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
            @prim_path -- it will be ignored if it already exists. For this rigid prim, the below values can be
specified:
scale (None or float or 3-array): If specified, sets the scale for this object. A single number corresponds
to uniform scaling along the x,y,z axes, whereas a 3-array specifies per-axis scaling.
mass (None or float): If specified, mass of this body in kg
density (None or float): If specified, density of this body in kg / m^3
            visual_only (None or bool): If specified, whether this prim should be visual-only, i.e.: not
                subject to collisions or gravity. Defaults to False (collisions and gravity enabled).
kinematic_only (None or bool): If specified, whether this prim should be kinematic-only or not.
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Other values that will be filled in at runtime
self._rigid_prim_view_direct = None
self._cs = None # Contact sensor interface
self._body_name = None
self._visual_only = None
self._collision_meshes = None
self._visual_meshes = None
        # Caches for kinematic-only objects
        # These exist because RigidPrimView reads poses through USD, which is very slow
self._kinematic_world_pose_cache = None
self._kinematic_local_pose_cache = None
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def _post_load(self):
# Create the view
# Import now to avoid too-eager load of Omni classes due to inheritance
from omnigibson.utils.deprecated_utils import RigidPrimView
self._rigid_prim_view_direct = RigidPrimView(self._prim_path)
# Set it to be kinematic if necessary
kinematic_only = "kinematic_only" in self._load_config and self._load_config["kinematic_only"]
self.set_attribute("physics:kinematicEnabled", kinematic_only)
self.set_attribute("physics:rigidBodyEnabled", not kinematic_only)
# run super first
super()._post_load()
# Apply rigid body and mass APIs
if not self._prim.HasAPI(lazy.pxr.UsdPhysics.RigidBodyAPI):
lazy.pxr.UsdPhysics.RigidBodyAPI.Apply(self._prim)
if not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxRigidBodyAPI):
lazy.pxr.PhysxSchema.PhysxRigidBodyAPI.Apply(self._prim)
if not self._prim.HasAPI(lazy.pxr.UsdPhysics.MassAPI):
lazy.pxr.UsdPhysics.MassAPI.Apply(self._prim)
        # Determine the visual-only flag up front; self._visual_only has not been set yet at this point, so
        # checking the attribute directly would always (incorrectly) enable contact reporting
        visual_only = self._load_config["visual_only"] if \
            "visual_only" in self._load_config and self._load_config["visual_only"] is not None else False
        # Only create contact report api if we're not visual only
        if not visual_only:
            lazy.pxr.PhysxSchema.PhysxContactReportAPI(self._prim) if \
                self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxContactReportAPI) else \
                lazy.pxr.PhysxSchema.PhysxContactReportAPI.Apply(self._prim)
# Store references to owned visual / collision meshes
# We iterate over all children of this object's prim,
# and grab any that are presumed to be meshes
self.update_meshes()
# Possibly set the mass / density
if not self.has_collision_meshes:
# A meta (virtual) link has no collision meshes; set a negligible mass and a zero density (ignored)
self.mass = 1e-6
self.density = 0.0
elif "mass" in self._load_config and self._load_config["mass"] is not None:
self.mass = self._load_config["mass"]
if "density" in self._load_config and self._load_config["density"] is not None:
self.density = self._load_config["density"]
        # Set the visual-only attribute via the property setter
        # This automatically handles setting collisions / gravity appropriately
        self.visual_only = visual_only
# Create contact sensor
self._cs = lazy.omni.isaac.sensor._sensor.acquire_contact_sensor_interface()
# self._create_contact_sensor()
def _initialize(self):
# Run super method first
super()._initialize()
# Initialize all owned meshes
for mesh_group in (self._collision_meshes, self._visual_meshes):
for mesh in mesh_group.values():
mesh.initialize()
# Get contact info first
if self.contact_reporting_enabled:
self._cs.get_rigid_body_raw_data(self._prim_path)
# Grab handle to this rigid body and get name
self.update_handles()
self._body_name = self.prim_path.split("/")[-1]
def remove(self):
# First remove the meshes
if self._collision_meshes is not None:
for collision_mesh in self._collision_meshes.values():
collision_mesh.remove()
# Make sure to clean up all pre-existing names for all visual_meshes
if self._visual_meshes is not None:
for visual_mesh in self._visual_meshes.values():
visual_mesh.remove()
# Then self
super().remove()
def update_meshes(self):
"""
Helper function to refresh owned visual and collision meshes. Useful for synchronizing internal data if
additional bodies are added manually
"""
# Make sure to clean up all pre-existing names for all collision_meshes
if self._collision_meshes is not None:
for collision_mesh in self._collision_meshes.values():
collision_mesh.remove_names()
# Make sure to clean up all pre-existing names for all visual_meshes
if self._visual_meshes is not None:
for visual_mesh in self._visual_meshes.values():
visual_mesh.remove_names()
self._collision_meshes, self._visual_meshes = dict(), dict()
prims_to_check = []
coms, vols = [], []
for prim in self._prim.GetChildren():
prims_to_check.append(prim)
for child in prim.GetChildren():
prims_to_check.append(child)
for prim in prims_to_check:
mesh_type = prim.GetPrimTypeInfo().GetTypeName()
if mesh_type in GEOM_TYPES:
mesh_name, mesh_path = prim.GetName(), prim.GetPrimPath().__str__()
mesh_prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path=mesh_path)
is_collision = mesh_prim.HasAPI(lazy.pxr.UsdPhysics.CollisionAPI)
mesh_kwargs = {"prim_path": mesh_path, "name": f"{self._name}:{'collision' if is_collision else 'visual'}_{mesh_name}"}
if is_collision:
mesh = CollisionGeomPrim(**mesh_kwargs)
# We also modify the collision mesh's contact and rest offsets, since omni's default values result
# in lightweight objects sometimes not triggering contacts correctly
mesh.set_contact_offset(m.DEFAULT_CONTACT_OFFSET)
mesh.set_rest_offset(m.DEFAULT_REST_OFFSET)
self._collision_meshes[mesh_name] = mesh
volume, com = get_mesh_volume_and_com(mesh_prim)
# We need to transform the volume and CoM from the mesh's local frame to the link's local frame
local_pos, local_orn = mesh.get_local_pose()
                    vols.append(volume * np.prod(mesh.scale))
coms.append(T.quat2mat(local_orn) @ (com * mesh.scale) + local_pos)
# If the ratio between the max extent and min radius is too large (i.e. shape too oblong), use
# boundingCube approximation for the underlying collision approximation for GPU compatibility
if not check_extent_radius_ratio(mesh_prim):
log.warning(f"Got overly oblong collision mesh: {mesh.name}; use boundingCube approximation")
mesh.set_collision_approximation("boundingCube")
else:
self._visual_meshes[mesh_name] = VisualGeomPrim(**mesh_kwargs)
# If we have any collision meshes, we aggregate their center of mass and volume values to set the center of mass
# for this link
if len(coms) > 0:
com = (np.array(coms) * np.array(vols).reshape(-1, 1)).sum(axis=0) / np.sum(vols)
self.set_attribute("physics:centerOfMass", lazy.pxr.Gf.Vec3f(*com))
def enable_collisions(self):
"""
Enable collisions for this RigidPrim
"""
# Iterate through all owned collision meshes and toggle on their collisions
for col_mesh in self._collision_meshes.values():
col_mesh.collision_enabled = True
def disable_collisions(self):
"""
Disable collisions for this RigidPrim
"""
# Iterate through all owned collision meshes and toggle off their collisions
for col_mesh in self._collision_meshes.values():
col_mesh.collision_enabled = False
def update_handles(self):
"""
Updates all internal handles for this prim, in case they change since initialization
"""
# We only do this for non-kinematic objects, because while the USD APIs for kinematic-only
# and dynamic objects are the same, physx tensor APIs do NOT exist for kinematic-only
# objects, meaning initializing the view actively breaks the view.
if not self.kinematic_only:
self._rigid_prim_view_direct.initialize(og.sim.physics_sim_view)
def contact_list(self):
"""
Get list of all current contacts with this rigid body
Returns:
list of CsRawData: raw contact info for this rigid body
"""
# Make sure we have the ability to grab contacts for this object
contacts = []
if self.contact_reporting_enabled:
raw_data = self._cs.get_rigid_body_raw_data(self._prim_path)
for c in raw_data:
# convert handles to prim paths for comparison
c = [*c] # CsRawData enforces body0 and body1 types to be ints, but we want strings
c[2] = self._cs.decode_body_name(c[2])
c[3] = self._cs.decode_body_name(c[3])
contacts.append(CsRawData(*c))
return contacts
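    # A hedged sketch of consuming contact_list() (object paths are hypothetical): each CsRawData entry
    # carries the two body prim paths after decode_body_name(), so filtering for contact with a specific
    # other body is just:
    #
    #   touching_table = any(
    #       "/World/table" in (c.body0, c.body1)
    #       for c in link.contact_list()
    #   )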
def set_linear_velocity(self, velocity):
"""
Sets the linear velocity of the prim in stage.
Args:
velocity (np.ndarray): linear velocity to set the rigid prim to. Shape (3,).
"""
self._rigid_prim_view.set_linear_velocities(velocity[None, :])
def get_linear_velocity(self):
"""
Returns:
            np.ndarray: current linear velocity of the rigid prim. Shape (3,).
"""
return self._rigid_prim_view.get_linear_velocities()[0]
def set_angular_velocity(self, velocity):
"""
Sets the angular velocity of the prim in stage.
Args:
velocity (np.ndarray): angular velocity to set the rigid prim to. Shape (3,).
"""
self._rigid_prim_view.set_angular_velocities(velocity[None, :])
def get_angular_velocity(self):
"""
Returns:
            np.ndarray: current angular velocity of the rigid prim. Shape (3,).
"""
return self._rigid_prim_view.get_angular_velocities()[0]
def set_position_orientation(self, position=None, orientation=None):
# Invalidate kinematic-only object pose caches when new pose is set
if self.kinematic_only:
self.clear_kinematic_only_cache()
if position is not None:
position = np.asarray(position)[None, :]
if orientation is not None:
assert np.isclose(np.linalg.norm(orientation), 1, atol=1e-3), \
f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
orientation = np.asarray(orientation)[None, [3, 0, 1, 2]]
self._rigid_prim_view.set_world_poses(positions=position, orientations=orientation)
PoseAPI.invalidate()
def get_position_orientation(self):
# Return cached pose if we're kinematic-only
if self.kinematic_only and self._kinematic_world_pose_cache is not None:
return self._kinematic_world_pose_cache
pos, ori = self._rigid_prim_view.get_world_poses()
assert np.isclose(np.linalg.norm(ori), 1, atol=1e-3), \
f"{self.prim_path} orientation {ori} is not a unit quaternion."
pos = pos[0]
ori = ori[0][[1, 2, 3, 0]]
if self.kinematic_only:
self._kinematic_world_pose_cache = (pos, ori)
return pos, ori
def set_local_pose(self, position=None, orientation=None):
# Invalidate kinematic-only object pose caches when new pose is set
if self.kinematic_only:
self.clear_kinematic_only_cache()
if position is not None:
position = np.asarray(position)[None, :]
if orientation is not None:
orientation = np.asarray(orientation)[None, [3, 0, 1, 2]]
self._rigid_prim_view.set_local_poses(position, orientation)
PoseAPI.invalidate()
def get_local_pose(self):
# Return cached pose if we're kinematic-only
if self.kinematic_only and self._kinematic_local_pose_cache is not None:
return self._kinematic_local_pose_cache
positions, orientations = self._rigid_prim_view.get_local_poses()
positions = positions[0]
orientations = orientations[0][[1, 2, 3, 0]]
if self.kinematic_only:
self._kinematic_local_pose_cache = (positions, orientations)
return positions, orientations
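    # The caching pattern above trades bookkeeping for speed: kinematic-only prims must fall back to (slow)
    # USD pose reads, so the first read is cached and every pose setter invalidates the cache via
    # clear_kinematic_only_cache(). The invariant is sketched below (hypothetical prim and pose):
    #
    #   pose_a = prim.get_position_orientation()      # slow USD read, result cached
    #   pose_b = prim.get_position_orientation()      # served from cache
    #   prim.set_position_orientation(position=p_new) # setter clears both pose caches
    #   pose_c = prim.get_position_orientation()      # fresh read reflecting p_new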
@property
def _rigid_prim_view(self):
if self._rigid_prim_view_direct is None:
return None
        # Validate that if physics is running, the view is valid.
if not self.kinematic_only and og.sim.is_playing() and self.initialized:
assert self._rigid_prim_view_direct.is_physics_handle_valid() and \
self._rigid_prim_view_direct._physics_view.check(), \
"Rigid prim view must be valid if physics is running!"
assert not (og.sim.is_playing() and not self._rigid_prim_view_direct.is_valid), \
"Rigid prim view must be valid if physics is running!"
return self._rigid_prim_view_direct
@property
def body_name(self):
"""
Returns:
str: Name of this body
"""
return self._body_name
@property
def collision_meshes(self):
"""
Returns:
dict: Dictionary mapping collision mesh names (str) to mesh prims (CollisionMeshPrim) owned by
this rigid body
"""
return self._collision_meshes
@property
def visual_meshes(self):
"""
Returns:
dict: Dictionary mapping visual mesh names (str) to mesh prims (VisualMeshPrim) owned by
this rigid body
"""
return self._visual_meshes
@property
def visual_only(self):
"""
Returns:
bool: Whether this link is a visual-only link (i.e.: no gravity or collisions applied)
"""
return self._visual_only
@property
def has_collision_meshes(self):
"""
Returns:
bool: Whether this link has any collision mesh
"""
return len(self._collision_meshes) > 0
@visual_only.setter
def visual_only(self, val):
"""
        Sets the visual-only state of this link
Args:
val (bool): Whether this link should be a visual-only link (i.e.: no gravity or collisions applied)
"""
# Set gravity and collisions based on value
if val:
self.disable_collisions()
self.disable_gravity()
else:
self.enable_collisions()
self.enable_gravity()
# Also set the internal value
self._visual_only = val
@property
def volume(self):
"""
        Note: Capsule collision shapes are not currently supported
Returns:
float: total volume of all the collision meshes of the rigid body in m^3.
"""
# TODO (eric): revise this once omni exposes API to query volume of GeomPrims
return sum(get_mesh_volume_and_com(collision_mesh.prim, world_frame=True)[0] for collision_mesh in self._collision_meshes.values())
@volume.setter
def volume(self, volume):
raise NotImplementedError("Cannot set volume directly for an link!")
@property
def mass(self):
"""
Returns:
float: mass of the rigid body in kg.
"""
mass = self._rigid_prim_view.get_masses()[0]
# Fallback to analytical computation of volume * density
if mass == 0:
return self.volume * self.density
return mass
@mass.setter
def mass(self, mass):
"""
Args:
mass (float): mass of the rigid body in kg.
"""
self._rigid_prim_view.set_masses([mass])
@property
def density(self):
"""
Returns:
float: density of the rigid body in kg / m^3.
"""
mass = self._rigid_prim_view.get_masses()[0]
# We first check if the mass is specified, since mass overrides density. If so, density = mass / volume.
# Otherwise, we try to directly grab the raw usd density value, and if that value does not exist,
# we return 1000 since that is the canonical density assigned by omniverse
if mass != 0.0:
density = mass / self.volume
else:
density = self._rigid_prim_view.get_densities()[0]
if density == 0.0:
density = 1000.0
return density
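    # Worked example of the mass / density precedence above (made-up numbers): for a link whose collision
    # meshes total 0.002 m^3, an authored mass of 1.0 kg makes the reported density 1.0 / 0.002 = 500 kg/m^3,
    # regardless of any authored density. With mass unset (0.0) and no authored density, the getter falls
    # back to 1000 kg/m^3, the canonical value omniverse assigns.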
@density.setter
def density(self, density):
"""
Args:
density (float): density of the rigid body in kg / m^3.
"""
self._rigid_prim_view.set_densities([density])
@property
def kinematic_only(self):
"""
Returns:
bool: Whether this object is a kinematic-only object (otherwise, it is a rigid body). A kinematic-only
object is not subject to simulator dynamics, and remains fixed unless the user explicitly sets the
body's pose / velocities. See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics/rigid-bodies.html?highlight=rigid%20body%20enabled#kinematic-rigid-bodies
for more information
"""
return self.get_attribute("physics:kinematicEnabled")
@property
def solver_position_iteration_count(self):
"""
Returns:
int: How many position iterations to take per physics step by the physx solver
"""
return self.get_attribute("physxRigidBody:solverPositionIterationCount")
@solver_position_iteration_count.setter
def solver_position_iteration_count(self, count):
"""
Sets how many position iterations to take per physics step by the physx solver
Args:
count (int): How many position iterations to take per physics step by the physx solver
"""
self.set_attribute("physxRigidBody:solverPositionIterationCount", count)
@property
def solver_velocity_iteration_count(self):
"""
Returns:
int: How many velocity iterations to take per physics step by the physx solver
"""
return self.get_attribute("physxRigidBody:solverVelocityIterationCount")
@solver_velocity_iteration_count.setter
def solver_velocity_iteration_count(self, count):
"""
Sets how many velocity iterations to take per physics step by the physx solver
Args:
count (int): How many velocity iterations to take per physics step by the physx solver
"""
self.set_attribute("physxRigidBody:solverVelocityIterationCount", count)
@property
def stabilization_threshold(self):
"""
Returns:
float: threshold for stabilizing this rigid body
"""
return self.get_attribute("physxRigidBody:stabilizationThreshold")
@stabilization_threshold.setter
def stabilization_threshold(self, threshold):
"""
Sets threshold for stabilizing this rigid body
Args:
threshold (float): stabilizing threshold
"""
self.set_attribute("physxRigidBody:stabilizationThreshold", threshold)
@property
def is_asleep(self):
"""
Returns:
bool: whether this rigid prim is asleep or not
"""
# If we're kinematic only, immediately return False since it doesn't follow the sleep / wake paradigm
return False if self.kinematic_only \
else og.sim.psi.is_sleeping(og.sim.stage_id, lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path))
@property
def sleep_threshold(self):
"""
Returns:
float: threshold for sleeping this rigid body
"""
return self.get_attribute("physxRigidBody:sleepThreshold")
@sleep_threshold.setter
def sleep_threshold(self, threshold):
"""
Sets threshold for sleeping this rigid body
Args:
threshold (float): Sleeping threshold
"""
self.set_attribute("physxRigidBody:sleepThreshold", threshold)
@property
def ccd_enabled(self):
"""
Returns:
bool: whether CCD is enabled or not for this link
"""
return self.get_attribute("physxRigidBody:enableCCD")
@ccd_enabled.setter
def ccd_enabled(self, enabled):
"""
Args:
enabled (bool): whether CCD should be enabled or not for this link
"""
self.set_attribute("physxRigidBody:enableCCD", enabled)
@property
def contact_reporting_enabled(self):
"""
Returns:
bool: Whether contact reporting is enabled for this rigid prim or not
"""
return self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxContactReportAPI)
def _compute_points_on_convex_hull(self, visual):
"""
Returns:
np.ndarray or None: points on the convex hull of all points from child geom prims
"""
meshes = self._visual_meshes if visual else self._collision_meshes
points = []
for mesh in meshes.values():
mesh_points = mesh.points_in_parent_frame
if mesh_points is not None and len(mesh_points) > 0:
points.append(mesh_points)
if not points:
return None
points = np.concatenate(points, axis=0)
try:
hull = ConvexHull(points)
return points[hull.vertices, :]
        except QhullError:
            # Handle the case where a convex hull cannot be formed (e.g., collinear points);
            # fall back to returning all the points in this case
            return points
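    # A minimal sketch of the hull reduction above, assuming scipy is available: ConvexHull.vertices indexes
    # the input array, so the boundary subset is just a fancy-index. For a unit square plus one interior
    # point, only the 4 corners survive:
    #
    #   pts = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0.5, 0.5]], dtype=float)
    #   hull_pts = pts[ConvexHull(pts).vertices, :]   # 4 corner points; [0.5, 0.5] is dropped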
@cached_property
def visual_boundary_points_local(self):
"""
Returns:
np.ndarray: local coords of points on the convex hull of all points from child geom prims
"""
return self._compute_points_on_convex_hull(visual=True)
@property
def visual_boundary_points_world(self):
"""
Returns:
np.ndarray: world coords of points on the convex hull of all points from child geom prims
"""
local_points = self.visual_boundary_points_local
if local_points is None:
return None
return self.transform_local_points_to_world(local_points)
@cached_property
def collision_boundary_points_local(self):
"""
Returns:
np.ndarray: local coords of points on the convex hull of all points from child geom prims
"""
return self._compute_points_on_convex_hull(visual=False)
@property
def collision_boundary_points_world(self):
"""
Returns:
np.ndarray: world coords of points on the convex hull of all points from child geom prims
"""
local_points = self.collision_boundary_points_local
if local_points is None:
return None
return self.transform_local_points_to_world(local_points)
@property
def aabb(self):
position, _ = self.get_position_orientation()
hull_points = self.collision_boundary_points_world
if hull_points is None:
# When there's no points on the collision meshes
return position, position
aabb_lo = np.min(hull_points, axis=0)
aabb_hi = np.max(hull_points, axis=0)
return aabb_lo, aabb_hi
@property
def aabb_extent(self):
"""
Get this xform's actual bounding box extent
Returns:
3-array: (x,y,z) bounding box
"""
min_corner, max_corner = self.aabb
return max_corner - min_corner
@property
def aabb_center(self):
"""
Get this xform's actual bounding box center
Returns:
3-array: (x,y,z) bounding box center
"""
min_corner, max_corner = self.aabb
return (max_corner + min_corner) / 2.0
@property
def visual_aabb(self):
hull_points = self.visual_boundary_points_world
assert hull_points is not None, "No visual boundary points found for this rigid prim"
# Calculate and return the AABB
aabb_lo = np.min(hull_points, axis=0)
aabb_hi = np.max(hull_points, axis=0)
return aabb_lo, aabb_hi
@property
def visual_aabb_extent(self):
"""
Get this xform's actual bounding box extent
Returns:
3-array: (x,y,z) bounding box
"""
min_corner, max_corner = self.visual_aabb
return max_corner - min_corner
@property
def visual_aabb_center(self):
"""
Get this xform's actual bounding box center
Returns:
3-array: (x,y,z) bounding box center
"""
min_corner, max_corner = self.visual_aabb
return (max_corner + min_corner) / 2.0
def enable_gravity(self):
"""
Enables gravity for this rigid body
"""
self._rigid_prim_view.enable_gravities()
def disable_gravity(self):
"""
Disables gravity for this rigid body
"""
self._rigid_prim_view.disable_gravities()
def wake(self):
"""
Enable physics for this rigid body
"""
prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path)
og.sim.psi.wake_up(og.sim.stage_id, prim_id)
def sleep(self):
"""
Disable physics for this rigid body
"""
prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path)
og.sim.psi.put_to_sleep(og.sim.stage_id, prim_id)
def clear_kinematic_only_cache(self):
"""
Clears the internal kinematic only cached pose. Useful if the parent prim's pose
changes without explicitly calling this prim's pose setter
"""
assert self.kinematic_only
self._kinematic_local_pose_cache = None
self._kinematic_world_pose_cache = None
def _dump_state(self):
# Grab pose from super class
state = super()._dump_state()
state["lin_vel"] = self.get_linear_velocity()
state["ang_vel"] = self.get_angular_velocity()
return state
def _load_state(self, state):
# Call super first
super()._load_state(state=state)
# Set velocities if not kinematic
self.set_linear_velocity(np.array(state["lin_vel"]))
self.set_angular_velocity(np.array(state["ang_vel"]))
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
return np.concatenate([
state_flat,
state["lin_vel"],
state["ang_vel"],
]).astype(float)
def _deserialize(self, state):
# Call supermethod first
state_dic, idx = super()._deserialize(state=state)
# We deserialize deterministically by knowing the order of values -- lin_vel, ang_vel
state_dic["lin_vel"] = state[idx: idx+3]
state_dic["ang_vel"] = state[idx + 3: idx + 6]
return state_dic, idx + 6
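    # The (de)serialization contract above is positional: the flat state vector is
    # [ super()'s state ..., lin_vel (3), ang_vel (3) ], so _deserialize must consume exactly the slices,
    # in the order, that _serialize appended. A round-trip sanity check (assuming a loaded, initialized
    # prim named `link`):
    #
    #   flat = link.dump_state(serialized=True)          # ends with lin_vel (3) then ang_vel (3)
    #   state_dict, idx = link._deserialize(state=flat)  # consumes those same 6 trailing floats
    #   assert idx == len(flat)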
StanfordVL/OmniGibson/omnigibson/prims/cloth_prim.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros, gm
from omnigibson.prims.geom_prim import GeomPrim
from omnigibson.systems import get_system
import omnigibson.utils.transform_utils as T
from omnigibson.utils.sim_utils import CsRawData
from omnigibson.utils.usd_utils import array_to_vtarray, mesh_prim_to_trimesh_mesh, sample_mesh_keypoints
from omnigibson.utils.python_utils import classproperty
import omnigibson as og
import numpy as np
from collections.abc import Iterable
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Subsample cloth particle points to boost performance
m.N_CLOTH_KEYPOINTS = 1000
m.KEYPOINT_COVERAGE_THRESHOLD = 0.75
m.N_CLOTH_KEYFACES = 500
class ClothPrim(GeomPrim):
"""
Provides high level functions to deal with a cloth prim and its attributes/ properties.
    If there is a prim present at the path, it will use it. Otherwise, a new XForm prim at
    the specified prim path will be created.
    Notes: if the prim does not already have a cloth API applied to it before it is loaded,
        it will apply it.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
            @prim_path -- it will be ignored if it already exists. For this cloth prim, the below values can be
specified:
scale (None or float or 3-array): If specified, sets the scale for this object. A single number corresponds
to uniform scaling along the x,y,z axes, whereas a 3-array specifies per-axis scaling.
mass (None or float): If specified, mass of this body in kg
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Internal vars stored
self._centroid_idx = None
self._keypoint_idx = None
self._keyface_idx = None
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def _post_load(self):
# run super first
super()._post_load()
# Make sure flatcache is not being used -- if so, raise an error, since we lose most of our needed functionality
# (such as R/W to specific particle states) when flatcache is enabled
assert not gm.ENABLE_FLATCACHE, "Cannot use flatcache with ClothPrim!"
self._mass_api = lazy.pxr.UsdPhysics.MassAPI(self._prim) if self._prim.HasAPI(lazy.pxr.UsdPhysics.MassAPI) else \
lazy.pxr.UsdPhysics.MassAPI.Apply(self._prim)
# Possibly set the mass / density
if "mass" in self._load_config and self._load_config["mass"] is not None:
self.mass = self._load_config["mass"]
# Clothify this prim, which is assumed to be a mesh
ClothPrim.cloth_system.clothify_mesh_prim(mesh_prim=self._prim, remesh=self._load_config.get("remesh", True))
# Track generated particle count
positions = self.compute_particle_positions()
self._n_particles = len(positions)
# Sample mesh keypoints / keyvalues and sanity check the AABB of these subsampled points vs. the actual points
success = False
for i in range(10):
self._keypoint_idx, self._keyface_idx = sample_mesh_keypoints(
mesh_prim=self._prim,
n_keypoints=m.N_CLOTH_KEYPOINTS,
n_keyfaces=m.N_CLOTH_KEYFACES,
seed=i,
)
keypoint_positions = positions[self._keypoint_idx]
keypoint_aabb = keypoint_positions.min(axis=0), keypoint_positions.max(axis=0)
true_aabb = positions.min(axis=0), positions.max(axis=0)
overlap_vol = max(min(true_aabb[1][0], keypoint_aabb[1][0]) - max(true_aabb[0][0], keypoint_aabb[0][0]), 0) * \
max(min(true_aabb[1][1], keypoint_aabb[1][1]) - max(true_aabb[0][1], keypoint_aabb[0][1]), 0) * \
max(min(true_aabb[1][2], keypoint_aabb[1][2]) - max(true_aabb[0][2], keypoint_aabb[0][2]), 0)
            true_vol = np.prod(true_aabb[1] - true_aabb[0])
if true_vol == 0.0 or overlap_vol / true_vol > m.KEYPOINT_COVERAGE_THRESHOLD:
success = True
break
assert success, f"Did not adequately subsample keypoints for cloth {self.name}!"
# Compute centroid particle idx based on AABB
aabb_min, aabb_max = np.min(positions, axis=0), np.max(positions, axis=0)
aabb_center = (aabb_min + aabb_max) / 2.0
dists = np.linalg.norm(positions - aabb_center.reshape(1, 3), axis=-1)
self._centroid_idx = np.argmin(dists)
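    # The keypoint subsampling above accepts a random draw only if the AABB of the sampled keypoints covers
    # enough of the true particle AABB: overlap_vol / true_vol > m.KEYPOINT_COVERAGE_THRESHOLD (0.75). The
    # overlap volume is the product of per-axis interval intersections,
    # max(min(hi_a, hi_b) - max(lo_a, lo_b), 0), which collapses to zero as soon as the two boxes are
    # disjoint along any axis. Up to 10 seeds are tried before the assertion fires.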
def _initialize(self):
super()._initialize()
# TODO (eric): hacky way to get cloth rendering to work (otherwise, there exist some rendering artifacts).
self._prim.CreateAttribute("primvars:isVolume", lazy.pxr.Sdf.ValueTypeNames.Bool, False).Set(True)
self._prim.GetAttribute("primvars:isVolume").Set(False)
# Store the default position of the points in the local frame
self._default_positions = np.array(self.get_attribute(attr="points"))
@property
def visual_aabb(self):
return self.aabb
@property
def visual_aabb_extent(self):
return self.aabb_extent
@property
def visual_aabb_center(self):
return self.aabb_center
@classproperty
def cloth_system(cls):
return get_system("cloth")
@property
def n_particles(self):
"""
Returns:
int: Number of particles owned by this cloth prim
"""
return self._n_particles
@property
def kinematic_only(self):
"""
Returns:
bool: Whether this object is a kinematic-only object. For ClothPrim, always return False.
"""
return False
def compute_particle_positions(self, idxs=None):
"""
Compute individual particle positions for this cloth prim
Args:
idxs (n-array or None): If set, will only calculate the requested indexed particle state
Returns:
np.array: (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
cartesian coordinates relative to the world frame
"""
t, r = self.get_position_orientation()
r = T.quat2mat(r)
s = self.scale
# Don't copy to save compute, since we won't be returning a reference to the underlying object anyways
p_local = np.array(self.get_attribute(attr="points"), copy=False)
p_local = p_local[idxs] if idxs is not None else p_local
p_world = (r @ (p_local * s).T).T + t
return p_world
def set_particle_positions(self, positions, idxs=None):
"""
Sets individual particle positions for this cloth prim
Args:
positions (n-array): (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
cartesian coordinates relative to the world frame
idxs (n-array or None): If set, will only set the requested indexed particle state
"""
n_expected = self._n_particles if idxs is None else len(idxs)
assert len(positions) == n_expected, \
f"Got mismatch in particle setting size: {len(positions)}, vs. number of expected particles {n_expected}!"
r = T.quat2mat(self.get_orientation())
t = self.get_position()
s = self.scale
p_local = (r.T @ (positions - t).T).T / s
# Fill the idxs if requested
if idxs is not None:
p_local_old = np.array(self.get_attribute(attr="points"))
p_local_old[idxs] = p_local
p_local = p_local_old
self.set_attribute(attr="points", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(p_local))
@property
def keypoint_idx(self):
"""
Returns:
n-array: (N,) array specifying the keypoint particle IDs
"""
return self._keypoint_idx
@property
def keyface_idx(self):
"""
Returns:
n-array: (N,) array specifying the keyface IDs
"""
return self._keyface_idx
@property
def faces(self):
"""
Grabs particle indexes defining each of the faces for this cloth prim
Returns:
np.array: (N, 3) numpy array, where each of the N faces are defined by the 3 particle indices
corresponding to that face's vertices
"""
return np.array(self.get_attribute("faceVertexIndices")).reshape(-1, 3)
@property
def keyfaces(self):
"""
Grabs particle indexes defining each of the keyfaces for this cloth prim.
Total number of keyfaces is m.N_CLOTH_KEYFACES
Returns:
np.array: (N, 3) numpy array, where each of the N keyfaces are defined by the 3 particle indices
corresponding to that face's vertices
"""
return self.faces[self._keyface_idx]
@property
def keypoint_particle_positions(self):
"""
Grabs individual keypoint particle positions for this cloth prim.
Total number of keypoints is m.N_CLOTH_KEYPOINTS
Returns:
np.array: (N, 3) numpy array, where each of the N keypoint particles' positions are expressed in (x,y,z)
cartesian coordinates relative to the world frame
"""
return self.compute_particle_positions(idxs=self._keypoint_idx)
@property
def centroid_particle_position(self):
"""
Grabs the individual particle that was pre-computed to be the closest to the centroid of this cloth prim.
Returns:
np.array: centroid particle's (x,y,z) cartesian coordinates relative to the world frame
"""
return self.compute_particle_positions(idxs=[self._centroid_idx])[0]
@property
def particle_velocities(self):
"""
Grabs individual particle velocities for this cloth prim
Returns:
np.array: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
# the velocities attribute is w.r.t the world frame already
return np.array(self.get_attribute(attr="velocities"))
@particle_velocities.setter
def particle_velocities(self, vel):
"""
Set the particle velocities of this cloth
Args:
            vel (np.array): (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
                cartesian coordinates with respect to the world frame
"""
assert vel.shape[0] == self._n_particles, \
f"Got mismatch in particle setting size: {vel.shape[0]}, vs. number of particles {self._n_particles}!"
# the velocities attribute is w.r.t the world frame already
self.set_attribute(attr="velocities", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(vel))
def compute_face_normals(self, face_ids=None):
"""
Grabs individual face normals for this cloth prim
Args:
face_ids (None or n-array): If specified, list of face IDs whose corresponding normals should be computed
If None, all faces will be used
Returns:
np.array: (N, 3) numpy array, where each of the N faces' normals are expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
faces = self.faces if face_ids is None else self.faces[face_ids]
points = self.compute_particle_positions(idxs=faces.flatten()).reshape(-1, 3, 3)
return self.compute_face_normals_from_particle_positions(positions=points)
def compute_face_normals_from_particle_positions(self, positions):
"""
Grabs individual face normals for this cloth prim
Args:
positions (n-array): (N, 3, 3) array specifying the per-face particle positions
Returns:
np.array: (N, 3) numpy array, where each of the N faces' normals are expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
# Shape [F, 3]
v1 = positions[:, 2, :] - positions[:, 0, :]
v2 = positions[:, 1, :] - positions[:, 0, :]
normals = np.cross(v1, v2)
return normals / np.linalg.norm(normals, axis=1).reshape(-1, 1)
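    # The normals above come from the cross product of two triangle edges, normalized to unit length. For a
    # single triangle in the xy-plane with counter-clockwise vertices, the (v1 = p2 - p0, v2 = p1 - p0)
    # edge ordering used here yields a normal along -z:
    #
    #   tri = np.array([[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]])  # shape (1, 3, 3)
    #   v1, v2 = tri[:, 2] - tri[:, 0], tri[:, 1] - tri[:, 0]
    #   n = np.cross(v1, v2)  # -> [[0., 0., -1.]]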
def contact_list(self, keypoints_only=True):
"""
Get list of all current contacts with this cloth body
Args:
keypoints_only (bool): If True, will only check contact with this cloth's keypoints
Returns:
list of CsRawData: raw contact info for this cloth body
"""
contacts = []
def report_hit(hit):
contacts.append(CsRawData(
time=0.0, # dummy value
dt=0.0, # dummy value
body0=self.prim_path,
body1=hit.rigid_body,
position=pos,
normal=np.zeros(3), # dummy value
impulse=np.zeros(3), # dummy value
))
return True
positions = self.keypoint_particle_positions if keypoints_only else self.compute_particle_positions()
for pos in positions:
og.sim.psqi.overlap_sphere(ClothPrim.cloth_system.particle_contact_offset, pos, report_hit, False)
return contacts
def update_handles(self):
# no handles to update
pass
@property
def volume(self):
mesh = mesh_prim_to_trimesh_mesh(self.prim, include_normals=False, include_texcoord=False, world_frame=True)
return mesh.volume if mesh.is_volume else mesh.convex_hull.volume
@volume.setter
def volume(self, volume):
raise NotImplementedError("Cannot set volume directly for a link!")
@property
def mass(self):
"""
Returns:
float: mass of the rigid body in kg.
"""
# We have to read the mass directly in the cloth prim
return self._mass_api.GetMassAttr().Get()
@mass.setter
def mass(self, mass):
"""
Args:
mass (float): mass of the rigid body in kg.
"""
# We have to set the mass directly in the cloth prim
self._mass_api.GetMassAttr().Set(mass)
@property
def density(self):
raise NotImplementedError("Cannot get density for ClothPrim")
@density.setter
def density(self, density):
raise NotImplementedError("Cannot set density for ClothPrim")
@property
def body_name(self):
"""
Returns:
str: Name of this body
"""
return self.prim_path.split("/")[-1]
def get_linear_velocity(self):
"""
Returns:
np.ndarray: current average linear velocity of the particles of the cloth prim. Shape (3,).
"""
return np.array(self._prim.GetAttribute("velocities").Get()).mean(axis=0)
def get_angular_velocity(self):
"""
Returns:
np.ndarray: zero vector as a placeholder because a cloth prim doesn't have an angular velocity. Shape (3,).
"""
return np.zeros(3)
def set_linear_velocity(self, velocity):
"""
Sets the linear velocity of all the particles of the cloth prim.
Args:
velocity (np.ndarray): linear velocity to set all the particles of the cloth prim to. Shape (3,).
"""
vel = self.particle_velocities
vel[:] = velocity
self.particle_velocities = vel
def set_angular_velocity(self, velocity):
"""
Simply returns because a cloth prim doesn't have an angular velocity
Args:
            velocity (np.ndarray): angular velocity; ignored, since a cloth prim has no angular velocity. Shape (3,).
"""
return
def wake(self):
# TODO (eric): Just a pass through for now.
return
@property
def bend_stiffness(self):
"""
Returns:
float: spring bend stiffness of the particle system
"""
return self.get_attribute("physxAutoParticleCloth:springBendStiffness")
@bend_stiffness.setter
def bend_stiffness(self, bend_stiffness):
"""
Args:
bend_stiffness (float): spring bend stiffness of the particle system
"""
self.set_attribute("physxAutoParticleCloth:springBendStiffness", bend_stiffness)
@property
def damping(self):
"""
Returns:
float: spring damping of the particle system
"""
return self.get_attribute("physxAutoParticleCloth:springDamping")
@damping.setter
def damping(self, damping):
"""
Args:
damping (float): spring damping of the particle system
"""
self.set_attribute("physxAutoParticleCloth:springDamping", damping)
@property
def shear_stiffness(self):
"""
Returns:
float: spring shear_stiffness of the particle system
"""
return self.get_attribute("physxAutoParticleCloth:springShearStiffness")
@shear_stiffness.setter
def shear_stiffness(self, shear_stiffness):
"""
Args:
shear_stiffness (float): spring shear_stiffness of the particle system
"""
self.set_attribute("physxAutoParticleCloth:springShearStiffness", shear_stiffness)
@property
def stretch_stiffness(self):
"""
Returns:
float: spring stretch_stiffness of the particle system
"""
return self.get_attribute("physxAutoParticleCloth:springStretchStiffness")
@stretch_stiffness.setter
def stretch_stiffness(self, stretch_stiffness):
"""
Args:
stretch_stiffness (float): spring stretch_stiffness of the particle system
"""
self.set_attribute("physxAutoParticleCloth:springStretchStiffness", stretch_stiffness)
@property
def particle_group(self):
"""
Returns:
int: Particle group this instancer belongs to
"""
return self.get_attribute(attr="physxParticle:particleGroup")
@particle_group.setter
def particle_group(self, group):
"""
Args:
group (int): Particle group this instancer belongs to
"""
self.set_attribute(attr="physxParticle:particleGroup", val=group)
def _dump_state(self):
# Run super first
state = super()._dump_state()
state["particle_group"] = self.particle_group
state["n_particles"] = self.n_particles
state["particle_positions"] = self.compute_particle_positions()
state["particle_velocities"] = self.particle_velocities
return state
def _load_state(self, state):
# Run super first
super()._load_state(state=state)
# Sanity check the identification number and particle group
assert self.particle_group == state["particle_group"], f"Got mismatch in particle group for this cloth " \
f"when loading state! Should be: {self.particle_group}, got: {state['particle_group']}."
# Set values appropriately
self._n_particles = state["n_particles"]
        # Make sure the loaded state is a numpy array; it could have been accidentally cast to a list during
        # JSON-serialization
self.particle_velocities = np.array(state["particle_velocities"]) if not isinstance(state["particle_velocities"], np.ndarray) else state["particle_velocities"]
self.set_particle_positions(positions=np.array(state["particle_positions"]) if not isinstance(state["particle_positions"], np.ndarray) else state["particle_positions"])
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
return np.concatenate([
state_flat,
[state["particle_group"], state["n_particles"]],
state["particle_positions"].reshape(-1),
state["particle_velocities"].reshape(-1),
]).astype(float)
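    # Layout sketch of the flat state produced above (illustrative, matching _deserialize below):
    #
    #     [ <super state> | particle_group | n_particles | positions.flat (N*3) | velocities.flat (N*3) ]
    #
    # The two methods must walk this layout in the same order, or deserialization will misalign.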
def _deserialize(self, state):
# Run super first
state_dict, idx = super()._deserialize(state=state)
particle_group = int(state[idx])
n_particles = int(state[idx + 1])
# Sanity check the identification number
assert self.particle_group == particle_group, f"Got mismatch in particle group for this particle " \
f"instancer when deserializing state! Should be: {self.particle_group}, got: {particle_group}."
# De-compress from 1D array
state_dict["particle_group"] = particle_group
state_dict["n_particles"] = n_particles
# Process remaining keys and reshape automatically
keys = ("particle_positions", "particle_velocities")
sizes = ((n_particles, 3), (n_particles, 3))
idx += 2
for key, size in zip(keys, sizes):
            length = np.prod(size)
state_dict[key] = state[idx: idx + length].reshape(size)
idx += length
return state_dict, idx
def reset(self):
"""
Reset the points to their default positions in the local frame, and also zeroes out velocities
"""
if self.initialized:
self.set_attribute(attr="points", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(self._default_positions))
self.particle_velocities = np.zeros((self._n_particles, 3))
StanfordVL/OmniGibson/omnigibson/prims/xform_prim.py
from collections.abc import Iterable
import numpy as np
import omnigibson as og
from omnigibson.macros import gm
import omnigibson.lazy as lazy
from omnigibson.prims.prim_base import BasePrim
from omnigibson.prims.material_prim import MaterialPrim
from omnigibson.utils.transform_utils import quat2euler
from omnigibson.utils.usd_utils import PoseAPI
import omnigibson.utils.transform_utils as T
from scipy.spatial.transform import Rotation as R
import trimesh.transformations
class XFormPrim(BasePrim):
"""
Provides high level functions to deal with an Xform prim and its attributes/ properties.
If there is an Xform prim present at the path, it will use it. Otherwise, a new XForm prim at
the specified prim path will be created when self.load(...) is called.
Note: the prim will have "xformOp:orient", "xformOp:translate" and "xformOp:scale" only post init,
unless it is a non-root articulation link.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. For this xform prim, the below values can be specified:
scale (None or float or 3-array): If specified, sets the scale for this object. A single number corresponds
to uniform scaling along the x,y,z axes, whereas a 3-array specifies per-axis scaling.
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Other values that will be filled in at runtime
self._binding_api = None
self._material = None
self._collision_filter_api = None
self.original_scale = None
# Run super method
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def _load(self):
return og.sim.stage.DefinePrim(self._prim_path, "Xform")
def _post_load(self):
# run super first
super()._post_load()
# Make sure all xforms have pose and scaling info
self._set_xform_properties()
# Cache the original scale from the USD so that when EntityPrim sets the scale for each link (Rigid/ClothPrim),
# the new scale is with respect to the original scale. XFormPrim's scale always matches the scale in the USD.
self.original_scale = np.array(self.get_attribute("xformOp:scale"))
# Create collision filter API
self._collision_filter_api = lazy.pxr.UsdPhysics.FilteredPairsAPI(self._prim) if \
self._prim.HasAPI(lazy.pxr.UsdPhysics.FilteredPairsAPI) else lazy.pxr.UsdPhysics.FilteredPairsAPI.Apply(self._prim)
# Create binding API
self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI(self.prim) if \
self._prim.HasAPI(lazy.pxr.UsdShade.MaterialBindingAPI) else lazy.pxr.UsdShade.MaterialBindingAPI.Apply(self.prim)
# Grab the attached material if it exists
if self.has_material():
material_prim_path = self._binding_api.GetDirectBinding().GetMaterialPath().pathString
material_name = f"{self.name}:material"
material = MaterialPrim.get_material(prim_path=material_prim_path, name=material_name)
assert material.loaded, f"Material prim path {material_prim_path} doesn't exist on stage."
material.add_user(self)
self._material = material
# Optionally set the scale and visibility
if "scale" in self._load_config and self._load_config["scale"] is not None:
self.scale = self._load_config["scale"]
def remove(self):
# Remove the material prim if one exists
if self._material is not None:
self._material.remove_user(self)
# Remove the prim
super().remove()
def _set_xform_properties(self):
current_position, current_orientation = self.get_position_orientation()
properties_to_remove = [
"xformOp:rotateX",
"xformOp:rotateXZY",
"xformOp:rotateY",
"xformOp:rotateYXZ",
"xformOp:rotateYZX",
"xformOp:rotateZ",
"xformOp:rotateZYX",
"xformOp:rotateZXY",
"xformOp:rotateXYZ",
"xformOp:transform",
]
prop_names = self.prim.GetPropertyNames()
xformable = lazy.pxr.UsdGeom.Xformable(self.prim)
xformable.ClearXformOpOrder()
        # TODO: won't be able to delete props for non-root links on articulated objects
for prop_name in prop_names:
if prop_name in properties_to_remove:
self.prim.RemoveProperty(prop_name)
if "xformOp:scale" not in prop_names:
xform_op_scale = xformable.AddXformOp(lazy.pxr.UsdGeom.XformOp.TypeScale, lazy.pxr.UsdGeom.XformOp.PrecisionDouble, "")
xform_op_scale.Set(lazy.pxr.Gf.Vec3d([1.0, 1.0, 1.0]))
else:
xform_op_scale = lazy.pxr.UsdGeom.XformOp(self._prim.GetAttribute("xformOp:scale"))
if "xformOp:translate" not in prop_names:
xform_op_translate = xformable.AddXformOp(
lazy.pxr.UsdGeom.XformOp.TypeTranslate, lazy.pxr.UsdGeom.XformOp.PrecisionDouble, ""
)
else:
xform_op_translate = lazy.pxr.UsdGeom.XformOp(self._prim.GetAttribute("xformOp:translate"))
if "xformOp:orient" not in prop_names:
xform_op_rot = xformable.AddXformOp(lazy.pxr.UsdGeom.XformOp.TypeOrient, lazy.pxr.UsdGeom.XformOp.PrecisionDouble, "")
else:
xform_op_rot = lazy.pxr.UsdGeom.XformOp(self._prim.GetAttribute("xformOp:orient"))
xformable.SetXformOpOrder([xform_op_translate, xform_op_rot, xform_op_scale])
self.set_position_orientation(position=current_position, orientation=current_orientation)
new_position, new_orientation = self.get_position_orientation()
r1 = R.from_quat(current_orientation).as_matrix()
r2 = R.from_quat(new_orientation).as_matrix()
# Make sure setting is done correctly
assert np.allclose(new_position, current_position, atol=1e-4) and np.allclose(r1, r2, atol=1e-4), \
f"{self.prim_path}: old_pos: {current_position}, new_pos: {new_position}, " \
f"old_orn: {current_orientation}, new_orn: {new_orientation}"
def has_material(self):
"""
Returns:
bool: True if there is a visual material bound to this prim. False otherwise
"""
        material_path = self._binding_api.GetDirectBinding().GetMaterialPath().pathString
        return material_path != ""
def set_position_orientation(self, position=None, orientation=None):
"""
Sets prim's pose with respect to the world frame
Args:
position (None or 3-array): if specified, (x,y,z) position in the world frame
Default is None, which means left unchanged.
orientation (None or 4-array): if specified, (x,y,z,w) quaternion orientation in the world frame.
Default is None, which means left unchanged.
"""
current_position, current_orientation = self.get_position_orientation()
position = current_position if position is None else np.array(position, dtype=float)
orientation = current_orientation if orientation is None else np.array(orientation, dtype=float)
assert np.isclose(np.linalg.norm(orientation), 1, atol=1e-3), \
f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
my_world_transform = T.pose2mat((position, orientation))
parent_prim = lazy.omni.isaac.core.utils.prims.get_prim_parent(self._prim)
parent_path = str(parent_prim.GetPath())
parent_world_transform = PoseAPI.get_world_pose_with_scale(parent_path)
local_transform = np.linalg.inv(parent_world_transform) @ my_world_transform
self.set_local_pose(*T.mat2pose(local_transform))
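    # The conversion above is standard frame algebra: a sketch of the same computation in
    # isolation (hypothetical 4x4 homogeneous transforms, column-vector convention):
    #
    #     local_tf = np.linalg.inv(parent_world_tf) @ child_world_tf
    #
    # i.e., the prim's local pose is its world pose re-expressed in the parent's frame.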
def get_position_orientation(self):
"""
Gets prim's pose with respect to the world's frame.
Returns:
2-tuple:
- 3-array: (x,y,z) position in the world frame
- 4-array: (x,y,z,w) quaternion orientation in the world frame
"""
return PoseAPI.get_world_pose(self._prim_path)
def set_position(self, position):
"""
Set this prim's position with respect to the world frame
Args:
position (3-array): (x,y,z) global cartesian position to set
"""
self.set_position_orientation(position=position)
def get_position(self):
"""
Get this prim's position with respect to the world frame
Returns:
3-array: (x,y,z) global cartesian position of this prim
"""
return self.get_position_orientation()[0]
def set_orientation(self, orientation):
"""
Set this prim's orientation with respect to the world frame
Args:
orientation (4-array): (x,y,z,w) global quaternion orientation to set
"""
self.set_position_orientation(orientation=orientation)
def get_orientation(self):
"""
Get this prim's orientation with respect to the world frame
Returns:
4-array: (x,y,z,w) global quaternion orientation of this prim
"""
return self.get_position_orientation()[1]
def get_rpy(self):
"""
Get this prim's orientation with respect to the world frame
Returns:
3-array: (roll, pitch, yaw) global euler orientation of this prim
"""
return quat2euler(self.get_orientation())
def get_2d_orientation(self):
"""
Get this prim's orientation on the XY plane of the world frame. This is obtained by
projecting the forward vector onto the XY plane and then computing the angle.
"""
fwd = R.from_quat(self.get_orientation()).apply([1, 0, 0])
fwd[2] = 0.
# If the object is facing close to straight up, then we can't compute a 2D orientation
# in that case, we return zero.
if np.linalg.norm(fwd) < 1e-4:
return 0.
fwd /= np.linalg.norm(fwd)
return np.arctan2(fwd[1], fwd[0])
def get_local_pose(self):
"""
Gets prim's pose with respect to the prim's local frame (its parent frame)
Returns:
2-tuple:
- 3-array: (x,y,z) position in the local frame
- 4-array: (x,y,z,w) quaternion orientation in the local frame
"""
pos, ori = lazy.omni.isaac.core.utils.xforms.get_local_pose(self.prim_path)
return pos, ori[[1, 2, 3, 0]]
def set_local_pose(self, position=None, orientation=None):
"""
Sets prim's pose with respect to the local frame (the prim's parent frame).
Args:
position (None or 3-array): if specified, (x,y,z) position in the local frame of the prim
(with respect to its parent prim). Default is None, which means left unchanged.
orientation (None or 4-array): if specified, (x,y,z,w) quaternion orientation in the local frame of the prim
(with respect to its parent prim). Default is None, which means left unchanged.
"""
properties = self.prim.GetPropertyNames()
if position is not None:
position = lazy.pxr.Gf.Vec3d(*np.array(position, dtype=float))
if "xformOp:translate" not in properties:
lazy.carb.log_error(
"Translate property needs to be set for {} before setting its position".format(self.name)
)
self.set_attribute("xformOp:translate", position)
if orientation is not None:
orientation = np.array(orientation, dtype=float)[[3, 0, 1, 2]]
if "xformOp:orient" not in properties:
lazy.carb.log_error(
"Orient property needs to be set for {} before setting its orientation".format(self.name)
)
xform_op = self._prim.GetAttribute("xformOp:orient")
if xform_op.GetTypeName() == "quatf":
rotq = lazy.pxr.Gf.Quatf(*orientation)
else:
rotq = lazy.pxr.Gf.Quatd(*orientation)
xform_op.Set(rotq)
PoseAPI.invalidate()
if gm.ENABLE_FLATCACHE:
# If flatcache is on, make sure the USD local pose is synced to the fabric local pose.
# Ideally we should call usdrt's set local pose directly, but there is no such API.
# The only available API is SetLocalXformFromUsd, so we update USD first, and then sync to fabric.
xformable_prim = lazy.usdrt.Rt.Xformable(lazy.omni.isaac.core.utils.prims.get_prim_at_path(self.prim_path, fabric=True))
assert not xformable_prim.HasWorldXform(), "Fabric's world pose is set for a non-rigid prim which is unexpected. Please report this."
xformable_prim.SetLocalXformFromUsd()
return
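    # Note on the `[[3, 0, 1, 2]]` reindexing above: OmniGibson uses (x, y, z, w) quaternions,
    # while USD's Gf.Quatf/Gf.Quatd constructors take (w, x, y, z). Illustrative sketch:
    #
    #     xyzw = np.array([0.0, 0.0, 0.0, 1.0])   # identity rotation, OmniGibson order
    #     wxyz = xyzw[[3, 0, 1, 2]]               # -> [1, 0, 0, 0], USD order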
def get_world_scale(self):
"""
Gets prim's scale with respect to the world's frame.
Returns:
np.ndarray: scale applied to the prim's dimensions in the world frame. shape is (3, ).
"""
prim_tf = lazy.pxr.UsdGeom.Xformable(self._prim).ComputeLocalToWorldTransform(lazy.pxr.Usd.TimeCode.Default())
transform = lazy.pxr.Gf.Transform()
transform.SetMatrix(prim_tf)
return np.array(transform.GetScale())
@property
def scaled_transform(self):
"""
Returns the scaled transform of this prim.
"""
return PoseAPI.get_world_pose_with_scale(self._prim_path)
def transform_local_points_to_world(self, points):
return trimesh.transformations.transform_points(points, self.scaled_transform)
@property
def scale(self):
"""
Gets prim's scale with respect to the local frame (the parent's frame).
Returns:
np.ndarray: scale applied to the prim's dimensions in the local frame. shape is (3, ).
"""
return np.array(self.get_attribute("xformOp:scale"))
@scale.setter
def scale(self, scale):
"""
Sets prim's scale with respect to the local frame (the prim's parent frame).
Args:
scale (float or np.ndarray): scale to be applied to the prim's dimensions. shape is (3, ).
Defaults to None, which means left unchanged.
"""
scale = np.array(scale, dtype=float) if isinstance(scale, Iterable) else np.ones(3) * scale
scale = lazy.pxr.Gf.Vec3d(*scale)
properties = self.prim.GetPropertyNames()
if "xformOp:scale" not in properties:
lazy.carb.log_error("Scale property needs to be set for {} before setting its scale".format(self.name))
self.set_attribute("xformOp:scale", scale)
@property
def material(self):
"""
Returns:
None or MaterialPrim: The bound material to this prim, if there is one
"""
return self._material
@material.setter
def material(self, material):
"""
Set the material @material for this prim. This will also bind the material to this prim
Args:
material (MaterialPrim): Material to bind to this prim
"""
self._binding_api.Bind(lazy.pxr.UsdShade.Material(material.prim), bindingStrength=lazy.pxr.UsdShade.Tokens.weakerThanDescendants)
self._material = material
def add_filtered_collision_pair(self, prim):
"""
Adds a collision filter pair with another prim
Args:
prim (XFormPrim): Another prim to filter collisions with
"""
# Add to both this prim's and the other prim's filtered pair
self._collision_filter_api.GetFilteredPairsRel().AddTarget(prim.prim_path)
prim._collision_filter_api.GetFilteredPairsRel().AddTarget(self._prim_path)
def remove_filtered_collision_pair(self, prim):
"""
Removes a collision filter pair with another prim
Args:
prim (XFormPrim): Another prim to remove filter collisions with
"""
# Add to both this prim's and the other prim's filtered pair
self._collision_filter_api.GetFilteredPairsRel().RemoveTarget(prim.prim_path)
prim._collision_filter_api.GetFilteredPairsRel().RemoveTarget(self._prim_path)
def _dump_state(self):
pos, ori = self.get_position_orientation()
return dict(pos=pos, ori=ori)
def _load_state(self, state):
self.set_position_orientation(np.array(state["pos"]), np.array(state["ori"]))
def _serialize(self, state):
return np.concatenate([state["pos"], state["ori"]]).astype(float)
def _deserialize(self, state):
# We deserialize deterministically by knowing the order of values -- pos, ori
return dict(pos=state[0:3], ori=state[3:7]), 7
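    # Round-trip sketch (hypothetical `prim` instance): _serialize emits a flat (7,) array
    # [x, y, z, qx, qy, qz, qw], and _deserialize consumes exactly 7 entries, returning that
    # count so callers can keep slicing a larger concatenated scene-state vector.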
StanfordVL/OmniGibson/omnigibson/prims/joint_prim.py
from collections.abc import Iterable
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros
from omnigibson.prims.prim_base import BasePrim
from omnigibson.utils.usd_utils import PoseAPI, create_joint
from omnigibson.utils.constants import JointType, JointAxis
from omnigibson.utils.python_utils import assert_valid_key
import omnigibson.utils.transform_utils as T
from omnigibson.controllers.controller_base import ControlType
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEFAULT_MAX_POS = 1000.0
m.DEFAULT_MAX_PRISMATIC_VEL = 1.0
m.DEFAULT_MAX_REVOLUTE_VEL = 15.0
m.DEFAULT_MAX_EFFORT = 100.0
m.INF_POS_THRESHOLD = 1e5
m.INF_VEL_THRESHOLD = 1e5
m.INF_EFFORT_THRESHOLD = 1e10
m.COMPONENT_SUFFIXES = ["x", "y", "z", "rx", "ry", "rz"]
# TODO: Split into non-articulated / articulated Joint Prim classes?
# TODO: Add logic for non Prismatic / Revolute joints (D6, spherical)
class JointPrim(BasePrim):
"""
Provides high level functions to deal with a joint prim and its attributes/ properties.
If there is an joint prim present at the path, it will use it. Otherwise, a new joint prim at
the specified prim path will be created when self.load(...) is called.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. For this joint prim, the below values can be specified:
joint_type (str): If specified, should be the joint type to create. Valid options are:
{"Joint", "FixedJoint", "PrismaticJoint", "RevoluteJoint", "SphericalJoint"}
(equivalently, one of JointType)
body0 (None or str): If specified, should be the absolute prim path to the parent body that this joint
is connected to. None can also be valid, which corresponds to cases where only a single body may be
specified (e.g.: fixed joints)
body1 (None or str): If specified, should be the absolute prim path to the child body that this joint
is connected to. None can also be valid, which corresponds to cases where only a single body may be
specified (e.g.: fixed joints)
articulation (None or int): if specified, should be handle to pre-existing articulation. This will enable
additional features for this joint prim, e.g.: polling / setting this joint's state. Note that in this
case, the joint must already exist prior to this class instance. Default is None,
which corresponds to a non-articulated joint.
"""
def __init__(
self,
prim_path,
name,
load_config=None,
articulation_view=None,
):
# Grab dynamic control reference and set properties
self._articulation_view_direct = articulation_view
# Other values that will be filled in at runtime
self._joint_type = None
self._control_type = None
self._driven = None
# The following values will only be valid if this joint is part of an articulation
self._n_dof = None # The number of degrees of freedom this joint provides
self._joint_idx = None # The index of this joint in the parent articulation's joint array
self._joint_dof_offset = None # The starting index of the DOFs for this joint in the parent articulation's DOF array
self._joint_name = None # The name of this joint in the parent's articulation tree
# Run super method
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def _load(self):
# Make sure this joint isn't articulated
        assert not self.articulated, "Joint cannot be created, since this is an articulated joint! We are assuming " \
                                     "the joint already exists in the stage."
# Define a joint prim at the current stage
prim = create_joint(
prim_path=self._prim_path,
joint_type=self._load_config.get("joint_type", JointType.JOINT),
)
return prim
def _post_load(self):
# run super first
super()._post_load()
# Check whether this joint is driven or not
self._driven = self._prim.HasAPI(lazy.pxr.UsdPhysics.DriveAPI)
# Add joint state API if this is a revolute or prismatic joint
self._joint_type = JointType.get_type(self._prim.GetTypeName().split("Physics")[-1])
if self.is_single_dof:
# We MUST already have the joint state API defined beforehand in the USD
# This is because physx complains if we try to add physx APIs AFTER a simulation step occurs, which
# happens because joint prims are usually created externally during an EntityPrim's initialization phase
assert self._prim.HasAPI(lazy.pxr.PhysxSchema.JointStateAPI), \
"Revolute or Prismatic joints must already have JointStateAPI added!"
# Possibly set the bodies
if "body0" in self._load_config and self._load_config["body0"] is not None:
self.body0 = self._load_config["body0"]
if "body1" in self._load_config and self._load_config["body1"] is not None:
self.body1 = self._load_config["body1"]
def _initialize(self):
# Always run super first
super()._initialize()
# Update the joint indices etc.
self.update_handles()
# Get control type
if self.articulated:
control_types = []
stiffnesses, dampings = self._articulation_view.get_gains(joint_indices=self.dof_indices)
for i, (kp, kd) in enumerate(zip(stiffnesses[0], dampings[0])):
# Infer control type based on whether kp and kd are 0 or not, as well as whether this joint is driven or not
# TODO: Maybe assert mutual exclusiveness here?
if not self._driven:
control_type = ControlType.NONE
elif kp == 0.0:
control_type = ControlType.EFFORT if kd == 0.0 else ControlType.VELOCITY
else:
control_type = ControlType.POSITION
control_types.append(control_type)
# Make sure all the control types are the same -- if not, we had something go wrong!
assert len(set(control_types)) == 1, f"Got multiple control types for this single joint: {control_types}"
self._control_type = control_types[0]
def update_handles(self):
"""
Updates all internal handles for this prim, in case they change since initialization
"""
# It's a bit tricky to get the joint index here. We need to find the first dof at this prim path
# first, then get the corresponding joint index from that dof offset.
self._joint_dof_offset = list(self._articulation_view._dof_paths[0]).index(self._prim_path)
joint_dof_offsets = self._articulation_view._metadata.joint_dof_offsets
# Note that we are finding the last occurrence of the dof offset, since that corresponds to the joint index
# The first occurrence can be a fixed link that is 0-dof, meaning the offset will be repeated.
self._joint_idx = next(i for i in reversed(range(len(joint_dof_offsets))) if joint_dof_offsets[i] == self._joint_dof_offset)
self._joint_name = self._articulation_view._metadata.joint_names[self._joint_idx]
self._n_dof = self._articulation_view._metadata.joint_dof_counts[self._joint_idx]
def set_control_type(self, control_type, kp=None, kd=None):
"""
Sets the control type for this joint.
Args:
control_type (ControlType): What type of control to use for this joint.
Valid options are: {ControlType.POSITION, ControlType.VELOCITY, ControlType.EFFORT}
kp (None or float): If specified, sets the kp gain value for this joint. Should only be set if
setting ControlType.POSITION
kd (None or float): If specified, sets the kd gain value for this joint. Should only be set if
setting ControlType.VELOCITY
"""
# Sanity check inputs
assert_valid_key(key=control_type, valid_keys=ControlType.VALID_TYPES, name="control type")
if control_type == ControlType.POSITION:
assert kp is not None, "kp gain must be specified for setting POSITION control!"
assert kd is None, "kd gain must not be specified for setting POSITION control!"
kd = 0.0
elif control_type == ControlType.VELOCITY:
assert kp is None, "kp gain must not be specified for setting VELOCITY control!"
assert kd is not None, "kd gain must be specified for setting VELOCITY control!"
kp = 0.0
else: # Efforts
assert kp is None, "kp gain must not be specified for setting EFFORT control!"
assert kd is None, "kd gain must not be specified for setting EFFORT control!"
kp, kd = 0.0, 0.0
# Set values
kps = np.full((1, self._n_dof), kp)
kds = np.full((1, self._n_dof), kd)
self._articulation_view.set_gains(kps=kps, kds=kds, joint_indices=self.dof_indices)
# Update control type
self._control_type = control_type
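    # Usage sketch (hypothetical `joint` instance; gain values are illustrative, not tuned):
    #
    #     joint.set_control_type(ControlType.POSITION, kp=150.0)   # position drive (kd forced to 0)
    #     joint.set_control_type(ControlType.VELOCITY, kd=10.0)    # velocity drive (kp forced to 0)
    #     joint.set_control_type(ControlType.EFFORT)               # direct effort control (both gains 0)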
@property
def _articulation_view(self):
if self._articulation_view_direct is None:
return None
# Validate that the articulation view is initialized and that if physics is running, the
# view is valid.
if og.sim.is_playing() and self.initialized:
assert self._articulation_view_direct.is_physics_handle_valid() and \
self._articulation_view_direct._physics_view.check(), \
"Articulation view must be valid if physics is running!"
return self._articulation_view_direct
@property
def body0(self):
"""
Gets this joint's body0 relationship.
Returns:
None or str: Absolute prim path to the body prim to set as this joint's parent link, or None if there is
no body0 specified.
"""
targets = self._prim.GetRelationship("physics:body0").GetTargets()
return targets[0].__str__() if len(targets) > 0 else None
@body0.setter
def body0(self, body0):
"""
Sets this joint's body0 relationship.
Args:
body0 (str): Absolute prim path to the body prim to set as this joint's parent link.
"""
# Make sure prim path is valid
assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body0), f"Invalid body0 path specified: {body0}"
self._prim.GetRelationship("physics:body0").SetTargets([lazy.pxr.Sdf.Path(body0)])
@property
def body1(self):
"""
Gets this joint's body1 relationship.
Returns:
None or str: Absolute prim path to the body prim to set as this joint's child link, or None if there is
no body1 specified.
"""
targets = self._prim.GetRelationship("physics:body1").GetTargets()
        return targets[0].__str__() if len(targets) > 0 else None
@body1.setter
def body1(self, body1):
"""
Sets this joint's body1 relationship.
Args:
body1 (str): Absolute prim path to the body prim to set as this joint's child link.
"""
# Make sure prim path is valid
assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body1), f"Invalid body1 path specified: {body1}"
self._prim.GetRelationship("physics:body1").SetTargets([lazy.pxr.Sdf.Path(body1)])
@property
def local_orientation(self):
"""
Returns:
4-array: (x,y,z,w) local quaternion orientation of this joint, relative to the parent link
"""
# Grab local rotation to parent and child links
quat0 = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(self.get_attribute("physics:localRot0"))[[1, 2, 3, 0]]
quat1 = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(self.get_attribute("physics:localRot1"))[[1, 2, 3, 0]]
# Invert the child link relationship, and multiply the two rotations together to get the final rotation
return T.quat_multiply(quaternion1=T.quat_inverse(quat1), quaternion0=quat0)
@property
def joint_name(self):
"""
Returns:
str: Name of this joint
"""
return self._joint_name
@property
def joint_type(self):
"""
Gets this joint's type (ignoring the "Physics" prefix)
Returns:
JointType: Joint's type. Should be one corresponding to:
{JOINT_PRISMATIC, JOINT_REVOLUTE, JOINT_FIXED, JOINT_SPHERICAL}
"""
return self._joint_type
@property
def driven(self):
"""
Returns:
bool: Whether this joint can be driven by a motor or not
"""
return self._driven
@property
def control_type(self):
"""
Gets the control types for this joint
Returns:
ControlType: control type for this joint
"""
return self._control_type
@property
def max_velocity(self):
"""
Gets this joint's maximum velocity
Returns:
float: maximum velocity for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
# We either return the raw value or a default value if there is no max specified
raw_vel = self._articulation_view.get_max_velocities(joint_indices=self.dof_indices)[0][0]
default_max_vel = m.DEFAULT_MAX_REVOLUTE_VEL if self.joint_type == JointType.JOINT_REVOLUTE else m.DEFAULT_MAX_PRISMATIC_VEL
return default_max_vel if raw_vel is None or np.abs(raw_vel) > m.INF_VEL_THRESHOLD else raw_vel
@max_velocity.setter
def max_velocity(self, vel):
"""
Sets this joint's maximum velocity
Args:
vel (float): Velocity to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_max_velocities(np.array([[vel]]), joint_indices=self.dof_indices)
@property
def max_effort(self):
"""
Gets this joint's maximum effort
Returns:
float: maximum effort for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
# We either return the raw value or a default value if there is no max specified
raw_effort = self._articulation_view.get_max_efforts(joint_indices=self.dof_indices)[0][0]
return m.DEFAULT_MAX_EFFORT if raw_effort is None or np.abs(raw_effort) > m.INF_EFFORT_THRESHOLD else raw_effort
@max_effort.setter
def max_effort(self, effort):
"""
Sets this joint's maximum effort
Args:
effort (float): effort to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_max_efforts(np.array([[effort]]), joint_indices=self.dof_indices)
@property
def stiffness(self):
"""
Gets this joint's stiffness
Returns:
float: stiffness for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
stiffnesses = self._articulation_view.get_gains(joint_indices=self.dof_indices)[0]
return stiffnesses[0][0]
@stiffness.setter
def stiffness(self, stiffness):
"""
Sets this joint's stiffness
Args:
stiffness (float): stiffness to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_gains(kps=np.array([[stiffness]]), joint_indices=self.dof_indices)
@property
def damping(self):
"""
Gets this joint's damping
Returns:
float: damping for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
dampings = self._articulation_view.get_gains(joint_indices=self.dof_indices)[1]
return dampings[0][0]
@damping.setter
def damping(self, damping):
"""
Sets this joint's damping
Args:
damping (float): damping to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_gains(kds=np.array([[damping]]), joint_indices=self.dof_indices)
@property
def friction(self):
"""
Gets this joint's friction
Returns:
float: friction for this joint
"""
return self._articulation_view.get_friction_coefficients(joint_indices=self.dof_indices)[0][0] \
if og.sim.is_playing() else self.get_attribute("physxJoint:jointFriction")
@friction.setter
def friction(self, friction):
"""
Sets this joint's friction
Args:
friction (float): friction to set
"""
self.set_attribute("physxJoint:jointFriction", friction)
if og.sim.is_playing():
self._articulation_view.set_friction_coefficients(np.array([[friction]]), joint_indices=self.dof_indices)
@property
def lower_limit(self):
"""
Gets this joint's lower_limit
Returns:
float: lower_limit for this joint
"""
# TODO: Add logic for non Prismatic / Revolute joints (D6, spherical)
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
# We either return the raw value or a default value if there is no max specified
raw_pos_lower, raw_pos_upper = self._articulation_view.get_joint_limits(joint_indices=self.dof_indices).flatten()
return -m.DEFAULT_MAX_POS \
if raw_pos_lower is None or raw_pos_lower == raw_pos_upper or np.abs(raw_pos_lower) > m.INF_POS_THRESHOLD \
else raw_pos_lower
@lower_limit.setter
def lower_limit(self, lower_limit):
"""
Sets this joint's lower_limit
Args:
lower_limit (float): lower_limit to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_joint_limits(np.array([[lower_limit, self.upper_limit]]), joint_indices=self.dof_indices)
@property
def upper_limit(self):
"""
Gets this joint's upper_limit
Returns:
float: upper_limit for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
# We either return the raw value or a default value if there is no max specified
raw_pos_lower, raw_pos_upper = self._articulation_view.get_joint_limits(joint_indices=self.dof_indices).flatten()
return m.DEFAULT_MAX_POS \
if raw_pos_upper is None or raw_pos_lower == raw_pos_upper or np.abs(raw_pos_upper) > m.INF_POS_THRESHOLD \
else raw_pos_upper
@upper_limit.setter
def upper_limit(self, upper_limit):
"""
Sets this joint's upper_limit
Args:
upper_limit (float): upper_limit to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_joint_limits(np.array([[self.lower_limit, upper_limit]]), joint_indices=self.dof_indices)
@property
def has_limit(self):
"""
Returns:
bool: True if this joint has a limit, else False
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
return np.all(np.abs(self._articulation_view.get_joint_limits(joint_indices=self.dof_indices)) < m.INF_POS_THRESHOLD)
@property
def axis(self):
"""
Gets this joint's axis
Returns:
            str: axis for this joint, one of "X", "Y", "Z"
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
return self.get_attribute("physics:axis")
@axis.setter
def axis(self, axis):
"""
Sets this joint's axis
Args:
            axis (str): axis for this joint, one of "X", "Y", "Z"
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
assert axis in JointAxis, f"Invalid joint axis specified: {axis}!"
self.set_attribute("physics:axis", axis)
@property
def n_dof(self):
"""
Returns:
int: Number of degrees of freedom this joint has
"""
return self._n_dof
@property
def dof_indices(self):
"""
Returns:
list of int: Indices of this joint's DOFs in the parent articulation's DOF array
"""
assert self.articulated, "Can only get DOF indices for articulated joints!"
return list(range(self._joint_dof_offset, self._joint_dof_offset + self._n_dof))
@property
def articulated(self):
"""
Returns:
bool: Whether this joint is articulated or not
"""
return self._articulation_view is not None
@property
def is_revolute(self):
"""
Returns:
bool: Whether this joint is revolute or not
"""
return self._joint_type == JointType.JOINT_REVOLUTE
@property
def is_single_dof(self):
"""
Returns:
bool: Whether this joint has a single DOF or not
"""
return self._joint_type in {JointType.JOINT_REVOLUTE, JointType.JOINT_PRISMATIC}
def get_state(self, normalized=False):
"""
(pos, vel, effort) state of this joint
Args:
normalized (bool): If True, will return normalized state of this joint, where pos, vel, and effort values
are in range [-1, 1].
Returns:
3-tuple:
- n-array: position of this joint, where n = number of DOF for this joint
- n-array: velocity of this joint, where n = number of DOF for this joint
- n-array: effort of this joint, where n = number of DOF for this joint
"""
# Make sure we only call this if we're an articulated joint
assert self.articulated, "Can only get state for articulated joints!"
# Grab raw states
pos = self._articulation_view.get_joint_positions(joint_indices=self.dof_indices)[0]
vel = self._articulation_view.get_joint_velocities(joint_indices=self.dof_indices)[0]
effort = self._articulation_view.get_applied_joint_efforts(joint_indices=self.dof_indices)[0]
# Potentially normalize if requested
if normalized:
pos, vel, effort = self._normalize_pos(pos), self._normalize_vel(vel), self._normalize_effort(effort)
return pos, vel, effort
def get_target(self, normalized=False):
"""
(pos, vel) target of this joint
Args:
normalized (bool): If True, will return normalized target of this joint
Returns:
2-tuple:
- n-array: target position of this joint, where n = number of DOF for this joint
- n-array: target velocity of this joint, where n = number of DOF for this joint
"""
# Make sure we only call this if we're an articulated joint
assert self.articulated, "Can only get targets for articulated joints!"
# Grab raw states
targets = self._articulation_view.get_applied_actions()
pos = targets.joint_positions[0][self.dof_indices]
vel = targets.joint_velocities[0][self.dof_indices]
# Potentially normalize if requested
if normalized:
pos, vel = self._normalize_pos(pos), self._normalize_vel(vel)
return pos, vel
def _normalize_pos(self, pos):
"""
Normalizes raw joint positions @pos
Args:
pos (n-array): n-DOF raw positions to normalize
Returns:
n-array: n-DOF normalized positions in range [-1, 1]
"""
low, high = self.lower_limit, self.upper_limit
mean = (low + high) / 2.0
magnitude = (high - low) / 2.0
pos = (pos - mean) / magnitude
return pos
def _denormalize_pos(self, pos):
"""
De-normalizes joint positions @pos
Args:
pos (n-array): n-DOF normalized positions in range [-1, 1]
Returns:
n-array: n-DOF de-normalized positions
"""
low, high = self.lower_limit, self.upper_limit
mean = (low + high) / 2.0
magnitude = (high - low) / 2.0
pos = pos * magnitude + mean
return pos
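    # Worked example of the mapping above (illustrative numbers): with lower_limit = -1.57 and
    # upper_limit = 1.57, mean = 0 and magnitude = 1.57, so a raw position of 0.785 normalizes
    # to 0.785 / 1.57 = 0.5, and _denormalize_pos(0.5) recovers 0.785.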
def _normalize_vel(self, vel):
"""
Normalizes raw joint velocities @vel
Args:
vel (n-array): n-DOF raw velocities to normalize
Returns:
n-array: n-DOF normalized velocities in range [-1, 1]
"""
return vel / self.max_velocity
def _denormalize_vel(self, vel):
"""
De-normalizes joint velocities @vel
Args:
vel (n-array): n-DOF normalized velocities in range [-1, 1]
Returns:
n-array: n-DOF de-normalized velocities
"""
return vel * self.max_velocity
def _normalize_effort(self, effort):
"""
Normalizes raw joint effort @effort
Args:
effort (n-array): n-DOF raw effort to normalize
Returns:
n-array: n-DOF normalized effort in range [-1, 1]
"""
return effort / self.max_effort
def _denormalize_effort(self, effort):
"""
De-normalizes joint effort @effort
Args:
effort (n-array): n-DOF normalized effort in range [-1, 1]
Returns:
n-array: n-DOF de-normalized effort
"""
return effort * self.max_effort
def set_pos(self, pos, normalized=False, drive=False):
"""
Set the position of this joint in metric space
Args:
pos (float or n-array of float): Set the position(s) for this joint. Can be a single float or 1-array of
float if the joint only has a single DOF, otherwise it should be an n-array of floats.
normalized (bool): Whether the input is normalized to [-1, 1] (in this case, the values will be
de-normalized first before being executed). Default is False
drive (bool): Whether the joint should be driven naturally via its motor to the position being set or
whether it should be instantaneously set. Default is False, corresponding to an
instantaneous setting of the position
"""
# Sanity checks -- make sure we're the correct control type if we're setting a target and that we're articulated
assert self.articulated, "Can only set position for articulated joints!"
if drive:
assert self._driven, "Can only use set_pos with drive=True if this joint is driven!"
assert self._control_type == ControlType.POSITION, \
"Trying to set joint position target, but control type is not position!"
# Standardize input
pos = np.array([pos]) if self._n_dof == 1 and not isinstance(pos, Iterable) else np.array(pos)
# Potentially de-normalize if the input is normalized
if normalized:
pos = self._denormalize_pos(pos)
# Set the DOF(s) in this joint
if not drive:
self._articulation_view.set_joint_positions(positions=pos, joint_indices=self.dof_indices)
PoseAPI.invalidate()
# Also set the target
self._articulation_view.set_joint_position_targets(positions=pos, joint_indices=self.dof_indices)
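    # Usage sketch (hypothetical articulated, position-driven `joint`):
    #
    #     joint.set_pos(0.3)                    # teleport the DOF and sync the drive target
    #     joint.set_pos(0.3, drive=True)        # only command the motor toward 0.3
    #     joint.set_pos(0.5, normalized=True)   # 0.5 in [-1, 1] is de-normalized to metric first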
def set_vel(self, vel, normalized=False, drive=False):
"""
Set the velocity of this joint in metric space
Args:
vel (float or n-array of float): Set the velocity(s) for this joint. Can be a single float or 1-array of
float if the joint only has a single DOF, otherwise it should be an n-array of floats.
normalized (bool): Whether the input is normalized to [-1, 1] (in this case, the values will be
de-normalized first before being executed). Default is False
drive (bool): Whether the joint should be driven naturally via its motor to the velocity being set or
whether it should be instantaneously set. Default is False, corresponding to an
instantaneous setting of the velocity
"""
# Sanity checks -- make sure we're the correct control type if we're setting a target and that we're articulated
assert self.articulated, "Can only set velocity for articulated joints!"
if drive:
assert self._driven, "Can only use set_vel with drive=True if this joint is driven!"
assert self._control_type == ControlType.VELOCITY, \
f"Trying to set joint velocity target for joint {self.name}, but control type is not velocity!"
# Standardize input
vel = np.array([vel]) if self._n_dof == 1 and not isinstance(vel, Iterable) else np.array(vel)
# Potentially de-normalize if the input is normalized
if normalized:
vel = self._denormalize_vel(vel)
# Set the DOF(s) in this joint
if not drive:
self._articulation_view.set_joint_velocities(velocities=vel, joint_indices=self.dof_indices)
# Also set the target
self._articulation_view.set_joint_velocity_targets(velocities=vel, joint_indices=self.dof_indices)
def set_effort(self, effort, normalized=False):
"""
Set the effort of this joint in metric space
Args:
effort (float or n-array of float): Set the effort(s) for this joint. Can be a single float or 1-array of
float if the joint only has a single DOF, otherwise it should be an n-array of floats.
normalized (bool): Whether the input is normalized to [-1, 1] (in this case, the values will be
de-normalized first before being executed). Default is False
"""
# Sanity checks -- make sure that we're articulated (no control type check like position and velocity
# because we can't set effort targets) and that we're driven
assert self.articulated, "Can only set effort for articulated joints!"
# Standardize input
effort = np.array([effort]) if self._n_dof == 1 and not isinstance(effort, Iterable) else np.array(effort)
# Potentially de-normalize if the input is normalized
if normalized:
effort = self._denormalize_effort(effort)
# Set the DOF(s) in this joint
self._articulation_view.set_joint_efforts(efforts=effort, joint_indices=self.dof_indices)
def keep_still(self):
"""
Zero out all velocities for this prim
"""
self.set_vel(np.zeros(self.n_dof))
# If not driven, set torque equal to zero as well
if not self.driven:
self.set_effort(np.zeros(self.n_dof))
def _dump_state(self):
pos, vel, effort = self.get_state() if self.articulated else (np.array([]), np.array([]), np.array([]))
target_pos, target_vel = self.get_target() if self.articulated else (np.array([]), np.array([]))
return dict(
pos=pos,
vel=vel,
effort=effort,
target_pos=target_pos,
target_vel=target_vel,
)
def _load_state(self, state):
if self.articulated:
self.set_pos(state["pos"], drive=False)
self.set_vel(state["vel"], drive=False)
if self.driven:
self.set_effort(state["effort"])
if self._control_type == ControlType.POSITION:
self.set_pos(state["target_pos"], drive=True)
elif self._control_type == ControlType.VELOCITY:
self.set_vel(state["target_vel"], drive=True)
def _serialize(self, state):
return np.concatenate([
state["pos"],
state["vel"],
state["effort"],
state["target_pos"],
state["target_vel"],
]).astype(float)
def _deserialize(self, state):
# We deserialize deterministically by knowing the order of values -- pos, vel, effort
return dict(
pos=state[0:self.n_dof],
vel=state[self.n_dof:2*self.n_dof],
effort=state[2*self.n_dof:3*self.n_dof],
target_pos=state[3*self.n_dof:4*self.n_dof],
target_vel=state[4*self.n_dof:5*self.n_dof],
), 5*self.n_dof
def duplicate(self, prim_path):
# Cannot directly duplicate a joint prim
raise NotImplementedError("Cannot directly duplicate a joint prim!")
StanfordVL/OmniGibson/omnigibson/termination_conditions/timeout.py
from omnigibson.termination_conditions.termination_condition_base import FailureCondition
class Timeout(FailureCondition):
"""
Timeout (failure condition)
    Episode terminates once @max_steps steps have elapsed
Args:
max_steps (int): Maximum number of episode steps before timeout occurs
"""
def __init__(self, max_steps=500):
# Store internal vars
self._max_steps = max_steps
# Run super
super().__init__()
def _step(self, task, env, action):
# Terminate if number of steps passed exceeds threshold
return env.episode_steps >= self._max_steps
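# Usage sketch (hypothetical task/env/action handles):
#
#     timeout = Timeout(max_steps=1000)
#     done, success = timeout.step(task, env, action)   # done once the step budget is exhausted
#
# Because Timeout is a FailureCondition, `success` is always False when it triggers.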
StanfordVL/OmniGibson/omnigibson/termination_conditions/falling.py
from omnigibson.termination_conditions.termination_condition_base import FailureCondition
class Falling(FailureCondition):
"""
Falling (failure condition) used for any navigation-type tasks
    Episode terminates if the robot falls out of the world (i.e.: falls below the floor height by at least
    @fall_height)
Args:
robot_idn (int): robot identifier to evaluate condition with. Default is 0, corresponding to the first
robot added to the scene
        fall_height (float): distance (m) > 0 below the scene's floor height under which the robot is considered
            to be falling out of the world
"""
def __init__(self, robot_idn=0, fall_height=0.03):
# Store internal vars
self._robot_idn = robot_idn
self._fall_height = fall_height
# Run super init
super().__init__()
def _step(self, task, env, action):
# Terminate if the specified robot is falling out of the scene
robot_z = env.scene.robots[self._robot_idn].get_position()[2]
return robot_z < (env.scene.get_floor_height() - self._fall_height)
StanfordVL/OmniGibson/omnigibson/termination_conditions/__init__.py
from omnigibson.termination_conditions.termination_condition_base import REGISTERED_TERMINATION_CONDITIONS, \
REGISTERED_SUCCESS_CONDITIONS, REGISTERED_FAILURE_CONDITIONS, BaseTerminationCondition
from omnigibson.termination_conditions.falling import Falling
from omnigibson.termination_conditions.max_collision import MaxCollision
from omnigibson.termination_conditions.point_goal import PointGoal
from omnigibson.termination_conditions.predicate_goal import PredicateGoal
from omnigibson.termination_conditions.reaching_goal import ReachingGoal
from omnigibson.termination_conditions.timeout import Timeout
StanfordVL/OmniGibson/omnigibson/termination_conditions/reaching_goal.py
from omnigibson.termination_conditions.termination_condition_base import SuccessCondition
import omnigibson.utils.transform_utils as T
class ReachingGoal(SuccessCondition):
"""
ReachingGoal (success condition) used for reaching-type tasks
    Episode terminates if the reaching goal is reached within @distance_tol by the @robot_idn robot's end effector
    Args:
        robot_idn (int): robot identifier to evaluate the reaching goal with. Default is 0, corresponding to the first
robot added to the scene
distance_tol (float): Distance (m) tolerance between goal position and @robot_idn's robot eef position
that is accepted as a success
"""
def __init__(self, robot_idn=0, distance_tol=0.5):
self._robot_idn = robot_idn
self._distance_tol = distance_tol
# Run super init
super().__init__()
def _step(self, task, env, action):
# Terminate if point goal is reached (distance below threshold)
return T.l2_distance(env.scene.robots[self._robot_idn].get_eef_position(), task.goal_pos) < \
self._distance_tol
StanfordVL/OmniGibson/omnigibson/termination_conditions/point_goal.py
from omnigibson.termination_conditions.termination_condition_base import SuccessCondition
import omnigibson.utils.transform_utils as T
class PointGoal(SuccessCondition):
"""
PointGoal (success condition) used for PointNavFixed/RandomTask
Episode terminates if point goal is reached within @distance_tol by the @robot_idn robot's base
Args:
robot_idn (int): robot identifier to evaluate point goal with. Default is 0, corresponding to the first
robot added to the scene
distance_tol (float): Distance (m) tolerance between goal position and @robot_idn's robot base position
that is accepted as a success
distance_axes (str): Which axes to calculate distances when calculating the goal. Any combination of "x",
"y", and "z" is valid (e.g.: "xy" or "xyz" or "y")
"""
def __init__(self, robot_idn=0, distance_tol=0.5, distance_axes="xyz"):
self._robot_idn = robot_idn
self._distance_tol = distance_tol
self._distance_axes = [i for i, axis in enumerate("xyz") if axis in distance_axes]
# Run super init
super().__init__()
def _step(self, task, env, action):
# Make sure task is of type PointNavigation -- we import at runtime to avoid circular imports
from omnigibson.tasks.point_navigation_task import PointNavigationTask
assert isinstance(task, PointNavigationTask), \
f"Cannot use {self.__class__.__name__} with a non-PointNavigationTask task instance!"
# Terminate if point goal is reached (distance below threshold)
return T.l2_distance(task.get_current_pos(env)[self._distance_axes], task.get_goal_pos()[self._distance_axes]) \
< self._distance_tol
StanfordVL/OmniGibson/omnigibson/termination_conditions/termination_condition_base.py
from abc import ABCMeta, abstractmethod
from omnigibson.utils.python_utils import classproperty, Registerable
REGISTERED_TERMINATION_CONDITIONS = dict()
REGISTERED_SUCCESS_CONDITIONS = dict()
REGISTERED_FAILURE_CONDITIONS = dict()
def register_success_condition(cls):
if cls.__name__ not in REGISTERED_SUCCESS_CONDITIONS:
REGISTERED_SUCCESS_CONDITIONS[cls.__name__] = cls
def register_failure_condition(cls):
if cls.__name__ not in REGISTERED_FAILURE_CONDITIONS:
REGISTERED_FAILURE_CONDITIONS[cls.__name__] = cls
class BaseTerminationCondition(Registerable, metaclass=ABCMeta):
"""
Base TerminationCondition class
Condition-specific _step() method is implemented in subclasses
"""
def __init__(self):
# Initialize internal vars that will be filled in at runtime
self._done = None
@abstractmethod
def _step(self, task, env, action):
"""
Step the termination condition and return whether the episode should terminate. Overwritten by subclasses.
Args:
task (BaseTask): Task instance
env (Environment): Environment instance
action (n-array): 1D flattened array of actions executed by all agents in the environment
Returns:
bool: whether environment should terminate or not
"""
raise NotImplementedError()
def step(self, task, env, action):
"""
Step the termination condition and return whether the episode should terminate.
Args:
task (BaseTask): Task instance
env (Environment): Environment instance
action (n-array): 1D flattened array of actions executed by all agents in the environment
Returns:
2-tuple:
- bool: whether environment should terminate or not
- bool: whether a success was reached under this termination condition
"""
# Step internally and store the done state internally as well
self._done = self._step(task=task, env=env, action=action)
# We are successful if done is True AND this is a success condition
success = self._done and self._terminate_is_success
return self._done, success
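    # Contract sketch for the 2-tuple returned above (hypothetical `condition` instance):
    #
    #     done, success = condition.step(task, env, action)
    #     # SuccessCondition subclass: done=True -> success=True
    #     # FailureCondition subclass: done=True -> success=False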
def reset(self, task, env):
"""
Termination condition-specific reset
Args:
task (BaseTask): Task instance
env (Environment): Environment instance
"""
# Reset internal vars
self._done = None
@property
def done(self):
"""
Returns:
bool: Whether this termination condition has triggered or not
"""
assert self._done is not None, "At least one step() must occur before done can be calculated!"
return self._done
@property
def success(self):
"""
Returns:
bool: Whether this termination condition has been evaluated as a success or not
"""
assert self._done is not None, "At least one step() must occur before success can be calculated!"
return self._done and self._terminate_is_success
@classproperty
def _terminate_is_success(cls):
"""
Returns:
bool: Whether this termination condition corresponds to a success
"""
raise NotImplementedError()
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("BaseTerminationCondition")
return classes
@classproperty
def _cls_registry(cls):
# Global registry
global REGISTERED_TERMINATION_CONDITIONS
return REGISTERED_TERMINATION_CONDITIONS
class SuccessCondition(BaseTerminationCondition):
"""
Termination condition corresponding to a success
"""
def __init_subclass__(cls, **kwargs):
        # Register this subclass as a success condition
super().__init_subclass__(**kwargs)
register_success_condition(cls)
@classproperty
def _terminate_is_success(cls):
# Done --> success
return True
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("SuccessCondition")
return classes
class FailureCondition(BaseTerminationCondition):
"""
Termination condition corresponding to a failure
"""
def __init_subclass__(cls, **kwargs):
        # Register this subclass as a failure condition
super().__init_subclass__(**kwargs)
register_failure_condition(cls)
@classproperty
def _terminate_is_success(cls):
# Done --> not success
return False
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("FailureCondition")
return classes
StanfordVL/OmniGibson/omnigibson/termination_conditions/max_collision.py
from omnigibson.termination_conditions.termination_condition_base import FailureCondition
from omnigibson.object_states.contact_bodies import ContactBodies
class MaxCollision(FailureCondition):
"""
MaxCollision (failure condition) used for navigation tasks
    Episode terminates if the robot has collided more than @max_collisions times
Note that we ignore collisions with any floor objects.
Args:
robot_idn (int): robot identifier to evaluate collision checking with. Default is 0, corresponding to the first
robot added to the scene
ignore_self_collisions (bool): Whether to ignore robot self-collisions or not
max_collisions (int): Maximum number of collisions allowed for any robots in the scene before a termination
is triggered
"""
def __init__(self, robot_idn=0, ignore_self_collisions=True, max_collisions=500):
self._robot_idn = robot_idn
self._ignore_self_collisions = ignore_self_collisions
self._max_collisions = max_collisions
self._n_collisions = 0
# Run super init
super().__init__()
def reset(self, task, env):
# Call super first
super().reset(task, env)
# Also reset collision counter
self._n_collisions = 0
def _step(self, task, env, action):
# Terminate if the robot has collided more than self._max_collisions times
robot = env.robots[self._robot_idn]
floors = list(env.scene.object_registry("category", "floors", []))
        ignore_objs = floors + [robot] if self._ignore_self_collisions else floors
in_contact = len(env.robots[self._robot_idn].states[ContactBodies].get_value(ignore_objs=tuple(ignore_objs))) > 0
self._n_collisions += int(in_contact)
return self._n_collisions > self._max_collisions
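# Illustrative usage sketch (condition instances are normally created and stepped by a
# task, not by hand):
#
#     condition = MaxCollision(robot_idn=0, max_collisions=100)
#     done, success = condition.step(task, env, action)  # success is always False here,
#                                                        # since this is a FailureCondition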
| 1,855 | Python | 42.16279 | 121 | 0.68248 |
StanfordVL/OmniGibson/omnigibson/termination_conditions/predicate_goal.py | from bddl.activity import evaluate_goal_conditions
from omnigibson.termination_conditions.termination_condition_base import SuccessCondition
class PredicateGoal(SuccessCondition):
"""
PredicateGoal (success condition) used for BehaviorTask
Episode terminates if all the predicates are satisfied
Args:
goal_fcn (method): function for calculating goal(s). Function signature should be:
goals = goal_fcn()
where @goals is a list of bddl.condition_evaluation.HEAD -- compiled BDDL goal conditions
"""
def __init__(self, goal_fcn):
# Store internal vars
self._goal_fcn = goal_fcn
self._goal_status = None
# Run super
super().__init__()
def reset(self, task, env):
# Run super first
super().reset(task, env)
# Reset status
self._goal_status = {"satisfied": [], "unsatisfied": []}
def _step(self, task, env, action):
# Terminate if all goal conditions are met in the task
done, self._goal_status = evaluate_goal_conditions(self._goal_fcn())
return done
@property
def goal_status(self):
"""
Returns:
dict: Current goal status for the active predicate(s), mapping "satisfied" and "unsatisfied" to a list
of the predicates matching either of those conditions
"""
return self._goal_status
| 1,417 | Python | 29.826086 | 114 | 0.631616 |
StanfordVL/OmniGibson/omnigibson/object_states/sliceable.py | import numpy as np
from omnigibson.object_states.object_state_base import BaseObjectRequirement
class SliceableRequirement(BaseObjectRequirement):
"""
Class for sanity checking objects that request the "sliceable" ability
"""
@classmethod
def is_compatible(cls, obj, **kwargs):
# Avoid circular imports
from omnigibson.objects.dataset_object import DatasetObject
# Make sure object is dataset object
if not isinstance(obj, DatasetObject):
return False, f"Only compatible with DatasetObject, but {obj} is of type {type(obj)}"
# Check to make sure object parts are properly annotated in this object's metadata
if not obj.metadata["object_parts"]:
return False, f"Missing required metadata 'object_parts'."
return True, None
@classmethod
def is_compatible_asset(cls, prim, **kwargs):
# Check to make sure object parts are properly annotated in this object's metadata
metadata = prim.GetCustomData().get("metadata", dict())
if not metadata.get("object_parts", None):
return False, f"Missing required metadata 'object_parts'."
return True, None
| 1,198 | Python | 37.677418 | 97 | 0.683639 |
StanfordVL/OmniGibson/omnigibson/object_states/adjacency.py | from collections import namedtuple
import numpy as np
import omnigibson as og
from omnigibson.macros import create_module_macros
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.object_state_base import AbsoluteObjectState
from omnigibson.utils.sampling_utils import raytest_batch, raytest
from omnigibson.utils.constants import PrimType
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.MAX_DISTANCE_VERTICAL = 5.0
m.MAX_DISTANCE_HORIZONTAL = 5.0
# How many 2-D bases to try during horizontal adjacency check. When 1, only the standard axes will be considered.
# When 2, standard axes + 45 degree rotated will be considered. The tried axes will be equally spaced. The higher
# this number, the lower the possibility of false negatives in Inside and NextTo.
m.HORIZONTAL_AXIS_COUNT = 5
AxisAdjacencyList = namedtuple("AxisAdjacencyList", ("positive_neighbors", "negative_neighbors"))
def flatten_planes(planes):
# Converts the body-by-plane logic to a flat body-by-axis setup,
# for when we don't care about the axes' relationship with each other.
return (axis for axes_by_plane in planes for axis in axes_by_plane)
def get_equidistant_coordinate_planes(n_planes):
"""Given a number, sample that many equally spaced coordinate planes.
The samples will cover all 360 degrees (although rotational symmetry
is assumed, e.g. if you take into account the axis index and the
    positive/negative directions, only 1/4 of the possible coordinate planes
    (spanning 1 quadrant, np.pi / 2.0) will be sampled: the ones where the first axis' positive direction
is in the first quadrant).
Args:
n_planes (int): number of planes to sample
Returns:
3D-array: (n_planes, 2, 3) array where the first dimension
is the sampled plane index, the second dimension is the axis index
(0/1), and the third dimension is the 3-D world-coordinate vector
corresponding to the axis.
"""
# Compute the positive directions of the 1st axis of each plane.
first_axis_angles = np.linspace(0, np.pi / 2, n_planes)
first_axes = np.stack(
[np.cos(first_axis_angles), np.sin(first_axis_angles), np.zeros_like(first_axis_angles)], axis=1
)
# Compute the positive directions of the 2nd axes. These axes are
# orthogonal to both their corresponding first axes and to the Z axis.
second_axes = np.cross([0, 0, 1], first_axes)
# Return the axes in the shape (n_planes, 2, 3)
    return np.stack([first_axes, second_axes], axis=1)
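# Example (illustrative, pure numpy -- no simulator needed): sampling 3 planes yields
# one pair of orthogonal unit axes per plane, all lying in the XY plane:
#
#     planes = get_equidistant_coordinate_planes(3)
#     assert planes.shape == (3, 2, 3)
#     assert np.allclose(np.linalg.norm(planes, axis=-1), 1.0)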
def compute_adjacencies(obj, axes, max_distance, use_aabb_center=True):
"""
Given an object and a list of axes, find the adjacent objects in the axes'
positive and negative directions.
If @obj is of PrimType.CLOTH, then adjacent objects are found with respect to the
@obj's centroid particle position
Args:
obj (StatefulObject): The object to check adjacencies of.
axes (2D-array): (n_axes, 3) array defining the axes to check in.
            Note that each axis will be checked in both its positive and negative direction.
        max_distance (float): Maximum distance from @obj along each direction within which
            adjacent objects will be detected.
use_aabb_center (bool): If True and @obj is not of PrimType.CLOTH, will shoot rays from @obj's aabb center.
Otherwise, will dynamically compute starting points based on the requested @axes
Returns:
list of AxisAdjacencyList: List of length len(axes) containing the adjacencies.
"""
# Get vectors for each of the axes' directions.
    # The ordering is axis1+, axis1-, axis2+, axis2-, etc.
directions = np.empty((len(axes) * 2, 3))
directions[0::2] = axes
directions[1::2] = -axes
# Prepare this object's info for ray casting.
if obj.prim_type == PrimType.CLOTH:
ray_starts = np.tile(obj.root_link.centroid_particle_position, (len(directions), 1))
else:
aabb_lower, aabb_higher = obj.states[AABB].get_value()
object_position = (aabb_lower + aabb_higher) / 2.0
ray_starts = np.tile(object_position, (len(directions), 1))
if not use_aabb_center:
            # Dynamically compute start points by iterating over the directions and pre-shooting
            # rays from which we then shoot back
# For a given direction, we go in the negative (opposite) direction to the edge of the object extent,
# and then proceed with an additional offset before shooting rays
shooting_offset = 0.01
direction_half_extent = directions * (aabb_higher - aabb_lower).reshape(1, 3) / 2.0
pre_start = object_position.reshape(1, 3) + (direction_half_extent + directions * shooting_offset)
pre_end = object_position.reshape(1, 3) - direction_half_extent
idx = 0
obj_link_paths = {link.prim_path for link in obj.links.values()}
def _ray_callback(hit):
# Check for self-hit -- if so, record the position and terminate early
should_continue = True
if hit.rigid_body in obj_link_paths:
ray_starts[idx] = np.array(hit.position)
should_continue = False
return should_continue
for ray_start, ray_end in zip(pre_start, pre_end):
raytest(
start_point=ray_start,
end_point=ray_end,
only_closest=False,
callback=_ray_callback,
)
idx += 1
# Prepare the rays to cast.
ray_endpoints = ray_starts + (directions * max_distance)
# Cast time.
prim_paths = obj.link_prim_paths
ray_results = raytest_batch(
ray_starts,
ray_endpoints,
only_closest=False,
ignore_bodies=prim_paths,
ignore_collisions=prim_paths
)
# Add the results to the appropriate lists
# For now, we keep our result in the dimensionality of (direction, hit_object_order).
# We convert the hit link into unique objects encountered
objs_by_direction = []
for results in ray_results:
unique_objs = set()
        for result in results:
            # If the inferred hit object is not None, add it to our set
            hit_obj_prim_path = "/".join(result["rigidBody"].split("/")[:-1])
            hit_obj = og.sim.scene.object_registry("prim_path", hit_obj_prim_path, None)
            if hit_obj is not None:
                unique_objs.add(hit_obj)
objs_by_direction.append(unique_objs)
# Reshape so that these have the following indices:
# (axis_idx, direction-one-or-zero, hit_idx)
objs_by_axis = [
AxisAdjacencyList(positive_neighbors, negative_neighbors)
for positive_neighbors, negative_neighbors in zip(objs_by_direction[::2], objs_by_direction[1::2])
]
return objs_by_axis
class VerticalAdjacency(AbsoluteObjectState):
"""
State representing the object's vertical adjacencies.
Value is a AxisAdjacencyList object.
"""
def _get_value(self):
        # Call the adjacency computation with the Z axis.
bodies_by_axis = compute_adjacencies(self.obj, np.array([[0, 0, 1]]), m.MAX_DISTANCE_VERTICAL, use_aabb_center=False)
# Return the adjacencies from the only axis we passed in.
return bodies_by_axis[0]
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(AABB)
return deps
# Nothing needs to be done to save/load adjacency since it will happen due to pose caching.
class HorizontalAdjacency(AbsoluteObjectState):
"""
State representing the object's horizontal adjacencies in a preset number of directions.
The HorizontalAdjacency state returns adjacency lists for equally spaced coordinate planes.
Each plane consists of 2 orthogonal axes, and adjacencies are checked for both the positive
and negative directions of each axis.
The value of the state is List[List[AxisAdjacencyList]], where the list dimensions are
m.HORIZONTAL_AXIS_COUNT and 2. The first index is used to choose between the different planes,
the second index to choose between the orthogonal axes of that plane. Given a plane/axis combo,
the item in the list is a AxisAdjacencyList containing adjacencies in both directions of the
axis.
If the idea of orthogonal bases is not relevant (and your use case simply requires checking
adjacencies in each direction), the flatten_planes() function can be used on the state value
to reduce the output to List[AxisAdjacencyList], a list of adjacency lists for all
2 * m.HORIZONTAL_AXIS_COUNT directions.
"""
def _get_value(self):
coordinate_planes = get_equidistant_coordinate_planes(m.HORIZONTAL_AXIS_COUNT)
# Flatten the axis dimension and input into compute_adjacencies.
bodies_by_axis = compute_adjacencies(self.obj, coordinate_planes.reshape(-1, 3), m.MAX_DISTANCE_HORIZONTAL, use_aabb_center=True)
# Now reshape the bodies_by_axis to group by coordinate planes.
bodies_by_plane = list(zip(bodies_by_axis[::2], bodies_by_axis[1::2]))
# Return the adjacencies.
return bodies_by_plane
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(AABB)
return deps
# Nothing needs to be done to save/load adjacency since it will happen due to pose caching.
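# Illustrative usage sketch (assumes StatefulObjects `book` and `shelf` in a running
# simulation). States such as Inside and NextTo consume these adjacency lists:
#
#     below = book.states[VerticalAdjacency].get_value().negative_neighbors
#     planes = book.states[HorizontalAdjacency].get_value()
#     for axis in flatten_planes(planes):  # 2 * m.HORIZONTAL_AXIS_COUNT axes total
#         if shelf in axis.positive_neighbors and shelf in axis.negative_neighbors:
#             pass  # shelf flanks the book on both sides of this axis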
| 9,467 | Python | 41.267857 | 137 | 0.672652 |
StanfordVL/OmniGibson/omnigibson/object_states/frozen.py | import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin
from omnigibson.object_states.temperature import Temperature
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEFAULT_FREEZE_TEMPERATURE = 0.0
# When an object is set as frozen, we will sample it between
# the freeze temperature and these offsets.
m.FROZEN_SAMPLING_RANGE_MAX = -10.0
m.FROZEN_SAMPLING_RANGE_MIN = -50.0
class Frozen(AbsoluteObjectState, BooleanStateMixin):
def __init__(self, obj, freeze_temperature=m.DEFAULT_FREEZE_TEMPERATURE):
super(Frozen, self).__init__(obj)
self.freeze_temperature = freeze_temperature
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(Temperature)
return deps
def _set_value(self, new_value):
if new_value:
temperature = np.random.uniform(
self.freeze_temperature + m.FROZEN_SAMPLING_RANGE_MIN,
self.freeze_temperature + m.FROZEN_SAMPLING_RANGE_MAX,
)
return self.obj.states[Temperature].set_value(temperature)
else:
# We'll set the temperature just one degree above freezing. Hopefully the object
# isn't in a fridge.
return self.obj.states[Temperature].set_value(self.freeze_temperature + 1.0)
def _get_value(self):
return self.obj.states[Temperature].get_value() <= self.freeze_temperature
@staticmethod
def get_texture_change_params():
# Increase all channels by 0.3 (to make it white)
albedo_add = 0.3
# No final scaling
diffuse_tint = (1.0, 1.0, 1.0)
return albedo_add, diffuse_tint
# Nothing needs to be done to save/load Frozen since it will happen due to temperature caching.
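# Illustrative usage sketch (assumes a StatefulObject `obj` with the "freezable" ability
# in a running simulation). Frozen is purely a view over the Temperature state:
#
#     obj.states[Frozen].set_value(True)      # samples Temperature in [freeze - 50, freeze - 10]
#     obj.states[Temperature].set_value(5.0)  # warm it back above the freeze point
#     assert not obj.states[Frozen].get_value()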
| 1,910 | Python | 34.388888 | 99 | 0.676963 |
StanfordVL/OmniGibson/omnigibson/object_states/burnt.py | from omnigibson.macros import create_module_macros
from omnigibson.object_states.max_temperature import MaxTemperature
from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEFAULT_BURN_TEMPERATURE = 200
class Burnt(AbsoluteObjectState, BooleanStateMixin):
def __init__(self, obj, burn_temperature=m.DEFAULT_BURN_TEMPERATURE):
super(Burnt, self).__init__(obj)
self.burn_temperature = burn_temperature
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(MaxTemperature)
return deps
def _set_value(self, new_value):
current_max_temp = self.obj.states[MaxTemperature].get_value()
if new_value:
# Set at exactly the burnt temperature (or higher if we have it in history)
desired_max_temp = max(current_max_temp, self.burn_temperature)
else:
# Set at exactly one below burnt temperature (or lower if in history).
desired_max_temp = min(current_max_temp, self.burn_temperature - 1.0)
return self.obj.states[MaxTemperature].set_value(desired_max_temp)
def _get_value(self):
return self.obj.states[MaxTemperature].get_value() >= self.burn_temperature
@staticmethod
def get_texture_change_params():
# Decrease all channels by 0.3 (to make it black)
albedo_add = -0.3
# No final scaling
diffuse_tint = (1.0, 1.0, 1.0)
return albedo_add, diffuse_tint
# Nothing needs to be done to save/load Burnt since it will happen due to
# MaxTemperature caching.
| 1,709 | Python | 35.382978 | 93 | 0.67993 |
StanfordVL/OmniGibson/omnigibson/object_states/cooked.py | from omnigibson.macros import create_module_macros
from omnigibson.object_states.max_temperature import MaxTemperature
from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEFAULT_COOK_TEMPERATURE = 70
class Cooked(AbsoluteObjectState, BooleanStateMixin):
def __init__(self, obj, cook_temperature=m.DEFAULT_COOK_TEMPERATURE):
super(Cooked, self).__init__(obj)
self.cook_temperature = cook_temperature
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(MaxTemperature)
return deps
def _set_value(self, new_value):
current_max_temp = self.obj.states[MaxTemperature].get_value()
if new_value:
# Set at exactly the cook temperature (or higher if we have it in history)
desired_max_temp = max(current_max_temp, self.cook_temperature)
else:
# Set at exactly one below cook temperature (or lower if in history).
desired_max_temp = min(current_max_temp, self.cook_temperature - 1.0)
return self.obj.states[MaxTemperature].set_value(desired_max_temp)
def _get_value(self):
return self.obj.states[MaxTemperature].get_value() >= self.cook_temperature
@staticmethod
def get_texture_change_params():
# Increase all channels by 0.1
albedo_add = 0.1
# Then scale up "brown" color and scale down others
diffuse_tint = (1.5, 0.75, 0.25)
return albedo_add, diffuse_tint
# Nothing needs to be done to save/load Cooked since it will happen due to
# MaxTemperature caching.
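# Illustrative usage sketch (assumes a "cookable" StatefulObject `steak` in a running
# simulation). Cooked keys off MaxTemperature, so it latches once the threshold is hit:
#
#     steak.states[Temperature].set_value(80.0)  # raises MaxTemperature to 80
#     steak.states[Temperature].set_value(20.0)  # cooling back down does not un-cook
#     assert steak.states[Cooked].get_value()    # still True, since 80 >= 70
#     steak.states[Cooked].set_value(False)      # explicitly rewinds MaxTemperature to 69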
| 1,723 | Python | 35.68085 | 93 | 0.68195 |
StanfordVL/OmniGibson/omnigibson/object_states/on_fire.py | from omnigibson.macros import create_module_macros
from omnigibson.object_states.temperature import Temperature
from omnigibson.object_states.heat_source_or_sink import HeatSourceOrSink
# Create settings for this module
m = create_module_macros(module_path=__file__)
# TODO: Delete default values for this and make them required.
m.DEFAULT_IGNITION_TEMPERATURE = 250
m.DEFAULT_FIRE_TEMPERATURE = 1000
m.DEFAULT_HEATING_RATE = 0.04
m.DEFAULT_DISTANCE_THRESHOLD = 0.2
class OnFire(HeatSourceOrSink):
"""
This state indicates the heat source is currently on fire.
Once the temperature is above ignition_temperature, OnFire will become True and stay True.
Its temperature will further raise to fire_temperature, and start heating other objects around it.
It may include a heatsource_link annotation (e.g. candle wick), in which case the fire visualization will be placed
under that meta link. Otherwise (e.g. charcoal), the fire visualization will be placed under the root link.
"""
def __init__(
self,
obj,
ignition_temperature=m.DEFAULT_IGNITION_TEMPERATURE,
fire_temperature=m.DEFAULT_FIRE_TEMPERATURE,
heating_rate=m.DEFAULT_HEATING_RATE,
distance_threshold=m.DEFAULT_DISTANCE_THRESHOLD,
):
"""
Args:
obj (StatefulObject): The object with the heat source ability.
ignition_temperature (float): The temperature threshold above which on fire will become true.
fire_temperature (float): The temperature of the fire (heat source) once on fire is true.
            heating_rate (float): Fraction in [0, 1] of the temperature difference with the
                heat source temperature that should be received every step, per second.
distance_threshold (float): The distance threshold which an object needs
to be closer than in order to receive heat from this heat source.
"""
assert fire_temperature > ignition_temperature, "fire temperature should be higher than ignition temperature."
super().__init__(
obj,
temperature=fire_temperature,
heating_rate=heating_rate,
distance_threshold=distance_threshold,
requires_toggled_on=False,
requires_closed=False,
requires_inside=False,
)
self.ignition_temperature = ignition_temperature
@classmethod
def requires_metalink(cls, **kwargs):
# Does not require metalink to be specified
return False
@property
def _default_link(self):
# Fallback to root link
return self.obj.root_link
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(Temperature)
return deps
def _update(self):
# Call super first
super()._update()
# If it's on fire, maintain the fire temperature
if self.get_value():
self.obj.states[Temperature].set_value(self.temperature)
def _get_value(self):
return self.obj.states[Temperature].get_value() >= self.ignition_temperature
def _set_value(self, new_value):
if new_value:
return self.obj.states[Temperature].set_value(self.temperature)
else:
# We'll set the temperature just one degree below ignition.
return self.obj.states[Temperature].set_value(self.ignition_temperature - 1)
# Nothing needs to be done to save/load OnFire
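# Illustrative temperature timeline (default parameters; assumes a "flammable"
# StatefulObject `log` in a running simulation):
#
#     log.states[Temperature].set_value(300.0)  # above ignition (250) -> OnFire is True
#     og.sim.step()                             # _update() pins Temperature at 1000
#     log.states[OnFire].set_value(False)       # resets Temperature to 249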
| 3,508 | Python | 37.141304 | 119 | 0.670468 |
StanfordVL/OmniGibson/omnigibson/object_states/heated.py | import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin
from omnigibson.object_states.temperature import Temperature
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEFAULT_HEAT_TEMPERATURE = 40
# When an object is set as heated, we will sample it between
# the heat temperature and these offsets.
m.HEATED_SAMPLING_RANGE_MIN = 10.0
m.HEATED_SAMPLING_RANGE_MAX = 20.0
class Heated(AbsoluteObjectState, BooleanStateMixin):
def __init__(self, obj, heat_temperature=m.DEFAULT_HEAT_TEMPERATURE):
super(Heated, self).__init__(obj)
self.heat_temperature = heat_temperature
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(Temperature)
return deps
def _set_value(self, new_value):
if new_value:
temperature = np.random.uniform(
self.heat_temperature + m.HEATED_SAMPLING_RANGE_MIN,
self.heat_temperature + m.HEATED_SAMPLING_RANGE_MAX,
)
return self.obj.states[Temperature].set_value(temperature)
else:
# We'll set the temperature just one degree below heating.
return self.obj.states[Temperature].set_value(self.heat_temperature - 1.0)
def _get_value(self):
return self.obj.states[Temperature].get_value() >= self.heat_temperature
# Nothing needs to be done to save/load Heated since it will happen due to temperature caching.
| 1,587 | Python | 34.288888 | 99 | 0.693132 |
StanfordVL/OmniGibson/omnigibson/object_states/particle_source_or_sink.py | import numpy as np
import omnigibson as og
from omnigibson.macros import create_module_macros
from omnigibson.object_states.particle_modifier import ParticleApplier, ParticleRemover
from omnigibson.systems.system_base import is_physical_particle_system
from omnigibson.utils.constants import ParticleModifyMethod
from omnigibson.utils.python_utils import classproperty
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Metalink naming prefixes
m.SOURCE_LINK_PREFIX = "particlesource"
m.SINK_LINK_PREFIX = "particlesink"
# Default radius and height
m.DEFAULT_SOURCE_RADIUS = 0.0125
m.DEFAULT_SOURCE_HEIGHT = 0.05
m.DEFAULT_SINK_RADIUS = 0.05
m.DEFAULT_SINK_HEIGHT = 0.05
# Maximum number of particles that can be sourced / sunk per step
m.MAX_SOURCE_PARTICLES_PER_STEP = 1000
m.MAX_SINK_PARTICLES_PER_STEP = 1000
# How many steps between sinking particles
m.N_STEPS_PER_SINK = 5
# Upper limit to number of particles that can be sourced / sunk globally by a single object
m.SOURCE_PARTICLES_LIMIT = 1e6
m.SINK_PARTICLES_LIMIT = 1e6
class ParticleSource(ParticleApplier):
"""
ParticleApplier where physical particles are spawned continuously in a cylindrical fashion from the
metalink pose.
Args:
obj (StatefulObject): Object to which this state will be applied
conditions (dict): Dictionary mapping the names of ParticleSystem (str) to None or list of 2-tuples, where
None represents "never", empty list represents "always", or each 2-tuple is interpreted as a single condition in the form of
(ParticleModifyCondition, value) necessary in order for this particle modifier to be
able to modify particles belonging to @ParticleSystem. Expected types of val are as follows:
SATURATED: string name of the desired system that this modifier must be saturated by, e.g., "water"
TOGGLEDON: boolean T/F; whether this modifier must be toggled on or not
GRAVITY: boolean T/F; whether this modifier must be pointing downwards (T) or upwards (F)
FUNCTION: a function, whose signature is as follows:
def condition(obj) --> bool
Where @obj is the specific object that this ParticleModifier state belongs to.
For a given ParticleSystem, the list of 2-tuples will be converted into a list of function calls of the
form above -- if all of its conditions evaluate to True and particles are detected within
this particle modifier area, then we potentially modify those particles
source_radius (None or float): Radius of the cylinder representing particles' spawning volume, if specified.
If both @source_radius and @source_height are None, values will be inferred directly from the underlying
object asset, otherwise, it will be set to a default value
source_height (None or float): Height of the cylinder representing particles' spawning volume, if specified.
If both @source_radius and @source_height are None, values will be inferred directly from the underlying
object asset, otherwise, it will be set to a default value
initial_speed (float): The initial speed for generated particles. Note that the
direction of the velocity is inferred from the particle sampling process
"""
def __init__(
self,
obj,
conditions,
source_radius=None,
source_height=None,
initial_speed=0.0,
):
# Initialize variables that will be filled in at runtime
self._n_steps_per_modification = None
# Define projection mesh params based on input kwargs
if source_radius is not None or source_height is not None:
source_radius = m.DEFAULT_SOURCE_RADIUS if source_radius is None else source_radius
source_height = m.DEFAULT_SOURCE_HEIGHT if source_height is None else source_height
projection_mesh_params = {
"type": "Cylinder",
"extents": [source_radius * 2, source_radius * 2, source_height],
}
else:
projection_mesh_params = None
# Convert inputs into arguments to pass to particle applier class
super().__init__(
obj=obj,
conditions=conditions,
method=ParticleModifyMethod.PROJECTION,
projection_mesh_params=projection_mesh_params,
sample_with_raycast=False,
initial_speed=initial_speed,
)
def _initialize(self):
# Run super first
super()._initialize()
# Calculate how many steps we need in between particle cluster spawnings
# This is equivalent to the time it takes for a generated particle to travel @source_height distance
# Note that object state steps are discretized by og.sim.render_step
# Note: t derived from quadratic formula: height = 0.5 g t^2 + v0 t
# Note: height must be considered in the world frame, so we convert the distance from local into world frame
# Extents are in local frame, so we need to convert to world frame using link scale
distance = self.link.scale[2] * self._projection_mesh_params["extents"][2]
t = (-self._initial_speed + np.sqrt(self._initial_speed ** 2 + 2 * og.sim.gravity * distance)) / og.sim.gravity
self._n_steps_per_modification = np.ceil(1 + t / og.sim.get_rendering_dt()).astype(int)
def _get_max_particles_limit_per_step(self, system):
# Check the system
assert is_physical_particle_system(system_name=system.name), \
"ParticleSource only supports PhysicalParticleSystem"
return m.MAX_SOURCE_PARTICLES_PER_STEP
@classmethod
def requires_metalink(cls, **kwargs):
# Always requires metalink since projection is used
return True
@property
def visualize(self):
# Don't visualize this source
return False
@classproperty
def metalink_prefix(cls):
return m.SOURCE_LINK_PREFIX
@property
def n_steps_per_modification(self):
return self._n_steps_per_modification
@property
def physical_particle_modification_limit(self):
return m.SOURCE_PARTICLES_LIMIT
class ParticleSink(ParticleRemover):
"""
ParticleRemover where physical particles are removed continuously within a cylindrical volume located
at the metalink pose.
Args:
obj (StatefulObject): Object to which this state will be applied
conditions (dict): Dictionary mapping the names of ParticleSystem (str) to None or list of 2-tuples, where
None represents "never", empty list represents "always", or each 2-tuple is interpreted as a single condition in the form of
(ParticleModifyCondition, value) necessary in order for this particle modifier to be
able to modify particles belonging to @ParticleSystem. Expected types of val are as follows:
SATURATED: string name of the desired system that this modifier must be saturated by, e.g., "water"
TOGGLEDON: boolean T/F; whether this modifier must be toggled on or not
GRAVITY: boolean T/F; whether this modifier must be pointing downwards (T) or upwards (F)
FUNCTION: a function, whose signature is as follows:
def condition(obj) --> bool
Where @obj is the specific object that this ParticleModifier state belongs to.
For a given ParticleSystem, the list of 2-tuples will be converted into a list of function calls of the
form above -- if all of its conditions evaluate to True and particles are detected within
this particle modifier area, then we potentially modify those particles
sink_radius (None or float): Radius of the cylinder representing particles' sinking volume, if specified.
If both @sink_radius and @sink_height are None, values will be inferred directly from the underlying
object asset, otherwise, it will be set to a default value
sink_height (None or float): Height of the cylinder representing particles' sinking volume, if specified.
If both @sink_radius and @sink_height are None, values will be inferred directly from the underlying
object asset, otherwise, it will be set to a default value
default_fluid_conditions (None or list): Condition(s) needed to remove any fluid particles not explicitly
specified in @conditions. If None, then it is assumed that no other physical particles can be removed. If
not None, should be in same format as an entry in @conditions, i.e.: list of (ParticleModifyCondition, val)
2-tuples
default_non_fluid_conditions (None or list): Condition(s) needed to remove any physical (excluding fluid)
particles not explicitly specified in @conditions. If None, then it is assumed that no other physical
particles can be removed. If not None, should be in same format as an entry in @conditions, i.e.: list of
(ParticleModifyCondition, val) 2-tuples
default_visual_conditions (None or list): Condition(s) needed to remove any visual particles not explicitly
specified in @conditions. If None, then it is assumed that no other visual particles can be removed. If
not None, should be in same format as an entry in @conditions, i.e.: list of (ParticleModifyCondition, val)
2-tuples
"""
def __init__(
self,
obj,
conditions,
sink_radius=None,
sink_height=None,
default_fluid_conditions=None,
default_non_fluid_conditions=None,
default_visual_conditions=None,
):
# Initialize variables that will be filled in at runtime
self._n_steps_per_modification = None
# Define projection mesh params based on input kwargs
if sink_radius is not None or sink_height is not None:
sink_radius = m.DEFAULT_SINK_RADIUS if sink_radius is None else sink_radius
sink_height = m.DEFAULT_SINK_HEIGHT if sink_height is None else sink_height
projection_mesh_params = {
"type": "Cylinder",
"extents": [sink_radius * 2, sink_radius * 2, sink_height],
}
else:
projection_mesh_params = None
# Convert inputs into arguments to pass to particle remover class
super().__init__(
obj=obj,
conditions=conditions,
method=ParticleModifyMethod.PROJECTION,
projection_mesh_params=projection_mesh_params,
default_fluid_conditions=default_fluid_conditions,
default_non_fluid_conditions=default_non_fluid_conditions,
default_visual_conditions=default_visual_conditions,
)
def _get_max_particles_limit_per_step(self, system):
# Check the system
assert is_physical_particle_system(system_name=system.name), \
"ParticleSink only supports PhysicalParticleSystem"
        return m.MAX_SINK_PARTICLES_PER_STEP
@property
def requires_overlap(self):
# Not required, always sink particles
return False
@classmethod
def requires_metalink(cls, **kwargs):
# Always requires metalink since projection is used
return True
@classproperty
def metalink_prefix(cls):
return m.SINK_LINK_PREFIX
@property
def n_steps_per_modification(self):
return m.N_STEPS_PER_SINK
@property
def physical_particle_modification_limit(self):
return m.SINK_PARTICLES_LIMIT
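# Illustrative configuration sketch (hypothetical object and conditions; in practice this
# wiring comes from the object's ability metadata, and the object must carry a
# "particlesource" metalink):
#
#     from omnigibson.utils.constants import ParticleModifyCondition
#
#     faucet = ParticleSource(
#         obj=sink_obj,
#         conditions={"water": [(ParticleModifyCondition.TOGGLEDON, True)]},
#         source_radius=0.01,
#         source_height=0.05,
#     )  # spawns water particles from the metalink pose while the sink is toggled on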
| 11,733 | Python | 46.314516 | 136 | 0.67911 |
StanfordVL/OmniGibson/omnigibson/object_states/under.py | import omnigibson as og
from omnigibson.object_states.adjacency import VerticalAdjacency
from omnigibson.object_states.kinematics_mixin import KinematicsMixin
from omnigibson.object_states.object_state_base import BooleanStateMixin, RelativeObjectState
from omnigibson.utils.object_state_utils import sample_kinematics
from omnigibson.utils.object_state_utils import m as os_m
from omnigibson.utils.constants import PrimType
class Under(RelativeObjectState, KinematicsMixin, BooleanStateMixin):
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(VerticalAdjacency)
return deps
def _set_value(self, other, new_value, reset_before_sampling=False):
if not new_value:
raise NotImplementedError("Under does not support set_value(False)")
if other.prim_type == PrimType.CLOTH:
raise ValueError("Cannot set an object under a cloth object.")
state = og.sim.dump_state(serialized=False)
# Possibly reset this object if requested
if reset_before_sampling:
self.obj.reset()
for _ in range(os_m.DEFAULT_HIGH_LEVEL_SAMPLING_ATTEMPTS):
if sample_kinematics("under", self.obj, other) and self.get_value(other):
return True
else:
og.sim.load_state(state, serialized=False)
return False
def _get_value(self, other):
if other.prim_type == PrimType.CLOTH:
raise ValueError("Cannot detect if an object is under a cloth object.")
adjacency = self.obj.states[VerticalAdjacency].get_value()
other_adjacency = other.states[VerticalAdjacency].get_value()
return other not in adjacency.negative_neighbors and other in adjacency.positive_neighbors and self.obj not in other_adjacency.positive_neighbors
| 1,850 | Python | 40.133332 | 153 | 0.704324 |
StanfordVL/OmniGibson/omnigibson/object_states/contact_particles.py | import omnigibson as og
from omnigibson.macros import create_module_macros
from omnigibson.object_states.object_state_base import RelativeObjectState
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.kinematics_mixin import KinematicsMixin
from omnigibson.systems.system_base import PhysicalParticleSystem, is_physical_particle_system
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Distance tolerance for detecting contact
m.CONTACT_AABB_TOLERANCE = 2.5e-2
m.CONTACT_TOLERANCE = 5e-3
class ContactParticles(RelativeObjectState, KinematicsMixin):
"""
Object state that handles contact checking between rigid bodies and individual particles.
"""
def _get_value(self, system, link=None):
"""
Args:
system (PhysicalParticleSystem): System whose contact particle info should be aggregated
link (None or RigidPrim): If specified, the specific link to check for particles' contact
Returns:
set of int: Set of particle IDs in contact
"""
# Make sure system is valid
assert is_physical_particle_system(system_name=system.name), \
"Can only get ContactParticles for a PhysicalParticleSystem!"
# Variables to update mid-iteration
contacts = set()
idx = 0
# Define callback function to use for omni's overlap_sphere() call
def report_hit(hit):
nonlocal link, idx
link_name = None if link is None else link.prim_path.split("/")[-1]
base, body = "/".join(hit.rigid_body.split("/")[:-1]), hit.rigid_body.split("/")[-1]
continue_traversal = True
# If no links are specified, then we assume checking contact with any link owned by this object
# Otherwise, we check for exact match of link name
if (link is None and base == self.obj.prim_path) or (link is not None and link_name == body):
# Add to contacts and terminate early
contacts.add(idx)
continue_traversal = False
return continue_traversal
# Grab the relaxed AABB of this object or its link for coarse filtering of particles to ignore checking
lower, upper = self.obj.states[AABB].get_value() if link is None else link.visual_aabb
# Add margin for filtering inbound
lower = lower - (system.particle_radius + m.CONTACT_AABB_TOLERANCE)
upper = upper + (system.particle_radius + m.CONTACT_AABB_TOLERANCE)
# Iterate over all particles and aggregate contacts
positions = system.get_particles_position_orientation()[0]
# Only check positions that are within the relaxed AABB of this object
inbound_idxs = ((lower < positions) & (positions < upper)).all(axis=-1).nonzero()[0]
dist = system.particle_contact_radius + m.CONTACT_TOLERANCE
for idx in inbound_idxs:
og.sim.psqi.overlap_sphere(dist, positions[idx], report_hit, False)
# Return contacts
return contacts
def _set_value(self, system, new_value):
raise NotImplementedError("ContactParticles state currently does not support setting.")
def _cache_is_valid(self, get_value_args):
# Cache is never valid since particles always change poses
return False
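# Illustrative usage sketch (assumes a running simulation with an active "water"
# PhysicalParticleSystem and a StatefulObject `bowl`):
#
#     from omnigibson.systems import get_system  # assumed import path
#
#     water = get_system("water")
#     particle_ids = bowl.states[ContactParticles].get_value(water)  # set of int IDs
#     n_touching = len(particle_ids)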
| 3,360 | Python | 43.813333 | 111 | 0.673214 |
StanfordVL/OmniGibson/omnigibson/object_states/slicer_active.py | import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.object_states.object_state_base import BooleanStateMixin
from omnigibson.object_states.contact_bodies import ContactBodies
from omnigibson.object_states.tensorized_value_state import TensorizedValueState
import omnigibson as og
from omnigibson.utils.python_utils import classproperty
from omnigibson.utils.usd_utils import RigidContactAPI
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.REACTIVATION_DELAY = 0.5 # number of seconds to wait before reactivating the slicer
class SlicerActive(TensorizedValueState, BooleanStateMixin):
# int: Keep track of how many steps each object is waiting for
STEPS_TO_WAIT = None
# np.ndarray: Keep track of the current delay for a given slicer
DELAY_COUNTER = None
# np.ndarray: Keep track of whether we touched a sliceable in the previous timestep
PREVIOUSLY_TOUCHING = None
# list of list of str: Body prim paths belonging to each slicer obj
SLICER_LINK_PATHS = None
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(ContactBodies)
return deps
@classmethod
def global_initialize(cls):
# Call super first
super().global_initialize()
# Initialize other global variables
cls.STEPS_TO_WAIT = max(1, int(np.ceil(m.REACTIVATION_DELAY / og.sim.get_rendering_dt())))
cls.DELAY_COUNTER = np.array([], dtype=int)
cls.PREVIOUSLY_TOUCHING = np.array([], dtype=bool)
cls.SLICER_LINK_PATHS = []
@classmethod
def global_clear(cls):
# Call super first
super().global_clear()
# Clear other internal state
cls.STEPS_TO_WAIT = None
cls.DELAY_COUNTER = None
cls.PREVIOUSLY_TOUCHING = None
cls.SLICER_LINK_PATHS = None
@classmethod
def _add_obj(cls, obj):
# Call super first
super()._add_obj(obj=obj)
# Add to previously touching and delay counter
cls.DELAY_COUNTER = np.concatenate([cls.DELAY_COUNTER, [0]])
cls.PREVIOUSLY_TOUCHING = np.concatenate([cls.PREVIOUSLY_TOUCHING, [False]])
# Add this object's prim paths to slicer paths
cls.SLICER_LINK_PATHS.append([link.prim_path for link in obj.links.values()])
@classmethod
def _remove_obj(cls, obj):
# Grab idx we'll delete before the object is deleted
deleted_idx = cls.OBJ_IDXS[obj.name]
# Remove from all internal tracked arrays
cls.DELAY_COUNTER = np.delete(cls.DELAY_COUNTER, [deleted_idx])
cls.PREVIOUSLY_TOUCHING = np.delete(cls.PREVIOUSLY_TOUCHING, [deleted_idx])
del cls.SLICER_LINK_PATHS[deleted_idx]
# Call super
super()._remove_obj(obj=obj)
@classmethod
def _update_values(cls, values):
# If we were slicing in the past step, deactivate now
previously_touching_idxs = np.nonzero(cls.PREVIOUSLY_TOUCHING)[0]
values[previously_touching_idxs] = False
cls.DELAY_COUNTER[previously_touching_idxs] = 0 # Reset the counter when we stop touching a sliceable object
# Are we currently touching any sliceables?
currently_touching_sliceables = cls._currently_touching_sliceables()
# If any of our values are False, we need to consider reverting back.
if not np.all(values):
not_active_not_touching = ~values & ~currently_touching_sliceables
not_active_is_touching = ~values & currently_touching_sliceables
not_active_not_touching_idxs = np.where(not_active_not_touching)[0]
not_active_is_touching_idxs = np.where(not_active_is_touching)[0]
# If we are not touching any sliceable objects, we increment the delay "cooldown" counter that will
# eventually re-activate the slicer
cls.DELAY_COUNTER[not_active_not_touching_idxs] += 1
# If we are touching a sliceable object, reset the counter
cls.DELAY_COUNTER[not_active_is_touching_idxs] = 0
# If the delay counter is greater than steps to wait, set to True
values = np.where(cls.DELAY_COUNTER >= cls.STEPS_TO_WAIT, True, values)
# Record if we were touching anything previously
cls.PREVIOUSLY_TOUCHING = currently_touching_sliceables
return values
@classmethod
def _currently_touching_sliceables(cls):
# Initialize return value as all falses
currently_touching = np.zeros_like(cls.PREVIOUSLY_TOUCHING)
# Grab all sliceable objects
sliceable_objs = og.sim.scene.object_registry("abilities", "sliceable", [])
# If there's no sliceables, then obviously no slicer is touching any sliceable so immediately return all Falses
if len(sliceable_objs) == 0:
return currently_touching
# Aggregate all link prim path indices
all_slicer_idxs = [[RigidContactAPI.get_body_row_idx(prim_path) for prim_path in link_paths] for link_paths in cls.SLICER_LINK_PATHS]
sliceable_idxs = [RigidContactAPI.get_body_col_idx(link.prim_path) for obj in sliceable_objs for link in obj.links.values()]
impulses = RigidContactAPI.get_all_impulses()
# Batch check each slicer against all sliceables
for i, slicer_idxs in enumerate(all_slicer_idxs):
if np.any(impulses[slicer_idxs][:, sliceable_idxs]):
# We are touching at least one sliceable
currently_touching[i] = True
return currently_touching
@classproperty
def value_name(cls):
return "value"
@classproperty
def value_type(cls):
return bool
def __init__(self, obj):
# Run super first
super(SlicerActive, self).__init__(obj)
# Set value to be default (True)
self._set_value(True)
@property
def state_size(self):
# Call super first
size = super().state_size
# Add additional 2 to keep track of previously touching and delay counter
return size + 2
# For this state, we simply store its value.
def _dump_state(self):
state = super()._dump_state()
state["previously_touching"] = bool(self.PREVIOUSLY_TOUCHING[self.OBJ_IDXS[self.obj.name]])
state["delay_counter"] = int(self.DELAY_COUNTER[self.OBJ_IDXS[self.obj.name]])
return state
def _load_state(self, state):
super()._load_state(state=state)
self.PREVIOUSLY_TOUCHING[self.OBJ_IDXS[self.obj.name]] = state["previously_touching"]
self.DELAY_COUNTER[self.OBJ_IDXS[self.obj.name]] = state["delay_counter"]
def _serialize(self, state):
state_flat = super()._serialize(state=state)
return np.concatenate([
state_flat,
[state["previously_touching"], state["delay_counter"]],
], dtype=float)
def _deserialize(self, state):
state_dict, idx = super()._deserialize(state=state)
state_dict[f"{self.value_name}"] = bool(state_dict[f"{self.value_name}"])
state_dict["previously_touching"] = bool(state[idx])
state_dict["delay_counter"] = int(state[idx + 1])
return state_dict, idx + 2
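# Illustrative reactivation timeline (assumes a "slicer" StatefulObject `knife`):
#
#     knife.states[SlicerActive].get_value()  # True while armed
#     # After touching a sliceable, the state flips to False on the next update and
#     # stays False until STEPS_TO_WAIT contact-free steps have elapsed, where
#     #     STEPS_TO_WAIT = max(1, ceil(m.REACTIVATION_DELAY / og.sim.get_rendering_dt()))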
| 7,258 | Python | 37.407407 | 141 | 0.659686 |
StanfordVL/OmniGibson/omnigibson/object_states/robot_related_states.py | import numpy as np
import omnigibson as og
from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin, RelativeObjectState
from omnigibson.sensors import VisionSensor
_IN_REACH_DISTANCE_THRESHOLD = 2.0
_IN_FOV_PIXEL_FRACTION_THRESHOLD = 0.05
class RobotStateMixin:
@property
def robot(self):
from omnigibson.robots.robot_base import BaseRobot
assert isinstance(self.obj, BaseRobot), "This state only works with robots."
return self.obj
class IsGrasping(RelativeObjectState, BooleanStateMixin, RobotStateMixin):
def _get_value(self, obj):
# TODO: Make this work with non-assisted grasping
return any(
self.robot._ag_obj_in_hand[arm] == obj
for arm in self.robot.arm_names
)
# class InReachOfRobot(AbsoluteObjectState, BooleanStateMixin):
# def _compute_value(self):
# robot = _get_robot(self.simulator)
# if not robot:
# return False
# robot_pos = robot.get_position()
# object_pos = self.obj.get_position()
# return np.linalg.norm(object_pos - np.array(robot_pos)) < _IN_REACH_DISTANCE_THRESHOLD
# class InFOVOfRobot(AbsoluteObjectState, BooleanStateMixin):
# @staticmethod
# def get_optional_dependencies():
# return AbsoluteObjectState.get_optional_dependencies() + [ObjectsInFOVOfRobot]
# def _get_value(self):
# robot = _get_robot(self.simulator)
# if not robot:
# return False
# body_ids = set(self.obj.get_body_ids())
# return not body_ids.isdisjoint(robot.states[ObjectsInFOVOfRobot].get_value())
class ObjectsInFOVOfRobot(AbsoluteObjectState, RobotStateMixin):
def _get_value(self):
"""
Gets all objects in the robot's field of view.
Returns:
list: List of objects in the robot's field of view
"""
if not any(isinstance(sensor, VisionSensor) for sensor in self.robot.sensors.values()):
raise ValueError("No vision sensors found on robot.")
obj_names = []
names_to_exclude = set(['background', 'unlabelled'])
for sensor in self.robot.sensors.values():
if isinstance(sensor, VisionSensor):
_, info = sensor.get_obs()
obj_names.extend([name for name in info['seg_instance'].values() if name not in names_to_exclude])
return [x for x in [og.sim.scene.object_registry("name", x) for x in obj_names] if x is not None]
| 2,529 | Python | 34.633802 | 114 | 0.653223 |
StanfordVL/OmniGibson/omnigibson/object_states/__init__.py | from omnigibson.object_states.object_state_base import REGISTERED_OBJECT_STATES
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.adjacency import HorizontalAdjacency, VerticalAdjacency
from omnigibson.object_states.attached_to import AttachedTo
from omnigibson.object_states.burnt import Burnt
from omnigibson.object_states.contact_bodies import ContactBodies
from omnigibson.object_states.contact_particles import ContactParticles
from omnigibson.object_states.contains import ContainedParticles, Contains
from omnigibson.object_states.cooked import Cooked
from omnigibson.object_states.covered import Covered
from omnigibson.object_states.frozen import Frozen
from omnigibson.object_states.heat_source_or_sink import HeatSourceOrSink
from omnigibson.object_states.heated import Heated
from omnigibson.object_states.inside import Inside
from omnigibson.object_states.max_temperature import MaxTemperature
from omnigibson.object_states.next_to import NextTo
from omnigibson.object_states.on_fire import OnFire
from omnigibson.object_states.on_top import OnTop
from omnigibson.object_states.open_state import Open
from omnigibson.object_states.overlaid import Overlaid
from omnigibson.object_states.particle_modifier import ParticleRemover, ParticleApplier
from omnigibson.object_states.particle_source_or_sink import ParticleSource, ParticleSink
from omnigibson.object_states.particle import ParticleRequirement
from omnigibson.object_states.pose import Pose
from omnigibson.object_states.robot_related_states import IsGrasping, ObjectsInFOVOfRobot
from omnigibson.object_states.saturated import Saturated
from omnigibson.object_states.slicer_active import SlicerActive
from omnigibson.object_states.sliceable import SliceableRequirement
from omnigibson.object_states.temperature import Temperature
from omnigibson.object_states.toggle import ToggledOn
from omnigibson.object_states.touching import Touching
from omnigibson.object_states.under import Under
from omnigibson.object_states.filled import Filled
from omnigibson.object_states.folded import Folded, Unfolded, FoldedLevel
from omnigibson.object_states.draped import Draped
| 2,161 | Python | 59.055554 | 89 | 0.869968 |
StanfordVL/OmniGibson/omnigibson/object_states/heat_source_or_sink.py | import omnigibson as og
from omnigibson.macros import create_module_macros, macros
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.inside import Inside
from omnigibson.object_states.link_based_state_mixin import LinkBasedStateMixin
from omnigibson.object_states.object_state_base import AbsoluteObjectState
from omnigibson.object_states.open_state import Open
from omnigibson.object_states.toggle import ToggledOn
from omnigibson.object_states.update_state_mixin import UpdateStateMixin
from omnigibson.utils.python_utils import classproperty
from omnigibson.utils.constants import PrimType
import numpy as np
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.HEATSOURCE_LINK_PREFIX = "heatsource"
m.HEATING_ELEMENT_MARKER_SCALE = [1.0] * 3
# TODO: Delete default values for this and make them required.
m.DEFAULT_TEMPERATURE = 200
m.DEFAULT_HEATING_RATE = 0.04
m.DEFAULT_DISTANCE_THRESHOLD = 0.2
class HeatSourceOrSink(AbsoluteObjectState, LinkBasedStateMixin, UpdateStateMixin):
"""
This state indicates the heat source or heat sink state of the object.
    Currently, get_value() returns False if the object is not an active heat source / sink
    (e.g. it is toggled off, or open when it is required to be closed), and True otherwise.
    The heating element, if any, is tracked via this state's metalink; e.g. a stove uses its
    annotated heating element link, while a microwave (which requires objects to be inside)
    falls back to its root link.
"""
def __init__(
self,
obj,
temperature=m.DEFAULT_TEMPERATURE,
heating_rate=m.DEFAULT_HEATING_RATE,
distance_threshold=m.DEFAULT_DISTANCE_THRESHOLD,
requires_toggled_on=False,
requires_closed=False,
requires_inside=False,
):
"""
Args:
obj (StatefulObject): The object with the heat source ability.
temperature (float): The temperature of the heat source.
            heating_rate (float): Fraction in [0, 1] of the temperature difference with the
                heat source temperature that should be received every step, per second.
distance_threshold (float): The distance threshold which an object needs
to be closer than in order to receive heat from this heat source.
requires_toggled_on (bool): Whether the heat source object needs to be
toggled on to emit heat. Requires toggleable ability if set to True.
requires_closed (bool): Whether the heat source object needs to be
closed (e.g. in terms of the joints) to emit heat. Requires openable
ability if set to True.
requires_inside (bool): Whether an object needs to be `inside` the
heat source to receive heat. See the Inside state for details. This
will mean that the "heating element" link for the object will be
ignored.
"""
super(HeatSourceOrSink, self).__init__(obj)
self._temperature = temperature
self._heating_rate = heating_rate
self.distance_threshold = distance_threshold
# If the heat source needs to be toggled on, we assert the presence
# of that ability.
if requires_toggled_on:
assert ToggledOn in self.obj.states
self.requires_toggled_on = requires_toggled_on
# If the heat source needs to be closed, we assert the presence
# of that ability.
if requires_closed:
assert Open in self.obj.states
self.requires_closed = requires_closed
# If the heat source needs to contain an object inside to heat it,
# we record that for use in the heat transfer process.
self.requires_inside = requires_inside
# Internal state that gets cached
self._affected_objects = None
@classmethod
def is_compatible(cls, obj, **kwargs):
# Run super first
compatible, reason = super().is_compatible(obj, **kwargs)
if not compatible:
return compatible, reason
# Check whether this state has toggledon if required or open if required
for kwarg, state_type in zip(("requires_toggled_on", "requires_closed"), (ToggledOn, Open)):
if kwargs.get(kwarg, False) and state_type not in obj.states:
return False, f"{cls.__name__} has {kwarg} but obj has no {state_type.__name__} state!"
return True, None
@classmethod
def is_compatible_asset(cls, prim, **kwargs):
# Run super first
compatible, reason = super().is_compatible_asset(prim, **kwargs)
if not compatible:
return compatible, reason
# Check whether this state has toggledon if required or open if required
for kwarg, state_type in zip(("requires_toggled_on", "requires_closed"), (ToggledOn, Open)):
if kwargs.get(kwarg, False) and not state_type.is_compatible_asset(prim=prim, **kwargs)[0]:
return False, f"{cls.__name__} has {kwarg} but obj has no {state_type.__name__} state!"
return True, None
@classproperty
def metalink_prefix(cls):
return m.HEATSOURCE_LINK_PREFIX
@classmethod
def requires_metalink(cls, **kwargs):
# No metalink required if inside
return not kwargs.get("requires_inside", False)
@property
def _default_link(self):
# Only supported if we require inside
return self.obj.root_link if self.requires_inside else super()._default_link
@property
def heating_rate(self):
"""
Returns:
float: Temperature changing rate of this heat source / sink
"""
return self._heating_rate
@property
def temperature(self):
"""
Returns:
float: Temperature of this heat source / sink
"""
return self._temperature
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.update({AABB, Inside})
return deps
@classmethod
def get_optional_dependencies(cls):
deps = super().get_optional_dependencies()
deps.update({ToggledOn, Open})
return deps
def _initialize(self):
# Run super first
super()._initialize()
self.initialize_link_mixin()
def _get_value(self):
# Check the toggle state.
if self.requires_toggled_on and not self.obj.states[ToggledOn].get_value():
return False
# Check the open state.
if self.requires_closed and self.obj.states[Open].get_value():
return False
return True
def affects_obj(self, obj):
"""
        Computes whether this heat source or sink object is currently affecting object @obj, i.e.
        whether @obj is within this source / sink's set of affected objects computed during the
        most recent state update. The actual temperature delta is applied by the Temperature state.
Args:
obj (StatefulObject): Object whose temperature delta should be computed
Returns:
bool: Whether this heat source or sink is currently affecting @obj's temperature
"""
# No change if we're not on
if not self.get_value():
return False
# If the object is not affected, we return False
if obj not in self._affected_objects:
return False
# If all checks pass, we're actively influencing the object!
return True
def _update(self):
# Avoid circular imports
from omnigibson.object_states.temperature import Temperature
from omnigibson.objects.stateful_object import StatefulObject
# Update the internally tracked nearby objects to accelerate filtering for affects_obj
affected_objects = set()
# Only update if we're valid
if self.get_value():
def overlap_callback(hit):
nonlocal affected_objects
obj = og.sim.scene.object_registry("prim_path", "/".join(hit.rigid_body.split("/")[:-1]))
if obj is not None:
affected_objects.add(obj)
# Always continue traversal
return True
if self.requires_inside:
# Use overlap_box check to check for objects inside the box!
aabb_lower, aabb_upper = self.obj.states[AABB].get_value()
half_extent = (aabb_upper - aabb_lower) / 2.0
aabb_center = (aabb_upper + aabb_lower) / 2.0
og.sim.psqi.overlap_box(
halfExtent=half_extent,
pos=aabb_center,
rot=np.array([0, 0, 0, 1.0]),
reportFn=overlap_callback,
)
# Cloth isn't subject to overlap checks, so we also have to manually check their poses as well
cloth_objs = tuple(og.sim.scene.object_registry("prim_type", PrimType.CLOTH, []))
n_cloth_objs = len(cloth_objs)
if n_cloth_objs > 0:
cloth_positions = np.zeros((n_cloth_objs, 3))
for i, obj in enumerate(cloth_objs):
cloth_positions[i] = obj.get_position()
for idx in np.where(np.all((aabb_lower.reshape(1, 3) < cloth_positions) & (cloth_positions < aabb_upper.reshape(1, 3)), axis=-1))[0]:
affected_objects.add(cloth_objs[idx])
# Additionally prune objects based on Inside requirement -- cast to avoid in-place operations
for obj in tuple(affected_objects):
if not obj.states[Inside].get_value(self.obj):
affected_objects.remove(obj)
else:
# Position is either the AABB center of the default link or the metalink position itself
heat_source_pos = self.link.aabb_center if self.link == self._default_link else self.link.get_position()
# Use overlap_sphere check!
og.sim.psqi.overlap_sphere(
radius=self.distance_threshold,
pos=heat_source_pos,
reportFn=overlap_callback,
)
# Cloth isn't subject to overlap checks, so we also have to manually check their poses as well
cloth_objs = tuple(og.sim.scene.object_registry("prim_type", PrimType.CLOTH, []))
n_cloth_objs = len(cloth_objs)
if n_cloth_objs > 0:
cloth_positions = np.zeros((n_cloth_objs, 3))
for i, obj in enumerate(cloth_objs):
cloth_positions[i] = obj.get_position()
for idx in np.where(np.linalg.norm(heat_source_pos.reshape(1, 3) - cloth_positions, axis=-1) <= self.distance_threshold)[0]:
affected_objects.add(cloth_objs[idx])
        # Remove self (we cannot affect ourselves) and update the internal set of objects
if self.obj in affected_objects:
affected_objects.remove(self.obj)
self._affected_objects = {obj for obj in affected_objects if isinstance(obj, StatefulObject) and Temperature in obj.states}
# Propagate the affected objects' temperatures
if len(self._affected_objects) > 0:
Temperature.update_temperature_from_heatsource_or_sink(
objs=self._affected_objects,
temperature=self.temperature,
rate=self.heating_rate,
)
# Nothing needs to be done to save/load HeatSource
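# Illustrative construction sketch (hypothetical parameters; real values normally come
# from the object's ability metadata):
#
#     # A stove-like source: the object must have the toggleable ability, and heats
#     # objects within 0.2 m of its "heatsource" metalink
#     stove_heat = HeatSourceOrSink(obj=stove, temperature=500, heating_rate=0.04,
#                                   distance_threshold=0.2, requires_toggled_on=True)
#
#     # A fridge-like sink: requires objects to be Inside, so no metalink is needed
#     fridge_cold = HeatSourceOrSink(obj=fridge, temperature=4, heating_rate=0.02,
#                                    requires_inside=True)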
| 11,739 | Python | 40.779359 | 153 | 0.620411 |
StanfordVL/OmniGibson/omnigibson/object_states/factory.py | import networkx as nx
from collections import namedtuple
from omnigibson.object_states.kinematics_mixin import KinematicsMixin
from omnigibson.object_states import *
# states: list of ObjectBaseState
# requirements: list of ObjectBaseRequirement
AbilityDependencies = namedtuple("AbilityDependencies", ("states", "requirements"))
# Maps ability name to list of Object States and / or Ability Requirements that determine
# whether the given ability can be instantiated for a requested object
_ABILITY_DEPENDENCIES = {
"robot": AbilityDependencies(states=[IsGrasping, ObjectsInFOVOfRobot], requirements=[]),
"attachable": AbilityDependencies(states=[AttachedTo], requirements=[]),
"particleApplier": AbilityDependencies(states=[ParticleApplier], requirements=[ParticleRequirement]),
"particleRemover": AbilityDependencies(states=[ParticleRemover], requirements=[ParticleRequirement]),
"particleSource": AbilityDependencies(states=[ParticleSource], requirements=[ParticleRequirement]),
"particleSink": AbilityDependencies(states=[ParticleSink], requirements=[ParticleRequirement]),
"coldSource": AbilityDependencies(states=[HeatSourceOrSink], requirements=[]),
"cookable": AbilityDependencies(states=[Cooked, Burnt], requirements=[]),
"coverable": AbilityDependencies(states=[Covered], requirements=[]),
"freezable": AbilityDependencies(states=[Frozen], requirements=[]),
"heatable": AbilityDependencies(states=[Heated], requirements=[]),
"heatSource": AbilityDependencies(states=[HeatSourceOrSink], requirements=[]),
"meltable": AbilityDependencies(states=[MaxTemperature], requirements=[]),
"mixingTool": AbilityDependencies(states=[], requirements=[]),
"openable": AbilityDependencies(states=[Open], requirements=[]),
"flammable": AbilityDependencies(states=[OnFire], requirements=[]),
"saturable": AbilityDependencies(states=[Saturated], requirements=[]),
"sliceable": AbilityDependencies(states=[], requirements=[SliceableRequirement]),
"slicer": AbilityDependencies(states=[SlicerActive], requirements=[]),
"toggleable": AbilityDependencies(states=[ToggledOn], requirements=[]),
"cloth": AbilityDependencies(states=[Folded, Unfolded, Overlaid, Draped], requirements=[]),
"fillable": AbilityDependencies(states=[Filled, Contains], requirements=[]),
}
_DEFAULT_STATE_SET = frozenset(
[
Inside,
NextTo,
OnTop,
Touching,
Under,
Covered,
]
)
_KINEMATIC_STATE_SET = frozenset(
[state for state in REGISTERED_OBJECT_STATES.values() if issubclass(state, KinematicsMixin)]
)
_FIRE_STATE_SET = frozenset(
[
HeatSourceOrSink,
OnFire,
]
)
_STEAM_STATE_SET = frozenset(
[
Heated,
]
)
_TEXTURE_CHANGE_STATE_SET = frozenset(
[
Frozen,
Burnt,
Cooked,
Saturated,
ToggledOn,
]
)
_SYSTEM_STATE_SET = frozenset(
[
Covered,
Saturated,
Filled,
Contains,
]
)
_VISUAL_STATE_SET = frozenset(_FIRE_STATE_SET | _STEAM_STATE_SET | _TEXTURE_CHANGE_STATE_SET)
_TEXTURE_CHANGE_PRIORITY = {
Frozen: 4,
Burnt: 3,
Cooked: 2,
Saturated: 1,
ToggledOn: 0,
}
def get_system_states():
return _SYSTEM_STATE_SET
def get_fire_states():
return _FIRE_STATE_SET
def get_steam_states():
return _STEAM_STATE_SET
def get_texture_change_states():
return _TEXTURE_CHANGE_STATE_SET
def get_texture_change_priority():
return _TEXTURE_CHANGE_PRIORITY
def get_visual_states():
return _VISUAL_STATE_SET
def get_default_states():
return _DEFAULT_STATE_SET
def get_state_name(state):
# Get the name of the class.
return state.__name__
def get_states_for_ability(ability):
if ability not in _ABILITY_DEPENDENCIES:
return []
return _ABILITY_DEPENDENCIES[ability].states
def get_requirements_for_ability(ability):
if ability not in _ABILITY_DEPENDENCIES:
return []
return _ABILITY_DEPENDENCIES[ability].requirements
def get_state_dependency_graph(states=None):
"""
Args:
states (None or Iterable): If specified, specific state(s) to sort. Otherwise, will generate dependency graph
over all states
Returns:
nx.DiGraph: State dependency graph of supported object states
"""
states = REGISTERED_OBJECT_STATES.values() if states is None else states
dependencies = {state: set.union(state.get_dependencies(), state.get_optional_dependencies()) for state in states}
return nx.DiGraph(dependencies)
def get_states_by_dependency_order(states=None):
"""
Args:
states (None or Iterable): If specified, specific state(s) to sort. Otherwise, will generate dependency graph
over all states
Returns:
list: all states in topological order of dependency
"""
return list(reversed(list(nx.algorithms.topological_sort(get_state_dependency_graph(states)))))
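
# Usage sketch (illustrative): object states must be initialized
# dependency-first, which get_states_by_dependency_order() guarantees.
# Filled declares ContainedParticles as a dependency, so it sorts later.
from omnigibson.object_states import Filled
from omnigibson.object_states.contains import ContainedParticles

ordered = get_states_by_dependency_order()
assert ordered.index(ContainedParticles) < ordered.index(Filled)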
StanfordVL/OmniGibson/omnigibson/object_states/filled.py

import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.object_states.contains import ContainedParticles
from omnigibson.object_states.object_state_base import RelativeObjectState, BooleanStateMixin
from omnigibson.systems.system_base import PhysicalParticleSystem, is_physical_particle_system
from omnigibson.systems.macro_particle_system import MacroParticleSystem
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Proportion of object's volume that must be filled for object to be considered filled
m.VOLUME_FILL_PROPORTION = 0.2
m.N_MAX_MACRO_PARTICLE_SAMPLES = 500
m.N_MAX_MICRO_PARTICLE_SAMPLES = 100000
class Filled(RelativeObjectState, BooleanStateMixin):
def _get_value(self, system):
# Sanity check to make sure system is valid
assert is_physical_particle_system(system_name=system.name), \
"Can only get Filled state with a valid PhysicalParticleSystem!"
# Check what volume is filled
if system.n_particles > 0:
# Treat particles as cubes
particle_volume = (system.particle_radius * 2) ** 3
n_particles = self.obj.states[ContainedParticles].get_value(system).n_in_volume
prop_filled = particle_volume * n_particles / self.obj.states[ContainedParticles].volume
# If greater than threshold, then the volume is filled
# Explicit bool cast needed here because the type is bool_ instead of bool which is not JSON-Serializable
# This has to do with numpy, see https://stackoverflow.com/questions/58408054/typeerror-object-of-type-bool-is-not-json-serializable
value = bool(prop_filled > m.VOLUME_FILL_PROPORTION)
else:
# No particles exists, so we're obviously empty
value = False
return value
def _set_value(self, system, new_value):
# Sanity check to make sure system is valid
assert is_physical_particle_system(system_name=system.name), \
"Can only set Filled state with a valid PhysicalParticleSystem!"
# First, check our current state
current_state = self.get_value(system)
# Only do something if we're changing state
if current_state != new_value:
contained_particles_state = self.obj.states[ContainedParticles]
if new_value:
# Going from False --> True, sample volume with particles
system.generate_particles_from_link(
obj=self.obj,
link=contained_particles_state.link,
check_contact=True,
max_samples=m.N_MAX_MACRO_PARTICLE_SAMPLES
if issubclass(system, MacroParticleSystem) else m.N_MAX_MICRO_PARTICLE_SAMPLES
)
else:
# Cannot set False
raise NotImplementedError(f"{self.__class__.__name__} does not support set_value(system, False)")
return True
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(ContainedParticles)
return deps
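
# Usage sketch (illustrative): filling a container and reading the result.
# Assumes a playing simulation, an object with a container metalink, and a
# registered PhysicalParticleSystem (e.g. water); all are caller-supplied.
def fill_and_check(container_obj, system):
    # set_value(True) samples particles inside the container volume
    container_obj.states[Filled].set_value(system, True)
    # True once the sampled particles occupy > VOLUME_FILL_PROPORTION (20%)
    return container_obj.states[Filled].get_value(system)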
StanfordVL/OmniGibson/omnigibson/object_states/draped.py

from omnigibson.object_states.kinematics_mixin import KinematicsMixin
from omnigibson.object_states.object_state_base import BooleanStateMixin, RelativeObjectState
from omnigibson.object_states.contact_bodies import ContactBodies
from omnigibson.object_states.cloth_mixin import ClothStateMixin
from omnigibson.utils.constants import PrimType
from omnigibson.utils.object_state_utils import sample_cloth_on_rigid
import omnigibson as og
import numpy as np
class Draped(RelativeObjectState, KinematicsMixin, BooleanStateMixin, ClothStateMixin):
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(ContactBodies)
return deps
def _set_value(self, other, new_value):
if not new_value:
raise NotImplementedError("DrapedOver does not support set_value(False)")
if not (self.obj.prim_type == PrimType.CLOTH and other.prim_type == PrimType.RIGID):
raise ValueError("DrapedOver state requires obj1 is cloth and obj2 is rigid.")
state = og.sim.dump_state(serialized=False)
if sample_cloth_on_rigid(self.obj, other, randomize_xy=True) and self.get_value(other):
return True
else:
og.sim.load_state(state, serialized=False)
return False
def _get_value(self, other):
"""
Check whether the (cloth) object is draped on the other (rigid) object.
The cloth object should touch the rigid object and its CoM should be below the average position of the contact points.
"""
if not (self.obj.prim_type == PrimType.CLOTH and other.prim_type == PrimType.RIGID):
raise ValueError("Draped state requires obj1 is cloth and obj2 is rigid.")
# Find the links of @other that are in contact with @self.obj
contact_links = self.obj.states[ContactBodies].get_value() & set(other.links.values())
if len(contact_links) == 0:
return False
contact_link_prim_paths = {contact_link.prim_path for contact_link in contact_links}
# Filter the contact points to only include the ones that are on the contact links
contact_positions = []
for contact in self.obj.contact_list():
if len({contact.body0, contact.body1} & contact_link_prim_paths) > 0:
contact_positions.append(contact.position)
# The center of mass of the cloth needs to be below the average position of the contact points
mean_contact_position = np.mean(contact_positions, axis=0)
center_of_mass = np.mean(self.obj.root_link.keypoint_particle_positions, axis=0)
return center_of_mass[2] < mean_contact_position[2]
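
# Usage sketch (illustrative): sampling a draped configuration. Assumes
# cloth_obj was loaded with PrimType.CLOTH and rigid_obj is a rigid object;
# both are caller-supplied placeholders.
def drape(cloth_obj, rigid_obj):
    # set_value(True) samples a pose and rolls the sim state back on failure
    return cloth_obj.states[Draped].set_value(rigid_obj, True)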
StanfordVL/OmniGibson/omnigibson/object_states/attached_to.py

import numpy as np
from collections import defaultdict
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros
import omnigibson.utils.transform_utils as T
from omnigibson.object_states.contact_subscribed_state_mixin import ContactSubscribedStateMixin
from omnigibson.object_states.joint_break_subscribed_state_mixin import JointBreakSubscribedStateMixin
from omnigibson.object_states.object_state_base import BooleanStateMixin, RelativeObjectState
from omnigibson.object_states.link_based_state_mixin import LinkBasedStateMixin
from omnigibson.object_states.contact_bodies import ContactBodies
from omnigibson.utils.constants import JointType
from omnigibson.utils.usd_utils import create_joint
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.python_utils import classproperty
from omnigibson.utils.usd_utils import CollisionAPI
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.ATTACHMENT_LINK_PREFIX = "attachment"
m.DEFAULT_POSITION_THRESHOLD = 0.05 # 5cm
m.DEFAULT_ORIENTATION_THRESHOLD = np.deg2rad(5.0) # 5 degrees
m.DEFAULT_JOINT_TYPE = JointType.JOINT_FIXED
m.DEFAULT_BREAK_FORCE = 1000 # Newton
m.DEFAULT_BREAK_TORQUE = 1000 # Newton-Meter
# TODO: Make AttachedTo into a global state that manages all the attachments in the scene.
# When an attachment of a child and a parent is about to happen:
# 1. stop the sim
# 2. remove all existing attachment joints (and save information to restore later)
# 3. disable collision between the child and the parent
# 4. play the sim
# 5. reload the state
# 6. restore all existing attachment joints
# 7. create the joint
class AttachedTo(RelativeObjectState, BooleanStateMixin, ContactSubscribedStateMixin, JointBreakSubscribedStateMixin, LinkBasedStateMixin):
"""
Handles attachment between two rigid objects, by creating a fixed/spherical joint between self.obj (child) and
other (parent). At any given moment, an object can only be attached to at most one other object, i.e.
a parent can have multiple children, but a child can only have one parent.
Note that generally speaking only child.states[AttachedTo].get_value(parent) will return True.
One of the child's male meta links will be attached to one of the parent's female meta links.
Subclasses ContactSubscribedStateMixin, JointBreakSubscribedStateMixin
on_contact function attempts to attach self.obj to other when a CONTACT_FOUND event happens
on_joint_break function breaks the current attachment
"""
# This is to force the __init__ args to be "self" and "obj" only.
# Otherwise, it will inherit from LinkBasedStateMixin and the __init__ args will be "self", "args", "kwargs".
def __init__(self, obj):
# Run super method
super().__init__(obj=obj)
def initialize(self):
super().initialize()
og.sim.add_callback_on_stop(name=f"{self.obj.name}_detach", callback=self._detach)
self.parents_disabled_collisions = set()
def remove(self):
super().remove()
og.sim.remove_callback_on_stop(name=f"{self.obj.name}_detach")
@classproperty
def metalink_prefix(cls):
"""
Returns:
str: Unique keyword that defines the metalink associated with this object state
"""
return m.ATTACHMENT_LINK_PREFIX
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(ContactBodies)
return deps
def _initialize(self):
super()._initialize()
self.initialize_link_mixin()
# Reference to the parent object (DatasetObject)
self.parent = None
# Reference to the female meta link of the parent object (RigidPrim)
self.parent_link = None
        # Mapping from the female meta link names of self.obj to their children (Dict[str, Optional[DatasetObject]])
self.children = {link_name: None for link_name in self.links if link_name.split("_")[1].endswith("F")}
# Cache of parent link candidates for other objects (Dict[DatasetObject, Dict[str, str]])
        # @other -> (the male meta link names of @self.obj -> the corresponding female meta link names of @other)
self.parent_link_candidates = dict()
def on_joint_break(self, joint_prim_path):
# Note that when this function is invoked when a joint break event happens, @self.obj is the parent of the
# attachment joint, not the child. We access the child of the broken joint, and call the setter with False
child = self.children[joint_prim_path.split("/")[-2]]
child.states[AttachedTo].set_value(self.obj, False)
# Attempts to attach two objects when a CONTACT_FOUND event happens
def on_contact(self, other, contact_headers, contact_data):
for contact_header in contact_headers:
if contact_header.type == lazy.omni.physx.bindings._physx.ContactEventType.CONTACT_FOUND:
# If it has successfully attached to something, break.
if self.set_value(other, True):
break
def _set_value(self, other, new_value, bypass_alignment_checking=False, check_physics_stability=False, can_joint_break=True):
"""
Args:
other (DatasetObject): parent object to attach to.
new_value (bool): whether to attach or detach.
bypass_alignment_checking (bool): whether to bypass alignment checking when finding attachment links.
Normally when finding attachment links, we check if the child and parent links have aligned positions
or poses. This flag allows users to bypass this check and find attachment links solely based on the
attachment meta link types. Default is False.
check_physics_stability (bool): whether to check if the attachment is stable after attachment.
If True, it will check if the child object is not colliding with other objects except the parent object.
If False, it will not check the stability and simply attach the child to the parent.
Default is False.
can_joint_break (bool): whether the joint can break or not.
Returns:
bool: whether the attachment setting was successful or not.
"""
# Attempt to attach
if new_value:
if self.parent == other:
# Already attached to this object. Do nothing.
return True
elif self.parent is not None:
                log.debug(f"Trying to attach object {self.obj.name} to object {other.name}, "
                          f"but it is already attached to object {self.parent.name}. Try detaching first.")
return False
else:
# Find attachment links that satisfy the proximity requirements
child_link, parent_link = self._find_attachment_links(other, bypass_alignment_checking)
if child_link is None:
return False
else:
if check_physics_stability:
state = og.sim.dump_state()
self._attach(other, child_link, parent_link, can_joint_break=can_joint_break)
if not check_physics_stability:
return True
else:
og.sim.step_physics()
# self.obj should not collide with other objects except the parent
success = len(self.obj.states[ContactBodies].get_value(ignore_objs=(other,))) == 0
if success:
return True
else:
self._detach()
og.sim.load_state(state)
return False
# Attempt to detach
else:
if self.parent == other:
self._detach()
# Wake up objects so that passive forces like gravity can be applied.
self.obj.wake()
other.wake()
return True
def _get_value(self, other):
# Simply return if the current parent matches other
return other == self.parent
def _find_attachment_links(self,
other,
bypass_alignment_checking=False,
pos_thresh=m.DEFAULT_POSITION_THRESHOLD,
orn_thresh=m.DEFAULT_ORIENTATION_THRESHOLD):
"""
Args:
other (DatasetObject): parent object to find potential attachment links.
bypass_alignment_checking (bool): whether to bypass alignment checking when finding attachment links.
Normally when finding attachment links, we check if the child and parent links have aligned positions
or poses. This flag allows users to bypass this check and find attachment links solely based on the
attachment meta link types. Default is False.
pos_thresh (float): position difference threshold to activate attachment, in meters.
orn_thresh (float): orientation difference threshold to activate attachment, in radians.
Returns:
2-tuple:
- RigidPrim or None: link belonging to @self.obj that should be aligned to that corresponding link of @other
- RigidPrim or None: the corresponding link of @other
"""
parent_candidates = self._get_parent_candidates(other)
if not parent_candidates:
return None, None
for child_link_name, parent_link_names in parent_candidates.items():
child_link = self.links[child_link_name]
for parent_link_name in parent_link_names:
parent_link = other.states[AttachedTo].links[parent_link_name]
if other.states[AttachedTo].children[parent_link_name] is None:
if bypass_alignment_checking:
return child_link, parent_link
pos_diff = np.linalg.norm(child_link.get_position() - parent_link.get_position())
orn_diff = T.get_orientation_diff_in_radian(child_link.get_orientation(), parent_link.get_orientation())
if pos_diff < pos_thresh and orn_diff < orn_thresh:
return child_link, parent_link
return None, None
def _get_parent_candidates(self, other):
"""
Helper function to return the parent link candidates for @other
Returns:
            Dict[str, str] or None: mapping from the male meta link names of self.obj to the corresponding female meta
link names of @other. Returns None if @other does not have the AttachedTo state.
"""
if AttachedTo not in other.states:
return None
if other not in self.parent_link_candidates:
parent_link_names = defaultdict(set)
for child_link_name, child_link in self.links.items():
child_category = child_link_name.split("_")[1]
if child_category.endswith("F"):
continue
assert child_category.endswith("M")
parent_category = child_category[:-1] + "F"
for parent_link_name, parent_link in other.states[AttachedTo].links.items():
if parent_category in parent_link_name:
parent_link_names[child_link_name].add(parent_link_name)
self.parent_link_candidates[other] = parent_link_names
return self.parent_link_candidates[other]
@property
def attachment_joint_prim_path(self):
return f"{self.parent_link.prim_path}/{self.obj.name}_attachment_joint" if self.parent_link is not None else None
def _attach(self, other, child_link, parent_link, joint_type=m.DEFAULT_JOINT_TYPE, can_joint_break=True):
"""
Creates a fixed or spherical joint between a male meta link of self.obj (@child_link) and a female meta link of
@other (@parent_link) with a given @joint_type, @break_force and @break_torque
Args:
other (DatasetObject): parent object to attach to.
child_link (RigidPrim): male meta link of @self.obj.
parent_link (RigidPrim): female meta link of @other.
joint_type (JointType): joint type of the attachment, {JointType.JOINT_FIXED, JointType.JOINT_SPHERICAL}
can_joint_break (bool): whether the joint can break or not.
"""
assert joint_type in {JointType.JOINT_FIXED, JointType.JOINT_SPHERICAL}, f"Unsupported joint type {joint_type}"
# Set pose for self.obj so that child_link and parent_link align (6dof alignment for FixedJoint and 3dof alignment for SphericalJoint)
parent_pos, parent_quat = parent_link.get_position_orientation()
child_pos, child_quat = child_link.get_position_orientation()
child_root_pos, child_root_quat = self.obj.get_position_orientation()
if joint_type == JointType.JOINT_FIXED:
# For FixedJoint: find the relation transformation of the two frames and apply it to self.obj.
rel_pos, rel_quat = T.mat2pose(T.pose2mat((parent_pos, parent_quat)) @ T.pose_inv(T.pose2mat((child_pos, child_quat))))
new_child_root_pos, new_child_root_quat = T.pose_transform(rel_pos, rel_quat, child_root_pos, child_root_quat)
else:
# For SphericalJoint: move the position of self.obj to align the two frames and keep the rotation unchanged.
new_child_root_pos = child_root_pos + (parent_pos - child_pos)
new_child_root_quat = child_root_quat
# Actually move the object and also keep it still for stability purposes.
self.obj.set_position_orientation(new_child_root_pos, new_child_root_quat)
self.obj.keep_still()
other.keep_still()
if joint_type == JointType.JOINT_FIXED:
# FixedJoint: the parent link, the child link and the joint frame all align.
parent_local_quat = np.array([0.0, 0.0, 0.0, 1.0])
else:
# SphericalJoint: the same except that the rotation of the parent link doesn't align with the joint frame.
# The child link and the joint frame still align.
_, parent_local_quat = T.relative_pose_transform([0, 0, 0], child_quat, [0, 0, 0], parent_quat)
# Disable collision between the parent and child objects
self._disable_collision_between_child_and_parent(child=self.obj, parent=other)
# Set the parent references
self.parent = other
self.parent_link = parent_link
# Set the child reference for @other
other.states[AttachedTo].children[parent_link.body_name] = self.obj
kwargs = {"break_force": m.DEFAULT_BREAK_FORCE, "break_torque": m.DEFAULT_BREAK_TORQUE} if can_joint_break else dict()
# Create the joint
create_joint(
prim_path=self.attachment_joint_prim_path,
joint_type=joint_type,
body0=f"{parent_link.prim_path}",
body1=f"{child_link.prim_path}",
joint_frame_in_parent_frame_pos=np.zeros(3),
joint_frame_in_parent_frame_quat=parent_local_quat,
joint_frame_in_child_frame_pos=np.zeros(3),
joint_frame_in_child_frame_quat=np.array([0.0, 0.0, 0.0, 1.0]),
**kwargs
)
def _disable_collision_between_child_and_parent(self, child, parent):
"""
Disables collision between the child and parent objects
"""
if parent in self.parents_disabled_collisions:
return
self.parents_disabled_collisions.add(parent)
was_playing = og.sim.is_playing()
if was_playing:
state = og.sim.dump_state()
og.sim.stop()
for child_link in child.links.values():
for parent_link in parent.links.values():
child_link.add_filtered_collision_pair(parent_link)
if parent.category == "wall_nail":
# Temporary hack to disable collision between the attached child object and all walls/floors so that objects
# attached to the wall_nails do not collide with the walls/floors.
for wall in og.sim.scene.object_registry("category", "walls", set()):
for wall_link in wall.links.values():
for child_link in child.links.values():
child_link.add_filtered_collision_pair(wall_link)
for wall in og.sim.scene.object_registry("category", "floors", set()):
for floor_link in wall.links.values():
for child_link in child.links.values():
child_link.add_filtered_collision_pair(floor_link)
# Temporary hack to disable gravity for the attached child object if the parent is kinematic_only
# Otherwise, the parent metalink will oscillate due to the gravity force of the child.
if parent.kinematic_only:
child.disable_gravity()
if was_playing:
og.sim.play()
og.sim.load_state(state)
def _detach(self):
"""
Removes the current attachment joint
"""
if self.parent_link is not None:
# Remove the attachment joint prim from the stage
og.sim.stage.RemovePrim(self.attachment_joint_prim_path)
# Remove child reference from the parent object
self.parent.states[AttachedTo].children[self.parent_link.body_name] = None
# Remove reference to the parent object and link
self.parent = None
self.parent_link = None
@property
def settable(self):
return True
@property
def state_size(self):
return 1
def _dump_state(self):
return dict(attached_obj_uuid=-1 if self.parent is None else self.parent.uuid)
def _load_state(self, state):
uuid = state["attached_obj_uuid"]
if uuid == -1:
attached_obj = None
else:
attached_obj = og.sim.scene.object_registry("uuid", uuid)
assert attached_obj is not None, "attached_obj_uuid does not match any object in the scene."
if self.parent != attached_obj:
# If it's currently attached to something else, detach.
if self.parent is not None:
self.set_value(self.parent, False)
# assert self.parent is None, "parent reference is not cleared after detachment"
if self.parent is not None:
                log.warning("parent reference is not cleared after detachment")
# If the loaded state requires attachment, attach.
if attached_obj is not None:
self.set_value(attached_obj, True, bypass_alignment_checking=True, check_physics_stability=False, can_joint_break=True)
# assert self.parent == attached_obj, "parent reference is not updated after attachment"
if self.parent != attached_obj:
                log.warning("parent reference is not updated after attachment")
def _serialize(self, state):
return np.array([state["attached_obj_uuid"]], dtype=float)
def _deserialize(self, state):
return dict(attached_obj_uuid=int(state[0])), 1
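
# Usage sketch (illustrative): attaching a child to a parent. Assumes both
# object models carry matching male ("...M") / female ("...F") attachment
# metalinks; the objects are caller-supplied placeholders.
def try_attach(child, parent):
    # Succeeds only when a male metalink of child aligns with a free female
    # metalink of parent within the 5 cm / 5 degree thresholds defined above
    if child.states[AttachedTo].set_value(parent, True):
        assert child.states[AttachedTo].get_value(parent)
        return True
    return False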
StanfordVL/OmniGibson/omnigibson/object_states/contains.py

import numpy as np
from collections import namedtuple
from omnigibson.macros import create_module_macros
from omnigibson.object_states.link_based_state_mixin import LinkBasedStateMixin
from omnigibson.object_states.object_state_base import RelativeObjectState, BooleanStateMixin
from omnigibson.systems.system_base import VisualParticleSystem, PhysicalParticleSystem, is_visual_particle_system, \
is_physical_particle_system
from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function
from omnigibson.utils.python_utils import classproperty
import omnigibson.utils.transform_utils as T
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.CONTAINER_LINK_PREFIX = "container"
m.VISUAL_PARTICLE_OFFSET = 0.01 # Offset to visual particles' poses when checking overlaps with container volume
"""
ContainedParticlesData contains the following fields:
n_in_volume (int): number of particles in the container volume
positions (np.array): (N, 3) array representing the raw global particle positions
in_volume (np.array): (N,) boolean array representing whether each particle is inside the container volume or not
"""
ContainedParticlesData = namedtuple("ContainedParticlesData", ("n_in_volume", "positions", "in_volume"))
class ContainedParticles(RelativeObjectState, LinkBasedStateMixin):
"""
Object state for computing the number of particles of a given system contained in this object's container volume
"""
def __init__(self, obj):
super().__init__(obj)
self.check_in_volume = None # Function to check whether particles are in volume for this container
self._volume = None # Volume of this container
self._compute_info = None # Intermediate computation information to store
@classproperty
def metalink_prefix(cls):
return m.CONTAINER_LINK_PREFIX
def _get_value(self, system):
"""
Args:
system (VisualParticleSystem or PhysicalParticleSystem): System whose number of particles will be checked inside this object's
container volume
Returns:
ContainedParticlesData: namedtuple with the following keys:
- n_in_volume (int): Number of @system's particles inside this object's container volume
- positions (np.array): (N, 3) Particle positions of all @system's particles
- in_volume (np.array): (N,) boolean array, True if the corresponding particle is inside this
object's container volume, else False
"""
# Value is false by default
n_particles_in_volume, raw_positions, checked_positions, particles_in_volume = 0, np.array([]), np.array([]), np.array([])
# Only run additional computations if there are any particles
if system.n_particles > 0:
# First, we check what type of system
# Currently, we support VisualParticleSystems and PhysicalParticleSystems
if is_visual_particle_system(system_name=system.name):
# Grab global particle poses and offset them in the direction of their orientation
raw_positions, quats = system.get_particles_position_orientation()
unit_z = np.zeros((len(raw_positions), 3, 1))
unit_z[:, -1, :] = m.VISUAL_PARTICLE_OFFSET
checked_positions = (T.quat2mat(quats) @ unit_z).reshape(-1, 3) + raw_positions
elif is_physical_particle_system(system_name=system.name):
raw_positions = system.get_particles_position_orientation()[0]
checked_positions = raw_positions
else:
raise ValueError(f"Invalid system {system} received for getting ContainedParticles state!"
f"Currently, only VisualParticleSystems and PhysicalParticleSystems are supported.")
# Only calculate if we have valid positions
if len(checked_positions) > 0:
particles_in_volume = self.check_in_volume(checked_positions)
n_particles_in_volume = particles_in_volume.sum()
return ContainedParticlesData(n_particles_in_volume, raw_positions, particles_in_volume)
def _initialize(self):
super()._initialize()
self.initialize_link_mixin()
# Generate volume checker function for this object
self.check_in_volume, calculate_volume = \
generate_points_in_volume_checker_function(obj=self.obj, volume_link=self.link)
# Calculate volume
self._volume = calculate_volume()
@property
def volume(self):
"""
Returns:
float: Total volume for this container
"""
return self._volume
class Contains(RelativeObjectState, BooleanStateMixin):
def _get_value(self, system):
        # Grab count from the ContainedParticles state; True if it is greater than 0
return self.obj.states[ContainedParticles].get_value(system=system).n_in_volume > 0
def _set_value(self, system, new_value):
if new_value:
# Cannot set contains = True, only False
raise NotImplementedError(f"{self.__class__.__name__} does not support set_value(system, True)")
else:
# Remove all particles from inside the volume
system.remove_particles(idxs=self.obj.states[ContainedParticles].get_value(system).in_volume.nonzero()[0])
return True
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(ContainedParticles)
return deps
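
# Usage sketch (illustrative): inspecting the particles inside a container.
# Assumes a playing simulation and a physical particle system; the object
# and system are caller-supplied placeholders.
def report_contents(container_obj, system):
    data = container_obj.states[ContainedParticles].get_value(system)
    # data is the ContainedParticlesData namedtuple defined above
    print(f"{data.n_in_volume} of {len(data.positions)} particles inside")
    return container_obj.states[Contains].get_value(system)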
StanfordVL/OmniGibson/omnigibson/object_states/contact_bodies.py

import omnigibson as og
from omnigibson.object_states.object_state_base import AbsoluteObjectState
from omnigibson.utils.sim_utils import prims_to_rigid_prim_set
class ContactBodies(AbsoluteObjectState):
def _get_value(self, ignore_objs=None):
# Compute bodies in contact, minus the self-owned bodies
bodies = set()
for contact in self.obj.contact_list():
bodies.update({contact.body0, contact.body1})
bodies -= set(self.obj.link_prim_paths)
rigid_prims = set()
for body in bodies:
tokens = body.split("/")
obj_prim_path = "/".join(tokens[:-1])
link_name = tokens[-1]
obj = og.sim.scene.object_registry("prim_path", obj_prim_path)
if obj is not None:
rigid_prims.add(obj.links[link_name])
# Ignore_objs should either be None or tuple (CANNOT be list because we need to hash these inputs)
assert ignore_objs is None or isinstance(ignore_objs, tuple), \
"ignore_objs must either be None or a tuple of objects to ignore!"
return rigid_prims if ignore_objs is None else rigid_prims - prims_to_rigid_prim_set(ignore_objs)
StanfordVL/OmniGibson/omnigibson/object_states/aabb.py

from omnigibson.object_states.object_state_base import AbsoluteObjectState
class AABB(AbsoluteObjectState):
def _get_value(self):
return self.obj.aabb
# Nothing needs to be done to save/load AABB since it will happen due to pose caching.
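
# Usage sketch (illustrative): combining the two kinematic queries above.
# Assumes a playing simulation; obj is a caller-supplied placeholder.
from omnigibson.object_states.contact_bodies import ContactBodies

def kinematic_summary(obj):
    lower, upper = obj.states[AABB].get_value()
    extent = upper - lower  # axis-aligned bounding-box dimensions
    # Links of *other* objects currently in contact (self links are excluded)
    contact_links = obj.states[ContactBodies].get_value()
    return extent, {link.prim_path for link in contact_links}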
StanfordVL/OmniGibson/omnigibson/object_states/saturated.py

import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.object_states.object_state_base import RelativeObjectState, BooleanStateMixin
from omnigibson.systems.system_base import UUID_TO_SYSTEMS, REGISTERED_SYSTEMS
from omnigibson.utils.python_utils import get_uuid
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Default saturation limit
m.DEFAULT_SATURATION_LIMIT = 1e6
class ModifiedParticles(RelativeObjectState):
"""
Object state tracking number of modified particles for a given object
"""
def __init__(self, obj):
# Run super first
super().__init__(obj=obj)
# Set internal values
self.particle_counts = None
def _initialize(self):
super()._initialize()
# Set internal variables
self.particle_counts = dict()
def _get_value(self, system):
# If system isn't stored, return 0, otherwise, return the actual value
return self.particle_counts.get(system, 0)
def _set_value(self, system, new_value):
assert new_value >= 0, "Cannot set ModifiedParticles value to be less than 0!"
# Remove the value from the dictionary if we're setting it to zero (save memory)
if new_value == 0 and system in self.particle_counts:
self.particle_counts.pop(system)
else:
self.particle_counts[system] = new_value
def _sync_systems(self, systems):
"""
Helper function for forcing internal systems to be synchronized with external list of @systems.
NOTE: This may override internal state
Args:
systems (list of BaseSystem): List of system(s) that should be actively tracked internally
"""
self.particle_counts = {system: -1 for system in systems}
@property
def state_size(self):
# Two entries per system (name + count) + number of systems
return len(self.particle_counts) * 2 + 1
def _dump_state(self):
state = dict(n_systems=len(self.particle_counts))
for system, val in self.particle_counts.items():
state[system.name] = val
return state
def _load_state(self, state):
self.particle_counts = {REGISTERED_SYSTEMS[system_name]: val for system_name, val in state.items() if system_name != "n_systems" and val > 0}
def _serialize(self, state):
state_flat = np.array([state["n_systems"]], dtype=float)
if state["n_systems"] > 0:
system_names = tuple(state.keys())[1:]
state_flat = np.concatenate(
[state_flat,
np.concatenate([(get_uuid(system_name), state[system_name]) for system_name in system_names])]
).astype(float)
return state_flat
def _deserialize(self, state):
n_systems = int(state[0])
state_shaped = state[1:1 + n_systems * 2].reshape(-1, 2)
state_dict = dict(n_systems=n_systems)
systems = []
for uuid, val in state_shaped:
system = UUID_TO_SYSTEMS[int(uuid)]
state_dict[system.name] = int(val)
systems.append(system)
# Sync systems so that state size sanity check succeeds
self._sync_systems(systems=systems)
return state_dict, n_systems * 2 + 1
class Saturated(RelativeObjectState, BooleanStateMixin):
def __init__(self, obj, default_limit=m.DEFAULT_SATURATION_LIMIT):
# Run super first
super().__init__(obj=obj)
# Limits
self._default_limit = default_limit
self._limits = None
def _initialize(self):
super()._initialize()
# Set internal variables
self._limits = dict()
@property
def limits(self):
"""
Returns:
dict: Maps system to limit count for that system, if it exists
"""
return self._limits
def get_limit(self, system):
"""
Grabs the internal particle limit for @system
Args:
system (BaseSystem): System to limit
Returns:
            int: Number of particles representing limit for the given @system
"""
return self._limits.get(system, self._default_limit)
def set_limit(self, system, limit):
"""
Sets internal particle limit @limit for @system
Args:
system (BaseSystem): System to limit
limit (int): Number of particles representing limit for the given @system
"""
self._limits[system] = limit
def _get_value(self, system):
limit = self.get_limit(system=system)
# If requested, run sanity check to make sure we're not over the limit with this system's particles
count = self.obj.states[ModifiedParticles].get_value(system)
assert count <= limit, f"{self.__class__.__name__} should not be over the limit! Max: {limit}, got: {count}"
return count == limit
def _set_value(self, system, new_value):
# Only set the value if it's different than what currently exists
if new_value != self.get_value(system):
self.obj.states[ModifiedParticles].set_value(system, self.get_limit(system=system) if new_value else 0)
return True
def get_texture_change_params(self):
colors = []
for system in self._limits.keys():
if self.get_value(system):
colors.append(system.color)
if len(colors) == 0:
# If no fluid system has Soaked=True, keep the default albedo value
albedo_add = 0.0
diffuse_tint = [1.0, 1.0, 1.0]
else:
albedo_add = 0.1
avg_color = np.mean(colors, axis=0)
# Add a tint of avg_color
# We want diffuse_tint to sum to 2.5 to result in the final RGB to sum to 1.5 on average
# This is because an average RGB color sum to 1.5 (i.e. [0.5, 0.5, 0.5])
# (0.5 [original avg RGB per channel] + 0.1 [albedo_add]) * 2.5 = 1.5
diffuse_tint = np.array([0.5, 0.5, 0.5]) + avg_color / np.sum(avg_color)
diffuse_tint = diffuse_tint.tolist()
return albedo_add, diffuse_tint
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(ModifiedParticles)
return deps
def _sync_systems(self, systems):
"""
Helper function for forcing internal systems to be synchronized with external list of @systems.
NOTE: This may override internal state
Args:
systems (list of BaseSystem): List of system(s) that should be actively tracked internally
"""
self._limits = {system: m.DEFAULT_SATURATION_LIMIT for system in systems}
@property
def state_size(self):
# Limit per entry * 2 (UUID, value) + default limit + n limits
return len(self._limits) * 2 + 2
def _dump_state(self):
state = dict(n_systems=len(self._limits), default_limit=self._default_limit)
for system, limit in self._limits.items():
state[system.name] = limit
return state
def _load_state(self, state):
self._limits = dict()
for k, v in state.items():
if k == "n_systems":
continue
elif k == "default_limit":
self._default_limit = v
# TODO: Make this an else once fresh round of sampling occurs (i.e.: no more outdated systems stored)
elif k in REGISTERED_SYSTEMS:
self._limits[REGISTERED_SYSTEMS[k]] = v
def _serialize(self, state):
state_flat = np.array([state["n_systems"], state["default_limit"]], dtype=float)
if state["n_systems"] > 0:
system_names = tuple(state.keys())[2:]
state_flat = np.concatenate(
[state_flat,
np.concatenate([(get_uuid(system_name), state[system_name]) for system_name in system_names])]
).astype(float)
return state_flat
def _deserialize(self, state):
n_systems = int(state[0])
state_dict = dict(n_systems=n_systems, default_limit=int(state[1]))
state_shaped = state[2:2 + n_systems * 2].reshape(-1, 2)
systems = []
for uuid, val in state_shaped:
system = UUID_TO_SYSTEMS[int(uuid)]
state_dict[system.name] = int(val)
systems.append(system)
# Sync systems so that state size sanity check succeeds
self._sync_systems(systems=systems)
return state_dict, 2 + n_systems * 2
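
# Usage sketch (illustrative): tracking absorption against a saturation
# limit. The absorbing object, system, and particle count are caller-supplied
# placeholders.
def absorb(absorber_obj, system, n_new):
    saturated = absorber_obj.states[Saturated]
    modified = absorber_obj.states[ModifiedParticles]
    count = min(modified.get_value(system) + n_new, saturated.get_limit(system))
    modified.set_value(system, count)
    # True exactly when the tracked count reaches the per-system limit
    return saturated.get_value(system)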
StanfordVL/OmniGibson/omnigibson/object_states/link_based_state_mixin.py

import numpy as np
from omnigibson.object_states.object_state_base import BaseObjectState
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.python_utils import classproperty
from omnigibson.prims.cloth_prim import ClothPrim
# Create module logger
log = create_module_logger(module_name=__name__)
class LinkBasedStateMixin(BaseObjectState):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._links = dict()
@classmethod
def is_compatible(cls, obj, **kwargs):
# Run super first
compatible, reason = super().is_compatible(obj, **kwargs)
if not compatible:
return compatible, reason
# Check whether this state requires metalink
if not cls.requires_metalink(**kwargs):
return True, None
metalink_prefix = cls.metalink_prefix
for link in obj.links.values():
if metalink_prefix in link.name:
return True, None
return False, f"LinkBasedStateMixin {cls.__name__} requires metalink with prefix {cls.metalink_prefix} " \
f"for obj {obj.name} but none was found! To get valid compatible object models, please use " \
f"omnigibson.utils.asset_utils.get_all_object_category_models_with_abilities(...)"
@classmethod
def is_compatible_asset(cls, prim, **kwargs):
# Run super first
compatible, reason = super().is_compatible_asset(prim, **kwargs)
if not compatible:
return compatible, reason
# Check whether this state requires metalink
if not cls.requires_metalink(**kwargs):
return True, None
metalink_prefix = cls.metalink_prefix
for child in prim.GetChildren():
if child.GetTypeName() == "Xform":
if metalink_prefix in child.GetName():
return True, None
return False, f"LinkBasedStateMixin {cls.__name__} requires metalink with prefix {cls.metalink_prefix} " \
f"for asset prim {prim.GetName()} but none was found! To get valid compatible object models, " \
f"please use omnigibson.utils.asset_utils.get_all_object_category_models_with_abilities(...)"
@classproperty
def metalink_prefix(cls):
"""
Returns:
str: Unique keyword that defines the metalink associated with this object state
"""
        raise NotImplementedError()
@classmethod
def requires_metalink(cls, **kwargs):
"""
Returns:
Whether an object state instantiated with constructor arguments **kwargs will require a metalink
or not
"""
# True by default
return True
@property
def link(self):
"""
Returns:
None or RigidPrim: The link associated with this link-based state, if it exists
"""
assert self.links, f"LinkBasedStateMixin link not found for {self.obj.name}"
return next(iter(self.links.values()))
@property
def links(self):
"""
Returns:
dict: mapping from link names to links that match the metalink_prefix
"""
return self._links
@property
def _default_link(self):
"""
Returns:
None or RigidPrim: If supported, the fallback link associated with this link-based state if
no valid metalink is found
"""
# No default link by default
return None
def initialize_link_mixin(self):
assert not self._initialized
# TODO: Extend logic to account for multiple instances of the same metalink? e.g: _0, _1, ... suffixes
for name, link in self.obj.links.items():
if self.metalink_prefix in name or (self._default_link is not None and link.name == self._default_link.name):
self._links[name] = link
# Make sure the scale is similar if the link is not a cloth prim
if not isinstance(link, ClothPrim):
assert np.allclose(link.scale, self.obj.scale), \
f"the meta link {name} has a inconsistent scale with the object {self.obj.name}"
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("LinkBasedStateMixin")
return classes
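
# Usage sketch (illustrative): checking metalink compatibility up front so
# the is_compatible() failure path above is surfaced early. ToggledOn is one
# such link-based state; obj is a caller-supplied placeholder.
from omnigibson.object_states import ToggledOn

def supports_toggling(obj):
    compatible, reason = ToggledOn.is_compatible(obj)
    if not compatible:
        log.warning(reason)
    return compatible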
StanfordVL/OmniGibson/omnigibson/object_states/pose.py

import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.object_states.object_state_base import AbsoluteObjectState
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.POSITIONAL_VALIDATION_EPSILON = 1e-10
m.ORIENTATION_VALIDATION_EPSILON = 0.003 # ~5 degrees error tolerance
class Pose(AbsoluteObjectState):
def _get_value(self):
pos = self.obj.get_position()
orn = self.obj.get_orientation()
return np.array(pos), np.array(orn)
def _has_changed(self, get_value_args, value, info):
        # Only changed if the squared distance between the old and current positions,
        # or the orientation difference, exceeds its respective threshold
old_pos, old_quat = value
# Get current pose
current_pos, current_quat = self.get_value()
# Check position and orientation -- either changing means we've changed poses
dist_squared = np.sum(np.square(current_pos - old_pos))
if dist_squared > m.POSITIONAL_VALIDATION_EPSILON:
return True
# Calculate quat distance simply as the dot product
# A * B = |A||B|cos(theta)
quat_cos_angle = np.abs(np.dot(old_quat, current_quat))
if (1 - quat_cos_angle) > m.ORIENTATION_VALIDATION_EPSILON:
return True
return False
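
# Usage sketch (illustrative): comparing a cached pose against the thresholds
# above; obj is a caller-supplied placeholder and a simulation is assumed to
# be playing.
def moved_since(obj, cached_pos, cached_quat):
    pos, quat = obj.states[Pose].get_value()
    dist_squared = np.sum(np.square(pos - cached_pos))
    quat_cos_angle = np.abs(np.dot(cached_quat, quat))
    return dist_squared > m.POSITIONAL_VALIDATION_EPSILON or \
        (1 - quat_cos_angle) > m.ORIENTATION_VALIDATION_EPSILON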
StanfordVL/OmniGibson/omnigibson/object_states/covered.py

from omnigibson.macros import create_module_macros
from omnigibson.object_states import AABB
from omnigibson.object_states.object_state_base import RelativeObjectState, BooleanStateMixin
from omnigibson.object_states.contact_particles import ContactParticles
from omnigibson.systems.system_base import VisualParticleSystem, is_visual_particle_system, is_physical_particle_system
from omnigibson.utils.constants import PrimType
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Number of visual particles needed in order for Covered --> True
m.VISUAL_PARTICLE_THRESHOLD = 1
# Maximum number of visual particles to sample when setting an object to be covered = True
m.MAX_VISUAL_PARTICLES = 20
# Number of physical particles needed in order for Covered --> True
m.PHYSICAL_PARTICLE_THRESHOLD = 1
# Maximum number of physical particles to sample when setting an object to be covered = True
m.MAX_PHYSICAL_PARTICLES = 5000
class Covered(RelativeObjectState, BooleanStateMixin):
def __init__(self, obj):
# Run super first
super().__init__(obj)
# Set internal values
self._visual_particle_group = None
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.update({AABB, ContactParticles})
return deps
def remove(self):
if self._initialized:
self._clear_attachment_groups()
def _clear_attachment_groups(self):
"""
Utility function to destroy all corresponding attachment groups for this object
"""
for system in VisualParticleSystem.get_active_systems().values():
if self._visual_particle_group in system.groups:
system.remove_attachment_group(self._visual_particle_group)
def _initialize(self):
super()._initialize()
# Grab group name
self._visual_particle_group = VisualParticleSystem.get_group_name(obj=self.obj)
def _get_value(self, system):
# Value is false by default
value = False
# First, we check what type of system
# Currently, we support VisualParticleSystems and PhysicalParticleSystems
if system.n_particles > 0:
if is_visual_particle_system(system_name=system.name):
if self._visual_particle_group in system.groups:
# check whether the current number of particles assigned to the group is greater than the threshold
value = system.num_group_particles(group=self._visual_particle_group) >= m.VISUAL_PARTICLE_THRESHOLD
elif is_physical_particle_system(system_name=system.name):
# Make sure we're not cloth -- not supported yet
assert self.obj.prim_type != PrimType.CLOTH, \
"Cloth objects currently cannot be Covered by physical particles!"
# We've already cached particle contacts, so we merely search through them to see if any particles are
# touching the object and are visible (the non-visible ones are considered already "removed")
n_near_particles = len(self.obj.states[ContactParticles].get_value(system))
                # Heuristic: If the number of near particles is above the threshold, we consider this covered
value = n_near_particles >= m.PHYSICAL_PARTICLE_THRESHOLD
else:
raise ValueError(f"Invalid system {system} received for getting Covered state!"
f"Currently, only VisualParticleSystems and PhysicalParticleSystems are supported.")
return value
def _set_value(self, system, new_value):
# Default success value is True
success = True
# First, we check what type of system
# Currently, we support VisualParticleSystems and PhysicalParticleSystems
if is_visual_particle_system(system_name=system.name):
# Create the group if it doesn't exist already
if self._visual_particle_group not in system.groups:
system.create_attachment_group(obj=self.obj)
# Check current state and only do something if we're changing state
if self.get_value(system) != new_value:
if new_value:
# Generate particles
success = system.generate_group_particles_on_object(
group=self._visual_particle_group,
max_samples=m.MAX_VISUAL_PARTICLES,
min_samples_for_success=m.VISUAL_PARTICLE_THRESHOLD,
)
else:
# We remove all of this group's particles
system.remove_all_group_particles(group=self._visual_particle_group)
elif is_physical_particle_system(system_name=system.name):
# Make sure we're not cloth -- not supported yet
assert self.obj.prim_type != PrimType.CLOTH, \
"Cloth objects currently cannot be Covered by physical particles!"
# Check current state and only do something if we're changing state
if self.get_value(system) != new_value:
if new_value:
# Sample particles on top of the object
success = system.generate_particles_on_object(
obj=self.obj,
max_samples=m.MAX_PHYSICAL_PARTICLES,
min_samples_for_success=m.PHYSICAL_PARTICLE_THRESHOLD,
)
else:
# We remove all particles touching this object
system.remove_particles(idxs=list(self.obj.states[ContactParticles].get_value(system)))
else:
raise ValueError(f"Invalid system {system} received for setting Covered state!"
f"Currently, only VisualParticleSystems and PhysicalParticleSystems are supported.")
return success
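
# Usage sketch (illustrative): covering an object with a particle system such
# as dust or stain (caller-supplied placeholders; assumes a playing sim).
def cover(obj, system):
    # For visual systems this samples up to MAX_VISUAL_PARTICLES on the
    # object; for physical systems, up to MAX_PHYSICAL_PARTICLES on top of it
    if obj.states[Covered].set_value(system, True):
        return obj.states[Covered].get_value(system)
    return False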
StanfordVL/OmniGibson/omnigibson/object_states/max_temperature.py

from omnigibson.object_states.temperature import Temperature
from omnigibson.object_states.tensorized_value_state import TensorizedValueState
import numpy as np
from omnigibson.utils.python_utils import classproperty
class MaxTemperature(TensorizedValueState):
"""
This state remembers the highest temperature reached by an object.
"""
# np.ndarray: Array of Temperature.VALUE indices that correspond to the internally tracked MaxTemperature objects
TEMPERATURE_IDXS = None
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(Temperature)
return deps
@classmethod
def global_initialize(cls):
# Call super first
super().global_initialize()
# Initialize other global variables
cls.TEMPERATURE_IDXS = np.array([], dtype=int)
# Add global callback to Temperature state so that temperature idxs will be updated
def _update_temperature_idxs(obj):
# Decrement all remaining temperature idxs -- they're strictly increasing so we can simply
# subtract 1 from all downstream indices
deleted_idx = Temperature.OBJ_IDXS[obj.name]
cls.TEMPERATURE_IDXS = np.where(cls.TEMPERATURE_IDXS >= deleted_idx, cls.TEMPERATURE_IDXS - 1, cls.TEMPERATURE_IDXS)
Temperature.add_callback_on_remove(name="MaxTemperature_temperature_idx_update", callback=_update_temperature_idxs)
@classmethod
def global_clear(cls):
# Call super first
super().global_clear()
# Clear other internal state
cls.TEMPERATURE_IDXS = None
@classmethod
def _add_obj(cls, obj):
# Call super first
super()._add_obj(obj=obj)
# Add to temperature index
cls.TEMPERATURE_IDXS = np.concatenate([cls.TEMPERATURE_IDXS, [Temperature.OBJ_IDXS[obj.name]]])
@classmethod
def _remove_obj(cls, obj):
# Grab idx we'll delete before the object is deleted
deleted_idx = cls.OBJ_IDXS[obj.name]
# Remove from temperature index
cls.TEMPERATURE_IDXS = np.delete(cls.TEMPERATURE_IDXS, [deleted_idx])
# Decrement all remaining temperature idxs -- they're strictly increasing so we can simply
# subtract 1 from all downstream indices
if deleted_idx < len(cls.TEMPERATURE_IDXS):
cls.TEMPERATURE_IDXS[deleted_idx:] -= 1
# Call super
super()._remove_obj(obj=obj)
@classmethod
def _update_values(cls, values):
# Value is max between stored values and current temperature values
return np.maximum(values, Temperature.VALUES[cls.TEMPERATURE_IDXS])
@classproperty
def value_name(cls):
return "max_temperature"
def __init__(self, obj):
super(MaxTemperature, self).__init__(obj)
# Set value to be default
self._set_value(-np.inf)
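
# Usage sketch (illustrative): MaxTemperature only ratchets upward alongside
# Temperature, so melting-style checks compare it to a threshold. The 373.15 K
# default below is an assumption, not a library constant.
def has_ever_exceeded(obj, threshold=373.15):
    return obj.states[MaxTemperature].get_value() >= threshold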
StanfordVL/OmniGibson/omnigibson/object_states/next_to.py

import numpy as np
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.adjacency import HorizontalAdjacency, flatten_planes
from omnigibson.object_states.kinematics_mixin import KinematicsMixin
from omnigibson.object_states.object_state_base import BooleanStateMixin, RelativeObjectState
class NextTo(KinematicsMixin, RelativeObjectState, BooleanStateMixin):
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(HorizontalAdjacency)
return deps
def _get_value(self, other):
objA_states = self.obj.states
objB_states = other.states
assert AABB in objA_states
assert AABB in objB_states
objA_aabb = objA_states[AABB].get_value()
objB_aabb = objB_states[AABB].get_value()
objA_lower, objA_upper = objA_aabb
objB_lower, objB_upper = objB_aabb
distance_vec = []
for dim in range(3):
glb = max(objA_lower[dim], objB_lower[dim])
lub = min(objA_upper[dim], objB_upper[dim])
distance_vec.append(max(0, glb - lub))
distance = np.linalg.norm(np.array(distance_vec))
objA_dims = objA_upper - objA_lower
objB_dims = objB_upper - objB_lower
avg_aabb_length = np.mean(objA_dims + objB_dims)
# If the distance is longer than acceptable, return False.
if distance > avg_aabb_length * (1.0 / 6.0):
return False
# Otherwise, check if the other object shows up in the adjacency list.
adjacency_this = self.obj.states[HorizontalAdjacency].get_value()
in_any_horizontal_adjacency_of_this = any(
(
other in adjacency_list.positive_neighbors or
other in adjacency_list.negative_neighbors
)
for adjacency_list in flatten_planes(adjacency_this)
)
if in_any_horizontal_adjacency_of_this:
return True
# If not, check in the adjacency lists of `other`. Maybe it's shorter than us etc.
adjacency_other = other.states[HorizontalAdjacency].get_value()
in_any_horizontal_adjacency_of_other = any(
(
self.obj in adjacency_list.positive_neighbors or
self.obj in adjacency_list.negative_neighbors
)
for adjacency_list in flatten_planes(adjacency_other)
)
return in_any_horizontal_adjacency_of_other
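
# Usage sketch (illustrative): NextTo consults both objects' horizontal
# adjacency lists, so the relation is symmetric; the objects are
# caller-supplied placeholders.
def are_adjacent(obj_a, obj_b):
    return obj_a.states[NextTo].get_value(obj_b)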
StanfordVL/OmniGibson/omnigibson/object_states/folded.py

import numpy as np
from collections import namedtuple
from scipy.spatial import ConvexHull, distance_matrix, QhullError
from omnigibson.macros import create_module_macros
from omnigibson.object_states.object_state_base import BooleanStateMixin, AbsoluteObjectState
from omnigibson.object_states.cloth_mixin import ClothStateMixin
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Criterion #1: the threshold on the area ratio of the convex hull of the projection on the XY plane
m.FOLDED_AREA_THRESHOLD = 0.75
m.UNFOLDED_AREA_THRESHOLD = 0.9
# Criterion #2: the threshold on the diagonal ratio of the convex hull of the projection on the XY plane
m.FOLDED_DIAGONAL_THRESHOLD = 0.85
m.UNFOLDED_DIAGONAL_THRESHOLD = 0.9
# Criterion #3: the percentage of face normals that need to be close to the z-axis
m.NORMAL_Z_PERCENTAGE = 0.5
# Whether to visualize the convex hull of the projection on the XY plane
m.DEBUG_CLOTH_PROJ_VIS = False
# Angle threshold for checking smoothness of the cloth; surface normals need to be close enough to the z-axis
m.NORMAL_Z_ANGLE_DIFF = np.deg2rad(45.0)
"""
FoldedLevelData contains the following fields:
smoothness (float): percentage of surface normals that are sufficiently close to the z-axis
area (float): the area of the convex hull of the projected points compared to the initial unfolded state
diagonal (float): the diagonal of the convex hull of the projected points compared to the initial unfolded state
"""
FoldedLevelData = namedtuple("FoldedLevelData", ("smoothness", "area", "diagonal"))
class FoldedLevel(AbsoluteObjectState, ClothStateMixin):
"""
State representing the object's folded level.
Value is a FoldedLevelData object.
"""
def _initialize(self):
super()._initialize()
# Assume the initial state is unfolded
self.area_unfolded, self.diagonal_unfolded = self.calculate_projection_area_and_diagonal_maximum()
def _get_value(self):
smoothness = self.calculate_smoothness()
area, diagonal = self.calculate_projection_area_and_diagonal([0, 1])
return FoldedLevelData(smoothness, area / self.area_unfolded, diagonal / self.diagonal_unfolded)
def calculate_smoothness(self):
"""
        Calculate the percentage of surface normals that are sufficiently close to the z-axis.
"""
cloth = self.obj.root_link
normals = cloth.compute_face_normals(face_ids=cloth.keyface_idx)
# projection onto the z-axis
proj = np.abs(np.dot(normals, np.array([0.0, 0.0, 1.0])))
percentage = np.mean(proj > np.cos(m.NORMAL_Z_ANGLE_DIFF))
return percentage
def calculate_projection_area_and_diagonal_maximum(self):
"""
Calculate the maximum projection area and the diagonal length along different axes
Returns:
area_max (float): area of the convex hull of the projected points
diagonal_max (float): diagonal of the convex hull of the projected points
"""
# use the largest projection area as the unfolded area
area_max = 0.0
diagonal_max = 0.0
dims_list = [[0, 1], [0, 2], [1, 2]] # x-y plane, x-z plane, y-z plane
for dims in dims_list:
area, diagonal = self.calculate_projection_area_and_diagonal(dims)
if area > area_max:
area_max = area
diagonal_max = diagonal
return area_max, diagonal_max
def calculate_projection_area_and_diagonal(self, dims):
"""
Calculate the projection area and the diagonal length when projecting to the plane defined by the input dims
E.g. if dims is [0, 1], the points will be projected onto the x-y plane.
Args:
dims (2-array): Global axes to project area onto. Options are {0, 1, 2}.
E.g. if dims is [0, 1], project onto the x-y plane.
Returns:
area (float): area of the convex hull of the projected points
diagonal (float): diagonal of the convex hull of the projected points
"""
cloth = self.obj.root_link
points = cloth.keypoint_particle_positions[:, dims]
try:
hull = ConvexHull(points)
# The points may be 2D-degenerate, so catch the error and return 0 if so
except QhullError:
# This is a degenerate hull, so return 0 area and diagonal
return 0.0, 0.0
# When input points are 2-dimensional, this is the area of the convex hull.
# Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.html
area = hull.volume
diagonal = distance_matrix(points[hull.vertices], points[hull.vertices]).max()
if m.DEBUG_CLOTH_PROJ_VIS:
import matplotlib.pyplot as plt
ax = plt.gca()
ax.set_aspect('equal')
            # `points` was already sliced down to the projection plane above, so index columns 0/1 directly
            plt.plot(points[:, 0], points[:, 1], 'o')
            for simplex in hull.simplices:
                plt.plot(points[simplex, 0], points[simplex, 1], 'k-')
            plt.plot(points[hull.vertices, 0], points[hull.vertices, 1], 'r--', lw=2)
            plt.plot(points[hull.vertices[0], 0], points[hull.vertices[0], 1], 'ro')
plt.show()
return area, diagonal
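# Standalone sketch (illustrative, not used by the states below) of the hull
# computation above, for any (N, 2) array of projected points: for 2D inputs,
# `hull.volume` is the enclosed area, and the diagonal is the largest pairwise
# distance between hull vertices; degenerate inputs raise QhullError.
def _hull_area_and_diagonal(points_2d):
    try:
        hull = ConvexHull(points_2d)
    except QhullError:
        return 0.0, 0.0
    area = hull.volume
    diagonal = distance_matrix(points_2d[hull.vertices], points_2d[hull.vertices]).max()
    return area, diagonal
# e.g. the unit square -> (1.0, ~1.414):
# _hull_area_and_diagonal(np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]]))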
class Folded(AbsoluteObjectState, BooleanStateMixin, ClothStateMixin):
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(FoldedLevel)
return deps
def _get_value(self):
        # Check smoothness, area, and diagonal of the cloth against the folded thresholds
folded_level = self.obj.states[FoldedLevel].get_value()
return folded_level.smoothness >= m.NORMAL_Z_PERCENTAGE and \
folded_level.area < m.FOLDED_AREA_THRESHOLD and \
folded_level.diagonal < m.FOLDED_DIAGONAL_THRESHOLD
def _set_value(self, new_value):
if not new_value:
raise NotImplementedError("Folded does not support set_value(False)")
# TODO (eric): add this support
raise NotImplementedError("Folded does not support set_value(True)")
# We don't need to dump / load anything since the cloth objects should handle it themselves
class Unfolded(AbsoluteObjectState, BooleanStateMixin, ClothStateMixin):
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(FoldedLevel)
return deps
def _get_value(self):
        # Check smoothness, area, and diagonal of the cloth against the unfolded thresholds
folded_level = self.obj.states[FoldedLevel].get_value()
return folded_level.smoothness >= m.NORMAL_Z_PERCENTAGE and \
folded_level.area >= m.UNFOLDED_AREA_THRESHOLD and \
folded_level.diagonal >= m.UNFOLDED_DIAGONAL_THRESHOLD
def _set_value(self, new_value):
if not new_value:
raise NotImplementedError("Unfolded does not support set_value(False)")
self.obj.root_link.reset()
return True
# We don't need to dump / load anything since the cloth objects should handle it themselves
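# Hypothetical usage sketch (object handle assumed; requires a running simulator
# with a cloth object):
#   cloth_obj.states[Folded].get_value()        # bool, via the FoldedLevel criteria
#   cloth_obj.states[Unfolded].set_value(True)  # resets the cloth to its unfolded pose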
| 7,106 | Python | 39.380682 | 116 | 0.663805 |
StanfordVL/OmniGibson/omnigibson/object_states/toggle.py | import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros
from omnigibson.prims.geom_prim import VisualGeomPrim
from omnigibson.object_states.link_based_state_mixin import LinkBasedStateMixin
from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin
from omnigibson.object_states.update_state_mixin import UpdateStateMixin, GlobalUpdateStateMixin
from omnigibson.utils.python_utils import classproperty
from omnigibson.utils.usd_utils import create_primitive_mesh, RigidContactAPI
from omnigibson.utils.constants import PrimType
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.TOGGLE_LINK_PREFIX = "togglebutton"
m.DEFAULT_SCALE = 0.1
m.CAN_TOGGLE_STEPS = 5
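# Minimal sketch (illustrative only) of the debounce implemented in
# ToggledOn._update below: the toggle only flips once the robot's fingers have
# overlapped the button mesh for m.CAN_TOGGLE_STEPS consecutive sim steps.
def _debounced_toggle(steps_so_far, overlapping, threshold=m.CAN_TOGGLE_STEPS):
    steps = steps_so_far + 1 if overlapping else 0
    return steps, steps == threshold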
class ToggledOn(AbsoluteObjectState, BooleanStateMixin, LinkBasedStateMixin, UpdateStateMixin, GlobalUpdateStateMixin):
# Set of prim paths defining robot finger links belonging to any manipulation robots
_robot_finger_paths = None
# Set of objects that are contacting any manipulation robots
_finger_contact_objs = None
def __init__(self, obj, scale=None):
self.scale = scale
self.value = False
self.robot_can_toggle_steps = 0
self.visual_marker = None
# We also generate the function for checking overlaps at runtime
self._check_overlap = None
super().__init__(obj)
@classmethod
def global_update(cls):
# Avoid circular imports
from omnigibson.robots.manipulation_robot import ManipulationRobot
# Clear finger contact objects since it will be refreshed now
cls._finger_contact_objs = set()
# detect marker and hand interaction
robot_finger_links = set(link
for robot in og.sim.scene.robots if isinstance(robot, ManipulationRobot)
for finger_links in robot.finger_links.values()
for link in finger_links)
cls._robot_finger_paths = set(link.prim_path for link in robot_finger_links)
# If there aren't any valid robot link paths, immediately return
if len(cls._robot_finger_paths) == 0:
return
finger_idxs = [RigidContactAPI.get_body_col_idx(prim_path) for prim_path in cls._robot_finger_paths]
finger_impulses = RigidContactAPI.get_all_impulses()[:, finger_idxs, :]
n_bodies = len(finger_impulses)
touching_bodies = np.any(finger_impulses.reshape(n_bodies, -1), axis=-1)
touching_bodies_idxs = np.where(touching_bodies)[0]
if len(touching_bodies_idxs) > 0:
for idx in touching_bodies_idxs:
body_prim_path = RigidContactAPI.get_row_idx_prim_path(idx=idx)
obj = og.sim.scene.object_registry("prim_path", "/".join(body_prim_path.split("/")[:-1]))
if obj is not None:
cls._finger_contact_objs.add(obj)
@classproperty
def metalink_prefix(cls):
return m.TOGGLE_LINK_PREFIX
def _get_value(self):
return self.value
def _set_value(self, new_value):
self.value = new_value
# Choose which color to apply to the toggle marker
self.visual_marker.color = np.array([0, 1.0, 0]) if self.value else np.array([1.0, 0, 0])
return True
def _initialize(self):
super()._initialize()
self.initialize_link_mixin()
# Make sure this object is not cloth
assert self.obj.prim_type != PrimType.CLOTH, f"Cannot create ToggledOn state for cloth object {self.obj.name}!"
mesh_prim_path = f"{self.link.prim_path}/mesh_0"
pre_existing_mesh = lazy.omni.isaac.core.utils.prims.get_prim_at_path(mesh_prim_path)
# Create a primitive mesh if it doesn't already exist
if not pre_existing_mesh:
self.scale = m.DEFAULT_SCALE if self.scale is None else self.scale
# Note: We have to create a mesh (instead of a sphere shape) because physx complains about non-uniform
# scaling for non-meshes
mesh = create_primitive_mesh(
prim_path=mesh_prim_path,
primitive_type="Sphere",
extents=1.0,
)
else:
            # Infer scale from the pre-existing mesh if not specified as an input
lazy.omni.isaac.core.utils.bounds.recompute_extents(prim=pre_existing_mesh)
self.scale = np.array(pre_existing_mesh.GetAttribute("xformOp:scale").Get())
# Create the visual geom instance referencing the generated mesh prim
self.visual_marker = VisualGeomPrim(prim_path=mesh_prim_path, name=f"{self.obj.name}_visual_marker")
self.visual_marker.scale = self.scale
self.visual_marker.initialize()
self.visual_marker.visible = True
# Store the projection mesh's IDs
projection_mesh_ids = lazy.pxr.PhysicsSchemaTools.encodeSdfPath(self.visual_marker.prim_path)
# Define function for checking overlap
valid_hit = False
def overlap_callback(hit):
nonlocal valid_hit
valid_hit = hit.rigid_body in self._robot_finger_paths
# Continue traversal only if we don't have a valid hit yet
return not valid_hit
# Set this value to be False by default
self._set_value(False)
def check_overlap():
nonlocal valid_hit
valid_hit = False
if self.visual_marker.prim.GetTypeName() == "Mesh":
og.sim.psqi.overlap_mesh(*projection_mesh_ids, reportFn=overlap_callback)
else:
og.sim.psqi.overlap_shape(*projection_mesh_ids, reportFn=overlap_callback)
return valid_hit
self._check_overlap = check_overlap
def _update(self):
# If we're not nearby any fingers, we automatically can't toggle
if self.obj not in self._finger_contact_objs:
robot_can_toggle = False
else:
# Check to make sure fingers are actually overlapping the toggle button mesh
robot_can_toggle = self._check_overlap()
if robot_can_toggle:
self.robot_can_toggle_steps += 1
else:
self.robot_can_toggle_steps = 0
if self.robot_can_toggle_steps == m.CAN_TOGGLE_STEPS:
self.set_value(not self.value)
@staticmethod
def get_texture_change_params():
# By default, it keeps the original albedo unchanged.
albedo_add = 0.0
diffuse_tint = (1.0, 1.0, 1.0)
return albedo_add, diffuse_tint
@property
def state_size(self):
return 2
# For this state, we simply store its value and the robot_can_toggle steps.
def _dump_state(self):
return dict(value=self.value, hand_in_marker_steps=self.robot_can_toggle_steps)
def _load_state(self, state):
# Nothing special to do here when initialized vs. uninitialized
self._set_value(state["value"])
self.robot_can_toggle_steps = state["hand_in_marker_steps"]
def _serialize(self, state):
return np.array([state["value"], state["hand_in_marker_steps"]], dtype=float)
def _deserialize(self, state):
return dict(value=bool(state[0]), hand_in_marker_steps=int(state[1])), 2
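# Standalone sketch of the contact reduction performed in ToggledOn.global_update:
# given an (n_bodies, n_fingers, 3) impulse tensor sliced out of RigidContactAPI,
# a body counts as "touching" if any impulse component against any finger is
# nonzero. Shapes are assumptions for illustration; the real tensor comes from
# the physics engine.
def _touching_body_indices(finger_impulses):
    n_bodies = len(finger_impulses)
    touching = np.any(finger_impulses.reshape(n_bodies, -1), axis=-1)
    return np.where(touching)[0]
# e.g. an impulse tensor of shape (2, 3, 3) where only body 1 is nonzero -> array([1])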
| 7,349 | Python | 38.72973 | 119 | 0.647027 |
StanfordVL/OmniGibson/omnigibson/object_states/joint_break_subscribed_state_mixin.py | from omnigibson.object_states.object_state_base import BaseObjectState
from abc import abstractmethod
from omnigibson.utils.python_utils import classproperty
class JointBreakSubscribedStateMixin(BaseObjectState):
"""
Handles JOINT_BREAK event.
The subclass should implement its own on_joint_break method
"""
@abstractmethod
def on_joint_break(self, joint_prim_path):
raise NotImplementedError("Subclasses of JointBreakSubscribedStateMixin should implement the on_joint_break method.")
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("JointBreakSubscribedStateMixin")
return classes
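# Hedged sketch of a concrete subclass (name and behavior are illustrative, not
# part of OmniGibson): a state that reacts to a joint breaking by remembering
# which joint broke. It opts out of registration the same way the mixin does.
class _ExampleJointBreakLogger(JointBreakSubscribedStateMixin):
    def on_joint_break(self, joint_prim_path):
        # A real state would update its value here; we just record the path
        self._last_broken_joint = joint_prim_path
    @classproperty
    def _do_not_register_classes(cls):
        classes = super()._do_not_register_classes
        classes.add("_ExampleJointBreakLogger")
        return classes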
| 775 | Python | 34.272726 | 125 | 0.747097 |
StanfordVL/OmniGibson/omnigibson/object_states/particle_modifier.py | from abc import abstractmethod
from collections import defaultdict
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros, macros, gm
from omnigibson.prims.geom_prim import VisualGeomPrim
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.contact_bodies import ContactBodies
from omnigibson.object_states.contact_particles import ContactParticles
from omnigibson.object_states.covered import Covered
from omnigibson.object_states.link_based_state_mixin import LinkBasedStateMixin
from omnigibson.object_states.object_state_base import IntrinsicObjectState
from omnigibson.object_states.saturated import ModifiedParticles, Saturated
from omnigibson.object_states.toggle import ToggledOn
from omnigibson.object_states.update_state_mixin import UpdateStateMixin
from omnigibson.prims.prim_base import BasePrim
from omnigibson.systems.system_base import BaseSystem, VisualParticleSystem, PhysicalParticleSystem, get_system, \
is_visual_particle_system, is_physical_particle_system, is_fluid_system, is_system_active, REGISTERED_SYSTEMS
from omnigibson.utils.constants import ParticleModifyMethod, ParticleModifyCondition, PrimType
from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function, \
get_particle_positions_in_frame, get_particle_positions_from_frame
from omnigibson.utils.python_utils import classproperty
from omnigibson.utils.ui_utils import suppress_omni_log
from omnigibson.utils.usd_utils import create_primitive_mesh, FlatcacheAPI
import omnigibson.utils.transform_utils as T
from omnigibson.utils.sampling_utils import sample_cuboid_on_object
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.APPLICATION_LINK_PREFIX = "particleapplier"
m.REMOVAL_LINK_PREFIX = "particleremover"
# How many samples within the application area to generate per update step
m.MAX_VISUAL_PARTICLES_APPLIED_PER_STEP = 2
m.MAX_PHYSICAL_PARTICLES_APPLIED_PER_STEP = 10
# How many steps between generating particle samples
m.N_STEPS_PER_APPLICATION = 5
m.N_STEPS_PER_REMOVAL = 1
# Application thresholds -- maximum number of particles that can be applied by a ParticleApplier
m.VISUAL_PARTICLES_APPLICATION_LIMIT = 1000000
m.PHYSICAL_PARTICLES_APPLICATION_LIMIT = 1000000
# Saturation thresholds -- maximum number of particles that can be removed ("absorbed") by a ParticleRemover
m.VISUAL_PARTICLES_REMOVAL_LIMIT = 40
m.PHYSICAL_PARTICLES_REMOVAL_LIMIT = 400
# Fallback particle visualization radius for visualizing projected visual particles
m.VISUAL_PARTICLE_PROJECTION_PARTICLE_RADIUS = 0.01
# The margin (> 0) to add to the remover area's AABB when detecting overlaps with other objects
m.PARTICLE_MODIFIER_ADJACENCY_AREA_MARGIN = 0.05
# Settings for determining how the projection particles are visualized as they're projected
m.PROJECTION_VISUALIZATION_CONE_TIP_RADIUS = 0.001
m.PROJECTION_VISUALIZATION_RATE = 200
m.PROJECTION_VISUALIZATION_SPEED = 2.0
m.PROJECTION_VISUALIZATION_ORIENTATION_BIAS = 1e6
m.PROJECTION_VISUALIZATION_SPREAD_FACTOR = 0.8
def create_projection_visualization(
prim_path,
shape,
projection_name,
projection_radius,
projection_height,
particle_radius,
parent_scale,
material=None,
):
"""
Helper function to generate a projection visualization using Omniverse's particle visualization system
Args:
prim_path (str): Stage location for where to generate the projection visualization
        shape (str): Shape of the projection to generate. Valid options are: {Cylinder, Cone}
projection_name (str): Name associated with this projection visualization. Should be unique!
projection_radius (float): Radius of the generated projection visualization overall volume
(specified in local frame)
projection_height (float): Height of the generated projection visualization overall volume
(specified in local frame)
particle_radius (float): Radius of the particles composing the projection visualization
parent_scale (3-array): If specified, specifies the (x,y,z) scale of the parent Xform prim of the
generated source sphere prim at @prim_path. This will be used to scale the visualization accordingly
material (None or MaterialPrim): If specified, specifies the material to associate with the generated
particles within the projection visualization
Returns:
2-tuple:
- UsdPrim: Generated ParticleSystem (ComputeGraph) prim generated
- UsdPrim: Generated Emitter (ComputeGraph) prim generated
"""
# Create the desired shape which will be used as the source input prim into the generated projection visualization
source = lazy.pxr.UsdGeom.Sphere.Define(og.sim.stage, lazy.pxr.Sdf.Path(prim_path))
# Modify the radius according to the desired @shape (and also infer the desired spread values)
if shape == "Cylinder":
source_radius = projection_radius
spread = np.zeros(3)
elif shape == "Cone":
# Default to close to singular point otherwise
source_radius = m.PROJECTION_VISUALIZATION_CONE_TIP_RADIUS
spread_ratio = projection_radius * 2.0 / projection_height
spread = np.ones(3) * spread_ratio * m.PROJECTION_VISUALIZATION_SPREAD_FACTOR
else:
raise ValueError(f"Invalid shape specified for projection visualization! Valid options are: [Cone, Cylinder], got: {shape}")
# Set the radius
source.GetRadiusAttr().Set(source_radius)
# Also make the prim invisible
lazy.pxr.UsdGeom.Imageable(source.GetPrim()).MakeInvisible()
# Generate the ComputeGraph nodes to render the projection
# Import now to avoid too-eager load of Omni classes due to inheritance
from omnigibson.utils.deprecated_utils import Core
core = Core(lambda val: None, particle_system_name=projection_name)
# Scale radius and height by the parent scale -- projection always points in the negative-z direction of the
# parent frame
# We do this AFTER we create the source sphere because the actual projection is scaled in the world frame, whereas
# the source sphere is already scaled by its own parent frame
# NOTE: The generated projection visualization will NOT match the underlying projection mesh if the parent link is
# scaled non-uniformly!!
projection_radius *= np.mean(parent_scale[:2])
projection_height *= parent_scale[2]
# Suppress omni warnings here -- we don't have control over this API, but omni likes to complain about this
with suppress_omni_log(channels=["omni.graph.core.plugin", "omni.usd", "rtx.neuraylib.plugin"]):
system_path, _, emitter_path, vis_path, instancer_path, sprite_path, mat_path, output_path = \
core.create_particle_system(display="point_instancer", paths=[prim_path])
# Override the prototype with our own sphere with optional material
prototype_path = "/".join(sprite_path.split("/")[:-1]) + "/prototype"
create_primitive_mesh(prototype_path, primitive_type="Sphere")
prototype = VisualGeomPrim(prim_path=prototype_path, name=f"{projection_name}_prototype")
prototype.initialize()
# Set the scale (native scaling --> radius 0.5) and possibly update the material
prototype.scale = particle_radius * 2.0
if material is not None:
prototype.material = material
# Override the prototype used by the instancer
instancer_prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(instancer_path)
instancer_prim.GetProperty("inputs:prototypes").SetTargets([prototype_path])
# Destroy the old mat path since we don't use the sprites
lazy.omni.isaac.core.utils.prims.delete_prim(mat_path)
# Modify the settings of the emitter to match the desired shape from inputs
emitter_prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(emitter_path)
emitter_prim.GetProperty("inputs:active").Set(True)
emitter_prim.GetProperty("inputs:rate").Set(m.PROJECTION_VISUALIZATION_RATE)
emitter_prim.GetProperty("inputs:lifespan").Set(projection_height / m.PROJECTION_VISUALIZATION_SPEED)
emitter_prim.GetProperty("inputs:speed").Set(m.PROJECTION_VISUALIZATION_SPEED)
emitter_prim.GetProperty("inputs:alongAxis").Set(m.PROJECTION_VISUALIZATION_ORIENTATION_BIAS)
emitter_prim.GetProperty("inputs:scale").Set(lazy.pxr.Gf.Vec3f(1.0, 1.0, 1.0))
emitter_prim.GetProperty("inputs:directionRandom").Set(lazy.pxr.Gf.Vec3f(*spread))
emitter_prim.GetProperty("inputs:addSourceVelocity").Set(1.0)
# Make sure we render 4 times to fully propagate changes (validated empirically)
# Omni likes to complain here again, but we have no control over the low-level information, so we suppress warnings
with suppress_omni_log(channels=["omni.particle.system.core.plugin", "omni.hydra.scene_delegate.plugin", "omni.usd"]):
for i in range(4):
og.sim.render()
# Return the particle system prim which "owns" everything
return lazy.omni.isaac.core.utils.prims.get_prim_at_path(system_path), emitter_prim
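# Hypothetical call sketch (prim path and dimensions are assumptions) showing
# how the helper above is typically parameterized, e.g. for a cone-shaped spray
# visualization hanging off a particleapplier metalink:
#   system_prim, emitter_prim = create_projection_visualization(
#       prim_path="/World/spray_bottle/particleapplier_link/projection_visualization",
#       shape="Cone",
#       projection_name="spray_bottle_projection_visualization",
#       projection_radius=0.05,
#       projection_height=0.3,
#       particle_radius=0.005,
#       parent_scale=np.ones(3),
#   )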
class ParticleModifier(IntrinsicObjectState, LinkBasedStateMixin, UpdateStateMixin):
"""
Object state representing an object that has the ability to modify visual and / or physical particles within the
active simulation.
Args:
obj (StatefulObject): Object to which this state will be applied
conditions (dict): Dictionary mapping the names of ParticleSystem (str) to None or list of 2-tuples, where
None represents "never", empty list represents "always", or each 2-tuple is interpreted as a single condition in the form of
(ParticleModifyCondition, value) necessary in order for this particle modifier to be
able to modify particles belonging to @ParticleSystem. Expected types of val are as follows:
SATURATED: string name of the desired system that this modifier must be saturated by, e.g., "water"
TOGGLEDON: boolean T/F; whether this modifier must be toggled on or not
GRAVITY: boolean T/F; whether this modifier must be pointing downwards (T) or upwards (F)
FUNCTION: a function, whose signature is as follows:
def condition(obj) --> bool
Where @obj is the specific object that this ParticleModifier state belongs to.
For a given ParticleSystem, the list of 2-tuples will be converted into a list of function calls of the
form above -- if all of its conditions evaluate to True and particles are detected within
this particle modifier area, then we potentially modify those particles
method (ParticleModifyMethod): Method to modify particles. Current options supported are ADJACENCY (i.e.:
"touching" particles) or PROJECTION (i.e.: "spraying" particles)
projection_mesh_params (None or dict): If specified and @method is ParticleModifyMethod.PROJECTION,
manually overrides any data inferred directly from this object to infer what projection volume to generate
for this particle modifier. Expected entries are as follows:
"type": (str), one of {"Cylinder", "Cone", "Cube", "Sphere"}
"extents": (3-array), the (x,y,z) extents of the generated volume (specified in local link frame!)
If None, information found from @obj.metadata will be used instead.
NOTE: x-direction should align with the projection mesh's height (i.e.: z) parameter in @extents!
"""
def __init__(self, obj, conditions, method=ParticleModifyMethod.ADJACENCY, projection_mesh_params=None):
# Store internal variables
self.method = method
self.projection_source_sphere = None
self.projection_mesh = None
self._check_in_mesh = None
self._check_overlap = None
self._link_prim_paths = None
self._current_step = None
self._projection_mesh_params = projection_mesh_params
# Parse conditions
self._conditions = self._parse_conditions(conditions=conditions)
# Run super method
super().__init__(obj)
@property
def conditions(self):
"""
dict: Dictionary mapping the names of ParticleSystem (str) to a list of function calls that must evaluate to
True in order for this particle modifier to be able to modify particles belonging to @ParticleSystem.
The list of functions at least contains the limit condition, which is a function that checks whether the
        applier has applied or the remover has removed the maximum number of particles allowed. If the system name is
not in the dictionary, then the modifier cannot modify particles of that system.
"""
return self._conditions
@classmethod
def is_compatible(cls, obj, **kwargs):
# Run super first
compatible, reason = super().is_compatible(obj, **kwargs)
if not compatible:
return compatible, reason
# Check whether this state has toggledon if required or saturated if required for any condition
conditions = kwargs.get("conditions", dict())
cond_types = {cond[0] for _, conds in conditions.items() if conds is not None for cond in conds}
for cond_type, state_type in zip((ParticleModifyCondition.TOGGLEDON,), (ToggledOn,)):
if cond_type in cond_types and state_type not in obj.states:
return False, f"{cls.__name__} requires {state_type.__name__} state!"
return True, None
@classmethod
def is_compatible_asset(cls, prim, **kwargs):
# Run super first
compatible, reason = super().is_compatible_asset(prim, **kwargs)
if not compatible:
return compatible, reason
# Check whether this state has toggledon if required or saturated if required for any condition
conditions = kwargs.get("conditions", dict())
cond_types = {cond[0] for _, conds in conditions.items() if conds is not None for cond in conds}
for cond_type, state_type in zip((ParticleModifyCondition.TOGGLEDON,), (ToggledOn,)):
if cond_type in cond_types and not state_type.is_compatible_asset(prim=prim, **kwargs):
return False, f"{cls.__name__} requires {state_type.__name__} state!"
return True, None
@classmethod
def postprocess_ability_params(cls, params):
"""
Post-processes ability parameters to ensure the system names (rather than synsets) are used for conditions.
"""
# Import here to avoid circular imports
from omnigibson.utils.bddl_utils import get_system_name_by_synset
for sys in list(params["conditions"].keys()):
# The original key can be either a system name or a system synset. If it's a synset, we need to convert it.
system_name = sys if sys in REGISTERED_SYSTEMS.keys() else get_system_name_by_synset(sys)
params["conditions"][system_name] = params["conditions"].pop(sys)
conds = params["conditions"][system_name]
if conds is None:
continue
for cond in conds:
cond_type, cond_sys = cond
if cond_type == ParticleModifyCondition.SATURATED:
cond[1] = cond_sys if cond_sys in REGISTERED_SYSTEMS.keys() else get_system_name_by_synset(cond_sys)
return params
def _initialize(self):
super()._initialize()
# Run link initialization
self.initialize_link_mixin()
# Sanity check scale if requested
if self.requires_overlap:
# Run sanity check to make sure compatibility with omniverse physx
if self.method == ParticleModifyMethod.PROJECTION and not np.isclose(self.obj.scale.max(), self.obj.scale.min(), atol=1e-04):
raise ValueError(f"{self.__class__.__name__} for obj {self.obj.name} using PROJECTION method cannot be "
f"created with non-uniform scale and requires_overlap! Got scale: {self.obj.scale}")
# Initialize internal variables
self._current_step = 0
# Grab link prim paths and potentially update projection mesh params
self._link_prim_paths = set(self.obj.link_prim_paths)
# Define callback used during overlap method
# We want to ignore any hits that are with this object itself
valid_hit = False
def overlap_callback(hit):
nonlocal valid_hit
valid_hit = hit.rigid_body not in self._link_prim_paths
# Continue traversal only if we don't have a valid hit yet
return not valid_hit
# Possibly create a projection volume if we're using the projection method
if self.method == ParticleModifyMethod.PROJECTION:
# Construct naming prefix to apply to generated prims
name_prefix = f"{self.obj.name}_{self.__class__.__name__}"
shape_defaults = {
"radius": 0.5,
"height": 1.0,
"size": 1.0,
}
mesh_prim_path = f"{self.link.prim_path}/mesh_0"
# Create a primitive shape if it doesn't already exist
pre_existing_mesh = lazy.omni.isaac.core.utils.prims.get_prim_at_path(mesh_prim_path)
if not pre_existing_mesh:
# Projection mesh params must be specified in order to determine scalings
assert self._projection_mesh_params is not None, \
f"Must specify projection_mesh_params for {self.obj.name}'s {self.__class__.__name__} " \
f"since it has no pre-existing projection mesh!"
mesh = getattr(lazy.pxr.UsdGeom, self._projection_mesh_params["type"]).Define(og.sim.stage, mesh_prim_path).GetPrim()
property_names = set(mesh.GetPropertyNames())
for shape_attr, default_val in shape_defaults.items():
if shape_attr in property_names:
mesh.GetAttribute(shape_attr).Set(default_val)
else:
# Potentially populate projection mesh params if the prim exists
mesh_type = pre_existing_mesh.GetTypeName()
if self._projection_mesh_params is None:
self._projection_mesh_params = {
"type": mesh_type,
"extents": np.array(pre_existing_mesh.GetAttribute("xformOp:scale").Get()),
}
# Otherwise, make sure we don't have a mismatch between the pre-existing shape type and the
# desired type since we can't delete the original mesh
else:
assert self._projection_mesh_params["type"] == mesh_type, \
f"Got mismatch in requested projection mesh type ({self._projection_mesh_params['type']}) and " \
f"pre-existing mesh type ({mesh_type})"
# Create the visual geom instance referencing the generated mesh prim, and then hide it
self.projection_mesh = VisualGeomPrim(prim_path=mesh_prim_path, name=f"{name_prefix}_projection_mesh")
self.projection_mesh.initialize()
self.projection_mesh.visible = False
# Make sure the shape-based attributes are not set, and only the scaling is set
property_names = set(self.projection_mesh.prim.GetPropertyNames())
for shape_attr, default_val in shape_defaults.items():
if shape_attr in property_names:
val = self.projection_mesh.get_attribute(shape_attr)
assert val == default_val, \
f"Projection mesh should have shape-based attribute {shape_attr} == {default_val}! Got: {val}"
# Set the scale based on projection mesh params
self.projection_mesh.scale = np.array(self._projection_mesh_params["extents"])
# Make sure the object updates its meshes, and assert that there's only a single visual mesh
self.link.update_meshes()
assert len(self.link.visual_meshes) == 1, \
f"Expected only a single projection mesh for {self.link}, got: {len(self.link.visual_meshes)}"
# Make sure the mesh is translated so that its tip lies at the metalink origin, and rotated so the vector
# from tip to tail faces the positive x axis
z_offset = 0.0 if self._projection_mesh_params["type"] == "Sphere" else self._projection_mesh_params["extents"][2] / 2
self.projection_mesh.set_local_pose(
position=np.array([0, 0, -z_offset]),
orientation=T.euler2quat([0, 0, 0]),
)
# Generate the function for checking whether points are within the projection mesh
self._check_in_mesh, _ = generate_points_in_volume_checker_function(obj=self.obj, volume_link=self.link)
# Store the projection mesh's IDs
projection_mesh_ids = lazy.pxr.PhysicsSchemaTools.encodeSdfPath(self.projection_mesh.prim_path)
# We also generate the function for checking overlaps at runtime
def check_overlap():
nonlocal valid_hit
valid_hit = False
og.sim.psqi.overlap_shape(*projection_mesh_ids, reportFn=overlap_callback)
return valid_hit
elif self.method == ParticleModifyMethod.ADJACENCY:
# Define the function for checking whether points are within the adjacency mesh
def check_in_adjacency_mesh(particle_positions):
# Define the AABB bounds
lower, upper = self.link.visual_aabb
# Add the margin
lower -= m.PARTICLE_MODIFIER_ADJACENCY_AREA_MARGIN
upper += m.PARTICLE_MODIFIER_ADJACENCY_AREA_MARGIN
return ((lower < particle_positions) & (particle_positions < upper)).all(axis=-1)
self._check_in_mesh = check_in_adjacency_mesh
# Define the function for checking overlaps at runtime
def check_overlap():
nonlocal valid_hit
valid_hit = False
aabb = self.link.visual_aabb
og.sim.psqi.overlap_box(
halfExtent=(aabb[1] - aabb[0]) / 2.0 + m.PARTICLE_MODIFIER_ADJACENCY_AREA_MARGIN,
pos=(aabb[1] + aabb[0]) / 2.0,
rot=np.array([0, 0, 0, 1.0]),
reportFn=overlap_callback,
)
return valid_hit
else:
raise ValueError(f"Unsupported ParticleModifyMethod: {self.method}!")
# Store check overlap function
self._check_overlap = check_overlap
# We abuse the Saturated state to store the limit for particle modifier (including both applier and remover)
for system_name in self.conditions.keys():
system = get_system(system_name, force_active=False)
limit = self.visual_particle_modification_limit \
if is_visual_particle_system(system_name=system.name) \
else self.physical_particle_modification_limit
self.obj.states[Saturated].set_limit(system=system, limit=limit)
def _generate_condition(self, condition_type, value):
"""
Generates a valid condition function given @condition_type and its corresponding @value
Args:
condition_type (ParticleModifyCondition): Type of condition to generate
value (any): Corresponding value whose type depends on @condition_type:
SATURATED: string name of the desired system that this modifier must be saturated by, e.g., "water"
TOGGLEDON: boolean T/F; whether this modifier must be toggled on or not
GRAVITY: boolean T/F; whether this modifier must be pointing downwards (T) or upwards (F)
FUNCTION: a function, whose signature is as follows:
def condition(obj) --> bool
Where @obj is the specific object that this ParticleModifier state belongs to.
Returns:
function: Condition function whose signature is identical to FUNCTION listed above
"""
# Avoid circular imports
from omnigibson.object_states.saturated import Saturated
if condition_type == ParticleModifyCondition.FUNCTION:
cond = value
elif condition_type == ParticleModifyCondition.SATURATED:
cond = lambda obj: is_system_active(value) and obj.states[Saturated].get_value(get_system(value))
elif condition_type == ParticleModifyCondition.TOGGLEDON:
cond = lambda obj: obj.states[ToggledOn].get_value() == value
elif condition_type == ParticleModifyCondition.GRAVITY:
# Particles spawn in negative z-axis direction, so check positive dot product of link frame with global
cond = lambda obj: (np.dot(T.quat2mat(obj.states[self.__class__].link.get_orientation()) @ np.array([0, 0, 1]), np.array([0, 0, 1])) > 0) == value
else:
raise ValueError(f"Got invalid ParticleModifyCondition: {condition_type}")
return cond
def _parse_conditions(self, conditions):
"""
Parse conditions and store them internally
Args:
conditions (dict): Dictionary mapping the names of ParticleSystem (str) to None or list of 2-tuples, where
None represents "never", empty list represents "always", or each 2-tuple is interpreted as a single condition in the form of
(ParticleModifyCondition, value) necessary in order for this particle modifier to be
able to modify particles belonging to @ParticleSystem. Expected types of val are as follows:
SATURATED: string name of the desired system that this modifier must be saturated by, e.g., "water"
TOGGLEDON: boolean T/F; whether this modifier must be toggled on or not
GRAVITY: boolean T/F; whether this modifier must be pointing downwards (T) or upwards (F)
FUNCTION: a function, whose signature is as follows:
def condition(obj) --> bool
Where @obj is the specific object that this ParticleModifier state belongs to.
For a given ParticleSystem, the list of 2-tuples will be converted into a list of function calls of the
form above -- if all of its conditions evaluate to True and particles are detected within
this particle modifier area, then we potentially modify those particles
Returns:
dict: Dictionary mapping the names of ParticleSystem (str) to list of condition functions
"""
parsed_conditions = dict()
        # Standardize the conditions (make sure every system gets at least the limit condition appended, so the
        # particle modifier cannot exceed its maximum number of modified particles for that system)
for system_name, conds in conditions.items():
# Make sure the system is supported
assert is_visual_particle_system(system_name) or is_physical_particle_system(system_name), \
f"Unsupported system for ParticleModifier: {system_name}"
# Make sure conds isn't empty and is a list
if conds is None:
continue
assert type(conds) == list, f"Expected list of conditions for system {system_name}, got {conds}"
system_conditions = []
for cond_type, cond_val in conds:
cond = self._generate_condition(condition_type=cond_type, value=cond_val)
system_conditions.append(cond)
# Always add limit condition at the end
system_conditions.append(self._generate_limit_condition(system_name))
parsed_conditions[system_name] = system_conditions
return parsed_conditions
@abstractmethod
def _modify_particles(self, system):
"""
Helper function to modify any particles belonging to @system.
NOTE: This should handle both cases for @self.method:
ParticleModifyMethod.ADJACENCY: modify any particles that are overlapping within the relaxed AABB
defining adjacency to this object's modification link.
ParticleModifyMethod.PROJECTION: modify any particles that are overlapping within the projection mesh.
Must be implemented by subclass.
Args:
system (BaseSystem): Particle system whose corresponding particles will be checked for modification
"""
raise NotImplementedError()
def _generate_limit_condition(self, system_name):
"""
Generates a limit function condition for specific system of name @system_name
Args:
system_name (str): Name of the particle system for which to generate a limit checker function
Returns:
function: Limit checker function, with signature condition(obj) --> bool, where @obj is the specific object
that this ParticleModifier state belongs to
"""
system = get_system(system_name, force_active=False)
def condition(obj):
return not self.obj.states[Saturated].get_value(system=system)
return condition
def supports_system(self, system_name):
"""
Checks whether this particle modifier supports adding/removing a particle from the specified
system, e.g. whether there exists any configuration (toggled on, etc.) in which this modifier
can be used to interact with any particles of this system.
Args:
system_name (str): Name of the particle system to check
Returns:
bool: Whether this particle modifier can add or remove a particle from the specified system
"""
return system_name in self.conditions
def check_conditions_for_system(self, system_name):
"""
Checks whether this particle modifier can add or remove a particle from the specified system
in its current configuration, e.g. all of the conditions for addition/removal other than
physical position are met.
Args:
system_name (str): Name of the particle system to check
Returns:
bool: Whether this particle modifier can add or remove a particle from the specified system
"""
if not self.supports_system(system_name):
return False
return all(condition(self.obj) for condition in self.conditions[system_name])
def _update(self):
# If we're using projection method and flatcache, we need to manually update this object's transforms on the USD
# so the corresponding visualization and overlap meshes are updated properly
# This is expensive, so only do it if the object is not a fixed object and we have an active projection
if (
self.method == ParticleModifyMethod.PROJECTION
and gm.ENABLE_FLATCACHE
and not self.obj.fixed_base
and self.projection_is_active
):
FlatcacheAPI.sync_raw_object_transforms_in_usd(prim=self.obj)
# Check if there's any overlap and if we're at the correct step
if self._current_step == 0:
# Iterate over all systems to check
for system_name in self.systems_to_check:
if system_name in self.conditions:
# Check if all conditions are met
if self.check_conditions_for_system(system_name):
system = get_system(system_name)
# Sanity check to see if the modifier has reached its limit for this system
if self.obj.states[Saturated].get_value(system=system):
continue
# Potentially modify particles within the volume
self._modify_particles(system=system)
# Update the current step
self._current_step = (self._current_step + 1) % self.n_steps_per_modification
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.update({AABB, Saturated, ModifiedParticles})
return deps
@classmethod
def get_optional_dependencies(cls):
deps = super().get_optional_dependencies()
deps.update({Covered, ToggledOn, ContactBodies, ContactParticles})
return deps
@classproperty
def requires_overlap(self):
"""
Returns:
bool: Whether overlap checks should be executed as a guard condition against modifying particles
"""
raise NotImplementedError()
@classproperty
def supported_active_systems(cls):
"""
Returns:
dict: Maps system names to corresponding systems used in this state that are active, dynamic across time
"""
return dict(**VisualParticleSystem.get_active_systems(), **PhysicalParticleSystem.get_active_systems())
@property
def systems_to_check(self):
"""
Returns:
tuple of str: System names that should be actively checked for particle modification at the current timestep
"""
# Default is all supported active systems
return tuple(self.supported_active_systems.keys())
@property
def projection_is_active(self):
"""
Returns:
bool: If using ParticleModifyMethod.PROJECTION, should return whether the projection mesh is currently
active or not (e.g.: whether all conditions are met for a projection modification to potentially occur)
"""
# Return True by default
return True
@property
def n_steps_per_modification(self):
"""
Returns:
int: How many steps to take in between potentially modifying particles within the simulation
"""
raise NotImplementedError()
@property
def visual_particle_modification_limit(self):
"""
Returns:
int: Maximum number of visual particles from a specific system that can be modified by this object
"""
raise NotImplementedError()
@property
def physical_particle_modification_limit(self):
"""
Returns:
int: Maximum number of physical particles from a specific system that can be modified by this object
"""
raise NotImplementedError()
@property
def state_size(self):
# Only store the current_step
return 1
def _dump_state(self):
return dict(current_step=int(self._current_step))
def _load_state(self, state):
self._current_step = state["current_step"]
def _serialize(self, state):
return np.array([state["current_step"]], dtype=float)
def _deserialize(self, state):
current_step = int(state[0])
state_dict = dict(current_step=current_step)
return state_dict, 1
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("ParticleModifier")
return classes
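# Illustrative conditions dict for the subclasses below (system names and
# condition values are assumptions, not fixed by this module): "water" may only
# be modified while this object is toggled on, "stain" may always be modified,
# and "dust" may never be modified.
_EXAMPLE_CONDITIONS = {
    "water": [(ParticleModifyCondition.TOGGLEDON, True)],
    "stain": [],
    "dust": None,
}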
class ParticleRemover(ParticleModifier):
"""
ParticleModifier where the modification results in potentially removing particles from the simulation.
Args:
obj (StatefulObject): Object to which this state will be applied
conditions (dict): Dictionary mapping the names of ParticleSystem (str) to None or list of 2-tuples, where
None represents "never", empty list represents "always", or each 2-tuple is interpreted as a single condition in the form of
(ParticleModifyCondition, value) necessary in order for this particle modifier to be
able to modify particles belonging to @ParticleSystem. Expected types of val are as follows:
SATURATED: string name of the desired system that this modifier must be saturated by, e.g., "water"
TOGGLEDON: boolean T/F; whether this modifier must be toggled on or not
GRAVITY: boolean T/F; whether this modifier must be pointing downwards (T) or upwards (F)
FUNCTION: a function, whose signature is as follows:
def condition(obj) --> bool
Where @obj is the specific object that this ParticleModifier state belongs to.
For a given ParticleSystem, the list of 2-tuples will be converted into a list of function calls of the
form above -- if all of its conditions evaluate to True and particles are detected within
this particle modifier area, then we potentially modify those particles
method (ParticleModifyMethod): Method to modify particles. Current options supported are ADJACENCY (i.e.:
"touching" particles) or PROJECTION (i.e.: "spraying" particles)
projection_mesh_params (None or dict): If specified and @method is ParticleModifyMethod.PROJECTION,
manually overrides any data inferred directly from this object to infer what projection volume to generate
for this particle modifier. Expected entries are as follows:
"type": (str), one of {"Cylinder", "Cone", "Cube", "Sphere"}
"extents": (3-array), the (x,y,z) extents of the generated volume (specified in local link frame!)
If None, information found from @obj.metadata will be used instead.
NOTE: x-direction should align with the projection mesh's height (i.e.: z) parameter in @extents!
default_fluid_conditions (None or list): Condition(s) needed to remove any fluid particles not explicitly
specified in @conditions. If None, then it is assumed that no other physical particles can be removed. If
not None, should be in same format as an entry in @conditions, i.e.: list of (ParticleModifyCondition, val)
2-tuples
default_non_fluid_conditions (None or list): Condition(s) needed to remove any physical (excluding fluid)
particles not explicitly specified in @conditions. If None, then it is assumed that no other physical
particles can be removed. If not None, should be in same format as an entry in @conditions, i.e.: list of
(ParticleModifyCondition, val) 2-tuples
default_visual_conditions (None or list): Condition(s) needed to remove any visual particles not explicitly
specified in @conditions. If None, then it is assumed that no other visual particles can be removed. If
not None, should be in same format as an entry in @conditions, i.e.: list of (ParticleModifyCondition, val)
2-tuples
"""
def __init__(
self,
obj,
conditions,
method=ParticleModifyMethod.ADJACENCY,
projection_mesh_params=None,
default_fluid_conditions=None,
default_non_fluid_conditions=None,
default_visual_conditions=None,
):
# Store values
self._default_fluid_conditions = default_fluid_conditions if default_fluid_conditions is None else \
[self._generate_condition(cond_type, cond_val) for cond_type, cond_val in default_fluid_conditions]
self._default_non_fluid_conditions = default_non_fluid_conditions if default_non_fluid_conditions is None else \
[self._generate_condition(cond_type, cond_val) for cond_type, cond_val in default_non_fluid_conditions]
self._default_visual_conditions = default_visual_conditions if default_visual_conditions is None else \
[self._generate_condition(cond_type, cond_val) for cond_type, cond_val in default_visual_conditions]
# Run super
super().__init__(obj=obj, conditions=conditions, method=method, projection_mesh_params=projection_mesh_params)
def _parse_conditions(self, conditions):
# Run super first
parsed_conditions = super()._parse_conditions(conditions=conditions)
# Create set of default system to condition mappings based on settings
all_conditions = dict()
for system_name in REGISTERED_SYSTEMS.keys():
# If the system is already explicitly specified in conditions, continue
if system_name in conditions:
continue
# Since fluid system is a subclass of physical system, we need to check for fluid first
elif is_fluid_system(system_name):
default_system_conditions = self._default_fluid_conditions
elif is_physical_particle_system(system_name):
default_system_conditions = self._default_non_fluid_conditions
elif is_visual_particle_system(system_name):
default_system_conditions = self._default_visual_conditions
else:
# Don't process any other systems, continue
continue
if default_system_conditions is not None:
# Always make sure to add on condition for checking count of particles (can't remove any particles if
# there are 0 particles of the given system!)
all_conditions[system_name] = (
[self._generate_nonempty_system_condition(system_name),
self._generate_limit_condition(system_name)] +
default_system_conditions
)
# Overwrite conditions based on manually-specified ones
all_conditions.update(parsed_conditions)
return all_conditions
def _modify_particles(self, system):
# If the system has no particles, return
if system.n_particles == 0:
return
# Check the system
if is_visual_particle_system(system_name=system.name):
# Iterate over all particles and remove any that are within the relaxed AABB of the remover volume
particle_positions = system.get_particles_position_orientation()[0]
inbound_idxs = self._check_in_mesh(particle_positions).nonzero()[0]
modification_limit = self.visual_particle_modification_limit
# Physical system
else:
# If the object is a cloth, we have to use check_in_mesh with the relaxed AABB since we can't detect
# collisions via scene query interface. Alternatively, if we're using the projection method,
# we also need to use check_in_mesh to check for overlap with the projection mesh.
inbound_idxs = self._check_in_mesh(system.get_particles_position_orientation()[0]).nonzero()[0] \
if self.obj.prim_type == PrimType.CLOTH or self.method == ParticleModifyMethod.PROJECTION else \
np.array(list(self.obj.states[ContactParticles].get_value(system, self.link)))
modification_limit = self.physical_particle_modification_limit
n_modified_particles = self.obj.states[ModifiedParticles].get_value(system)
n_particles_absorbed = min(len(inbound_idxs), modification_limit - n_modified_particles)
system.remove_particles(inbound_idxs[:n_particles_absorbed])
self.obj.states[ModifiedParticles].set_value(system, n_modified_particles + n_particles_absorbed)
def _generate_nonempty_system_condition(self, system_name):
"""
        Internal helper function to programmatically generate a condition checker to make sure that at least one
particle exists in a given system
Args:
system_name (str): Name of the system
Returns:
function: Generated condition function with signature fcn(obj) --> bool, returning True if there is at least
one particle in the given system @system_name
"""
system = get_system(system_name, force_active=False)
return lambda obj: system.initialized and system.n_particles > 0
@property
def requires_overlap(self):
# No overlap check needed for particle removers
return False
@classproperty
def metalink_prefix(cls):
return m.REMOVAL_LINK_PREFIX
@classmethod
def requires_metalink(cls, **kwargs):
# No metalink required for adjacency
return kwargs.get("method", ParticleModifyMethod.ADJACENCY) != ParticleModifyMethod.ADJACENCY
@property
def _default_link(self):
# Only supported for adjacency, NOT projection
return self.obj.root_link if self.method == ParticleModifyMethod.ADJACENCY else None
@property
def n_steps_per_modification(self):
return m.N_STEPS_PER_REMOVAL
@property
def visual_particle_modification_limit(self):
return m.VISUAL_PARTICLES_REMOVAL_LIMIT
@property
def physical_particle_modification_limit(self):
return m.PHYSICAL_PARTICLES_REMOVAL_LIMIT
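# Standalone sketch of the absorption budget applied in _modify_particles above:
# a remover absorbs at most (limit - already_modified) of the particles found in
# range on a given step.
def _removal_budget(n_inbound, n_modified, limit):
    return min(n_inbound, limit - n_modified)
# e.g. _removal_budget(25, 390, m.PHYSICAL_PARTICLES_REMOVAL_LIMIT) -> 10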
class ParticleApplier(ParticleModifier):
"""
ParticleModifier where the modification results in potentially adding particles into the simulation.
Args:
obj (StatefulObject): Object to which this state will be applied
conditions (dict): Dictionary mapping the names of ParticleSystem (str) to None or list of 2-tuples, where
None represents "never", empty list represents "always", or each 2-tuple is interpreted as a single condition in the form of
(ParticleModifyCondition, value) necessary in order for this particle modifier to be
able to modify particles belonging to @ParticleSystem. Expected types of val are as follows:
SATURATED: string name of the desired system that this modifier must be saturated by, e.g., "water"
TOGGLEDON: boolean T/F; whether this modifier must be toggled on or not
GRAVITY: boolean T/F; whether this modifier must be pointing downwards (T) or upwards (F)
FUNCTION: a function, whose signature is as follows:
def condition(obj) --> bool
Where @obj is the specific object that this ParticleModifier state belongs to.
For a given ParticleSystem, the list of 2-tuples will be converted into a list of function calls of the
form above -- if all of its conditions evaluate to True and particles are detected within
this particle modifier area, then we potentially modify those particles
method (ParticleModifyMethod): Method to modify particles. Current options supported are ADJACENCY (i.e.:
"touching" particles) or PROJECTION (i.e.: "spraying" particles)
projection_mesh_params (None or dict): If specified and @method is ParticleModifyMethod.PROJECTION,
manually overrides any data inferred directly from this object to infer what projection volume to generate
for this particle modifier. Expected entries are as follows:
"type": (str), one of {"Cylinder", "Cone", "Cube", "Sphere"}
"extents": (3-array), the (x,y,z) extents of the generated volume (specified in local link frame!)
If None, information found from @obj.metadata will be used instead.
sample_with_raycast (bool): If True, will only sample particles at raycast hits. Otherwise, will bypass sampling
and immediately sample particles at the sampled particle locations. Note that this will only work
for PhysicalParticleSystem-based ParticleAppliers that use the Projection method!
initial_speed (float): For physical particles, the initial speed for generated particles. Note that the
direction of the velocity is inferred from the particle sampling process.
"""
def __init__(
self,
obj,
conditions,
method=ParticleModifyMethod.ADJACENCY,
projection_mesh_params=None,
sample_with_raycast=True,
initial_speed=0.0,
):
# Store internal value
self._sample_particle_locations = None
self._sample_with_raycast = sample_with_raycast
self._initial_speed = initial_speed
# Pre-cached values for where particles should be spawned, and in what direction, when this state is
# initialized so we can quickly spawn them at runtime
self._in_mesh_local_particle_positions = None
self._in_mesh_local_particle_directions = None
self.projection_system = None
self.projection_system_prim = None
self.projection_emitter = None
# Run super
super().__init__(obj=obj, method=method, conditions=conditions, projection_mesh_params=projection_mesh_params)
def _initialize(self):
# Run super
super()._initialize()
system_name = list(self.conditions.keys())[0]
# get_system will initialize the system if it's not initialized already.
system = get_system(system_name)
if self.visualize:
assert self._projection_mesh_params["type"] in {"Cylinder", "Cone"}, \
f"{self.__class__.__name__} visualization only supports Cylinder and Cone types!"
radius, height = np.mean(self._projection_mesh_params["extents"][:2]) / 2.0, self._projection_mesh_params["extents"][2]
# Generate the projection visualization
particle_radius = m.VISUAL_PARTICLE_PROJECTION_PARTICLE_RADIUS if \
is_visual_particle_system(system_name=system.name) else system.particle_radius
name_prefix = f"{self.obj.name}_{self.__class__.__name__}"
# Create the projection visualization if it doesn't already exist, otherwise we reference it directly
projection_name = f"{name_prefix}_projection_visualization"
projection_path = f"/OmniGraph/{projection_name}"
projection_visualization_path = f"{self.link.prim_path}/projection_visualization"
if lazy.omni.isaac.core.utils.prims.is_prim_path_valid(projection_path):
self.projection_system = lazy.omni.isaac.core.utils.prims.get_prim_at_path(projection_path)
self.projection_emitter = lazy.omni.isaac.core.utils.prims.get_prim_at_path(f"{projection_path}/emitter")
else:
self.projection_system, self.projection_emitter = create_projection_visualization(
prim_path=projection_visualization_path,
shape=self._projection_mesh_params["type"],
projection_name=projection_name,
projection_radius=radius,
projection_height=height,
particle_radius=particle_radius,
parent_scale=self.link.scale,
material=system.material,
)
self.projection_system_prim = BasePrim(prim_path=self.projection_system.GetPrimPath().pathString,
name=projection_name)
# Create the visual geom instance referencing the generated source mesh prim, and then hide it
self.projection_source_sphere = VisualGeomPrim(prim_path=projection_visualization_path, name=f"{name_prefix}_projection_source_sphere")
self.projection_source_sphere.initialize()
self.projection_source_sphere.visible = False
# Rotate by 90 degrees in y-axis so that the projection visualization aligns with the projection mesh
self.projection_source_sphere.set_local_pose(orientation=T.euler2quat([0, np.pi / 2, 0]))
# Make sure the meta mesh is aligned with the meta link if visualizing
# This corresponds to checking (a) position of tip of projection mesh should align with origin of
# metalink, and (b) zero relative orientation between the metalink and the projection mesh
local_pos, local_quat = self.projection_mesh.get_local_pose()
assert np.all(np.isclose(local_pos + np.array([0, 0, height / 2.0]), 0.0)), \
"Projection mesh tip should align with metalink position!"
assert np.all(np.isclose(T.quat2euler(local_quat), 0.0)), \
"Projection mesh orientation should align with metalink orientation!"
# Store which method to use for sampling particle locations
if self._sample_with_raycast:
if self.method == ParticleModifyMethod.PROJECTION:
self._sample_particle_locations = self._sample_particle_locations_from_projection_volume
elif self.method == ParticleModifyMethod.ADJACENCY:
self._sample_particle_locations = self._sample_particle_locations_from_adjacency_area
else:
raise ValueError(f"Unsupported ParticleModifyMethod: {self.method}!")
else:
# Make sure we're only using a physical particle system and the projection method
assert issubclass(system, PhysicalParticleSystem), \
"If not sampling with raycast, ParticleApplier only supports PhysicalParticleSystems!"
assert self.method == ParticleModifyMethod.PROJECTION, \
"If not sampling with raycast, ParticleApplier only supports ParticleModifyMethod.PROJECTION method!"
# Compute particle spawning information once
self._compute_particle_spawn_information(system=system)
def _parse_conditions(self, conditions):
# Run super first
parsed_conditions = super()._parse_conditions(conditions=conditions)
# sanity check to make sure only one system is being applied, since unlike a ParticleRemover, which
# can potentially remove multiple types of particles, a ParticleApplier should only apply one type of particle
        assert len(parsed_conditions) == 1, f"A ParticleApplier can only have a single ParticleSystem associated " \
                                            f"with it! Got: {list(parsed_conditions.keys())}"
# Append an additional condition for checking overlaps if required
if self.requires_overlap:
system_name = next(iter(parsed_conditions))
parsed_conditions[system_name].append(lambda obj: self._check_overlap())
return parsed_conditions
def _compute_particle_spawn_information(self, system):
"""
Helper function to compute where particles should be spawned. This is to save computation time at runtime
        if @self._sample_with_raycast is False, meaning that we deterministically sample particles.
Args:
system (BaseSystem): Particle system whose particles will be spawned from this ParticleApplier
"""
# We now pre-compute local particle positions that are within the projection mesh used to infer spawn pos
# We sample over the entire object AABB, assuming most will be filtered out
sampling_distance = 2 * system.particle_radius
extent = np.array(self._projection_mesh_params["extents"])
h = extent[2]
low, high = self.obj.aabb
n_particles_per_axis = ((high - low) / sampling_distance).astype(int)
assert np.all(n_particles_per_axis), f"link {self.link.name} is too small to sample any particle of radius {system.particle_radius}."
# 1e-10 is added because the extent might be an exact multiple of particle radius
        arrs = [np.arange(lo + system.particle_radius, hi - system.particle_radius + 1e-10, system.particle_radius * 2)
                for lo, hi in zip(low, high)]
# Generate 3D-rectangular grid of points, and only keep the ones inside the mesh
points = np.stack([arr.flatten() for arr in np.meshgrid(*arrs)]).T
pos, quat = self.link.get_position_orientation()
points = points[np.where(self._check_in_mesh(points))[0]]
# Convert the points into local frame
points_in_local_frame = get_particle_positions_in_frame(
pos=pos,
quat=quat,
scale=self.obj.scale,
particle_positions=points,
)
n_max_particles = self._get_max_particles_limit_per_step(system=system)
# Potentially sub-sample points based on max particle limit per step
self._in_mesh_local_particle_positions = points_in_local_frame if n_max_particles > len(points) else \
points_in_local_frame[np.random.choice(len(points_in_local_frame), n_max_particles, replace=False)]
# Also programmatically compute the directions of each particle position -- this is the normalized
# vector pointing from source to the particle
projection_type = self._projection_mesh_params["type"]
if projection_type == "Cone":
# Particles point from source ([0, 0, 0]) to point location
directions = np.copy(self._in_mesh_local_particle_positions)
elif projection_type == "Cylinder":
            # All particles point in the same direction, parallel to the -z axis
directions = np.zeros_like(self._in_mesh_local_particle_positions)
directions[:, 2] = -h
else:
raise ValueError(
"If not sampling with raycast, ParticleApplier only supports `Cone` or `Cylinder` projection types!")
self._in_mesh_local_particle_directions = directions / np.linalg.norm(directions, axis=-1).reshape(-1, 1)
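    # Illustrative sketch of the direction math above (hypothetical values): for a "Cone", a local
    # point p = [0.1, 0.0, -0.2] normalizes to ~[0.447, 0.0, -0.894], i.e. pointing away from the
    # tip at the origin, while for a "Cylinder" every row normalizes [0, 0, -h] to [0.0, 0.0, -1.0].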
def _update(self):
        # If we're about to check for modification, update whether the visualization should be active or not
if self.visualize and self._current_step == 0:
# Only one system in our conditions, so next(iter()) suffices
is_active = all(condition(self.obj) for condition in next(iter(self.conditions.values())))
self.projection_emitter.GetProperty("inputs:active").Set(is_active)
# Run super
super()._update()
def remove(self):
# We need to remove the projection visualization if it exists
if self.projection_system_prim is not None:
og.sim.remove_prim(self.projection_system_prim)
def _modify_particles(self, system):
if self._sample_with_raycast:
# Sample potential locations to apply particles, and then apply them
start_points, end_points = self._sample_particle_locations(system=system)
n_samples = len(start_points)
is_visual = is_visual_particle_system(system_name=system.name)
if is_visual:
group = system.get_group_name(obj=self.obj)
# Create an attachment group if necessary
if group not in system.groups:
system.create_attachment_group(obj=self.obj)
avg_scale = np.cbrt(np.product(self.obj.scale))
scales = system.sample_scales_by_group(group=group, n=len(start_points))
cuboid_dimensions = scales * system.particle_object.aabb_extent.reshape(1, 3) * avg_scale
else:
scales = None
cuboid_dimensions = np.zeros(3)
# Sample the rays to see where particle can be generated
results = sample_cuboid_on_object(
obj=None,
start_points=start_points.reshape(n_samples, 1, 3),
end_points=end_points.reshape(n_samples, 1, 3),
cuboid_dimensions=cuboid_dimensions,
ignore_objs=[self.obj],
hit_proportion=0.0, # We want all hits
cuboid_bottom_padding=macros.utils.sampling_utils.DEFAULT_CUBOID_BOTTOM_PADDING if
is_visual else system.particle_radius,
undo_cuboid_bottom_padding=is_visual, # micro particles have zero cuboid dimensions so we need to maintain padding
verify_cuboid_empty=False,
)
hits = [result for result in results if result[0] is not None]
scales = [scale for scale, result in zip(scales, results) if result[0] is not None] if scales is not None else scales
self._apply_particles_at_raycast_hits(system=system, hits=hits, scales=scales)
else:
self._apply_particles_in_projection_volume(system=system)
def _apply_particles_at_raycast_hits(self, system, hits, scales=None):
"""
Helper function to apply particles from system @system given raycast hits @hits,
which are the filtered results from omnigibson.utils.sampling_utils.raytest_batch that include only
the results with a valid hit
Args:
system (BaseSystem): System to apply particles from
hits (list of dict): Valid hit results from a batched raycast representing locations for sampling particles
scales (list of numpy arrays or None): None or scales of the particles that should be sampled, same length as hits
"""
assert system.name in self.conditions, f"System {system.name} is not defined in the conditions."
# Check the system
n_modified_particles = self.obj.states[ModifiedParticles].get_value(system)
if is_visual_particle_system(system_name=system.name):
assert scales is not None, "applying visual particles at raycast hits requires scales."
            assert len(hits) == len(scales), "lengths of hits and scales must match when spawning visual particles."
# Sample potential application points
z_up = np.zeros(3)
z_up[-1] = 1.0
n_particles = min(len(hits), m.VISUAL_PARTICLES_APPLICATION_LIMIT - n_modified_particles)
# Generate particle info -- maps group name to particle info for that group,
# i.e.: positions, orientations, and link_prim_paths
particles_info = defaultdict(lambda: defaultdict(lambda: []))
modifier_avg_scale = np.cbrt(np.product(self.obj.scale))
for hit, scale in zip(hits[:n_particles], scales[:n_particles]):
# Infer which object was hit
hit_obj = og.sim.scene.object_registry("prim_path", "/".join(hit[3].split("/")[:-1]), None)
if hit_obj is not None:
# Create an attachment group if necessary
group = system.get_group_name(obj=hit_obj)
if group not in system.groups:
system.create_attachment_group(obj=hit_obj)
# Add to info
particles_info[group]["positions"].append(hit[0])
particles_info[group]["orientations"].append(hit[2])
# Since particles' scales are sampled with respect to the modifier object, but are being placed
# (in the USD hierarchy) underneath the in_contact object, we need to compensate for the relative
# scale differences between the two objects, so that "moving" the particle to the new object won't
# cause it to unexpectedly shrink / grow based on that parent's (potentially) different scale
particles_info[group]["scales"].append(scale * modifier_avg_scale / np.cbrt(np.product(hit_obj.scale)))
particles_info[group]["link_prim_paths"].append(hit[3])
# Generate all the particles for each group
for group, particle_info in particles_info.items():
# Generate particles for this group
system.generate_group_particles(
group=group,
positions=np.array(particle_info["positions"]),
orientations=np.array(particle_info["orientations"]),
                    scales=np.array(particle_info["scales"]),
link_prim_paths=particle_info["link_prim_paths"],
)
# Update our particle count
self.obj.states[ModifiedParticles].set_value(system, n_modified_particles + len(particle_info["link_prim_paths"]))
# Physical system
else:
# Compile the particle poses to generate and sample the particles
n_particles = min(len(hits), m.PHYSICAL_PARTICLES_APPLICATION_LIMIT - n_modified_particles)
# Generate particles
if n_particles > 0:
velocities = None if self._initial_speed == 0 else -self._initial_speed * np.array([hit[1] for hit in hits[:n_particles]])
system.generate_particles(
positions=np.array([hit[0] for hit in hits[:n_particles]]),
velocities=velocities,
)
# Update our particle count
self.obj.states[ModifiedParticles].set_value(system, n_modified_particles + n_particles)
def _apply_particles_in_projection_volume(self, system):
"""
        Helper function to apply particles from system @system within the projection volume owned by this
ParticleApplier.
NOTE: This function only supports PhysicalParticleSystems and ParticleModifyMethod.PROJECTION method, which
should have been asserted during this ParticleApplier's initialize() call
Args:
system (BaseSystem): System to apply particles from
"""
assert self.method == ParticleModifyMethod.PROJECTION, \
"Can only apply particles within projection volume if ParticleModifyMethod.PROJECTION method is used!"
assert is_physical_particle_system(system_name=system.name), \
"Can only apply particles within projection volume if system is PhysicalParticleSystem!"
# Transform pre-cached particle positions into the world frame
pos, quat = self.link.get_position_orientation()
points = get_particle_positions_from_frame(
pos=pos,
quat=quat,
scale=self.obj.scale,
particle_positions=self._in_mesh_local_particle_positions,
)
directions = self._in_mesh_local_particle_directions @ T.quat2mat(quat).T
# Compile the particle poses to generate and sample the particles
n_modified_particles = self.obj.states[ModifiedParticles].get_value(system)
n_particles = min(len(points), m.PHYSICAL_PARTICLES_APPLICATION_LIMIT - n_modified_particles)
# Generate particles
if n_particles > 0:
velocities = None if self._initial_speed == 0 else self._initial_speed * directions[:n_particles]
system.generate_particles(
positions=points[:n_particles],
velocities=velocities,
)
# Update our particle count
self.obj.states[ModifiedParticles].set_value(system, n_modified_particles + n_particles)
def _sample_particle_locations_from_projection_volume(self, system):
"""
Helper function for generating potential particle locations from projection volume
Args:
system (BaseSystem): System to sample potential particle positions for
Returns:
2-tuple:
- (n, 3) array: Ray start points to sample
- (n, 3) array: Ray end points to sample
"""
# Randomly sample end points from the base of the cone / cylinder
n_samples = self._get_max_particles_limit_per_step(system=system)
r, h = self._projection_mesh_params["extents"][0] / 2, self._projection_mesh_params["extents"][2]
sampled_r_theta = np.random.rand(n_samples, 2)
sampled_r_theta = sampled_r_theta * np.array([r, np.pi * 2]).reshape(1, 2)
# Get start, end points in local link frame, start points to end points along the -z direction
end_points = np.stack([
sampled_r_theta[:, 0] * np.cos(sampled_r_theta[:, 1]),
sampled_r_theta[:, 0] * np.sin(sampled_r_theta[:, 1]),
-h * np.ones(n_samples),
], axis=1)
projection_type = self._projection_mesh_params["type"]
if projection_type == "Cone":
# All start points are the cone tip, which is the local link origin
start_points = np.zeros((n_samples, 3))
elif projection_type == "Cylinder":
# All start points are the parallel point for their corresponding end point
# i.e.: (x, y, 0)
start_points = end_points + np.array([0, 0, h]).reshape(1, 3)
else:
# Other types not supported
raise ValueError(f"Unsupported projection mesh type: {projection_type}!")
# Convert sampled normalized radius and angle into 3D points
# We convert r, theta --> 3D point in local link frame --> 3D point in global world frame
# We also combine start and end points for efficiency when doing the transform, then split them up again
points = np.concatenate([start_points, end_points], axis=0)
pos, quat = self.link.get_position_orientation()
points = get_particle_positions_from_frame(
pos=pos,
quat=quat,
scale=self.obj.scale,
particle_positions=points,
)
return points[:n_samples, :], points[n_samples:, :]
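    # Note on the sampling above (a sketch, not a behavior change): drawing the radius uniformly
    # concentrates end points near the cone / cylinder axis. If uniform coverage over the base disk
    # were desired instead, one could draw u ~ U(0, 1) and use:
    #     radii = r * np.sqrt(u)
    # since the area element grows linearly with radius.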
def _sample_particle_locations_from_adjacency_area(self, system):
"""
Helper function for generating potential particle locations from adjacency area
Args:
system (BaseSystem): System to sample potential particle positions for
Returns:
2-tuple:
- (n, 3) array: Ray start points to sample
- (n, 3) array: Ray end points to sample
"""
# Randomly sample end points from within the object's AABB
n_samples = self._get_max_particles_limit_per_step(system=system)
lower, upper = self.link.visual_aabb
lower = lower.reshape(1, 3) - m.PARTICLE_MODIFIER_ADJACENCY_AREA_MARGIN
upper = upper.reshape(1, 3) + m.PARTICLE_MODIFIER_ADJACENCY_AREA_MARGIN
lower_upper = np.concatenate([lower, upper], axis=0)
# Sample in all directions, shooting from the center of the link / object frame
pos = self.link.get_position()
start_points = np.ones((n_samples, 3)) * pos.reshape(1, 3)
end_points = np.random.uniform(low=lower, high=upper, size=(n_samples, 3))
sides, axes = np.random.randint(2, size=(n_samples,)), np.random.randint(3, size=(n_samples,))
end_points[np.arange(n_samples), axes] = lower_upper[sides, axes]
return start_points, end_points
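    # Illustrative walkthrough of the face-snapping trick above (hypothetical values): lower_upper
    # stacks the expanded AABB corners as [[lo_x, lo_y, lo_z], [hi_x, hi_y, hi_z]]; picking
    # side=1, axis=2 for a sample overwrites its z-coordinate with hi_z, so each ray terminates on a
    # randomly chosen face of the box rather than at an interior point.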
def _get_max_particles_limit_per_step(self, system):
"""
Helper function for grabbing the maximum particle limit per step
Args:
system (BaseSystem): System for which to get max particle limit per step
Returns:
int: Maximum particles to apply per step for the given system @system
"""
assert system.name in self.conditions, f"System {system.name} is not defined in the conditions."
return m.MAX_VISUAL_PARTICLES_APPLIED_PER_STEP if is_visual_particle_system(system_name=system.name) else \
m.MAX_PHYSICAL_PARTICLES_APPLIED_PER_STEP
@property
def requires_overlap(self):
# Overlap required only if sampling with raycast
return self._sample_with_raycast
@property
def visualize(self):
"""
Returns:
bool: Whether this Applier should be visualized or not
"""
# Visualize if projection method is used
return self.method == ParticleModifyMethod.PROJECTION
@property
def systems_to_check(self):
# Only should check the systems in the owned conditions
return tuple(self.conditions.keys())
@property
def projection_is_active(self):
# Only active if the projection mesh is enabled
return self.projection_emitter.GetProperty("inputs:active").Get()
@classproperty
def metalink_prefix(cls):
return m.APPLICATION_LINK_PREFIX
@classmethod
def requires_metalink(cls, **kwargs):
# No metalink required for adjacency
return kwargs.get("method", ParticleModifyMethod.ADJACENCY) != ParticleModifyMethod.ADJACENCY
@classmethod
def is_compatible(cls, obj, **kwargs):
# Run super first
compatible, reason = super().is_compatible(obj, **kwargs)
if not compatible:
return compatible, reason
# Check whether GPU dynamics are enabled (necessary for this object state)
if not gm.USE_GPU_DYNAMICS:
return False, f"gm.USE_GPU_DYNAMICS must be True in order to use object state {cls.__name__}."
return True, None
@property
def _default_link(self):
# Only supported for adjacency, NOT projection
return self.obj.root_link if self.method == ParticleModifyMethod.ADJACENCY else None
@property
def n_steps_per_modification(self):
return m.N_STEPS_PER_APPLICATION
@property
def visual_particle_modification_limit(self):
return m.VISUAL_PARTICLES_APPLICATION_LIMIT
@property
def physical_particle_modification_limit(self):
return m.PHYSICAL_PARTICLES_APPLICATION_LIMIT
| 73,493 | Python | 51.123404 | 158 | 0.655899 |
StanfordVL/OmniGibson/omnigibson/object_states/open_state.py | import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.object_states.object_state_base import BooleanStateMixin, AbsoluteObjectState
from omnigibson.utils.constants import JointType
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Joint position threshold before a joint is considered open.
# Should be a number in the range [0, 1] which will be transformed
# to a position in the joint's min-max range.
m.JOINT_THRESHOLD_BY_TYPE = {
JointType.JOINT_REVOLUTE: 0.05,
JointType.JOINT_PRISMATIC: 0.05,
}
m.OPEN_SAMPLING_ATTEMPTS = 5
m.METADATA_FIELD = "openable_joint_ids"
m.BOTH_SIDES_METADATA_FIELD = "openable_both_sides"
def _compute_joint_threshold(joint, joint_direction):
"""
Computes the joint threshold for opening and closing
Args:
joint (JointPrim): Joint to calculate threshold for
joint_direction (int): If 1, assumes opening direction is positive angle change. Otherwise,
assumes opening direction is negative angle change.
Returns:
3-tuple:
- float: Joint value at which closed <--> open
- float: Extreme joint value for being opened
- float: Extreme joint value for being closed
"""
global m
# Convert fractional threshold to actual joint position.
f = m.JOINT_THRESHOLD_BY_TYPE[joint.joint_type]
closed_end = joint.lower_limit if joint_direction == 1 else joint.upper_limit
open_end = joint.upper_limit if joint_direction == 1 else joint.lower_limit
threshold = (1 - f) * closed_end + f * open_end
return threshold, open_end, closed_end
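# Worked example (hypothetical numbers): for a revolute joint with limits [0.0, 1.57] rad,
# f = 0.05, and joint_direction = 1, closed_end = 0.0 and open_end = 1.57, so
# threshold = (1 - 0.05) * 0.0 + 0.05 * 1.57 ~= 0.0785 rad -- the joint only needs to rotate
# ~4.5 degrees past its closed limit to count as open.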
def _is_in_range(position, threshold, range_end):
"""
Calculates whether a joint's position @position is in its opening / closing range
Args:
position (float): Joint value
threshold (float): Joint value at which closed <--> open
range_end (float): Extreme joint value for being opened / closed
Returns:
bool: Whether the joint position is past @threshold in the direction of @range_end
"""
# Note that we are unable to do an actual range check here because the joint positions can actually go
# slightly further than the denoted joint limits.
return position > threshold if range_end > threshold else position < threshold
def _get_relevant_joints(obj):
"""
Grabs the relevant joints for object @obj
Args:
obj (StatefulObject): Object to grab relevant joints for
Returns:
3-tuple:
- bool: If True, check open/closed state for objects whose joints can switch positions
- list of JointPrim: Relevant joints for determining whether @obj is open or closed
- list of int: Joint directions for each corresponding relevant joint
"""
global m
default_both_sides = False
default_relevant_joints = list(obj.joints.values())
# 1 means the open direction corresponds to positive joint angle change and -1 means the opposite
default_joint_directions = [1] * len(default_relevant_joints)
if not hasattr(obj, "metadata") or obj.metadata is None:
log.debug("No openable joint metadata found for object %s" % obj.name)
return default_both_sides, default_relevant_joints, default_joint_directions
# Get joint IDs and names from metadata annotation. If not, return default values.
if m.METADATA_FIELD not in obj.metadata or len(obj.metadata[m.METADATA_FIELD]) == 0:
log.debug(f"No openable joint metadata found for object {obj.name}")
return default_both_sides, default_relevant_joints, default_joint_directions
both_sides = obj.metadata[m.BOTH_SIDES_METADATA_FIELD] if m.BOTH_SIDES_METADATA_FIELD in obj.metadata else False
joint_metadata = obj.metadata[m.METADATA_FIELD].items()
# The joint metadata is in the format of [(joint_id, joint_name), ...] for legacy annotations and
# [(joint_id, joint_name, joint_direction), ...] for direction-annotated objects.
    joint_names = [entry[1] for entry in joint_metadata]
    joint_directions = [entry[2] if len(entry) > 2 else 1 for entry in joint_metadata]
relevant_joints = []
for key in joint_names:
assert key in obj.joints, f"Unexpected joint name from Open metadata for object {obj.name}: {key}"
relevant_joints.append(obj.joints[key])
assert all(joint.joint_type in m.JOINT_THRESHOLD_BY_TYPE.keys() for joint in relevant_joints)
return both_sides, relevant_joints, joint_directions
class Open(AbsoluteObjectState, BooleanStateMixin):
def __init__(self, obj):
self.relevant_joints_info = None
# Run super method
super().__init__(obj=obj)
def _initialize(self):
# Run super first
super()._initialize()
# Check the metadata info to get relevant joints information
self.relevant_joints_info = _get_relevant_joints(self.obj)
assert self.relevant_joints_info[1], f"No relevant joints for Open state found for object {self.obj.name}"
@classmethod
def is_compatible(cls, obj, **kwargs):
# Run super first
compatible, reason = super().is_compatible(obj, **kwargs)
if not compatible:
return compatible, reason
# Check whether this object has any openable joints
return (True, None) if obj.n_joints > 0 else \
(False, f"No relevant joints for Open state found for object {obj.name}")
@classmethod
def is_compatible_asset(cls, prim, **kwargs):
# Run super first
compatible, reason = super().is_compatible_asset(prim, **kwargs)
if not compatible:
return compatible, reason
def _find_articulated_joints(prim):
for child in prim.GetChildren():
child_type = child.GetTypeName().lower()
if "joint" in child_type and "fixed" not in child_type:
return True
for gchild in child.GetChildren():
gchild_type = gchild.GetTypeName().lower()
if "joint" in gchild_type and "fixed" not in gchild_type:
return True
return False
# Check whether this object has any openable joints
return (True, None) if _find_articulated_joints(prim=prim) else \
(False, f"No relevant joints for Open state found for asset prim {prim.GetName()}")
def _get_value(self):
both_sides, relevant_joints, joint_directions = self.relevant_joints_info
if not relevant_joints:
return False
# The "sides" variable is used to check open/closed state for objects whose joints can switch
# positions. These objects are annotated with the both_sides annotation and the idea is that switching
# the directions of *all* of the joints results in a similarly valid checkable state. As a result, to check
# each "side", we multiply *all* of the joint directions with the coefficient belonging to that side, which
# may be 1 or -1.
sides = [1, -1] if both_sides else [1]
sides_openness = []
for side in sides:
# Compute a boolean openness state for each joint by comparing positions to thresholds.
joint_thresholds = (
_compute_joint_threshold(joint, joint_direction * side)
for joint, joint_direction in zip(relevant_joints, joint_directions)
)
joint_positions = [joint.get_state()[0] for joint in relevant_joints]
joint_openness = (
_is_in_range(position, threshold, open_end)
for position, (threshold, open_end, closed_end) in zip(joint_positions, joint_thresholds)
)
# Looking from this side, the object is open if any of its joints is open.
sides_openness.append(any(joint_openness))
# The object is open only if it's open from all of its sides.
return all(sides_openness)
def _set_value(self, new_value, fully=False):
"""
Set the openness state, either to a random joint position satisfying the new value, or fully open/closed.
Args:
new_value (bool): The new value for the openness state of the object.
fully (bool): Whether the object should be fully opened/closed (e.g. all relevant joints to 0/1).
Returns:
bool: A boolean indicating the success of the setter. Failure may happen due to unannotated objects.
"""
both_sides, relevant_joints, joint_directions = self.relevant_joints_info
if not relevant_joints:
return False
# The "sides" variable is used to check open/closed state for objects whose joints can switch
# positions. These objects are annotated with the both_sides annotation and the idea is that switching
# the directions of *all* of the joints results in a similarly valid checkable state. We want our object to be
# open from *both* of the two sides, and I was too lazy to implement the logic for this without rejection
# sampling, so that's what we do.
# TODO: Implement a sampling method that's guaranteed to be correct, ditch the rejection method.
sides = [1, -1] if both_sides else [1]
for _ in range(m.OPEN_SAMPLING_ATTEMPTS):
side = np.random.choice(sides)
            # All joints are relevant if we are closing, but if we are opening let's sample a subset.
            # Sample into separate variables so that repeated attempts draw from the full joint set.
            joints_to_set, directions_to_set = relevant_joints, joint_directions
            if new_value and not fully:
                num_to_open = np.random.randint(1, len(relevant_joints) + 1)
                random_indices = np.random.choice(range(len(relevant_joints)), size=num_to_open, replace=False)
                joints_to_set = [relevant_joints[i] for i in random_indices]
                directions_to_set = [joint_directions[i] for i in random_indices]
            # Go through the relevant joints & set random positions.
            for joint, joint_direction in zip(joints_to_set, directions_to_set):
threshold, open_end, closed_end = _compute_joint_threshold(joint, joint_direction * side)
# Get the range
if new_value:
joint_range = (threshold, open_end)
else:
joint_range = (threshold, closed_end)
if fully:
joint_pos = joint_range[1]
else:
# Convert the range to the format numpy accepts.
low = min(joint_range)
high = max(joint_range)
# Sample a position.
joint_pos = np.random.uniform(low, high)
# Save sampled position.
joint.set_pos(joint_pos)
# If we succeeded, return now.
if self._get_value() == new_value:
return True
# We exhausted our attempts and could not find a working sample.
return False
# We don't need to load / save anything since the joints are saved elsewhere
| 11,225 | Python | 42.343629 | 118 | 0.646325 |
StanfordVL/OmniGibson/omnigibson/object_states/update_state_mixin.py | from omnigibson.object_states.object_state_base import BaseObjectState
from omnigibson.utils.python_utils import classproperty
class UpdateStateMixin(BaseObjectState):
"""
A state-mixin that allows for per-sim-step updates via the update() call
"""
def update(self):
"""
Updates the object state. This function will be called for every simulator step
"""
assert self._initialized, "Cannot update uninitialized state."
return self._update()
def _update(self):
"""
This function will be called once for every simulator step. Must be implemented by subclass.
"""
# Explicitly raise not implemented error to avoid silent bugs -- update should never be called otherwise
raise NotImplementedError
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("UpdateStateMixin")
return classes
class GlobalUpdateStateMixin(BaseObjectState):
"""
A state-mixin that allows for per-sim-step global updates via the global_update() call
"""
@classmethod
def global_initialize(cls):
"""
Executes a global initialization sequence for this state. Default is no-op
"""
pass
@classmethod
def global_update(cls):
"""
Executes a global update for this object state. Default is no-op
"""
pass
@classmethod
def global_clear(cls):
"""
Executes a global clear sequence for this object state. Default is no-op
"""
pass
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("GlobalUpdateStateMixin")
return classes
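# A minimal usage sketch (hypothetical subclass, kept commented out so that no example class is
# registered from this module):
# class Blinking(UpdateStateMixin):
#     def _update(self):
#         # Called once per simulator step by the state update machinery
#         self._on = not getattr(self, "_on", False)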
| 1,926 | Python | 30.080645 | 112 | 0.65109 |
StanfordVL/OmniGibson/omnigibson/object_states/kinematics_mixin.py | from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.contact_bodies import ContactBodies
from omnigibson.object_states.object_state_base import BaseObjectState
from omnigibson.object_states.pose import Pose
from omnigibson.utils.python_utils import classproperty
class KinematicsMixin(BaseObjectState):
"""
This class is a subclass of BaseObjectState that adds dependencies
on the default kinematics states.
"""
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.update({Pose, AABB, ContactBodies})
return deps
def cache_info(self, get_value_args):
# Import here to avoid circular imports
from omnigibson.objects.stateful_object import StatefulObject
# Run super first
info = super().cache_info(get_value_args=get_value_args)
# Store this object as well as any other objects from @get_value_args
info[self.obj] = self.obj.states[Pose].get_value()
for arg in get_value_args:
if isinstance(arg, StatefulObject):
info[arg] = arg.states[Pose].get_value()
return info
def _cache_is_valid(self, get_value_args):
# Import here to avoid circular imports
from omnigibson.objects.stateful_object import StatefulObject
# Cache is valid if and only if all of our cached objects have not changed
t = self._cache[get_value_args]["t"]
for obj, pose in self._cache[get_value_args]["info"].items():
if isinstance(obj, StatefulObject):
if obj.states[Pose].has_changed(get_value_args=(), value=pose, info={}, t=t):
return False
return True
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("KinematicsMixin")
return classes
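# Illustrative flow (as used by states such as Touching, which subclass this mixin): cache_info()
# snapshots the Pose of every involved object alongside the computed value; on a later step,
# _cache_is_valid() asks Pose.has_changed() for each snapshot, so the expensive kinematic query
# reruns only when some pose actually changed.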
| 1,966 | Python | 36.113207 | 93 | 0.667345 |
StanfordVL/OmniGibson/omnigibson/object_states/contact_subscribed_state_mixin.py | from abc import abstractmethod
from omnigibson.object_states.object_state_base import BaseObjectState
from omnigibson.utils.python_utils import classproperty
class ContactSubscribedStateMixin(BaseObjectState):
"""
Handles contact events (including CONTACT_FOUND, CONTACT_PERSIST, and CONTACT_LOST).
The subclass should implement its own on_contact method
"""
@abstractmethod
def on_contact(self, other, contact_headers, contact_data):
raise NotImplementedError("Subclasses of ContactSubscribedStateMixin should implement the on_contact method.")
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("ContactSubscribedStateMixin")
return classes
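# A minimal usage sketch (hypothetical subclass, kept commented out so that no example class is
# registered from this module):
# class DentedOnImpact(ContactSubscribedStateMixin):
#     def on_contact(self, other, contact_headers, contact_data):
#         # React to PhysX contact events, e.g. flag a dent on any CONTACT_FOUND header
#         self._dented = True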
| 832 | Python | 38.666665 | 118 | 0.751202 |
StanfordVL/OmniGibson/omnigibson/object_states/particle.py | import numpy as np
from omnigibson.object_states.object_state_base import BaseObjectRequirement
class ParticleRequirement(BaseObjectRequirement):
"""
    Class for sanity checking objects that require particle systems
"""
@classmethod
def is_compatible(cls, obj, **kwargs):
from omnigibson.macros import gm
if not gm.USE_GPU_DYNAMICS:
return False, f"Particle systems are not enabled when GPU dynamics is off."
return True, None
| 486 | Python | 27.647057 | 87 | 0.709877 |
StanfordVL/OmniGibson/omnigibson/object_states/touching.py | from omnigibson.object_states.contact_bodies import ContactBodies
from omnigibson.object_states.kinematics_mixin import KinematicsMixin
from omnigibson.object_states.object_state_base import BooleanStateMixin, RelativeObjectState
from omnigibson.utils.constants import PrimType
from omnigibson.utils.usd_utils import RigidContactAPI
class Touching(KinematicsMixin, RelativeObjectState, BooleanStateMixin):
@staticmethod
def _check_contact(obj_a, obj_b):
return len(set(obj_a.links.values()) & obj_b.states[ContactBodies].get_value()) > 0
def _get_value(self, other):
if self.obj.prim_type == PrimType.CLOTH and other.prim_type == PrimType.CLOTH:
raise ValueError("Cannot detect contact between two cloth objects.")
# If one of the objects is the cloth object, the contact will be asymmetrical.
# The rigid object will appear in the ContactBodies of the cloth object, but not the other way around.
elif self.obj.prim_type == PrimType.CLOTH:
return self._check_contact(other, self.obj)
elif other.prim_type == PrimType.CLOTH:
return self._check_contact(self.obj, other)
# elif not self.obj.kinematic_only and not other.kinematic_only:
# # Use optimized check for rigid bodies
# # TODO: Use once NVIDIA fixes their absolutely broken API
# return RigidContactAPI.in_contact(
# prim_paths_a=[link.prim_path for link in self.obj.links.values()],
# prim_paths_b=[link.prim_path for link in other.links.values()],
# )
else:
return self._check_contact(other, self.obj) and self._check_contact(self.obj, other)
| 1,709 | Python | 52.437498 | 110 | 0.691047 |
StanfordVL/OmniGibson/omnigibson/object_states/cloth_mixin.py | from omnigibson.macros import gm
from omnigibson.object_states.object_state_base import BaseObjectState
from omnigibson.utils.constants import PrimType
from omnigibson.utils.python_utils import classproperty
class ClothStateMixin(BaseObjectState):
"""
This class is a subclass of BaseObjectState that adds dependencies assuming the owned object is PrimType.CLOTH
"""
@classmethod
def is_compatible(cls, obj, **kwargs):
# Only compatible with cloth objects
compatible, reason = super().is_compatible(obj, **kwargs)
if not compatible:
return compatible, reason
# Check for cloth type
if obj.prim_type != PrimType.CLOTH:
return False, f"Cannot use ClothStateMixin {cls.__name__} with rigid object, make sure object is created " \
f"with prim_type=PrimType.CLOTH!"
# Check for GPU dynamics
if not gm.USE_GPU_DYNAMICS:
return False, f"gm.USE_GPU_DYNAMICS must be True in order to use object state {cls.__name__}."
return True, None
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("ClothStateMixin")
return classes
| 1,319 | Python | 35.666666 | 120 | 0.670963 |
StanfordVL/OmniGibson/omnigibson/object_states/object_state_base.py | from abc import ABC
import inspect
import omnigibson as og
from omnigibson.utils.python_utils import classproperty, Serializable, Registerable, Recreatable
# Global dicts that will contain mappings
REGISTERED_OBJECT_STATES = dict()
class BaseObjectRequirement:
"""
Base ObjectRequirement class. This allows for sanity checking a given asset / BaseObject to check whether a set
of conditions are met or not. This can be useful for sanity checking dependencies for properties such as requested
abilities or object states.
"""
@classmethod
def is_compatible(cls, obj, **kwargs):
"""
Determines whether this requirement is compatible with object @obj or not (i.e.: whether this requirement is
satisfied by @obj given other constructor arguments **kwargs).
NOTE: Must be implemented by subclass.
Args:
obj (StatefulObject): Object whose compatibility with this state should be checked
Returns:
2-tuple:
- bool: Whether the given object is compatible with this requirement or not
- None or str: If not compatible, the reason why it is not compatible. Otherwise, None
"""
raise NotImplementedError
@classmethod
def is_compatible_asset(cls, prim, **kwargs):
"""
Determines whether this requirement is compatible with prim @prim or not (i.e.: whether this requirement is
satisfied by @prim given other constructor arguments **kwargs).
This is a useful check to evaluate an object's USD that hasn't been explicitly imported into OmniGibson yet.
NOTE: Must be implemented by subclass
Args:
prim (Usd.Prim): Object prim whose compatibility with this requirement should be checked
Returns:
2-tuple:
- bool: Whether the given prim is compatible with this requirement or not
- None or str: If not compatible, the reason why it is not compatible. Otherwise, None
"""
raise NotImplementedError
class BaseObjectState(BaseObjectRequirement, Serializable, Registerable, Recreatable, ABC):
"""
Base ObjectState class. Do NOT inherit from this class directly - use either AbsoluteObjectState or
RelativeObjectState.
"""
@classmethod
def get_dependencies(cls):
"""
Get the dependency states for this state, e.g. states that need to be explicitly enabled on the current object
before the current state is usable. States listed here will be enabled for all objects that have this current
state, and all dependency states will be processed on *all* objects prior to this state being processed on
*any* object.
Returns:
set of str: Set of strings corresponding to state keys.
"""
return set()
@classmethod
def get_optional_dependencies(cls):
"""
Get states that should be processed prior to this state if they are already enabled. These states will not be
enabled because of this state's dependency on them, but if they are already enabled for another reason (e.g.
because of an ability or another state's dependency etc.), they will be processed on *all* objects prior to this
state being processed on *any* object.
Returns:
set of str: Set of strings corresponding to state keys.
"""
return set()
def __init__(self, obj):
super().__init__()
self.obj = obj
self._initialized = False
self._cache = None
self._changed = None
self._last_t_updated = -1 # Last timestep when this state was updated
@classmethod
def is_compatible(cls, obj, **kwargs):
# Make sure all required dependencies are included in this object's state dictionary
for dep in cls.get_dependencies():
if dep not in obj.states:
return False, f"Missing required dependency state {dep.__name__}"
# Make sure all required kwargs are specified
default_kwargs = inspect.signature(cls.__init__).parameters
for kwarg, val in default_kwargs.items():
if val.default == inspect._empty and kwarg not in kwargs and kwarg not in {"obj", "self", "args", "kwargs"}:
return False, f"Missing required kwarg '{kwarg}'"
# Default is True if all kwargs are met
return True, None
@classmethod
def is_compatible_asset(cls, prim, **kwargs):
# Make sure all required kwargs are specified
default_kwargs = inspect.signature(cls.__init__).parameters
for kwarg, val in default_kwargs.items():
if val.default == inspect._empty and kwarg not in kwargs and kwarg not in {"obj", "self"}:
return False, f"Missing required kwarg '{kwarg}'"
# Default is True if all kwargs are met
return True, None
@classmethod
def postprocess_ability_params(cls, params):
"""
Post-processes ability parameters if needed. The default implementation is a simple passthrough.
"""
return params
@property
def stateful(self):
"""
Returns:
bool: True if this object has a state that can be directly dumped / loaded via dump_state() and
load_state(), otherwise, returns False. Note that any sub object states that are NOT stateful do
not need to implement any of _dump_state(), _load_state(), _serialize(), or _deserialize()!
"""
# Default is whether state size > 0
return self.state_size > 0
@property
def state_size(self):
return 0
@property
def cache(self):
"""
Returns:
dict: Dictionary mapping specific argument combinations from @self.get_value() to cached values and
information stored for that specific combination
"""
return self._cache
def _initialize(self):
"""
        This function will be called once; it should be used for any initialization that requires object state-related objects to have been loaded.
"""
pass
def initialize(self):
"""
Initialize this object state
"""
assert not self._initialized, "State is already initialized."
# Validate compatibility with the created object
init_args = {k: v for k, v in self.get_init_info()["args"].items() if k != "obj"}
        compatible, reason = self.is_compatible(obj=self.obj, **init_args)
        assert compatible, \
            f"ObjectState {self.__class__.__name__} is not compatible with object {self.obj.name}. Reason: {reason}"
# Clear cache
self.clear_cache()
self._initialize()
self._initialized = True
def clear_cache(self):
"""
Clears the internal cache
"""
# Clear all entries
self._cache = dict()
self._changed = dict()
self._last_t_updated = -1
def update_cache(self, get_value_args):
"""
Updates the internal cached value based on the evaluation of @self._get_value(*get_value_args)
Args:
get_value_args (tuple): Specific argument combinations (usually tuple of objects) passed into
@self.get_value / @self._get_value
"""
t = og.sim.current_time_step_index
# Compute value and update cache
val = self._get_value(*get_value_args)
self._cache[get_value_args] = dict(value=val, info=self.cache_info(get_value_args=get_value_args), t=t)
def cache_info(self, get_value_args):
"""
Helper function to cache relevant information at the current timestep.
Stores it under @self._cache[<KEY>]["info"]
Args:
get_value_args (tuple): Specific argument combinations (usually tuple of objects) passed into
@self.get_value whose caching information should be computed
Returns:
dict: Any caching information to include at the current timestep when this state's value is computed
"""
# Default is an empty dictionary
return dict()
def cache_is_valid(self, get_value_args):
"""
Helper function to check whether the current cached value is valid or not at the current timestep.
Default is False unless we're at the current timestep.
Args:
get_value_args (tuple): Specific argument combinations (usually tuple of objects) passed into
@self.get_value whose cached values should be validated
Returns:
bool: True if the cache is valid, else False
"""
        # If t == the current timestep, our cache is trivially valid; otherwise, defer to the subclass check
return True if self._cache[get_value_args]["t"] == og.sim.current_time_step_index else \
self._cache_is_valid(get_value_args=get_value_args)
def _cache_is_valid(self, get_value_args):
"""
Helper function to check whether the current cached value is valid or not at the current timestep.
Default is False. Subclasses should implement special logic otherwise.
Args:
get_value_args (tuple): Specific argument combinations (usually tuple of objects) passed into
@self.get_value whose cached values should be validated
Returns:
bool: True if the cache is valid, else False
"""
return False
def has_changed(self, get_value_args, value, info, t):
"""
A helper function to query whether this object state has changed between the current timestep and an arbitrary
previous timestep @t with the corresponding cached value @value and cache information @info
Note that this may require some non-trivial compute, so we leverage @t, in addition to @get_value_args,
as a unique key into an internal dictionary, such that specific @t will result in a computation conducted
exactly once.
This is done for performance reasons; so that multiple states relying on the same state dependency can all
query whether that state has changed between the same timesteps with only a single computation.
Args:
get_value_args (tuple): Specific argument combinations (usually tuple of objects) passed into
@self.get_value
value (any): Cached value computed at timestep @t for this object state
info (dict): Information calculated at timestep @t when computing this state's value
t (int): Initial timestep to compare against. This should be an index of the steps taken,
i.e. a value queried from og.sim.current_time_step_index at some point in time. It is assumed @value
and @info were computed at this timestep
Returns:
bool: Whether this object state has changed between @t and the current timestep index for the specific
@get_value_args
"""
# Check current sim step index; if it doesn't match the internal value, we need to clear the changed history
current_t = og.sim.current_time_step_index
if self._last_t_updated != current_t:
self._changed = dict()
self._last_t_updated = current_t
# Compile t, args, and kwargs deterministically
history_key = (t, *get_value_args)
# If t == the current timestep, then we obviously haven't changed so our value is False
if t == current_t:
val = False
# Otherwise, check if it already exists in our has changed dictionary; we return that value if so
elif history_key in self._changed:
val = self._changed[history_key]
# Otherwise, we calculate the value and store it in our changed dictionary
else:
val = self._has_changed(get_value_args=get_value_args, value=value, info=info)
self._changed[history_key] = val
return val
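    # Illustrative example of the memoization above (hypothetical timesteps): two states that both
    # depend on Pose and ask "has object A moved since t=10?" during step t=12 share the same
    # history_key (10, A), so _has_changed() runs once and the second query is a plain dict lookup.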
def _has_changed(self, get_value_args, value, info):
"""
Checks whether the previous value evaluated at time @t has changed with the current timestep.
By default, it returns True.
Any custom checks should be overridden by subclass.
Args:
get_value_args (tuple): Specific argument combinations (usually tuple of objects) passed into
@self.get_value
value (any): Cached value computed at timestep @t for this object state
info (dict): Information calculated at timestep @t when computing this state's value
Returns:
            bool: Whether the value has changed between (@value, @info) and the corresponding value and info
                computed at the current timestep
"""
return True
def get_value(self, *args, **kwargs):
"""
Get this state's value
Returns:
any: Object state value given input @args and @kwargs
"""
assert self._initialized
# Compile args and kwargs deterministically
key = (*args, *tuple(kwargs.values()))
        # We need to see if we need to update our cache -- we do so if and only if one of the following conditions is met:
# (a) key is NOT in the cache
# (b) Our cache is not valid
if key not in self._cache or not self.cache_is_valid(get_value_args=key):
# Update the cache
self.update_cache(get_value_args=key)
# Value is the cached value
val = self._cache[key]["value"]
return val
def _get_value(self, *args, **kwargs):
raise NotImplementedError(f"_get_value not implemented for {self.__class__.__name__} state.")
def set_value(self, *args, **kwargs):
"""
Set this state's value
Returns:
bool: True if setting the value was successful, otherwise False
"""
assert self._initialized
# Clear cache because the state may be changed
self.clear_cache()
# Set the value
val = self._set_value(*args, **kwargs)
return val
def _set_value(self, *args, **kwargs):
raise NotImplementedError(f"_set_value not implemented for {self.__class__.__name__} state.")
def remove(self):
"""
Any cleanup functionality to deploy when @self.obj is removed from the simulator
"""
pass
def dump_state(self, serialized=False):
assert self._initialized
assert self.stateful
return super().dump_state(serialized=serialized)
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("BaseObjectState")
return classes
@classproperty
def _cls_registry(cls):
# Global registry
global REGISTERED_OBJECT_STATES
return REGISTERED_OBJECT_STATES
class AbsoluteObjectState(BaseObjectState):
"""
    This class is used to track object states that are absolute, i.e. they do not require a second object to compute
the value.
"""
def _get_value(self):
raise NotImplementedError(f"_get_value not implemented for {self.__class__.__name__} state.")
def _set_value(self, new_value):
raise NotImplementedError(f"_set_value not implemented for {self.__class__.__name__} state.")
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("AbsoluteObjectState")
return classes
class RelativeObjectState(BaseObjectState):
"""
    This class is used to track object states that are relative, i.e. they require two objects to compute a value.
Note that subclasses will typically compute values on-the-fly.
"""
def _get_value(self, other):
raise NotImplementedError(f"_get_value not implemented for {self.__class__.__name__} state.")
def _set_value(self, other, new_value):
raise NotImplementedError(f"_set_value not implemented for {self.__class__.__name__} state.")
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("RelativeObjectState")
return classes
class IntrinsicObjectState(BaseObjectState):
"""
This class is used to track object states that should NOT have getters / setters implemented, since the associated
    ability / state is intrinsic to the object
"""
def _get_value(self):
raise NotImplementedError(f"_get_value not implemented for IntrinsicObjectState {self.__class__.__name__} state.")
def _set_value(self, new_value):
raise NotImplementedError(f"_set_value not implemented for IntrinsicObjectState {self.__class__.__name__} state.")
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("IntrinsicObjectState")
return classes
class BooleanStateMixin(BaseObjectState):
"""
This class is a mixin used to indicate that a state has a boolean value.
"""
pass
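# A minimal sketch of a concrete state built from these templates (hypothetical, kept commented out
# so the Registerable machinery does not register an example class from this module):
# class IsUpsideDown(AbsoluteObjectState, BooleanStateMixin):
#     def _get_value(self):
#         # e.g. compare the object's local +z axis against gravity via a hypothetical helper
#         return local_z_axis(self.obj)[2] < 0.0
#     def _set_value(self, new_value):
#         raise NotImplementedError("This illustrative state is read-only.")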
| 17,376 | Python | 38.673516 | 123 | 0.640021 |
StanfordVL/OmniGibson/omnigibson/object_states/overlaid.py | from omnigibson.object_states.kinematics_mixin import KinematicsMixin
from omnigibson.object_states.object_state_base import BooleanStateMixin, RelativeObjectState
from omnigibson.object_states.touching import Touching
from omnigibson.utils.constants import PrimType
import omnigibson.utils.transform_utils as T
from omnigibson.utils.object_state_utils import sample_cloth_on_rigid
from omnigibson.macros import create_module_macros
import omnigibson as og
from scipy.spatial import ConvexHull, HalfspaceIntersection, QhullError
import numpy as np
import trimesh
import itertools
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Percentage of xy-plane of the object's base aligned bbox that needs to covered by the cloth
m.OVERLAP_AREA_PERCENTAGE = 0.5
# z-offset for sampling
m.SAMPLING_Z_OFFSET = 0.01
class Overlaid(KinematicsMixin, RelativeObjectState, BooleanStateMixin):
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(Touching)
return deps
def _set_value(self, other, new_value):
if not new_value:
raise NotImplementedError("Overlaid does not support set_value(False)")
state = og.sim.dump_state(serialized=False)
if sample_cloth_on_rigid(self.obj, other, randomize_xy=False) and self.get_value(other):
return True
else:
og.sim.load_state(state, serialized=False)
return False
def _get_value(self, other):
"""
Check whether the (cloth) object is overlaid on the other (rigid) object.
First, the cloth object needs to be touching the rigid object.
Then, the convex hull of the particles of the cloth object needs to cover a decent percentage of the
base aligned bounding box of the other rigid object.
"""
if not (self.obj.prim_type == PrimType.CLOTH and other.prim_type == PrimType.RIGID):
raise ValueError("Overlaid state requires obj1 is cloth and obj2 is rigid.")
if not self.obj.states[Touching].get_value(other):
return False
# Compute the convex hull of the particles of the cloth object.
points = self.obj.root_link.keypoint_particle_positions[:, :2]
cloth_hull = ConvexHull(points)
# Compute the base aligned bounding box of the rigid object.
bbox_center, bbox_orn, bbox_extent, _ = other.get_base_aligned_bbox(xy_aligned=True)
vertices_local = np.array(list(itertools.product((1, -1), repeat=3))) * (bbox_extent / 2)
vertices = trimesh.transformations.transform_points(vertices_local, T.pose2mat((bbox_center, bbox_orn)))
rigid_hull = ConvexHull(vertices[:, :2])
# The goal is to find the intersection of the convex hull and the bounding box.
# We can do so with HalfspaceIntersection, which takes as input a list of equations that define the half spaces,
# and an interior point. We assume the center of the bounding box is an interior point.
interior_pt = vertices.mean(axis=0)[:2]
half_spaces = np.vstack((cloth_hull.equations, rigid_hull.equations))
try:
half_space_intersection = HalfspaceIntersection(half_spaces, interior_pt)
except QhullError:
# The bbox center of the rigid body does not lie in the intersection, return False.
return False
# Compute the ratio between the intersection area and the bounding box area in the x-y plane.
# When input points are 2-dimensional, this is the area of the convex hull.
# Ref: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.ConvexHull.html
intersection_area = ConvexHull(half_space_intersection.intersections).volume
rigid_xy_area = bbox_extent[0] * bbox_extent[1]
return (intersection_area / rigid_xy_area) > m.OVERLAP_AREA_PERCENTAGE
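# Self-contained sketch of the hull-intersection technique used above, on 2-D toy data (a
# hypothetical helper, not part of the Overlaid API):
def _demo_hull_intersection_area():
    """Returns the overlap area of two unit squares offset by (0.5, 0.5); expected result is 0.25."""
    square_a = ConvexHull(np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]))
    square_b = ConvexHull(np.array([[0.5, 0.5], [1.5, 0.5], [0.5, 1.5], [1.5, 1.5]]))
    # Stack the half-space equations of both hulls and intersect around a known interior point
    half_spaces = np.vstack((square_a.equations, square_b.equations))
    interior_pt = np.array([0.75, 0.75])
    try:
        half_space_intersection = HalfspaceIntersection(half_spaces, interior_pt)
    except QhullError:
        return 0.0
    # For 2-D inputs, ConvexHull.volume is the polygon area
    return ConvexHull(half_space_intersection.intersections).volume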
| 3,916 | Python | 44.022988 | 120 | 0.701992 |
StanfordVL/OmniGibson/omnigibson/object_states/tensorized_value_state.py | from omnigibson.object_states.object_state_base import AbsoluteObjectState
from omnigibson.object_states.update_state_mixin import GlobalUpdateStateMixin
from omnigibson.utils.python_utils import classproperty
import numpy as np
class TensorizedValueState(AbsoluteObjectState, GlobalUpdateStateMixin):
"""
A state-mixin that implements optimized global value updates across all object state instances
of this type, i.e.: all values across all object state instances are updated at once, rather than per
individual instance update() call.
"""
# Numpy array of raw internally tracked values
# Shape is (N, ...), where the ith entry in the first dimension corresponds to the ith object state instance's value
VALUES = None
# Dictionary mapping object name to index in VALUES
OBJ_IDXS = None
# Dict of callbacks that can be added to when an object is removed
CALLBACKS_ON_REMOVE = None
@classmethod
def global_initialize(cls):
# Call super first
super().global_initialize()
# Initialize the global variables
cls.VALUES = np.array([], dtype=cls.value_type).reshape(0, *cls.value_shape)
cls.OBJ_IDXS = dict()
cls.CALLBACKS_ON_REMOVE = dict()
@classmethod
def global_update(cls):
# Call super first
super().global_update()
        # This globally updates all values at once. If there are no values, we skip by default since there is nothing
        # being tracked currently
if len(cls.VALUES) == 0:
return
cls.VALUES = cls._update_values(values=cls.VALUES)
@classmethod
def global_clear(cls):
# Call super first
super().global_clear()
# Clear internal state
cls.VALUES = None
cls.OBJ_IDXS = None
cls.CALLBACKS_ON_REMOVE = None
@classmethod
def _update_values(cls, values):
"""
Updates all internally tracked @values for this object state. Should be implemented by subclass.
Args:
values (np.array): Tensorized value array
Returns:
np.array: Updated tensorized value array
"""
raise NotImplementedError
@classmethod
def _add_obj(cls, obj):
"""
Adds object @obj to be tracked internally in @VALUES array.
Args:
obj (StatefulObject): Object to add
"""
assert obj.name not in cls.OBJ_IDXS, \
f"Tried to add object {obj.name} to the global tensorized value array but the object already exists!"
# Add this object to the tracked global state
cls.OBJ_IDXS[obj.name] = len(cls.VALUES)
cls.VALUES = np.concatenate([cls.VALUES, np.zeros((1, *cls.value_shape), dtype=cls.value_type)], axis=0)
@classmethod
def _remove_obj(cls, obj):
"""
Removes object @obj from the internally tracked @VALUES array.
This also removes the corresponding tracking idx in @OBJ_IDXS
Args:
obj (StatefulObject): Object to remove
"""
# Removes this tracked object from the global value array
assert obj.name in cls.OBJ_IDXS, \
f"Tried to remove object {obj.name} from the global tensorized value array but the object does not exist!"
deleted_idx = cls.OBJ_IDXS.pop(obj.name)
# Re-standardize the indices
for i, name in enumerate(cls.OBJ_IDXS.keys()):
cls.OBJ_IDXS[name] = i
        cls.VALUES = np.delete(cls.VALUES, deleted_idx, axis=0)
@classmethod
def add_callback_on_remove(cls, name, callback):
"""
Adds a callback that will be triggered when @self.remove is called
Args:
name (str): Name of the callback to trigger
callback (function): Function to execute. Should have signature callback(obj: BaseObject) --> None
"""
cls.CALLBACKS_ON_REMOVE[name] = callback
@classmethod
def remove_callback_on_remove(cls, name):
"""
Removes callback with name @name from the internal set of callbacks
Args:
name (str): Name of the callback to remove
"""
cls.CALLBACKS_ON_REMOVE.pop(name)
@classproperty
def value_shape(cls):
"""
Returns:
tuple: Expected shape of the per-object state instance value. If empty (), this assumes
that each entry is a single (non-array) value. Default is ()
"""
return ()
@classproperty
def value_type(cls):
"""
Returns:
type: Type of the internal value array, e.g., bool, np.uint, float, etc. Default is float
"""
return float
@classproperty
def value_name(cls):
"""
Returns:
str: Name of the value key to assign when dumping / loading the state. Should be implemented by subclass
"""
raise NotImplementedError
def __init__(self, *args, **kwargs):
# Run super first
super().__init__(*args, **kwargs)
self._add_obj(obj=self.obj)
def remove(self):
# Execute all callbacks
for callback in self.CALLBACKS_ON_REMOVE.values():
callback(self.obj)
# Removes this tracked object from the global value array
self._remove_obj(obj=self.obj)
def _get_value(self):
# Directly access value from global register
return self.value_type(self.VALUES[self.OBJ_IDXS[self.obj.name]])
def _set_value(self, new_value):
# Directly set value in global register
self.VALUES[self.OBJ_IDXS[self.obj.name]] = new_value
return True
@property
def state_size(self):
# This is the flattened size of @self.value_shape
# Note that np.product(()) returns 1, which is also correct for a non-arrayed value
return int(np.product(self.value_shape))
# For this state, we simply store its value.
def _dump_state(self):
return {self.value_name: self._get_value()}
def _load_state(self, state):
self._set_value(state[self.value_name])
def _serialize(self, state):
# If the state value is not an iterable, wrap it in a numpy array
val = state[self.value_name] if isinstance(state[self.value_name], np.ndarray) else np.array([state[self.value_name]])
return val.flatten().astype(float)
def _deserialize(self, state):
value_length = int(np.product(self.value_shape))
value = state[:value_length].reshape(self.value_shape) if len(self.value_shape) > 0 else state[0]
return {self.value_name: value}, value_length
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("TensorizedValueState")
return classes
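# A minimal sketch of the batched-update pattern this mixin enables (hypothetical decay model,
# independent of any concrete subclass): a subclass's _update_values() can replace N per-instance
# updates with one vectorized numpy call, e.g.:
def _demo_tensorized_decay(values, decay_rate=0.1):
    """Decays an (N,) array of tracked scalar values toward zero in one vectorized step."""
    return values * (1.0 - decay_rate)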
| 6,878 | Python | 33.395 | 126 | 0.631288 |
StanfordVL/OmniGibson/omnigibson/object_states/inside.py | import numpy as np
import omnigibson as og
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.adjacency import HorizontalAdjacency, VerticalAdjacency, flatten_planes
from omnigibson.object_states.kinematics_mixin import KinematicsMixin
from omnigibson.object_states.object_state_base import BooleanStateMixin, RelativeObjectState
from omnigibson.utils.object_state_utils import sample_kinematics
from omnigibson.utils.constants import PrimType
from omnigibson.utils.object_state_utils import m as os_m
class Inside(RelativeObjectState, KinematicsMixin, BooleanStateMixin):
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.update({AABB, HorizontalAdjacency, VerticalAdjacency})
return deps
def _set_value(self, other, new_value, reset_before_sampling=False):
if not new_value:
raise NotImplementedError("Inside does not support set_value(False)")
if other.prim_type == PrimType.CLOTH:
raise ValueError("Cannot set an object inside a cloth object.")
state = og.sim.dump_state(serialized=False)
# Possibly reset this object if requested
if reset_before_sampling:
self.obj.reset()
for _ in range(os_m.DEFAULT_HIGH_LEVEL_SAMPLING_ATTEMPTS):
if sample_kinematics("inside", self.obj, other) and self.get_value(other):
return True
else:
og.sim.load_state(state, serialized=False)
return False
def _get_value(self, other):
if other.prim_type == PrimType.CLOTH:
raise ValueError("Cannot detect if an object is inside a cloth object.")
# First check that the inner object's position is inside the outer's AABB.
# Since we usually check for a small set of outer objects, this is cheap
aabb_lower, aabb_upper = self.obj.states[AABB].get_value()
inner_object_pos = (aabb_lower + aabb_upper) / 2.0
outer_object_aabb_lo, outer_object_aabb_hi = other.states[AABB].get_value()
if not (np.less_equal(outer_object_aabb_lo, inner_object_pos).all() and np.less_equal(inner_object_pos, outer_object_aabb_hi).all()):
return False
# Our definition of inside: an object A is inside an object B if there
# exists a 3-D coordinate space in which object B can be found on both
# sides of object A in at least 2 out of 3 of the coordinate axes. To
# check this, we sample a bunch of coordinate systems (for the sake of
# simplicity, all have their 3rd axes aligned with the Z axis but the
# 1st and 2nd axes are free).
vertical_adjacency = self.obj.states[VerticalAdjacency].get_value()
horizontal_adjacency = self.obj.states[HorizontalAdjacency].get_value()
# First, check if the body can be found on both sides in Z
on_both_sides_Z = other in vertical_adjacency.negative_neighbors and other in vertical_adjacency.positive_neighbors
if on_both_sides_Z:
# If the object is on both sides of Z, we already found 1 axis, so just
# find another axis where the object is on both sides.
on_both_sides_in_any_axis = any(
other in adjacency_list.positive_neighbors and
other in adjacency_list.negative_neighbors
for adjacency_list in flatten_planes(horizontal_adjacency)
)
return on_both_sides_in_any_axis
# If the object was not on both sides of Z, then we need to look at each
# plane and try to find one where the object is on both sides of both
# axes in that plane.
on_both_sides_of_both_axes_in_any_plane = any(
other in adjacency_list_by_axis[0].positive_neighbors and
other in adjacency_list_by_axis[0].negative_neighbors and
other in adjacency_list_by_axis[1].positive_neighbors and
other in adjacency_list_by_axis[1].negative_neighbors
for adjacency_list_by_axis in horizontal_adjacency
)
return on_both_sides_of_both_axes_in_any_plane
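# Illustrative check using this state (hypothetical objects): a mug resting inside a
# closed cabinet sees the cabinet on both sides along Z (shelf below, shelf above)
# and along at least one horizontal axis, so mug.states[Inside].get_value(cabinet)
# returns True; a mug sitting on top of the cabinet fails the Z test and, lacking
# two horizontal both-sided axes, returns False.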
| 4,157 | Python | 47.917646 | 141 | 0.674044 |
StanfordVL/OmniGibson/omnigibson/object_states/temperature.py | import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.object_states.heat_source_or_sink import HeatSourceOrSink
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.tensorized_value_state import TensorizedValueState
import omnigibson as og
from omnigibson.utils.python_utils import classproperty
# Create settings for this module
m = create_module_macros(module_path=__file__)
# TODO: Consider sourcing default temperature from scene
# Default ambient temperature.
m.DEFAULT_TEMPERATURE = 23.0 # degrees Celsius
# What fraction of the temperature difference with the default temperature should be decayed every step.
m.TEMPERATURE_DECAY_SPEED = 0.02 # per second. We'll do the conversion to steps later.
class Temperature(TensorizedValueState):
def __init__(self, obj):
# Run super first
super().__init__(obj)
# Set value to be default
self._set_value(m.DEFAULT_TEMPERATURE)
@classmethod
def update_temperature_from_heatsource_or_sink(cls, objs, temperature, rate):
"""
Updates @objs' internal temperatures based on @temperature and @rate
Args:
objs (Iterable of StatefulObject): Objects whose temperatures should be updated
temperature (float): Heat source / sink temperature
rate (float): Heating rate of the source / sink
"""
# Get idxs for objs
idxs = [cls.OBJ_IDXS[obj.name] for obj in objs]
cls.VALUES[idxs] += (temperature - cls.VALUES[idxs]) * rate * og.sim.get_rendering_dt()
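# For instance (hypothetical numbers): an object at 23 degrees next to a 200-degree
# source with rate=0.1 and a rendering dt of 1/30 s gains
# (200 - 23) * 0.1 * (1/30) ~= 0.59 degrees on this update.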
@classmethod
def get_dependencies(cls):
deps = super().get_dependencies()
deps.add(AABB)
return deps
@classmethod
def get_optional_dependencies(cls):
deps = super().get_optional_dependencies()
deps.add(HeatSourceOrSink)
return deps
@classmethod
def _update_values(cls, values):
# Apply temperature decay
return values + (m.DEFAULT_TEMPERATURE - values) * m.TEMPERATURE_DECAY_SPEED * og.sim.get_rendering_dt()
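# e.g. (illustrative): an object at 100 degrees with the 0.02/s decay above and a
# rendering dt of 1/30 s moves by (23 - 100) * 0.02 * (1/30) ~= -0.05 degrees, i.e.
# about 0.05 degrees back toward the 23-degree ambient default per rendered frame.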
@classproperty
def value_name(cls):
return "temperature"
| 2,179 | Python | 33.603174 | 112 | 0.692061 |
StanfordVL/OmniGibson/omnigibson/objects/usd_object.py | import os
import tempfile
import omnigibson as og
from omnigibson.objects.stateful_object import StatefulObject
from omnigibson.utils.constants import PrimType
from omnigibson.utils.usd_utils import add_asset_to_stage
from omnigibson.utils.asset_utils import decrypt_file
class USDObject(StatefulObject):
"""
USDObjects are instantiated from a USD file. They can be composed of one
or more links and joints. They may or may not be passive.
"""
def __init__(
self,
name,
usd_path,
encrypted=False,
prim_path=None,
category="object",
uuid=None,
scale=None,
visible=True,
fixed_base=False,
visual_only=False,
kinematic_only=None,
self_collisions=False,
prim_type=PrimType.RIGID,
load_config=None,
abilities=None,
include_default_states=True,
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
usd_path (str): global path to the USD file to load
encrypted (bool): whether this file is encrypted (and should therefore be decrypted) or not
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
category (str): Category for the object. Defaults to "object".
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
fixed_base (bool): whether to fix the base of this object or not
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
kinematic_only (None or bool): Whether this object should be kinematic only (and not get affected by any
collisions). If None, then this value will be set to True if @fixed_base is True and some other criteria
are satisfied (see object_base.py post_load function), else False.
self_collisions (bool): Whether to enable self collisions for this object
prim_type (PrimType): Which type of prim the object is, Valid options are: {PrimType.RIGID, PrimType.CLOTH}
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
include_default_states (bool): whether to include the default object states from @get_default_states
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
Note that this base object does NOT pass kwargs down into the Prim-type super() classes, and we assume
that kwargs are only shared between all SUBclasses (children), not SUPERclasses (parents).
"""
self._usd_path = usd_path
self._encrypted = encrypted
super().__init__(
prim_path=prim_path,
name=name,
category=category,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
kinematic_only=kinematic_only,
self_collisions=self_collisions,
prim_type=prim_type,
include_default_states=include_default_states,
load_config=load_config,
abilities=abilities,
**kwargs,
)
def _load(self):
"""
Load the object into the simulator's USD stage and return the loaded prim
"""
usd_path = self._usd_path
if self._encrypted:
# Create a temporary file to store the decrypted asset, load it, and then delete it
encrypted_filename = self._usd_path.replace(".usd", ".encrypted.usd")
usd_path = self._usd_path.replace(".usd", f".{self.uuid}.usd")
decrypt_file(encrypted_filename, usd_path)
prim = add_asset_to_stage(asset_path=usd_path, prim_path=self._prim_path)
if self._encrypted:
# On Windows, Isaac Sim won't let go of the file until the prim is removed, so we can't delete it.
if os.name == "posix":
os.remove(usd_path)
return prim
def _create_prim_with_same_kwargs(self, prim_path, name, load_config):
# Add additional kwargs
return self.__class__(
prim_path=prim_path,
usd_path=self._usd_path,
name=name,
category=self.category,
scale=self.scale,
visible=self.visible,
fixed_base=self.fixed_base,
visual_only=self._visual_only,
prim_type=self._prim_type,
load_config=load_config,
abilities=self._abilities,
)
@property
def usd_path(self):
"""
Returns:
str: absolute path to this model's USD file. By default, this is the loaded usd path
passed in as an argument
"""
return self._usd_path
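# Minimal usage sketch (hypothetical asset path; not part of this module):
# obj = USDObject(name="mug", usd_path="/path/to/mug.usd", encrypted=True, scale=0.1)
# With @encrypted set, _load() decrypts the sibling "/path/to/mug.encrypted.usd" into
# a uuid-suffixed temporary USD before adding it to the stage.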
| 5,930 | Python | 43.593985 | 120 | 0.615683 |
StanfordVL/OmniGibson/omnigibson/objects/stateful_object.py | import sys
from collections import defaultdict
import numpy as np
from bddl.object_taxonomy import ObjectTaxonomy
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros, gm
from omnigibson.object_states.factory import (
get_default_states,
get_state_name,
get_requirements_for_ability,
get_states_for_ability,
get_states_by_dependency_order,
get_texture_change_states,
get_fire_states,
get_steam_states,
get_visual_states,
get_texture_change_priority,
)
from omnigibson.object_states.object_state_base import REGISTERED_OBJECT_STATES
from omnigibson.object_states.heat_source_or_sink import HeatSourceOrSink
from omnigibson.object_states.on_fire import OnFire
from omnigibson.object_states.particle_modifier import ParticleRemover
from omnigibson.objects.object_base import BaseObject
from omnigibson.renderer_settings.renderer_settings import RendererSettings
from omnigibson.utils.constants import PrimType, EmitterType
from omnigibson.utils.python_utils import classproperty, extract_class_init_kwargs_from_dict
from omnigibson.object_states import Saturated
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
OBJECT_TAXONOMY = ObjectTaxonomy()
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.STEAM_EMITTER_SIZE_RATIO = [0.8, 0.8, 0.4] # (x,y,z) scale of generated steam relative to its object, range [0, inf)
m.STEAM_EMITTER_DENSITY_CELL_RATIO = 0.1 # scale of steam density relative to its object, range [0, inf)
m.STEAM_EMITTER_HEIGHT_RATIO = 0.6 # z-height of generated steam relative to its object's native height, range [0, inf)
m.FIRE_EMITTER_HEIGHT_RATIO = 0.4 # z-height of generated fire relative to its object's native height, range [0, inf)
class FlowEmitterLayerRegistry:
"""
Registry for flow emitter layers. This is used to ensure that all flow emitters are placed on unique layers, so that
they do not interfere with each other.
"""
def __init__(self):
self._layer = 0
def __call__(self):
self._layer += 1
return self._layer
LAYER_REGISTRY = FlowEmitterLayerRegistry()
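# Illustrative behavior of the shared registry above: successive calls hand out 1, 2, 3, ...
# first_layer = LAYER_REGISTRY() # -> 1
# second_layer = LAYER_REGISTRY() # -> 2, so two emitters never share a flow layer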
class StatefulObject(BaseObject):
"""Objects that support object states."""
def __init__(
self,
name,
prim_path=None,
category="object",
uuid=None,
scale=None,
visible=True,
fixed_base=False,
visual_only=False,
kinematic_only=None,
self_collisions=False,
prim_type=PrimType.RIGID,
load_config=None,
abilities=None,
include_default_states=True,
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
category (str): Category for the object. Defaults to "object".
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
fixed_base (bool): whether to fix the base of this object or not
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
kinematic_only (None or bool): Whether this object should be kinematic only (and not get affected by any
collisions). If None, then this value will be set to True if @fixed_base is True and some other criteria
are satisfied (see object_base.py post_load function), else False.
self_collisions (bool): Whether to enable self collisions for this object
prim_type (PrimType): Which type of prim the object is, Valid options are: {PrimType.RIGID, PrimType.CLOTH}
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
include_default_states (bool): whether to include the default object states from @get_default_states
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Values that will be filled later
self._states = None
self._emitters = dict()
self._visual_states = None
self._current_texture_state = None
self._include_default_states = include_default_states
# Load abilities from taxonomy if needed & possible
if abilities is None:
abilities = {}
taxonomy_class = OBJECT_TAXONOMY.get_synset_from_category(category)
if taxonomy_class is not None:
abilities = OBJECT_TAXONOMY.get_abilities(taxonomy_class)
assert isinstance(abilities, dict), "Object abilities must be in dictionary form."
self._abilities = abilities
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
category=category,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
kinematic_only=kinematic_only,
self_collisions=self_collisions,
prim_type=prim_type,
load_config=load_config,
**kwargs,
)
def _post_load(self):
# Run super first
super()._post_load()
# Prepare the object states
self._states = {}
self.prepare_object_states()
def _initialize(self):
# Run super first
super()._initialize()
# Initialize all states
for state in self._states.values():
state.initialize()
# Check whether this object requires any visual updates
states_set = set(self.states)
self._visual_states = states_set & get_visual_states()
# If we require visual updates, possibly create additional APIs
if len(self._visual_states) > 0:
if len(states_set & get_steam_states()) > 0:
self._create_emitter_apis(EmitterType.STEAM)
if len(states_set & get_fire_states()) > 0:
self._create_emitter_apis(EmitterType.FIRE)
def add_state(self, state):
"""
Adds state @state to self.states, keyed by the state's class.
Args:
state (ObjectStateBase): Object state instance to add to this object
"""
assert self._states is not None, "Cannot add state since states have not been initialized yet!"
assert state.__class__ not in self._states, f"State {state.__class__.__name__} " \
f"has already been added to this object!"
self._states[state.__class__] = state
@property
def states(self):
"""
Get the current states of this object.
Returns:
dict: Keyword-mapped states for this object
"""
return self._states
@property
def abilities(self):
"""
Returns:
dict: Dictionary mapping ability name to ability arguments for this object
"""
return self._abilities
def prepare_object_states(self):
"""
Prepare the state dictionary for an object by generating the appropriate
object state instances.
This uses the abilities of the object and the state dependency graph to
find & instantiate all relevant states.
"""
states_info = {state_type: {"ability": None, "params": dict()} for state_type in get_default_states()} if \
self._include_default_states else dict()
# Map the state type (class) to ability name and params
if gm.ENABLE_OBJECT_STATES:
for ability in tuple(self._abilities.keys()):
# First, sanity check all ability requirements
compatible = True
for requirement in get_requirements_for_ability(ability):
compatible, reason = requirement.is_compatible(obj=self)
if not compatible:
# Print out warning and pop ability
log.warning(f"Ability '{ability}' is incompatible with obj {self.name}, "
f"because requirement {requirement.__name__} was not met. Reason: {reason}")
self._abilities.pop(ability)
break
if compatible:
params = self._abilities[ability]
for state_type in get_states_for_ability(ability):
states_info[state_type] = {"ability": ability,
"params": state_type.postprocess_ability_params(params)}
# Add the dependencies into the list, too, and sort based on the dependency chain
# Must iterate over explicit tuple since dictionary changes size mid-iteration
for state_type in tuple(states_info.keys()):
# Add each state's dependencies, too. Note that only required dependencies are explicitly added, but both
# required AND optional dependencies are checked / sorted
for dependency in state_type.get_dependencies():
if dependency not in states_info:
states_info[dependency] = {"ability": None, "params": dict()}
# Iterate over all sorted state types, generating the states in topological order.
self._states = dict()
for state_type in get_states_by_dependency_order(states=states_info):
# Skip over any types that are not in our info dict -- these correspond to optional dependencies
if state_type not in states_info:
continue
relevant_params = extract_class_init_kwargs_from_dict(cls=state_type, dic=states_info[state_type]["params"], copy=False)
compatible, reason = state_type.is_compatible(obj=self, **relevant_params)
if compatible:
self._states[state_type] = state_type(obj=self, **relevant_params)
else:
log.warning(f"State {state_type.__name__} is incompatible with obj {self.name}. Reason: {reason}")
# Remove the ability if it exists
# Note that the object may still have some of the states related to the desired ability. In this way,
# we guarantee that the existence of a certain ability in self.abilities means at ALL corresponding
# object state dependencies are met by the underlying object asset
ability = states_info[state_type]["ability"]
if ability in self._abilities:
self._abilities.pop(ability)
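# Illustrative @abilities input for the pipeline above (hypothetical ability names and
# parameter values):
#   {"heatable": {}, "cookable": {"cook_temperature": 70.0}}
# Each ability expands into its state classes via get_states_for_ability(), after which
# required dependencies are pulled in and everything is instantiated in topological
# (dependency) order.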
def _create_emitter_apis(self, emitter_type):
"""
Create necessary prims and apis for steam effects.
Args:
emitter_type (EmitterType): Emitter to create
"""
# Make sure that flow setting is enabled.
renderer_setting = RendererSettings()
renderer_setting.common_settings.flow_settings.enable()
# Specify emitter config.
emitter_config = {}
bbox_extent_local = self.native_bbox if hasattr(self, "native_bbox") else self.aabb_extent / self.scale
if emitter_type == EmitterType.FIRE:
fire_at_metalink = True
if OnFire in self.states:
# Note whether the heat source link is explicitly set
link = self.states[OnFire].link
fire_at_metalink = link != self.root_link
elif HeatSourceOrSink in self.states:
# Only apply fire to non-root-link (i.e.: explicitly specified) heat source links
# Otherwise, immediately return
link = self.states[HeatSourceOrSink].link
if link == self.root_link:
return
else:
raise ValueError("Unknown fire state")
emitter_config["name"] = "flowEmitterSphere"
emitter_config["type"] = "FlowEmitterSphere"
emitter_config["position"] = (0.0, 0.0, 0.0) if fire_at_metalink \
else (0.0, 0.0, bbox_extent_local[2] * m.FIRE_EMITTER_HEIGHT_RATIO)
emitter_config["fuel"] = 0.6
emitter_config["coupleRateFuel"] = 1.2
emitter_config["buoyancyPerTemp"] = 0.04
emitter_config["burnPerTemp"] = 4
emitter_config["gravity"] = (0, 0, -60.0)
emitter_config["constantMask"] = 5.0
emitter_config["attenuation"] = 0.5
elif emitter_type == EmitterType.STEAM:
link = self.root_link
emitter_config["name"] = "flowEmitterBox"
emitter_config["type"] = "FlowEmitterBox"
emitter_config["position"] = (0.0, 0.0, bbox_extent_local[2] * m.STEAM_EMITTER_HEIGHT_RATIO)
emitter_config["fuel"] = 1.0
emitter_config["coupleRateFuel"] = 0.5
emitter_config["buoyancyPerTemp"] = 0.05
emitter_config["burnPerTemp"] = 0.5
emitter_config["gravity"] = (0, 0, -50.0)
emitter_config["constantMask"] = 10.0
emitter_config["attenuation"] = 1.5
else:
raise ValueError("Currently, only EmitterTypes FIRE and STEAM are supported!")
# Define prim paths.
# The flow system is created under the root link so that it automatically updates its pose as the object moves
flowEmitter_prim_path = f"{link.prim_path}/{emitter_config['name']}"
flowSimulate_prim_path = f"{link.prim_path}/flowSimulate"
flowOffscreen_prim_path = f"{link.prim_path}/flowOffscreen"
flowRender_prim_path = f"{link.prim_path}/flowRender"
# Define prims.
stage = og.sim.stage
emitter = stage.DefinePrim(flowEmitter_prim_path, emitter_config["type"])
simulate = stage.DefinePrim(flowSimulate_prim_path, "FlowSimulate")
offscreen = stage.DefinePrim(flowOffscreen_prim_path, "FlowOffscreen")
renderer = stage.DefinePrim(flowRender_prim_path, "FlowRender")
advection = stage.DefinePrim(flowSimulate_prim_path + "/advection", "FlowAdvectionCombustionParams")
smoke = stage.DefinePrim(flowSimulate_prim_path + "/advection/smoke", "FlowAdvectionCombustionParams")
vorticity = stage.DefinePrim(flowSimulate_prim_path + "/vorticity", "FlowVorticityParams")
rayMarch = stage.DefinePrim(flowRender_prim_path + "/rayMarch", "FlowRayMarchParams")
colormap = stage.DefinePrim(flowOffscreen_prim_path + "/colormap", "FlowRayMarchColormapParams")
self._emitters[emitter_type] = emitter
layer_number = LAYER_REGISTRY()
# Update emitter general settings.
emitter.CreateAttribute("enabled", lazy.pxr.Sdf.ValueTypeNames.Bool, False).Set(False)
emitter.CreateAttribute("position", lazy.pxr.Sdf.ValueTypeNames.Float3, False).Set(emitter_config["position"])
emitter.CreateAttribute("fuel", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(emitter_config["fuel"])
emitter.CreateAttribute("coupleRateFuel", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(emitter_config["coupleRateFuel"])
emitter.CreateAttribute("coupleRateVelocity", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(2.0)
emitter.CreateAttribute("velocity", lazy.pxr.Sdf.ValueTypeNames.Float3, False).Set((0, 0, 0))
emitter.CreateAttribute("layer", lazy.pxr.Sdf.ValueTypeNames.Int, False).Set(layer_number)
simulate.CreateAttribute("layer", lazy.pxr.Sdf.ValueTypeNames.Int, False).Set(layer_number)
offscreen.CreateAttribute("layer", lazy.pxr.Sdf.ValueTypeNames.Int, False).Set(layer_number)
renderer.CreateAttribute("layer", lazy.pxr.Sdf.ValueTypeNames.Int, False).Set(layer_number)
advection.CreateAttribute("buoyancyPerTemp", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(emitter_config["buoyancyPerTemp"])
advection.CreateAttribute("burnPerTemp", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(emitter_config["burnPerTemp"])
advection.CreateAttribute("gravity", lazy.pxr.Sdf.ValueTypeNames.Float3, False).Set(emitter_config["gravity"])
vorticity.CreateAttribute("constantMask", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(emitter_config["constantMask"])
rayMarch.CreateAttribute("attenuation", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(emitter_config["attenuation"])
# Update emitter unique settings.
if emitter_type == EmitterType.FIRE:
# Radius is in the absolute world coordinate even though the fire is under the link frame.
# In other words, scaling the object doesn't change the fire radius.
if fire_at_metalink:
# TODO: get radius of heat_source_link from metadata.
radius = 0.05
else:
bbox_extent_world = self.native_bbox * self.scale if hasattr(self, "native_bbox") else self.aabb_extent
# Radius is the average x-y half-extent of the object
radius = float(np.mean(bbox_extent_world[:2]) / 2.0)
emitter.CreateAttribute("radius", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(radius)
simulate.CreateAttribute("densityCellSize", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(radius*0.2)
smoke.CreateAttribute("fade", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(2.0)
# Set fire colormap.
rgbaPoints = []
rgbaPoints.append(lazy.pxr.Gf.Vec4f(0.0154, 0.0177, 0.0154, 0.004902))
rgbaPoints.append(lazy.pxr.Gf.Vec4f(0.03575, 0.03575, 0.03575, 0.504902))
rgbaPoints.append(lazy.pxr.Gf.Vec4f(0.03575, 0.03575, 0.03575, 0.504902))
rgbaPoints.append(lazy.pxr.Gf.Vec4f(1, 0.1594, 0.0134, 0.8))
rgbaPoints.append(lazy.pxr.Gf.Vec4f(13.53, 2.99, 0.12599, 0.8))
rgbaPoints.append(lazy.pxr.Gf.Vec4f(78, 39, 6.1, 0.7))
colormap.CreateAttribute("rgbaPoints", lazy.pxr.Sdf.ValueTypeNames.Float4Array, False).Set(rgbaPoints)
elif emitter_type == EmitterType.STEAM:
emitter.CreateAttribute("halfSize", lazy.pxr.Sdf.ValueTypeNames.Float3, False).Set(
tuple(bbox_extent_local * np.array(m.STEAM_EMITTER_SIZE_RATIO) / 2.0))
simulate.CreateAttribute("densityCellSize", lazy.pxr.Sdf.ValueTypeNames.Float, False).Set(bbox_extent_local[2] * m.STEAM_EMITTER_DENSITY_CELL_RATIO)
def set_emitter_enabled(self, emitter_type, value):
"""
Enable/disable the emitter prim for fire/steam effect.
Args:
emitter_type (EmitterType): Emitter to set
value (bool): Value to set
"""
if emitter_type not in self._emitters:
return
if value != self._emitters[emitter_type].GetAttribute("enabled").Get():
self._emitters[emitter_type].GetAttribute("enabled").Set(value)
def get_textures(self):
"""
Gets prim's texture files.
Returns:
list of str: List of texture file paths
"""
return [material.diffuse_texture for material in self.materials if material.diffuse_texture is not None]
def update_visuals(self):
"""
Update the prim's visuals (texture change, steam/fire effects, etc).
Should be called after all the states are updated.
"""
if len(self._visual_states) > 0:
texture_change_states = []
emitter_enabled = defaultdict(bool)
for state_type in self._visual_states:
state = self.states[state_type]
if state_type in get_texture_change_states():
if state_type == Saturated:
for particle_system in ParticleRemover.supported_active_systems.values():
if state.get_value(particle_system):
texture_change_states.append(state)
# Only need to do this once, since Saturated handles all fluid systems
break
elif state.get_value():
texture_change_states.append(state)
if state_type in get_steam_states():
emitter_enabled[EmitterType.STEAM] |= state.get_value()
if state_type in get_fire_states():
emitter_enabled[EmitterType.FIRE] |= state.get_value()
for emitter_type in emitter_enabled:
self.set_emitter_enabled(emitter_type, emitter_enabled[emitter_type])
texture_change_states.sort(key=lambda s: get_texture_change_priority()[s.__class__])
object_state = texture_change_states[-1] if len(texture_change_states) > 0 else None
# Only update our texture change if it's a different object state than the one we already have
if object_state != self._current_texture_state:
self._update_texture_change(object_state)
self._current_texture_state = object_state
def _update_texture_change(self, object_state):
"""
Update the texture based on the given object_state. E.g. if object_state is Frozen, update the diffuse color
to match the frozen state. If object_state is None, update the diffuse color to the default value. It modifies
the current albedo map by adding and scaling the values. See @self._update_albedo_value for details.
Args:
object_state (BooleanStateMixin or None): the object state that the diffuse color should match to
"""
for material in self.materials:
self._update_albedo_value(object_state, material)
@staticmethod
def _update_albedo_value(object_state, material):
"""
Update the albedo value based on the given object_state. The final albedo value is
albedo_value = diffuse_tint * (albedo_value + albedo_add)
Args:
object_state (BooleanStateMixin or None): the object state that the diffuse color should match to
material (MaterialPrim): the material to use to update the albedo value
"""
if object_state is None:
# This restores the albedo map to its original value
albedo_add = 0.0
diffuse_tint = (1.0, 1.0, 1.0)
else:
# Query the object state for the parameters
albedo_add, diffuse_tint = object_state.get_texture_change_params()
if material.is_glass:
if not np.allclose(material.glass_color, diffuse_tint):
material.glass_color = diffuse_tint
else:
if material.albedo_add != albedo_add:
material.albedo_add = albedo_add
if not np.allclose(material.diffuse_tint, diffuse_tint):
material.diffuse_tint = diffuse_tint
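# Worked example of the formula above (hypothetical state parameters): with
# albedo_add = 0.1 and diffuse_tint = (0.5, 0.5, 1.0), a grey albedo value of 0.4
# becomes (0.5, 0.5, 1.0) * (0.4 + 0.1) = (0.25, 0.25, 0.5), i.e. a blue-shifted tint.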
def remove(self):
# Run super
super().remove()
# Iterate over all states and run their remove call
for state_instance in self._states.values():
state_instance.remove()
def _dump_state(self):
# Grab state from super class
state = super()._dump_state()
# Also add non-kinematic states
non_kin_states = dict()
for state_type, state_instance in self._states.items():
if state_instance.stateful:
non_kin_states[get_state_name(state_type)] = state_instance.dump_state(serialized=False)
state["non_kin"] = non_kin_states
return state
def _load_state(self, state):
# Call super method first
super()._load_state(state=state)
# Load non-kinematic states
self.load_non_kin_state(state)
def load_non_kin_state(self, state):
# Load all states that are stateful
for state_type, state_instance in self._states.items():
state_name = get_state_name(state_type)
if state_instance.stateful:
if state_name in state["non_kin"]:
state_instance.load_state(state=state["non_kin"][state_name], serialized=False)
else:
log.warning(f"Missing object state [{state_name}] in the state dump for obj {self.name}")
# Clear cache after loading state
self.clear_states_cache()
def _serialize(self, state):
# Call super method first
state_flat = super()._serialize(state=state)
# Iterate over all states and serialize them individually
non_kin_state_flat = np.concatenate([
self._states[REGISTERED_OBJECT_STATES[state_name]].serialize(state_dict)
for state_name, state_dict in state["non_kin"].items()
]) if len(state["non_kin"]) > 0 else np.array([])
# Combine these two arrays
return np.concatenate([state_flat, non_kin_state_flat]).astype(float)
def _deserialize(self, state):
# Call super method first
state_dic, idx = super()._deserialize(state=state)
# Iterate over all states and deserialize their states if they're stateful
non_kin_state_dic = dict()
for state_type, state_instance in self._states.items():
state_name = get_state_name(state_type)
if state_instance.stateful:
non_kin_state_dic[state_name] = state_instance.deserialize(state[idx:idx+state_instance.state_size])
idx += state_instance.state_size
state_dic["non_kin"] = non_kin_state_dic
return state_dic, idx
def clear_states_cache(self):
"""
Clears the internal cache from all owned states
"""
# Check self._states just in case states have not been initialized yet.
if not self._states:
return
for _, obj_state in self._states.items():
obj_state.clear_cache()
def set_position_orientation(self, position=None, orientation=None):
super().set_position_orientation(position=position, orientation=orientation)
self.clear_states_cache()
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("StatefulObject")
return classes
| 27,486 | Python | 46.970332 | 160 | 0.625446 |
StanfordVL/OmniGibson/omnigibson/objects/light_object.py | import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.objects.stateful_object import StatefulObject
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.utils.constants import PrimType
from omnigibson.utils.ui_utils import create_module_logger
import numpy as np
# Create module logger
log = create_module_logger(module_name=__name__)
class LightObject(StatefulObject):
"""
LightObjects are objects that generate light in the simulation
"""
LIGHT_TYPES = {
"Cylinder",
"Disk",
"Distant",
"Dome",
"Geometry",
"Rect",
"Sphere",
}
def __init__(
self,
name,
light_type,
prim_path=None,
category="light",
uuid=None,
scale=None,
fixed_base=False,
load_config=None,
abilities=None,
include_default_states=True,
radius=1.0,
intensity=50000.0,
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
light_type (str): Type of light to create. Valid options are LIGHT_TYPES
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
category (str): Category for the object. Defaults to "object".
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
fixed_base (bool): whether to fix the base of this object or not
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
include_default_states (bool): whether to include the default object states from @get_default_states
radius (float): Radius for this light.
intensity (float): Intensity for this light.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Compose load config and add rgba values
load_config = dict() if load_config is None else load_config
load_config["scale"] = scale
load_config["intensity"] = intensity
load_config["radius"] = radius if light_type in {"Cylinder", "Disk", "Sphere"} else None
# Make sure primitive type is valid
assert_valid_key(key=light_type, valid_keys=self.LIGHT_TYPES, name="light_type")
self.light_type = light_type
# Other attributes to be filled in at runtime
self._light_link = None
# Run super method
super().__init__(
prim_path=prim_path,
name=name,
category=category,
uuid=uuid,
scale=scale,
visible=True,
fixed_base=fixed_base,
visual_only=True,
self_collisions=False,
prim_type=PrimType.RIGID,
include_default_states=include_default_states,
load_config=load_config,
abilities=abilities,
**kwargs,
)
def _load(self):
# Define XForm and base link for this light
prim = og.sim.stage.DefinePrim(self._prim_path, "Xform")
base_link = og.sim.stage.DefinePrim(f"{self._prim_path}/base_link", "Xform")
# Define the actual light link
light_prim = getattr(lazy.pxr.UsdLux, f"{self.light_type}Light").Define(og.sim.stage, f"{self._prim_path}/base_link/light").GetPrim()
return prim
def _post_load(self):
# run super first
super()._post_load()
# Grab reference to light link
self._light_link = XFormPrim(prim_path=f"{self._prim_path}/base_link/light", name=f"{self.name}:light_link")
# Apply Shaping API and set default cone angle attribute
shaping_api = lazy.pxr.UsdLux.ShapingAPI.Apply(self._light_link.prim)
shaping_api.GetShapingConeAngleAttr().Set(180.0)
# Optionally set the intensity
if self._load_config.get("intensity", None) is not None:
self.intensity = self._load_config["intensity"]
# Optionally set the radius
if self._load_config.get("radius", None) is not None:
self.radius = self._load_config["radius"]
def _initialize(self):
# Run super
super()._initialize()
# Initialize light link
self._light_link.initialize()
@property
def aabb(self):
# This is a virtual object (with no associated visual mesh), so omni returns an invalid AABB.
# Therefore we instead return a hardcoded small value
return np.ones(3) * -0.001, np.ones(3) * 0.001
@property
def light_link(self):
"""
Returns:
XFormPrim: Link corresponding to the light prim itself
"""
return self._light_link
@property
def radius(self):
"""
Gets this light's radius
Returns:
float: radius for this light
"""
return self._light_link.get_attribute("inputs:radius")
@radius.setter
def radius(self, radius):
"""
Sets this light's radius
Args:
radius (float): radius to set
"""
self._light_link.set_attribute("inputs:radius", radius)
@property
def intensity(self):
"""
Gets this light's intensity
Returns:
float: intensity for this light
"""
return self._light_link.get_attribute("inputs:intensity")
@intensity.setter
def intensity(self, intensity):
"""
Sets this light's intensity
Args:
intensity (float): intensity to set
"""
self._light_link.set_attribute(
"inputs:intensity",
intensity)
@property
def color(self):
"""
Gets this light's color
Returns:
3-tuple of float: (R, G, B) color for this light, each value in range [0, 1]
"""
return tuple(float(x) for x in self._light_link.get_attribute("inputs:color"))
@color.setter
def color(self, color):
"""
Sets this light's color
Args:
color ([float, float, float]): color to set, each value in range [0, 1]
"""
self._light_link.set_attribute(
"inputs:color",
lazy.pxr.Gf.Vec3f(color))
@property
def texture_file_path(self):
"""
Gets this light's texture file path. Only valid for dome lights.
Returns:
str: texture file path for this light
"""
return str(self._light_link.get_attribute("inputs:texture:file"))
@texture_file_path.setter
def texture_file_path(self, texture_file_path):
"""
Sets this light's texture file path. Only valid for dome lights.
Args:
texture_file_path (str): path of texture file that should be used for this light
"""
self._light_link.set_attribute(
"inputs:texture:file",
lazy.pxr.Sdf.AssetPath(texture_file_path))
def _create_prim_with_same_kwargs(self, prim_path, name, load_config):
# Add additional kwargs (bounding_box is already captured in load_config)
return self.__class__(
prim_path=prim_path,
light_type=self.light_type,
name=name,
intensity=self.intensity,
load_config=load_config,
)
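# Minimal usage sketch (hypothetical values; assumes a running og.sim into which the
# object can be imported):
# light = LightObject(name="lamp", light_type="Sphere", radius=0.05, intensity=1e5)
# og.sim.import_object(light)
# light.color = (1.0, 0.8, 0.6) # warm white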
| 8,319 | Python | 32.821138 | 141 | 0.597548 |
StanfordVL/OmniGibson/omnigibson/objects/object_base.py | from abc import ABCMeta
import numpy as np
from collections.abc import Iterable
import trimesh
from scipy.spatial.transform import Rotation
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros, gm
from omnigibson.utils.usd_utils import create_joint, CollisionAPI
from omnigibson.prims.entity_prim import EntityPrim
from omnigibson.utils.python_utils import Registerable, classproperty, get_uuid
from omnigibson.utils.constants import PrimType, semantic_class_name_to_id
from omnigibson.utils.ui_utils import create_module_logger, suppress_omni_log
import omnigibson.utils.transform_utils as T
# Global dicts that will contain mappings
REGISTERED_OBJECTS = dict()
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Settings for highlighting objects
m.HIGHLIGHT_RGB = [1.0, 0.1, 0.92] # Default highlighting (R,G,B) color when highlighting objects
m.HIGHLIGHT_INTENSITY = 10000.0 # Highlight intensity to apply, range [0, 10000)
# Physics settings for objects -- see https://nvidia-omniverse.github.io/PhysX/physx/5.3.1/docs/RigidBodyDynamics.html?highlight=velocity%20iteration#solver-iterations
m.DEFAULT_SOLVER_POSITION_ITERATIONS = 32
m.DEFAULT_SOLVER_VELOCITY_ITERATIONS = 1
class BaseObject(EntityPrim, Registerable, metaclass=ABCMeta):
"""This is the interface that all OmniGibson objects must implement."""
def __init__(
self,
name,
prim_path=None,
category="object",
uuid=None,
scale=None,
visible=True,
fixed_base=False,
visual_only=False,
kinematic_only=None,
self_collisions=False,
prim_type=PrimType.RIGID,
load_config=None,
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
category (str): Category for the object. Defaults to "object".
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
fixed_base (bool): whether to fix the base of this object or not
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
kinematic_only (None or bool): Whether this object should be kinematic only (and not get affected by any
collisions). If None, then this value will be set to True if @fixed_base is True and some other criteria
are satisfied (see object_base.py post_load function), else False.
self_collisions (bool): Whether to enable self collisions for this object
prim_type (PrimType): Which type of prim the object is, Valid options are: {PrimType.RIGID, PrimType.CLOTH}
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
Note that this base object does NOT pass kwargs down into the Prim-type super() classes, and we assume
that kwargs are only shared between all SUBclasses (children), not SUPERclasses (parents).
"""
# Generate default prim path if none is specified
prim_path = f"/World/{name}" if prim_path is None else prim_path
# Store values
self.uuid = get_uuid(name) if uuid is None else uuid
assert len(str(self.uuid)) <= 8, f"UUID for this object must be at max 8-digits, got: {self.uuid}"
self.category = category
self.fixed_base = fixed_base
# Values to be created at runtime
self._highlight_cached_values = None
self._highlighted = None
# Create load config from inputs
load_config = dict() if load_config is None else load_config
load_config["scale"] = np.array(scale) if isinstance(scale, Iterable) else scale
load_config["visible"] = visible
load_config["visual_only"] = visual_only
load_config["kinematic_only"] = kinematic_only
load_config["self_collisions"] = self_collisions
load_config["prim_type"] = prim_type
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
# TODO: Super hacky, think of a better way to preserve this info
# Update init info for this
self._init_info["args"]["name"] = self.name
self._init_info["args"]["uuid"] = self.uuid
def load(self):
# Run super method ONLY if we're not loaded yet
if self.loaded:
prim = self._prim
else:
prim = super().load()
log.info(f"Loaded {self.name} at {self.prim_path}")
return prim
def remove(self):
# Run super first
super().remove()
# Notify user that the object was removed
log.info(f"Removed {self.name} from {self.prim_path}")
def _post_load(self):
# Add fixed joint or make object kinematic only if we're fixing the base
kinematic_only = False
if self.fixed_base:
# For optimization purposes, if we only have a single rigid body that has either
# (no custom scaling OR no fixed joints), we assume this is not an articulated object so we
# merely set this to be a static collider, i.e.: kinematic-only
# The custom scaling / fixed joints requirement is needed because omniverse complains about scaling that
# occurs with respect to fixed joints, as omni will "snap" bodies together otherwise
scale = np.ones(3) if self._load_config["scale"] is None else np.array(self._load_config["scale"])
if self.n_joints == 0 and (np.all(np.isclose(scale, 1.0, atol=1e-3)) or self.n_fixed_joints == 0) and (self._load_config["kinematic_only"] is not False) and not self.has_attachment_points:
kinematic_only = True
# Validate that we didn't make a kinematic-only decision that does not match
assert self._load_config["kinematic_only"] is None or kinematic_only == self._load_config["kinematic_only"], \
f"Kinematic only decision does not match! Got: {kinematic_only}, expected: {self._load_config['kinematic_only']}"
# Actually apply the kinematic-only decision
self._load_config["kinematic_only"] = kinematic_only
# Run super first
super()._post_load()
# If the object is fixed_base but kinematic only is false, create the joint
if self.fixed_base and not self.kinematic_only:
# Create fixed joint, and set Body0 to be this object's root prim
# This renders, which causes a material lookup error since we're creating a temp file, so we suppress
# the error explicitly here
with suppress_omni_log(channels=["omni.hydra"]):
create_joint(
prim_path=f"{self._prim_path}/rootJoint",
joint_type="FixedJoint",
body1=f"{self._prim_path}/{self._root_link_name}",
)
# Delete n_fixed_joints cached property if it exists since the number of fixed joints has now changed
# See https://stackoverflow.com/questions/59899732/python-cached-property-how-to-delete and
# https://docs.python.org/3/library/functools.html#functools.cached_property
if "n_fixed_joints" in self.__dict__:
del self.n_fixed_joints
# Set visibility
if "visible" in self._load_config and self._load_config["visible"] is not None:
self.visible = self._load_config["visible"]
# First, remove any articulation root API that already exists at the object-level or root link level prim
if self._prim.HasAPI(lazy.pxr.UsdPhysics.ArticulationRootAPI):
self._prim.RemoveAPI(lazy.pxr.UsdPhysics.ArticulationRootAPI)
self._prim.RemoveAPI(lazy.pxr.PhysxSchema.PhysxArticulationAPI)
if self.root_prim.HasAPI(lazy.pxr.UsdPhysics.ArticulationRootAPI):
self.root_prim.RemoveAPI(lazy.pxr.UsdPhysics.ArticulationRootAPI)
self.root_prim.RemoveAPI(lazy.pxr.PhysxSchema.PhysxArticulationAPI)
# Potentially add articulation root APIs and also set self collisions
root_prim = None if self.articulation_root_path is None else lazy.omni.isaac.core.utils.prims.get_prim_at_path(self.articulation_root_path)
if root_prim is not None:
lazy.pxr.UsdPhysics.ArticulationRootAPI.Apply(root_prim)
lazy.pxr.PhysxSchema.PhysxArticulationAPI.Apply(root_prim)
self.self_collisions = self._load_config["self_collisions"]
# Set position / velocity solver iterations if we're not cloth
if self._prim_type != PrimType.CLOTH:
self.solver_position_iteration_count = m.DEFAULT_SOLVER_POSITION_ITERATIONS
self.solver_velocity_iteration_count = m.DEFAULT_SOLVER_VELOCITY_ITERATIONS
# Add semantics
lazy.omni.isaac.core.utils.semantics.add_update_semantics(
prim=self._prim,
semantic_label=self.category,
type_label="class",
)
def _initialize(self):
# Run super first
super()._initialize()
# Iterate over all links and grab their relevant material info for highlighting (i.e.: emissivity info)
self._highlighted = False
self._highlight_cached_values = dict()
for material in self.materials:
self._highlight_cached_values[material] = {
"enable_emission": material.enable_emission,
"emissive_color": material.emissive_color,
"emissive_intensity": material.emissive_intensity,
}
@property
def articulation_root_path(self):
has_articulated_joints, has_fixed_joints = self.n_joints > 0, self.n_fixed_joints > 0
if self.kinematic_only or ((not has_articulated_joints) and (not has_fixed_joints)):
# Kinematic only, or non-jointed single body objects
return None
elif not self.fixed_base and has_articulated_joints:
# This is all remaining non-fixed objects
# This is a bit hacky because omniverse is buggy
# Articulation roots mess up the joint order if it's on a non-fixed base robot, e.g. a
# mobile manipulator. So we have to move it to the actual root link of the robot instead.
# See https://forums.developer.nvidia.com/t/inconsistent-values-from-isaacsims-dc-get-joint-parent-child-body/201452/2
# for more info
return f"{self._prim_path}/{self.root_link_name}"
else:
# Fixed objects that are not kinematic only, or non-fixed objects that have no articulated joints but do
# have fixed joints
return self._prim_path
@property
def mass(self):
"""
Returns:
float: Cumulative mass of this potentially articulated object.
"""
mass = 0.0
for link in self._links.values():
mass += link.mass
return mass
@mass.setter
def mass(self, mass):
raise NotImplementedError("Cannot set mass directly for an object!")
@property
def volume(self):
"""
Returns:
float: Cumulative volume of this potentially articulated object.
"""
return sum(link.volume for link in self._links.values())
@volume.setter
def volume(self, volume):
raise NotImplementedError("Cannot set volume directly for an object!")
@property
def scale(self):
# Just super call
return super().scale
@scale.setter
def scale(self, scale):
# call super first
# A bit esoteric -- see https://gist.github.com/Susensio/979259559e2bebcd0273f1a95d7c1e79
super(BaseObject, type(self)).scale.fset(self, scale)
# Update init info for scale
self._init_info["args"]["scale"] = scale
@property
def link_prim_paths(self):
return [link.prim_path for link in self._links.values()]
@property
def highlighted(self):
"""
Returns:
bool: Whether the object is highlighted or not
"""
return self._highlighted
@highlighted.setter
def highlighted(self, enabled):
"""
Iterates over all owned links, and modifies their materials with emissive colors so that the object is
highlighted (magenta by default)
Args:
enabled (bool): whether the object should be highlighted or not
"""
# Return early if the set value matches the internal value
if enabled == self._highlighted:
return
for material in self.materials:
if enabled:
# Store values before swapping
self._highlight_cached_values[material] = {
"enable_emission": material.enable_emission,
"emissive_color": material.emissive_color,
"emissive_intensity": material.emissive_intensity,
}
material.enable_emission = True if enabled else self._highlight_cached_values[material]["enable_emission"]
material.emissive_color = m.HIGHLIGHT_RGB if enabled else self._highlight_cached_values[material]["emissive_color"]
material.emissive_intensity = m.HIGHLIGHT_INTENSITY if enabled else self._highlight_cached_values[material]["emissive_intensity"]
# Update internal value
self._highlighted = enabled
def get_base_aligned_bbox(self, link_name=None, visual=False, xy_aligned=False):
"""
Get a bounding box for this object that's axis-aligned in the object's base frame.
Args:
link_name (None or str): If specified, only get the bbox for the given link
visual (bool): Whether to aggregate the bounding boxes from the visual meshes. Otherwise, will use
collision meshes
xy_aligned (bool): Whether to align the bounding box to the global XY-plane
Returns:
4-tuple:
- 3-array: (x,y,z) bbox center position in world frame
- 3-array: (x,y,z,w) bbox quaternion orientation in world frame
- 3-array: (x,y,z) bbox extent in desired frame
- 3-array: (x,y,z) bbox center in desired frame
"""
# Get the base position transform.
pos, orn = self.get_position_orientation()
base_frame_to_world = T.pose2mat((pos, orn))
# Prepare the desired frame.
if xy_aligned:
# If the user requested an XY-plane aligned bbox, convert everything to that frame.
# The desired frame is same as the base_com frame with its X/Y rotations removed.
translate = trimesh.transformations.translation_from_matrix(base_frame_to_world)
# To find the rotation that this transform does around the Z axis, we rotate the [1, 0, 0] vector by it
# and then take the arctangent of its projection onto the XY plane.
rotated_X_axis = base_frame_to_world[:3, 0]
rotation_around_Z_axis = np.arctan2(rotated_X_axis[1], rotated_X_axis[0])
xy_aligned_base_com_to_world = trimesh.transformations.compose_matrix(
translate=translate, angles=[0, 0, rotation_around_Z_axis]
)
# Finally update our desired frame.
desired_frame_to_world = xy_aligned_base_com_to_world
else:
# Default desired frame is base CoM frame.
desired_frame_to_world = base_frame_to_world
# Compute the world-to-base frame transform.
world_to_desired_frame = np.linalg.inv(desired_frame_to_world)
# Grab all the world-frame points corresponding to the object's visual or collision hulls.
points_in_world = []
if self.prim_type == PrimType.CLOTH:
particle_contact_offset = self.root_link.cloth_system.particle_contact_offset
particle_positions = self.root_link.compute_particle_positions()
particles_in_world_frame = np.concatenate([
particle_positions - particle_contact_offset,
particle_positions + particle_contact_offset
], axis=0)
points_in_world.extend(particles_in_world_frame)
else:
links = {link_name: self._links[link_name]} if link_name is not None else self._links
for link_name, link in links.items():
if visual:
hull_points = link.visual_boundary_points_world
else:
hull_points = link.collision_boundary_points_world
if hull_points is not None:
points_in_world.extend(hull_points)
# Move the points to the desired frame
points = trimesh.transformations.transform_points(points_in_world, world_to_desired_frame)
# All points are now in the desired frame: either the base CoM or the xy-plane-aligned base CoM.
# Now fit a bounding box to all the points by taking the minimum/maximum in the desired frame.
aabb_min_in_desired_frame = np.amin(points, axis=0)
aabb_max_in_desired_frame = np.amax(points, axis=0)
bbox_center_in_desired_frame = (aabb_min_in_desired_frame + aabb_max_in_desired_frame) / 2
bbox_extent_in_desired_frame = aabb_max_in_desired_frame - aabb_min_in_desired_frame
# Transform the center to the world frame.
bbox_center_in_world = trimesh.transformations.transform_points(
[bbox_center_in_desired_frame], desired_frame_to_world
)[0]
bbox_orn_in_world = Rotation.from_matrix(desired_frame_to_world[:3, :3]).as_quat()
return bbox_center_in_world, bbox_orn_in_world, bbox_extent_in_desired_frame, bbox_center_in_desired_frame
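# Illustrative call (hypothetical object): fit a yaw-only, world-XY-aligned box.
# center, orn, extent, local_center = obj.get_base_aligned_bbox(xy_aligned=True)
# `extent` is the (x, y, z) size of the box measured in the desired frame, while
# `center`/`orn` place that frame's box center back in world coordinates.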
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("BaseObject")
return classes
@classproperty
def _cls_registry(cls):
# Global robot registry
global REGISTERED_OBJECTS
return REGISTERED_OBJECTS
| 19,397 | Python | 45.742169 | 196 | 0.637985 |
StanfordVL/OmniGibson/omnigibson/objects/__init__.py | from omnigibson.objects.object_base import REGISTERED_OBJECTS, BaseObject
from omnigibson.objects.controllable_object import ControllableObject
from omnigibson.objects.dataset_object import DatasetObject
from omnigibson.objects.light_object import LightObject
from omnigibson.objects.primitive_object import PrimitiveObject
from omnigibson.objects.stateful_object import StatefulObject
from omnigibson.objects.usd_object import USDObject
| 439 | Python | 47.888884 | 73 | 0.879271 |
StanfordVL/OmniGibson/omnigibson/objects/primitive_object.py | import numpy as np
from omnigibson.objects.stateful_object import StatefulObject
from omnigibson.utils.python_utils import assert_valid_key
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.utils.constants import PrimType, PRIMITIVE_MESH_TYPES
from omnigibson.utils.usd_utils import create_primitive_mesh
from omnigibson.utils.render_utils import create_pbr_material
from omnigibson.utils.physx_utils import bind_material
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Define valid objects that can be created
VALID_RADIUS_OBJECTS = {"Cone", "Cylinder", "Disk", "Sphere"}
VALID_HEIGHT_OBJECTS = {"Cone", "Cylinder"}
VALID_SIZE_OBJECTS = {"Cube", "Torus"}
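# For instance (illustrative): a "Sphere" primitive accepts @radius but neither
# @height nor @size, a "Cylinder" accepts @radius and @height, and a "Cube"
# accepts only @size.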
class PrimitiveObject(StatefulObject):
"""
PrimitiveObjects are objects defined by a single geom, e.g: sphere, mesh, cube, etc.
"""
def __init__(
self,
name,
primitive_type,
prim_path=None,
category="object",
uuid=None,
scale=None,
visible=True,
fixed_base=False,
visual_only=False,
kinematic_only=None,
self_collisions=False,
prim_type=PrimType.RIGID,
load_config=None,
abilities=None,
include_default_states=True,
rgba=(0.5, 0.5, 0.5, 1.0),
radius=None,
height=None,
size=None,
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
primitive_type (str): type of primitive object to create. Should be one of:
{"Cone", "Cube", "Cylinder", "Disk", "Plane", "Sphere", "Torus"}
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
category (str): Category for the object. Defaults to "object".
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
fixed_base (bool): whether to fix the base of this object or not
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
kinematic_only (None or bool): Whether this object should be kinematic only (and not get affected by any
collisions). If None, then this value will be set to True if @fixed_base is True and some other criteria
are satisfied (see object_base.py post_load function), else False.
self_collisions (bool): Whether to enable self collisions for this object
prim_type (PrimType): Which type of prim the object is, Valid options are: {PrimType.RIGID, PrimType.CLOTH}
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
                the object state instance constructor.
            include_default_states (bool): whether to include the default object states from @get_default_states
            rgba (4-array): (R, G, B, A) values to set for this object
radius (None or float): If specified, sets the radius for this object. This value is scaled by @scale
Note: Should only be specified if the @primitive_type is one of {"Cone", "Cylinder", "Disk", "Sphere"}
height (None or float): If specified, sets the height for this object. This value is scaled by @scale
Note: Should only be specified if the @primitive_type is one of {"Cone", "Cylinder"}
size (None or float): If specified, sets the size for this object. This value is scaled by @scale
Note: Should only be specified if the @primitive_type is one of {"Cube", "Torus"}
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Compose load config and add rgba values
load_config = dict() if load_config is None else load_config
load_config["color"] = np.array(rgba[:3])
load_config["opacity"] = rgba[3]
load_config["radius"] = radius
load_config["height"] = height
load_config["size"] = size
# Initialize other internal variables
self._vis_geom = None
self._col_geom = None
self._extents = np.ones(3) # (x,y,z extents)
# Make sure primitive type is valid
assert_valid_key(key=primitive_type, valid_keys=PRIMITIVE_MESH_TYPES, name="primitive mesh type")
self._primitive_type = primitive_type
super().__init__(
prim_path=prim_path,
name=name,
category=category,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
kinematic_only=kinematic_only,
self_collisions=self_collisions,
prim_type=prim_type,
include_default_states=include_default_states,
load_config=load_config,
abilities=abilities,
**kwargs,
)
def _load(self):
# Define an Xform at the specified path
prim = og.sim.stage.DefinePrim(self._prim_path, "Xform")
# Define a nested mesh corresponding to the root link for this prim
base_link = og.sim.stage.DefinePrim(f"{self._prim_path}/base_link", "Xform")
self._vis_geom = create_primitive_mesh(prim_path=f"{self._prim_path}/base_link/visuals", primitive_type=self._primitive_type)
self._col_geom = create_primitive_mesh(prim_path=f"{self._prim_path}/base_link/collisions", primitive_type=self._primitive_type)
# Add collision API to collision geom
lazy.pxr.UsdPhysics.CollisionAPI.Apply(self._col_geom.GetPrim())
lazy.pxr.UsdPhysics.MeshCollisionAPI.Apply(self._col_geom.GetPrim())
lazy.pxr.PhysxSchema.PhysxCollisionAPI.Apply(self._col_geom.GetPrim())
# Create a material for this object for the base link
og.sim.stage.DefinePrim(f"{self._prim_path}/Looks", "Scope")
mat_path = f"{self._prim_path}/Looks/default"
mat = create_pbr_material(prim_path=mat_path)
bind_material(prim_path=self._vis_geom.GetPrim().GetPrimPath().pathString, material_path=mat_path)
return prim
def _post_load(self):
# Possibly set scalings (only if the scale value is not set)
if self._load_config["scale"] is not None:
log.warning("Custom scale specified for primitive object, so ignoring radius, height, and size arguments!")
else:
if self._load_config["radius"] is not None:
self.radius = self._load_config["radius"]
if self._load_config["height"] is not None:
self.height = self._load_config["height"]
if self._load_config["size"] is not None:
self.size = self._load_config["size"]
        # This step might perform cloth remeshing if self._prim_type == PrimType.CLOTH.
# Therefore, we need to apply size, radius, and height before this to scale the points properly.
super()._post_load()
# Cloth primitive does not have collision meshes
if self._prim_type != PrimType.CLOTH:
# Set the collision approximation appropriately
if self._primitive_type == "Sphere":
col_approximation = "boundingSphere"
elif self._primitive_type == "Cube":
col_approximation = "boundingCube"
else:
col_approximation = "convexHull"
self.root_link.collision_meshes["collisions"].set_collision_approximation(col_approximation)
def _initialize(self):
# Run super first
super()._initialize()
# Set color and opacity
if self._prim_type == PrimType.RIGID:
visual_geom_prim = list(self.root_link.visual_meshes.values())[0]
elif self._prim_type == PrimType.CLOTH:
visual_geom_prim = self.root_link
else:
raise ValueError("Prim type must either be PrimType.RIGID or PrimType.CLOTH for loading a primitive object")
visual_geom_prim.color = self._load_config["color"]
visual_geom_prim.opacity = self._load_config["opacity"]
@property
def radius(self):
"""
Gets this object's radius, if it exists.
Note: Can only be called if the primitive type is one of {"Cone", "Cylinder", "Disk", "Sphere"}
Returns:
float: radius for this object
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_RADIUS_OBJECTS, name="primitive object with radius")
return self._extents[0] / 2.0
@radius.setter
def radius(self, radius):
"""
Sets this object's radius
Note: Can only be called if the primitive type is one of {"Cone", "Cylinder", "Disk", "Sphere"}
Args:
radius (float): radius to set
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_RADIUS_OBJECTS, name="primitive object with radius")
# Update the extents variable
original_extent = np.array(self._extents)
self._extents = np.ones(3) * radius * 2.0 if self._primitive_type == "Sphere" else \
np.array([radius * 2.0, radius * 2.0, self._extents[2]])
attr_pairs = []
for geom in self._vis_geom, self._col_geom:
if geom is not None:
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
vals = np.array(attr.Get()).astype(np.float64)
attr_pairs.append([attr, vals])
geom.GetExtentAttr().Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*(-self._extents / 2.0)), lazy.pxr.Gf.Vec3f(*(self._extents / 2.0))]))
# Calculate how much to scale extents by and then modify the points / normals accordingly
scaling_factor = 2.0 * radius / original_extent[0]
for attr, vals in attr_pairs:
# If this is a sphere, modify all 3 axes
if self._primitive_type == "Sphere":
vals = vals * scaling_factor
# Otherwise, just modify the first two dimensions
else:
vals[:, :2] = vals[:, :2] * scaling_factor
# Set the value
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*v) for v in vals]))
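    # Numeric sketch of the rescaling above (values are illustrative): with current extents
    # (0.5, 0.5, 1.0) and a new radius of 0.5, scaling_factor = 2.0 * 0.5 / 0.5 = 2.0, so a
    # non-sphere primitive scales its points' (x, y) by 2 while leaving z untouched:
    #
    #   import numpy as np
    #   vals = np.array([[0.25, 0.0, 0.5]])
    #   vals[:, :2] = vals[:, :2] * 2.0  # -> [[0.5, 0.0, 0.5]]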
@property
def height(self):
"""
Gets this object's height, if it exists.
Note: Can only be called if the primitive type is one of {"Cone", "Cylinder"}
Returns:
float: height for this object
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_HEIGHT_OBJECTS, name="primitive object with height")
return self._extents[2]
@height.setter
def height(self, height):
"""
Sets this object's height
Note: Can only be called if the primitive type is one of {"Cone", "Cylinder"}
Args:
height (float): height to set
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_HEIGHT_OBJECTS, name="primitive object with height")
# Update the extents variable
original_extent = np.array(self._extents)
self._extents[2] = height
# Calculate the correct scaling factor and scale the points and normals appropriately
scaling_factor = height / original_extent[2]
for geom in self._vis_geom, self._col_geom:
if geom is not None:
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
vals = np.array(attr.Get()).astype(np.float64)
# Scale the z axis by the scaling factor
vals[:, 2] = vals[:, 2] * scaling_factor
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*v) for v in vals]))
geom.GetExtentAttr().Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*(-self._extents / 2.0)), lazy.pxr.Gf.Vec3f(*(self._extents / 2.0))]))
@property
def size(self):
"""
Gets this object's size, if it exists.
Note: Can only be called if the primitive type is one of {"Cube", "Torus"}
Returns:
float: size for this object
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_SIZE_OBJECTS, name="primitive object with size")
return self._extents[0]
@size.setter
def size(self, size):
"""
Sets this object's size
Note: Can only be called if the primitive type is one of {"Cube", "Torus"}
Args:
size (float): size to set
"""
assert_valid_key(key=self._primitive_type, valid_keys=VALID_SIZE_OBJECTS, name="primitive object with size")
# Update the extents variable
original_extent = np.array(self._extents)
self._extents = np.ones(3) * size
# Calculate the correct scaling factor and scale the points and normals appropriately
scaling_factor = size / original_extent[0]
for geom in self._vis_geom, self._col_geom:
if geom is not None:
for attr in (geom.GetPointsAttr(), geom.GetNormalsAttr()):
# Scale all three axes by the scaling factor
vals = np.array(attr.Get()).astype(np.float64) * scaling_factor
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*v) for v in vals]))
geom.GetExtentAttr().Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*(-self._extents / 2.0)), lazy.pxr.Gf.Vec3f(*(self._extents / 2.0))]))
def _create_prim_with_same_kwargs(self, prim_path, name, load_config):
# Add additional kwargs (bounding_box is already captured in load_config)
return self.__class__(
prim_path=prim_path,
primitive_type=self._primitive_type,
name=name,
category=self.category,
scale=self.scale,
visible=self.visible,
fixed_base=self.fixed_base,
prim_type=self._prim_type,
load_config=load_config,
abilities=self._abilities,
visual_only=self._visual_only,
)
def _dump_state(self):
state = super()._dump_state()
# state["extents"] = self._extents
state["radius"] = self.radius if self._primitive_type in VALID_RADIUS_OBJECTS else -1
state["height"] = self.height if self._primitive_type in VALID_HEIGHT_OBJECTS else -1
state["size"] = self.size if self._primitive_type in VALID_SIZE_OBJECTS else -1
return state
def _load_state(self, state):
super()._load_state(state=state)
# self._extents = np.array(state["extents"])
if self._primitive_type in VALID_RADIUS_OBJECTS:
self.radius = state["radius"]
if self._primitive_type in VALID_HEIGHT_OBJECTS:
self.height = state["height"]
if self._primitive_type in VALID_SIZE_OBJECTS:
self.size = state["size"]
def _deserialize(self, state):
state_dict, idx = super()._deserialize(state=state)
# state_dict["extents"] = state[idx: idx + 3]
state_dict["radius"] = state[idx]
state_dict["height"] = state[idx + 1]
state_dict["size"] = state[idx + 2]
return state_dict, idx + 3
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
return np.concatenate([
state_flat,
np.array([state["radius"], state["height"], state["size"]]),
]).astype(float)
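# Usage sketch (hedged: assumes a launched OmniGibson simulator, and that og.sim.import_object /
# set_position behave as in OmniGibson's own examples):
#
#   import omnigibson as og
#   from omnigibson.objects import PrimitiveObject
#
#   sphere = PrimitiveObject(name="demo_sphere", primitive_type="Sphere", radius=0.1, rgba=(1.0, 0.0, 0.0, 1.0))
#   og.sim.import_object(sphere)
#   sphere.set_position([0.0, 0.0, 1.0])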
StanfordVL/OmniGibson/omnigibson/objects/dataset_object.py
import math
import os
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm
from omnigibson.objects.usd_object import USDObject
from omnigibson.utils.constants import AVERAGE_CATEGORY_SPECS, DEFAULT_JOINT_FRICTION, SPECIAL_JOINT_FRICTIONS, JointType
import omnigibson.utils.transform_utils as T
from omnigibson.utils.asset_utils import get_all_object_category_models
from omnigibson.utils.constants import PrimType
from omnigibson.macros import gm, create_module_macros
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# A lower bound is needed in order to consistently trigger contacts
m.MIN_OBJ_MASS = 0.4
class DatasetObject(USDObject):
"""
DatasetObjects are instantiated from a USD file. It is an object that is assumed to come from an iG-supported
dataset. These objects should contain additional metadata, including aggregate statistics across the
object's category, e.g., avg dims, bounding boxes, masses, etc.
"""
def __init__(
self,
name,
prim_path=None,
category="object",
model=None,
uuid=None,
scale=None,
visible=True,
fixed_base=False,
visual_only=False,
kinematic_only=None,
self_collisions=False,
prim_type=PrimType.RIGID,
load_config=None,
abilities=None,
include_default_states=True,
bounding_box=None,
in_rooms=None,
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
category (str): Category for the object. Defaults to "object".
model (None or str): If specified, this is used in conjunction with
@category to infer the usd filepath to load for this object, which evaluates to the following:
{og_dataset_path}/objects/{category}/{model}/usd/{model}.usd
Otherwise, will randomly sample a model given @category
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
fixed_base (bool): whether to fix the base of this object or not
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
kinematic_only (None or bool): Whether this object should be kinematic only (and not get affected by any
collisions). If None, then this value will be set to True if @fixed_base is True and some other criteria
are satisfied (see object_base.py post_load function), else False.
self_collisions (bool): Whether to enable self collisions for this object
prim_type (PrimType): Which type of prim the object is, Valid options are: {PrimType.RIGID, PrimType.CLOTH}
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
include_default_states (bool): whether to include the default object states from @get_default_states
bounding_box (None or 3-array): If specified, will scale this object such that it fits in the desired
(x,y,z) object-aligned bounding box. Note that EITHER @bounding_box or @scale may be specified
-- not both!
in_rooms (None or str or list): If specified, sets the room(s) that this object should belong to. Either
a list of room type(s) or a single room type
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Store variables
if isinstance(in_rooms, str):
assert "," not in in_rooms
self._in_rooms = [in_rooms] if isinstance(in_rooms, str) else in_rooms
# Make sure only one of bounding_box and scale are specified
if bounding_box is not None and scale is not None:
raise Exception("You cannot define both scale and bounding box size for an DatasetObject")
# Add info to load config
load_config = dict() if load_config is None else load_config
load_config["bounding_box"] = bounding_box
# Infer the correct usd path to use
if model is None:
available_models = get_all_object_category_models(category=category)
assert len(available_models) > 0, f"No available models found for category {category}!"
model = np.random.choice(available_models)
# If the model is in BAD_CLOTH_MODELS, raise an error for now -- this is a model that's unstable and needs to be fixed
# TODO: Remove this once the asset is fixed!
from omnigibson.utils.bddl_utils import BAD_CLOTH_MODELS
if prim_type == PrimType.CLOTH and model in BAD_CLOTH_MODELS.get(category, dict()):
raise ValueError(f"Cannot create cloth object category: {category}, model: {model} because it is "
f"currently broken ): This will be fixed in the next release!")
self._model = model
usd_path = self.get_usd_path(category=category, model=model)
# Run super init
super().__init__(
prim_path=prim_path,
usd_path=usd_path,
encrypted=True,
name=name,
category=category,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
kinematic_only=kinematic_only,
self_collisions=self_collisions,
prim_type=prim_type,
include_default_states=include_default_states,
load_config=load_config,
abilities=abilities,
**kwargs,
)
@classmethod
def get_usd_path(cls, category, model):
"""
Grabs the USD path for a DatasetObject corresponding to @category and @model.
NOTE: This is the unencrypted path, NOT the encrypted path
Args:
category (str): Category for the object
model (str): Specific model ID of the object
Returns:
str: Absolute filepath to the corresponding USD asset file
"""
return os.path.join(gm.DATASET_PATH, "objects", category, model, "usd", f"{model}.usd")
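    # Illustrative example (the category / model pair is made up; gm.DATASET_PATH depends on setup):
    #
    #   DatasetObject.get_usd_path(category="apple", model="agveuv")
    #   # -> f"{gm.DATASET_PATH}/objects/apple/agveuv/usd/agveuv.usd"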
def sample_orientation(self):
"""
Samples an orientation in quaternion (x,y,z,w) form
Returns:
4-array: (x,y,z,w) sampled quaternion orientation for this object, based on self.orientations
"""
if self.orientations is None:
raise ValueError("No orientation probabilities set")
if len(self.orientations) == 0:
# Set default value
chosen_orientation = np.array([0, 0, 0, 1.0])
else:
probabilities = [o["prob"] for o in self.orientations.values()]
probabilities = np.array(probabilities) / np.sum(probabilities)
chosen_orientation = np.array(np.random.choice(list(self.orientations.values()), p=probabilities)["rotation"])
# Randomize yaw from -pi to pi
rot_num = np.random.uniform(-1, 1)
rot_matrix = np.array(
[
[math.cos(math.pi * rot_num), -math.sin(math.pi * rot_num), 0.0],
[math.sin(math.pi * rot_num), math.cos(math.pi * rot_num), 0.0],
[0.0, 0.0, 1.0],
]
)
rotated_quat = T.mat2quat(rot_matrix @ T.quat2mat(chosen_orientation))
return rotated_quat
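    # Math sketch of the randomization above: with theta = pi * rot_num sampled in [-pi, pi),
    # rot_matrix is the standard rotation about z, so the returned orientation is
    #
    #   q_out = mat2quat(R_z(theta) @ quat2mat(q_chosen))
    #
    # e.g. rot_num = 0.5 gives theta = pi / 2, i.e. a 90-degree yaw on top of the chosen orientation.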
def _initialize(self):
# Run super method first
super()._initialize()
# Apply any forced light intensity updates.
if gm.FORCE_LIGHT_INTENSITY is not None:
def recursive_light_update(child_prim):
if "Light" in child_prim.GetPrimTypeInfo().GetTypeName():
child_prim.GetAttribute("inputs:intensity").Set(gm.FORCE_LIGHT_INTENSITY)
for child_child_prim in child_prim.GetChildren():
recursive_light_update(child_child_prim)
recursive_light_update(self._prim)
# Apply any forced roughness updates
for material in self.materials:
if ("reflection_roughness_texture_influence" in material.shader_input_names and
"reflection_roughness_constant" in material.shader_input_names):
material.reflection_roughness_texture_influence = 0.0
material.reflection_roughness_constant = gm.FORCE_ROUGHNESS
# Set the joint frictions based on category
friction = SPECIAL_JOINT_FRICTIONS.get(self.category, DEFAULT_JOINT_FRICTION)
for joint in self._joints.values():
if joint.joint_type != JointType.JOINT_FIXED:
joint.friction = friction
def _post_load(self):
# If manual bounding box is specified, scale based on ratio between that and the native bbox
if self._load_config["bounding_box"] is not None:
scale = np.ones(3)
valid_idxes = self.native_bbox > 1e-4
scale[valid_idxes] = np.array(self._load_config["bounding_box"])[valid_idxes] / self.native_bbox[valid_idxes]
else:
scale = np.ones(3) if self._load_config["scale"] is None else np.array(self._load_config["scale"])
# Assert that the scale does not have too small dimensions
assert np.all(scale > 1e-4), f"Scale of {self.name} is too small: {scale}"
# Set this scale in the load config -- it will automatically scale the object during self.initialize()
self._load_config["scale"] = scale
# Run super last
super()._post_load()
# The loaded USD is from an already-deleted temporary file, so the asset paths for texture maps are wrong.
# We explicitly provide the root_path to update all the asset paths: the asset paths are relative to the
# original USD folder, i.e. <category>/<model>/usd.
root_path = os.path.dirname(self._usd_path)
for material in self.materials:
material.shader_update_asset_paths_with_root_path(root_path)
# Assign realistic density and mass based on average object category spec, or fall back to a default value
if self.avg_obj_dims is not None and self.avg_obj_dims["density"] is not None:
density = self.avg_obj_dims["density"]
else:
density = 1000.0
if self._prim_type == PrimType.RIGID:
for link in self._links.values():
# If not a meta (virtual) link, set the density based on avg_obj_dims and a zero mass (ignored)
if link.has_collision_meshes:
link.mass = 0.0
link.density = density
elif self._prim_type == PrimType.CLOTH:
self.root_link.mass = density * self.root_link.volume
def _update_texture_change(self, object_state):
"""
Update the texture based on the given object_state. E.g. if object_state is Frozen, update the diffuse color
to match the frozen state. If object_state is None, update the diffuse color to the default value. It attempts
to load the cached texture map named DIFFUSE/albedo_[STATE_NAME].png. If the cached texture map does not exist,
it modifies the current albedo map by adding and scaling the values. See @self._update_albedo_value for details.
Args:
object_state (BooleanStateMixin or None): the object state that the diffuse color should match to
"""
# TODO: uncomment these once our dataset has the object state-conditioned texture maps
# DEFAULT_ALBEDO_MAP_SUFFIX = frozenset({"DIFFUSE", "COMBINED", "albedo"})
# state_name = object_state.__class__.__name__ if object_state is not None else None
for material in self.materials:
# texture_path = material.diffuse_texture
# assert texture_path is not None, f"DatasetObject [{self.prim_path}] has invalid diffuse texture map."
#
# # Get updated texture file path for state.
# texture_path_split = texture_path.split("/")
# filedir, filename = "/".join(texture_path_split[:-1]), texture_path_split[-1]
# assert filename[-4:] == ".png", f"Texture file {filename} does not end with .png"
#
# filename_split = filename[:-4].split("_")
# # Check all three file names for backward compatibility.
# if len(filename_split) > 0 and filename_split[-1] not in DEFAULT_ALBEDO_MAP_SUFFIX:
# filename_split.pop()
# target_texture_path = f"{filedir}/{'_'.join(filename_split)}"
# target_texture_path += f"_{state_name}.png" if state_name is not None else ".png"
#
# if os.path.exists(target_texture_path):
# # Since we are loading a pre-cached texture map, we need to reset the albedo value to the default
# self._update_albedo_value(None, material)
# if material.diffuse_texture != target_texture_path:
# material.diffuse_texture = target_texture_path
# else:
# print(f"Warning: DatasetObject [{self.prim_path}] does not have texture map: "
# f"[{target_texture_path}]. Falling back to directly updating albedo value.")
self._update_albedo_value(object_state, material)
def set_bbox_center_position_orientation(self, position=None, orientation=None):
"""
Sets the center of the object's bounding box with respect to the world's frame.
Args:
position (None or 3-array): The desired global (x,y,z) position. None means it will not be changed
orientation (None or 4-array): The desired global (x,y,z,w) quaternion orientation.
None means it will not be changed
"""
if orientation is None:
orientation = self.get_orientation()
if position is not None:
rotated_offset = T.pose_transform([0, 0, 0], orientation,
self.scaled_bbox_center_in_base_frame, [0, 0, 0, 1])[0]
position = position + rotated_offset
self.set_position_orientation(position, orientation)
@property
def model(self):
"""
Returns:
str: Unique model ID for this object
"""
return self._model
@property
def in_rooms(self):
"""
Returns:
None or list of str: If specified, room(s) that this object should belong to
"""
return self._in_rooms
@in_rooms.setter
def in_rooms(self, rooms):
"""
Sets which room(s) this object should belong to. If no rooms, then should set to None
Args:
rooms (None or list of str): If specified, the room(s) this object should belong to
"""
# Store the value to the internal variable and also update the init kwargs accordingly
self._init_info["args"]["in_rooms"] = rooms
self._in_rooms = rooms
@property
def native_bbox(self):
"""
Get this object's native bounding box
Returns:
3-array: (x,y,z) bounding box
"""
assert "ig:nativeBB" in self.property_names, \
f"This dataset object '{self.name}' is expected to have native_bbox specified, but found none!"
return np.array(self.get_attribute(attr="ig:nativeBB"))
@property
def base_link_offset(self):
"""
Get this object's native base link offset
Returns:
3-array: (x,y,z) base link offset if it exists
"""
return np.array(self.get_attribute(attr="ig:offsetBaseLink"))
@property
def metadata(self):
"""
Gets this object's metadata, if it exists
Returns:
None or dict: Nested dictionary of object's metadata if it exists, else None
"""
return self.get_custom_data().get("metadata", None)
@property
def orientations(self):
"""
Returns:
None or dict: Possible orientation information for this object, if it exists. Otherwise, returns None
"""
metadata = self.metadata
return None if metadata is None else metadata.get("orientations", None)
@property
def scale(self):
# Just super call
return super().scale
@scale.setter
def scale(self, scale):
# call super first
# A bit esoteric -- see https://gist.github.com/Susensio/979259559e2bebcd0273f1a95d7c1e79
super(DatasetObject, type(self)).scale.fset(self, scale)
# Remove bounding_box from scale if it's in our args
if "bounding_box" in self._init_info["args"]:
self._init_info["args"].pop("bounding_box")
@property
def scaled_bbox_center_in_base_frame(self):
"""
        Computes where the base_link origin is with respect to the bounding box center. This allows us to place the
        model correctly, since the joint transformations given in the scene USD are expressed with respect to the
        bounding box center. We need to scale this offset as well.
        Returns:
            3-array: (x,y,z) location of the bounding box center, with respect to the base link's coordinate frame
"""
return -self.scale * self.base_link_offset
@property
def scales_in_link_frame(self):
"""
Returns:
dict: Keyword-mapped relative scales for each link of this object
"""
scales = {self.root_link.body_name: self.scale}
# We iterate through all links in this object, and check for any joint prims that exist
# We traverse manually this way instead of accessing the self._joints dictionary, because
# the dictionary only includes articulated joints and not fixed joints!
for link in self._links.values():
for prim in link.prim.GetChildren():
if "joint" in prim.GetTypeName().lower():
# Grab relevant joint information
parent_name = prim.GetProperty("physics:body0").GetTargets()[0].pathString.split("/")[-1]
child_name = prim.GetProperty("physics:body1").GetTargets()[0].pathString.split("/")[-1]
if parent_name in scales and child_name not in scales:
scale_in_parent_lf = scales[parent_name]
# The location of the joint frame is scaled using the scale in the parent frame
quat0 = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(prim.GetAttribute("physics:localRot0").Get())[[1, 2, 3, 0]]
quat1 = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(prim.GetAttribute("physics:localRot1").Get())[[1, 2, 3, 0]]
# Invert the child link relationship, and multiply the two rotations together to get the final rotation
local_ori = T.quat_multiply(quaternion1=T.quat_inverse(quat1), quaternion0=quat0)
jnt_frame_rot = T.quat2mat(local_ori)
scale_in_child_lf = np.absolute(jnt_frame_rot.T @ np.array(scale_in_parent_lf))
scales[child_name] = scale_in_child_lf
return scales
@property
def avg_obj_dims(self):
"""
Get the average object dimensions for this object, based on its category
Returns:
None or dict: Average object information based on its category
"""
return AVERAGE_CATEGORY_SPECS.get(self.category, None)
def _create_prim_with_same_kwargs(self, prim_path, name, load_config):
# Add additional kwargs (bounding_box is already captured in load_config)
return self.__class__(
prim_path=prim_path,
name=name,
category=self.category,
scale=self.scale,
visible=self.visible,
fixed_base=self.fixed_base,
visual_only=self._visual_only,
prim_type=self._prim_type,
load_config=load_config,
abilities=self._abilities,
in_rooms=self.in_rooms,
)
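# Usage sketch (hedged: assumes the OmniGibson dataset is installed and a simulator is running;
# the category below is illustrative, and the model is sampled randomly when omitted):
#
#   import omnigibson as og
#   from omnigibson.objects import DatasetObject
#
#   apple = DatasetObject(name="apple_0", category="apple")
#   og.sim.import_object(apple)
#   apple.set_bbox_center_position_orientation(position=[0.0, 0.0, 0.05])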
StanfordVL/OmniGibson/omnigibson/objects/controllable_object.py
from abc import abstractmethod
from copy import deepcopy
import numpy as np
import gym
import omnigibson as og
from omnigibson.objects.object_base import BaseObject
from omnigibson.controllers import create_controller
from omnigibson.controllers.controller_base import ControlType
from omnigibson.utils.python_utils import assert_valid_key, merge_nested_dicts, CachedFunctions
from omnigibson.utils.constants import PrimType
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
class ControllableObject(BaseObject):
"""
Simple class that extends object functionality for controlling joints -- this assumes that at least some joints
are motorized (i.e.: non-zero low-level simulator joint motor gains) and intended to be controlled,
e.g.: a conveyor belt or a robot agent
"""
def __init__(
self,
name,
prim_path=None,
category="object",
uuid=None,
scale=None,
visible=True,
fixed_base=False,
visual_only=False,
self_collisions=False,
prim_type=PrimType.RIGID,
load_config=None,
control_freq=None,
controller_config=None,
action_type="continuous",
action_normalize=True,
reset_joint_pos=None,
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
category (str): Category for the object. Defaults to "object".
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
fixed_base (bool): whether to fix the base of this object or not
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
self_collisions (bool): Whether to enable self collisions for this object
prim_type (PrimType): Which type of prim the object is, Valid options are: {PrimType.RIGID, PrimType.CLOTH}
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
simulator.import_object will automatically set the control frequency to be at the render frequency by default.
controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
configurations for this object. This will override any default values specified by this class.
action_type (str): one of {discrete, continuous} - what type of action space to use
action_normalize (bool): whether to normalize inputted actions. This will override any default values
specified by this class.
reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
be set to during a reset. If None (default), self._default_joint_pos will be used instead.
Note that _default_joint_pos are hardcoded & precomputed, and thus should not be modified by the user.
                Set this value instead if you want to initialize the object with a different reset joint position.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Store inputs
self._control_freq = control_freq
self._controller_config = controller_config
self._reset_joint_pos = None if reset_joint_pos is None else np.array(reset_joint_pos)
# Make sure action type is valid, and also save
assert_valid_key(key=action_type, valid_keys={"discrete", "continuous"}, name="action type")
self._action_type = action_type
self._action_normalize = action_normalize
# Store internal placeholders that will be filled in later
self._dof_to_joints = None # dict that will map DOF indices to JointPrims
self._last_action = None
self._controllers = None
self.dof_names_ordered = None
self._control_enabled = True
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
category=category,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
self_collisions=self_collisions,
prim_type=prim_type,
load_config=load_config,
**kwargs,
)
def _initialize(self):
# Run super first
super()._initialize()
# Fill in the DOF to joint mapping
self._dof_to_joints = dict()
idx = 0
for joint in self._joints.values():
for _ in range(joint.n_dof):
self._dof_to_joints[idx] = joint
idx += 1
# Update the reset joint pos
if self._reset_joint_pos is None:
self._reset_joint_pos = self._default_joint_pos
# Load controllers
self._load_controllers()
# Setup action space
self._action_space = self._create_discrete_action_space() if self._action_type == "discrete" \
else self._create_continuous_action_space()
# Reset the object and keep all joints still after loading
self.reset()
self.keep_still()
# If we haven't already created a physics callback, do so now so control gets updated every sim step
callback_name = f"{self.name}_controller_callback"
if not og.sim.physics_callback_exists(callback_name=callback_name):
og.sim.add_physics_callback(
callback_name=callback_name,
callback_fn=lambda x: self.step(),
)
def load(self):
# Run super first
prim = super().load()
# Set the control frequency if one was not provided.
expected_control_freq = 1.0 / og.sim.get_rendering_dt()
if self._control_freq is None:
log.info(
"Control frequency is None - being set to default of render_frequency: %.4f", expected_control_freq
)
self._control_freq = expected_control_freq
else:
assert np.isclose(
expected_control_freq, self._control_freq
), "Stored control frequency does not match environment's render timestep."
return prim
def _load_controllers(self):
"""
Loads controller(s) to map inputted actions into executable (pos, vel, and / or effort) signals on this object.
Stores created controllers as dictionary mapping controller names to specific controller
instances used by this object.
"""
# Generate the controller config
self._controller_config = self._generate_controller_config(custom_config=self._controller_config)
# Store dof idx mapping to dof name
self.dof_names_ordered = list(self._joints.keys())
# Initialize controllers to create
self._controllers = dict()
# Loop over all controllers, in the order corresponding to @action dim
for name in self.controller_order:
assert_valid_key(key=name, valid_keys=self._controller_config, name="controller name")
cfg = self._controller_config[name]
# If we're using normalized action space, override the inputs for all controllers
if self._action_normalize:
cfg["command_input_limits"] = "default" # default is normalized (-1, 1)
# Create the controller
controller = create_controller(**cfg)
# Verify the controller's DOFs can all be driven
for idx in controller.dof_idx:
assert self._joints[self.dof_names_ordered[idx]].driven, "Controllers should only control driveable joints!"
self._controllers[name] = controller
self.update_controller_mode()
def update_controller_mode(self):
"""
Helper function to force the joints to use the internal specified control mode and gains
"""
# Update the control modes of each joint based on the outputted control from the controllers
for name in self._controllers:
for dof in self._controllers[name].dof_idx:
control_type = self._controllers[name].control_type
self._joints[self.dof_names_ordered[dof]].set_control_type(
control_type=control_type,
kp=self.default_kp if control_type == ControlType.POSITION else None,
kd=self.default_kd if control_type == ControlType.VELOCITY else None,
)
def _generate_controller_config(self, custom_config=None):
"""
Generates a fully-populated controller config, overriding any default values with the corresponding values
specified in @custom_config
Args:
custom_config (None or Dict[str, ...]): nested dictionary mapping controller name(s) to specific custom
controller configurations for this object. This will override any default values specified by this class
Returns:
dict: Fully-populated nested dictionary mapping controller name(s) to specific controller configurations for
this object
"""
controller_config = {} if custom_config is None else deepcopy(custom_config)
# Update the configs
for group in self.controller_order:
group_controller_name = (
controller_config[group]["name"]
if group in controller_config and "name" in controller_config[group]
else self._default_controllers[group]
)
controller_config[group] = merge_nested_dicts(
base_dict=self._default_controller_config[group][group_controller_name],
extra_dict=controller_config.get(group, {}),
)
return controller_config
def reload_controllers(self, controller_config=None):
"""
Reloads controllers based on the specified new @controller_config
Args:
controller_config (None or Dict[str, ...]): nested dictionary mapping controller name(s) to specific
controller configurations for this object. This will override any default values specified by this class.
"""
self._controller_config = {} if controller_config is None else controller_config
# (Re-)load controllers
self._load_controllers()
# (Re-)create the action space
self._action_space = self._create_discrete_action_space() if self._action_type == "discrete" \
else self._create_continuous_action_space()
def reset(self):
# Call super first
super().reset()
# Override the reset joint state based on reset values
self.set_joint_positions(positions=self._reset_joint_pos, drive=False)
@abstractmethod
def _create_discrete_action_space(self):
"""
Create a discrete action space for this object. Should be implemented by the subclass (if a subclass does not
support this type of action space, it should raise an error).
Returns:
gym.space: Object-specific discrete action space
"""
raise NotImplementedError
def _create_continuous_action_space(self):
"""
Create a continuous action space for this object. By default, this loops over all controllers and
appends their respective input command limits to set the action space.
Any custom behavior should be implemented by the subclass (e.g.: if a subclass does not
support this type of action space, it should raise an error).
Returns:
gym.space.Box: Object-specific continuous action space
"""
# Action space is ordered according to the order in _default_controller_config control
low, high = [], []
for controller in self._controllers.values():
limits = controller.command_input_limits
low.append(np.array([-np.inf] * controller.command_dim) if limits is None else limits[0])
high.append(np.array([np.inf] * controller.command_dim) if limits is None else limits[1])
return gym.spaces.Box(shape=(self.action_dim,), low=np.concatenate(low), high=np.concatenate(high), dtype=float)
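    # Numeric sketch (dims are illustrative): with two controllers whose command_input_limits are
    # (-1, 1) and whose command dims are 2 and 7, this returns Box(low=-1, high=1, shape=(9,)),
    # and apply_action() will slice a 9-vector as action[0:2] and action[2:9].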
def apply_action(self, action):
"""
Converts inputted actions into low-level control signals
NOTE: This does NOT deploy control on the object. Use self.step() instead.
Args:
action (n-array): n-DOF length array of actions to apply to this object's internal controllers
"""
# Store last action as the current action being applied
self._last_action = action
# If we're using discrete action space, we grab the specific action and use that to convert to control
if self._action_type == "discrete":
action = np.array(self.discrete_action_list[action])
# Check if the input action's length matches the action dimension
assert len(action) == self.action_dim, "Action must be dimension {}, got dim {} instead.".format(
self.action_dim, len(action)
)
# First, loop over all controllers, and update the desired command
idx = 0
for name, controller in self._controllers.items():
# Set command, then take a controller step
controller.update_goal(command=action[idx : idx + controller.command_dim], control_dict=self.get_control_dict())
# Update idx
idx += controller.command_dim
@property
def control_enabled(self):
return self._control_enabled
@control_enabled.setter
def control_enabled(self, value):
self._control_enabled = value
def step(self):
"""
Takes a controller step across all controllers and deploys the computed control signals onto the object.
"""
# Skip if we don't have control enabled
if not self.control_enabled:
return
# Skip this step if our articulation view is not valid
if self._articulation_view_direct is None or not self._articulation_view_direct.initialized:
return
# First, loop over all controllers, and calculate the computed control
control = dict()
idx = 0
# Compose control_dict
control_dict = self.get_control_dict()
for name, controller in self._controllers.items():
control[name] = {
"value": controller.step(control_dict=control_dict),
"type": controller.control_type,
}
# Update idx
idx += controller.command_dim
# Compose controls
u_vec = np.zeros(self.n_dof)
# By default, the control type is None and the control value is 0 (np.zeros) - i.e. no control applied
u_type_vec = np.array([ControlType.NONE] * self.n_dof)
for group, ctrl in control.items():
idx = self._controllers[group].dof_idx
u_vec[idx] = ctrl["value"]
u_type_vec[idx] = ctrl["type"]
u_vec, u_type_vec = self._postprocess_control(control=u_vec, control_type=u_type_vec)
# Deploy control signals
self.deploy_control(control=u_vec, control_type=u_type_vec, indices=None, normalized=False)
def _postprocess_control(self, control, control_type):
"""
        Runs any postprocessing on @control with corresponding @control_type on this entity. Default is a no-op.
Args:
control (k- or n-array): control signals to deploy. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @control must
be the same length as @indices!
control_type (k- or n-array): control types for each DOF. Each entry should be one of ControlType.
This should be n-DOF length if all joints are being set, or k-length (k < n) if specific
indices are being set. In this case, the length of @control must be the same length as @indices!
Returns:
2-tuple:
- n-array: raw control signals to send to the object's joints
- list: control types for each joint
"""
return control, control_type
def deploy_control(self, control, control_type, indices=None, normalized=False):
"""
Deploys control signals @control with corresponding @control_type on this entity.
Note: This is DIFFERENT than self.set_joint_positions/velocities/efforts, because in this case we are only
setting target values (i.e.: we subject this entity to physical dynamics in order to reach the desired
@control setpoints), compared to set_joint_XXXX which manually sets the actual state of the joints.
This function is intended to be used with motorized entities, e.g.: robot agents or machines (e.g.: a
        conveyor belt) to simulate physical control of these entities.
In contrast, use set_joint_XXXX for simulation-specific logic, such as simulator resetting or "magic"
action implementations.
Args:
control (k- or n-array): control signals to deploy. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @control must
be the same length as @indices!
control_type (k- or n-array): control types for each DOF. Each entry should be one of ControlType.
This should be n-DOF length if all joints are being set, or k-length (k < n) if specific
indices are being set. In this case, the length of @control must be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF controls to deploy.
Default is None, which assumes that all joints are being set.
normalized (bool): Whether the inputted joint controls should be interpreted as normalized
values. Expects a single bool for the entire @control. Default is False.
"""
# Run sanity check
if indices is None:
assert len(control) == len(control_type) == self.n_dof, (
"Control signals, control types, and number of DOF should all be the same!"
"Got {}, {}, and {} respectively.".format(len(control), len(control_type), self.n_dof)
)
# Set indices manually so that we're standardized
indices = np.arange(self.n_dof)
else:
assert len(control) == len(control_type) == len(indices), (
"Control signals, control types, and indices should all be the same!"
"Got {}, {}, and {} respectively.".format(len(control), len(control_type), len(indices))
)
# Standardize normalized input
n_indices = len(indices)
# Loop through controls and deploy
# We have to use delicate logic to account for the edge cases where a single joint may contain > 1 DOF
# (e.g.: spherical joint)
pos_vec, pos_idxs, using_pos = [], [], False
vel_vec, vel_idxs, using_vel = [], [], False
eff_vec, eff_idxs, using_eff = [], [], False
cur_indices_idx = 0
while cur_indices_idx != n_indices:
# Grab the current DOF index we're controlling and find the corresponding joint
joint = self._dof_to_joints[indices[cur_indices_idx]]
cur_ctrl_idx = indices[cur_indices_idx]
joint_dof = joint.n_dof
if joint_dof > 1:
# Run additional sanity checks since the joint has more than one DOF to make sure our controls,
# control types, and indices all match as expected
# Make sure the indices are mapped correctly
                assert np.all(indices[cur_indices_idx: cur_indices_idx + joint_dof] ==
                              np.arange(cur_ctrl_idx, cur_ctrl_idx + joint_dof)), \
                    "Got mismatched control indices for a single joint!"
                # Check to make sure the joints and control types are all the same over the n DOFs of this joint.
                # Note that @normalized is a single bool applied to the whole @control, so it needs no per-DOF check.
                for group_name, group in zip(
                    ("joints", "control_types"),
                    (self._dof_to_joints, control_type),
                ):
                    assert len({group[indices[cur_indices_idx + i]] for i in range(joint_dof)}) == 1, \
                        f"Not all {group_name} were the same when trying to deploy control for a single joint!"
# Assuming this all passes, we grab the control subvector, type, and normalized value accordingly
ctrl = control[cur_ctrl_idx: cur_ctrl_idx + joint_dof]
else:
# Grab specific control. No need to do checks since this is a single value
ctrl = control[cur_ctrl_idx]
# Deploy control based on type
ctrl_type = control_type[cur_ctrl_idx] # In multi-DOF joint case all values were already checked to be the same
if ctrl_type == ControlType.EFFORT:
eff_vec.append(ctrl)
eff_idxs.append(cur_ctrl_idx)
using_eff = True
elif ctrl_type == ControlType.VELOCITY:
vel_vec.append(ctrl)
vel_idxs.append(cur_ctrl_idx)
using_vel = True
elif ctrl_type == ControlType.POSITION:
pos_vec.append(ctrl)
pos_idxs.append(cur_ctrl_idx)
using_pos = True
elif ctrl_type == ControlType.NONE:
# Set zero efforts
eff_vec.append(0)
eff_idxs.append(cur_ctrl_idx)
using_eff = True
else:
raise ValueError("Invalid control type specified: {}".format(ctrl_type))
# Finally, increment the current index based on how many DOFs were just controlled
cur_indices_idx += joint_dof
# set the targets for joints
if using_pos:
self.set_joint_positions(positions=np.array(pos_vec), indices=np.array(pos_idxs), drive=True, normalized=normalized)
if using_vel:
self.set_joint_velocities(velocities=np.array(vel_vec), indices=np.array(vel_idxs), drive=True, normalized=normalized)
if using_eff:
self.set_joint_efforts(efforts=np.array(eff_vec), indices=np.array(eff_idxs), normalized=normalized)
def get_control_dict(self):
"""
Grabs all relevant information that should be passed to each controller during each controller step. This
automatically caches information
Returns:
CachedFunctions: Keyword-mapped control values for this object, mapping names to n-arrays.
By default, returns the following (can be queried via [] or get()):
- joint_position: (n_dof,) joint positions
- joint_velocity: (n_dof,) joint velocities
- joint_effort: (n_dof,) joint efforts
- root_pos: (3,) (x,y,z) global cartesian position of the object's root link
- root_quat: (4,) (x,y,z,w) global cartesian orientation of ths object's root link
- mass_matrix: (n_dof, n_dof) mass matrix
- gravity_force: (n_dof,) per-joint generalized gravity forces
- cc_force: (n_dof,) per-joint centripetal and centrifugal forces
"""
fcns = CachedFunctions()
fcns["_root_pos_quat"] = self.get_position_orientation
fcns["root_pos"] = lambda: fcns["_root_pos_quat"][0]
fcns["root_quat"] = lambda: fcns["_root_pos_quat"][1]
fcns["root_lin_vel"] = self.get_linear_velocity
fcns["root_ang_vel"] = self.get_angular_velocity
fcns["root_rel_lin_vel"] = self.get_relative_linear_velocity
fcns["root_rel_ang_vel"] = self.get_relative_angular_velocity
fcns["joint_position"] = lambda: self.get_joint_positions(normalized=False)
fcns["joint_velocity"] = lambda: self.get_joint_velocities(normalized=False)
fcns["joint_effort"] = lambda: self.get_joint_efforts(normalized=False)
fcns["mass_matrix"] = lambda: self.get_mass_matrix(clone=False)
fcns["gravity_force"] = lambda: self.get_generalized_gravity_forces(clone=False)
fcns["cc_force"] = lambda: self.get_coriolis_and_centrifugal_forces(clone=False)
return fcns
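    # Hedged sketch of the lazy-caching idea behind CachedFunctions (an illustrative stand-in,
    # not the real implementation): entries are stored as zero-arg callables and only evaluated
    # -- then memoized -- on first access:
    #
    #   class LazyCache:
    #       def __init__(self):
    #           self._fcns, self._cache = {}, {}
    #       def __setitem__(self, key, fcn):
    #           self._fcns[key] = fcn
    #       def __getitem__(self, key):
    #           if key not in self._cache:
    #               self._cache[key] = self._fcns[key]()
    #           return self._cache[key]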
def dump_action(self):
"""
Dump the last action applied to this object. For use in demo collection.
"""
return self._last_action
def set_joint_positions(self, positions, indices=None, normalized=False, drive=False):
# Call super first
super().set_joint_positions(positions=positions, indices=indices, normalized=normalized, drive=drive)
# If we're not driving the joints, reset the controllers so that the goals are updated wrt to the new state
if not drive:
for controller in self._controllers.values():
controller.reset()
def _dump_state(self):
# Grab super state
state = super()._dump_state()
# Add in controller states
controller_states = dict()
for controller_name, controller in self._controllers.items():
controller_states[controller_name] = controller.dump_state()
state["controllers"] = controller_states
return state
def _load_state(self, state):
# Run super first
super()._load_state(state=state)
# Load controller states
controller_states = state["controllers"]
for controller_name, controller in self._controllers.items():
controller.load_state(state=controller_states[controller_name])
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
# Serialize the controller states sequentially
controller_states_flat = np.concatenate([
c.serialize(state=state["controllers"][c_name]) for c_name, c in self._controllers.items()
])
# Concatenate and return
return np.concatenate([state_flat, controller_states_flat]).astype(float)
def _deserialize(self, state):
# Run super first
state_dict, idx = super()._deserialize(state=state)
# Deserialize the controller states sequentially
controller_states = dict()
for c_name, c in self._controllers.items():
state_size = c.state_size
controller_states[c_name] = c.deserialize(state=state[idx: idx + state_size])
idx += state_size
state_dict["controllers"] = controller_states
return state_dict, idx
@property
def action_dim(self):
"""
Returns:
int: Dimension of action space for this object. By default,
is the sum over all controller action dimensions
"""
return sum([controller.command_dim for controller in self._controllers.values()])
@property
def action_space(self):
"""
Action space for this object.
Returns:
gym.space: Action space, either discrete (Discrete) or continuous (Box)
"""
return deepcopy(self._action_space)
@property
def discrete_action_list(self):
"""
Discrete choices for actions for this object. Only needs to be implemented if the object supports discrete
actions.
Returns:
dict: Mapping from single action identifier (e.g.: a string, or a number) to array of continuous
actions to deploy via this object's controllers.
"""
raise NotImplementedError()
@property
def controllers(self):
"""
Returns:
dict: Controllers owned by this object, mapping controller name to controller object
"""
return self._controllers
@property
@abstractmethod
def controller_order(self):
"""
Returns:
list: Ordering of the actions, corresponding to the controllers. e.g., ["base", "arm", "gripper"],
to denote that the action vector should be interpreted as first the base action, then arm command, then
gripper command
"""
raise NotImplementedError
@property
def controller_action_idx(self):
"""
Returns:
dict: Mapping from controller names (e.g.: head, base, arm, etc.) to corresponding
indices (list) in the action vector
"""
dic = {}
idx = 0
for controller in self.controller_order:
cmd_dim = self._controllers[controller].command_dim
dic[controller] = np.arange(idx, idx + cmd_dim)
idx += cmd_dim
return dic
@property
def controller_joint_idx(self):
"""
Returns:
dict: Mapping from controller names (e.g.: head, base, arm, etc.) to corresponding
indices (list) of the joint state vector controlled by each controller
"""
dic = {}
for controller in self.controller_order:
dic[controller] = self._controllers[controller].dof_idx
return dic
@property
def control_limits(self):
"""
Returns:
dict: Keyword-mapped limits for this object. Dict contains:
position: (min, max) joint limits, where min and max are N-DOF arrays
velocity: (min, max) joint velocity limits, where min and max are N-DOF arrays
effort: (min, max) joint effort limits, where min and max are N-DOF arrays
has_limit: (n_dof,) array where each element is True if that corresponding joint has a position limit
(otherwise, joint is assumed to be limitless)
"""
return {
"position": (self.joint_lower_limits, self.joint_upper_limits),
"velocity": (-self.max_joint_velocities, self.max_joint_velocities),
"effort": (-self.max_joint_efforts, self.max_joint_efforts),
"has_limit": self.joint_has_limits,
}
@property
def default_kp(self):
"""
Returns:
float: Default kp gain to apply to any DOF when switching control modes (e.g.: switching from a
velocity control mode to a position control mode)
"""
return 1e7
@property
def default_kd(self):
"""
Returns:
float: Default kd gain to apply to any DOF when switching control modes (e.g.: switching from a
position control mode to a velocity control mode)
"""
return 1e5
@property
def reset_joint_pos(self):
"""
Returns:
n-array: reset joint positions for this robot
"""
return self._reset_joint_pos
@reset_joint_pos.setter
def reset_joint_pos(self, value):
"""
Args:
value: the new reset joint positions for this robot
"""
self._reset_joint_pos = value
@property
@abstractmethod
def _default_joint_pos(self):
"""
Returns:
n-array: Default joint positions for this robot
"""
raise NotImplementedError
@property
@abstractmethod
def _default_controller_config(self):
"""
Returns:
dict: default nested dictionary mapping controller name(s) to specific controller
configurations for this object. Note that the order specifies the sequence of actions to be received
from the environment.
Expected structure is as follows:
group1:
controller_name1:
controller_name1_params
...
controller_name2:
...
group2:
...
The @group keys specify the control type for various aspects of the object,
e.g.: "head", "arm", "base", etc. @controller_name keys specify the supported controllers for
that group. A default specification MUST be specified for each controller_name.
e.g.: IKController, DifferentialDriveController, JointController, etc.
"""
return {}
@property
@abstractmethod
def _default_controllers(self):
"""
Returns:
dict: Maps object group (e.g. base, arm, etc.) to default controller class name to use
(e.g. IKController, JointController, etc.)
"""
return {}
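# Hedged sketch (hypothetical helper, not part of OmniGibson): splits a flat action
# vector into per-controller commands using the controller_action_idx property above.
def _example_split_action(robot, action):
    return {name: action[idx] for name, idx in robot.controller_action_idx.items()}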
# StanfordVL/OmniGibson/omnigibson/utils/deprecated_utils.py
"""
A set of utility functions slated to be deprecated once Omniverse bugs are fixed
"""
import carb
from typing import List, Optional, Tuple, Union, Callable
import omni.usd as ou
from omni.particle.system.core.scripts.core import Core as OmniCore
from omni.particle.system.core.scripts.utils import Utils as OmniUtils
from pxr import Sdf, UsdShade, PhysxSchema, Usd, UsdGeom, UsdPhysics
import omni
import omni.graph.core as ogc
from omni.kit.primitive.mesh.command import _get_all_evaluators
from omni.kit.primitive.mesh.command import CreateMeshPrimWithDefaultXformCommand as CMPWDXC
import omni.timeline
from omni.isaac.core.utils.prims import get_prim_at_path
import numpy as np
import torch
import warp as wp
import math
from omni.isaac.core.articulations import ArticulationView as _ArticulationView
from omni.isaac.core.prims import RigidPrimView as _RigidPrimView
from PIL import Image, ImageDraw
from omni.replicator.core import random_colours
DEG2RAD = math.pi / 180.0
class CreateMeshPrimWithDefaultXformCommand(CMPWDXC):
def __init__(self, prim_type: str, **kwargs):
"""
Creates primitive.
Args:
prim_type (str): It supports Plane/Sphere/Cone/Cylinder/Disk/Torus/Cube.
kwargs:
object_origin (Gf.Vec3f): Position of mesh center in stage units.
u_patches (int): The number of patches to tessellate U direction.
v_patches (int): The number of patches to tessellate V direction.
w_patches (int): The number of patches to tessellate W direction.
It only works for Cone/Cylinder/Cube.
half_scale (float): Half size of mesh in centimeters. Default is None, which means it's controlled by settings.
u_verts_scale (int): Tessellation Level of U. It's a multiplier of u_patches.
v_verts_scale (int): Tessellation Level of V. It's a multiplier of v_patches.
w_verts_scale (int): Tessellation Level of W. It's a multiplier of w_patches.
It only works for Cone/Cylinder/Cube.
For Cone/Cylinder, it's to tessellate the caps.
For Cube, it's to tessellate along z-axis.
above_ground (bool): It will offset the center of mesh above the ground plane if it's True,
False otherwise. It's False by default. This param only works when param object_origin is not given.
Otherwise, it will be ignored.
stage (Usd.Stage): If specified, stage to create prim on
"""
self._prim_type = prim_type[0:1].upper() + prim_type[1:].lower()
self._usd_context = omni.usd.get_context()
self._selection = self._usd_context.get_selection()
self._stage = kwargs.get("stage", self._usd_context.get_stage())
self._settings = carb.settings.get_settings()
self._default_path = kwargs.get("prim_path", None)
self._select_new_prim = kwargs.get("select_new_prim", True)
self._prepend_default_prim = kwargs.get("prepend_default_prim", True)
self._above_round = kwargs.get("above_ground", False)
self._attributes = {**kwargs}
# Supported mesh types should have an associated evaluator class
self._evaluator_class = _get_all_evaluators()[prim_type]
assert isinstance(self._evaluator_class, type)
class Utils(OmniUtils):
def create_material(self, name):
material_url = carb.settings.get_settings().get("/exts/omni.particle.system.core/material")
# TODO: THIS IS THE ONLY LINE WE CHANGE! "/" SHOULD BE ""
material_path = ""
default_prim = self.stage.GetDefaultPrim()
if default_prim:
material_path = default_prim.GetPath().pathString
if not self.stage.GetPrimAtPath(material_path + "/Looks"):
self.stage.DefinePrim(material_path + "/Looks", "Scope")
material_path += "/Looks/" + name
material_path = ou.get_stage_next_free_path(
self.stage, material_path, False
)
prim = self.stage.DefinePrim(material_path, "Material")
if material_url:
prim.GetReferences().AddReference(material_url)
else:
carb.log_error("Failed to find material URL in settings")
return [material_path]
class Core(OmniCore):
"""
Subclass that overrides a specific function within Omni's Core class to fix a bug
"""
def __init__(self, popup_callback: Callable[[str], None], particle_system_name: str):
self._popup_callback = popup_callback
self.utils = Utils()
self.context = ou.get_context()
self.stage = self.context.get_stage()
self.selection = self.context.get_selection()
self.particle_system_name = particle_system_name
self.sub_stage_update = self.context.get_stage_event_stream().create_subscription_to_pop(self.on_stage_update)
self.on_stage_update()
def get_compute_graph(self, selected_paths, create_new_graph=True, created_paths=None):
"""
Returns the first ComputeGraph found in selected_paths.
If no graph is found and create_new_graph is true, a new graph will be created and its
path appended to created_paths (if provided).
"""
graph = None
graph_paths = [path for path in selected_paths
if self.stage.GetPrimAtPath(path).GetTypeName() in ["ComputeGraph", "OmniGraph"] ]
if len(graph_paths) > 0:
graph = ogc.get_graph_by_path(graph_paths[0])
if len(graph_paths) > 1:
carb.log_warn(f"Multiple ComputeGraph prims selected. Only the first will be used: {graph.get_path_to_graph()}")
elif create_new_graph:
# If no graph was found in the selected prims, we'll make a new graph.
# TODO: THIS IS THE ONLY LINE THAT WE CHANGE! ONCE FIXED, REMOVE THIS
graph_path = Sdf.Path(f"/OmniGraph/{self.particle_system_name}").MakeAbsolutePath(Sdf.Path.absoluteRootPath)
graph_path = ou.get_stage_next_free_path(self.stage, graph_path, True)
# prim = self.stage.GetDefaultPrim()
# path = str(prim.GetPath()) if prim else ""
self.stage.DefinePrim("/OmniGraph", "Scope")
container_graphs = ogc.get_global_container_graphs()
# FIXME: container_graphs[0] should be the simulation orchestration graph, but this may change in the future.
container_graph = container_graphs[0]
result, wrapper_node = ogc.cmds.CreateGraphAsNode(
graph=container_graph,
node_name=Sdf.Path(graph_path).name,
graph_path=graph_path,
evaluator_name="push",
is_global_graph=True,
backed_by_usd=True,
fc_backing_type=ogc.GraphBackingType.GRAPH_BACKING_TYPE_FLATCACHE_SHARED,
pipeline_stage=ogc.GraphPipelineStage.GRAPH_PIPELINE_STAGE_SIMULATION
)
graph = wrapper_node.get_wrapped_graph()
if created_paths is not None:
created_paths.append(graph.get_path_to_graph())
carb.log_info(f"No ComputeGraph selected. A new graph has been created at {graph.get_path_to_graph()}")
return graph
class ArticulationView(_ArticulationView):
"""ArticulationView with some additional functionality implemented."""
def set_joint_limits(
self,
values: Union[np.ndarray, torch.Tensor],
indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Sets joint limits for articulation joints in the view.
Args:
values (Union[np.ndarray, torch.Tensor, wp.array]): joint limits for articulations in the view. shape (M, K, 2).
            indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
            joint_indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
return
if not omni.timeline.get_timeline_interface().is_stopped() and self._physics_view is not None:
indices = self._backend_utils.resolve_indices(indices, self.count, "cpu")
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, "cpu")
new_values = self._physics_view.get_dof_limits()
values = self._backend_utils.move_data(values, device="cpu")
new_values = self._backend_utils.assign(
values,
new_values,
[self._backend_utils.expand_dims(indices, 1) if self._backend != "warp" else indices, joint_indices],
)
self._physics_view.set_dof_limits(new_values, indices)
else:
indices = self._backend_utils.to_list(
self._backend_utils.resolve_indices(indices, self.count, self._device)
)
dof_types = self._backend_utils.to_list(self.get_dof_types())
joint_indices = self._backend_utils.to_list(
self._backend_utils.resolve_indices(joint_indices, self.num_dof, self._device)
)
values = self._backend_utils.to_list(values)
articulation_read_idx = 0
for i in indices:
dof_read_idx = 0
for dof_index in joint_indices:
dof_val = values[articulation_read_idx][dof_read_idx]
if dof_types[dof_index] == omni.physics.tensors.DofType.Rotation:
dof_val /= DEG2RAD
prim = get_prim_at_path(self._dof_paths[i][dof_index])
prim.GetAttribute("physics:lowerLimit").Set(dof_val[0])
prim.GetAttribute("physics:upperLimit").Set(dof_val[1])
dof_read_idx += 1
articulation_read_idx += 1
return
def get_joint_limits(
self,
indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
clone: bool = True,
) -> Union[np.ndarray, torch.Tensor, wp.array]:
"""Gets joint limits for articulation in the view.
Args:
            indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to query. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
            joint_indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to query. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
clone (Optional[bool]): True to return a clone of the internal buffer. Otherwise False. Defaults to True.
Returns:
            Union[np.ndarray, torch.Tensor, wp.indexedarray]: joint limits for articulations in the view. shape (M, K, 2).
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
return None
if not omni.timeline.get_timeline_interface().is_stopped() and self._physics_view is not None:
indices = self._backend_utils.resolve_indices(indices, self.count, self._device)
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, self._device)
values = self._backend_utils.move_data(self._physics_view.get_dof_limits(), self._device)
if clone:
values = self._backend_utils.clone_tensor(values, device=self._device)
result = values[
self._backend_utils.expand_dims(indices, 1) if self._backend != "warp" else indices, joint_indices
]
return result
else:
indices = self._backend_utils.resolve_indices(indices, self.count, self._device)
dof_types = self._backend_utils.to_list(self.get_dof_types())
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, self._device)
values = np.zeros(shape=(indices.shape[0], joint_indices.shape[0], 2), dtype="float32")
articulation_write_idx = 0
indices = self._backend_utils.to_list(indices)
joint_indices = self._backend_utils.to_list(joint_indices)
for i in indices:
dof_write_idx = 0
for dof_index in joint_indices:
prim = get_prim_at_path(self._dof_paths[i][dof_index])
values[articulation_write_idx][dof_write_idx][0] = prim.GetAttribute("physics:lowerLimit").Get()
values[articulation_write_idx][dof_write_idx][1] = prim.GetAttribute("physics:upperLimit").Get()
if dof_types[dof_index] == omni.physics.tensors.DofType.Rotation:
values[articulation_write_idx][dof_write_idx] = values[articulation_write_idx][dof_write_idx] * DEG2RAD
dof_write_idx += 1
articulation_write_idx += 1
values = self._backend_utils.convert(values, dtype="float32", device=self._device, indexed=True)
return values
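    # Hedged usage sketch (values are illustrative): widen the limits of joints 0 and 1
    # for every articulation in the view by reading, modifying, and writing them back:
    #     limits = view.get_joint_limits(joint_indices=np.array([0, 1]))  # shape (M, 2, 2)
    #     limits[..., 0] -= 0.1  # lower bounds
    #     limits[..., 1] += 0.1  # upper bounds
    #     view.set_joint_limits(limits, joint_indices=np.array([0, 1]))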
def set_max_velocities(
self,
values: Union[np.ndarray, torch.Tensor, wp.array],
indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Sets maximum velocities for articulation in the view.
Args:
values (Union[np.ndarray, torch.Tensor, wp.array]): maximum velocities for articulations in the view. shape (M, K).
            indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
            joint_indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
return
if not omni.timeline.get_timeline_interface().is_stopped() and self._physics_view is not None:
indices = self._backend_utils.resolve_indices(indices, self.count, "cpu")
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, "cpu")
new_values = self._physics_view.get_dof_max_velocities()
new_values = self._backend_utils.assign(
self._backend_utils.move_data(values, device="cpu"),
new_values,
[self._backend_utils.expand_dims(indices, 1) if self._backend != "warp" else indices, joint_indices],
)
self._physics_view.set_dof_max_velocities(new_values, indices)
else:
indices = self._backend_utils.resolve_indices(indices, self.count, self._device)
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, self._device)
articulation_read_idx = 0
indices = self._backend_utils.to_list(indices)
joint_indices = self._backend_utils.to_list(joint_indices)
values = self._backend_utils.to_list(values)
for i in indices:
dof_read_idx = 0
for dof_index in joint_indices:
prim = PhysxSchema.PhysxJointAPI(get_prim_at_path(self._dof_paths[i][dof_index]))
if not prim.GetMaxJointVelocityAttr():
prim.CreateMaxJointVelocityAttr().Set(values[articulation_read_idx][dof_read_idx])
else:
prim.GetMaxJointVelocityAttr().Set(values[articulation_read_idx][dof_read_idx])
dof_read_idx += 1
articulation_read_idx += 1
return
def get_max_velocities(
self,
indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
clone: bool = True,
) -> Union[np.ndarray, torch.Tensor, wp.indexedarray]:
"""Gets maximum velocities for articulation in the view.
Args:
            indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to query. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
            joint_indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to query. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
clone (Optional[bool]): True to return a clone of the internal buffer. Otherwise False. Defaults to True.
Returns:
Union[np.ndarray, torch.Tensor, wp.indexedarray]: maximum velocities for articulations in the view. shape (M, K).
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
return None
if not omni.timeline.get_timeline_interface().is_stopped() and self._physics_view is not None:
indices = self._backend_utils.resolve_indices(indices, self.count, "cpu")
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, "cpu")
max_velocities = self._physics_view.get_dof_max_velocities()
if clone:
max_velocities = self._backend_utils.clone_tensor(max_velocities, device="cpu")
result = self._backend_utils.move_data(
max_velocities[
self._backend_utils.expand_dims(indices, 1) if self._backend != "warp" else indices, joint_indices
],
device=self._device,
)
return result
else:
indices = self._backend_utils.resolve_indices(indices, self.count, self._device)
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, self._device)
max_velocities = np.zeros(shape=(indices.shape[0], joint_indices.shape[0]), dtype="float32")
indices = self._backend_utils.to_list(indices)
joint_indices = self._backend_utils.to_list(joint_indices)
articulation_write_idx = 0
for i in indices:
dof_write_idx = 0
for dof_index in joint_indices:
prim = PhysxSchema.PhysxJointAPI(get_prim_at_path(self._dof_paths[i][dof_index]))
max_velocities[articulation_write_idx][dof_write_idx] = prim.GetMaxJointVelocityAttr().Get()
dof_write_idx += 1
articulation_write_idx += 1
max_velocities = self._backend_utils.convert(max_velocities, dtype="float32", device=self._device, indexed=True)
return max_velocities
def set_joint_positions(
self,
positions: Optional[Union[np.ndarray, torch.Tensor, wp.array]],
indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Set the joint positions of articulations in the view
.. warning::
This method will immediately set (teleport) the affected joints to the indicated value.
Use the ``set_joint_position_targets`` or the ``apply_action`` methods to control the articulation joints.
Args:
positions (Optional[Union[np.ndarray, torch.Tensor, wp.array]]): joint positions of articulations in the view to be set to in the next frame.
shape is (M, K).
indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
.. hint::
This method belongs to the methods used to set the articulation kinematic states:
``set_velocities`` (``set_linear_velocities``, ``set_angular_velocities``),
``set_joint_positions``, ``set_joint_velocities``, ``set_joint_efforts``
Example:
.. code-block:: python
>>> # set all the articulation joints.
>>> # Since there are 5 envs, the joint positions are repeated 5 times
>>> positions = np.tile(np.array([0.0, -1.0, 0.0, -2.2, 0.0, 2.4, 0.8, 0.04, 0.04]), (num_envs, 1))
>>> prims.set_joint_positions(positions)
>>>
>>> # set only the fingers in closed position: panda_finger_joint1 (7) and panda_finger_joint2 (8) to 0.0
>>> # for the first, middle and last of the 5 envs
>>> positions = np.tile(np.array([0.0, 0.0]), (3, 1))
>>> prims.set_joint_positions(positions, indices=np.array([0, 2, 4]), joint_indices=np.array([7, 8]))
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
return
if not omni.timeline.get_timeline_interface().is_stopped() and self._physics_view is not None:
indices = self._backend_utils.resolve_indices(indices, self.count, self._device)
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, self._device)
new_dof_pos = self._physics_view.get_dof_positions()
new_dof_pos = self._backend_utils.assign(
self._backend_utils.move_data(positions, device=self._device),
new_dof_pos,
[self._backend_utils.expand_dims(indices, 1) if self._backend != "warp" else indices, joint_indices],
)
self._physics_view.set_dof_positions(new_dof_pos, indices)
# THIS IS THE FIX: COMMENT OUT THE BELOW LINE AND SET TARGETS INSTEAD
# self._physics_view.set_dof_position_targets(new_dof_pos, indices)
self.set_joint_position_targets(positions, indices, joint_indices)
else:
carb.log_warn("Physics Simulation View is not created yet in order to use set_joint_positions")
def set_joint_velocities(
self,
velocities: Optional[Union[np.ndarray, torch.Tensor, wp.array]],
indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Set the joint velocities of articulations in the view
.. warning::
This method will immediately set the affected joints to the indicated value.
Use the ``set_joint_velocity_targets`` or the ``apply_action`` methods to control the articulation joints.
Args:
velocities (Optional[Union[np.ndarray, torch.Tensor, wp.array]]): joint velocities of articulations in the view to be set to in the next frame.
shape is (M, K).
indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
.. hint::
This method belongs to the methods used to set the articulation kinematic states:
``set_velocities`` (``set_linear_velocities``, ``set_angular_velocities``),
``set_joint_positions``, ``set_joint_velocities``, ``set_joint_efforts``
Example:
.. code-block:: python
>>> # set the velocities for all the articulation joints to the indicated values.
>>> # Since there are 5 envs, the joint velocities are repeated 5 times
>>> velocities = np.tile(np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]), (num_envs, 1))
>>> prims.set_joint_velocities(velocities)
>>>
>>> # set the fingers velocities: panda_finger_joint1 (7) and panda_finger_joint2 (8) to -0.1
>>> # for the first, middle and last of the 5 envs
>>> velocities = np.tile(np.array([-0.1, -0.1]), (3, 1))
>>> prims.set_joint_velocities(velocities, indices=np.array([0, 2, 4]), joint_indices=np.array([7, 8]))
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
return
if not omni.timeline.get_timeline_interface().is_stopped() and self._physics_view is not None:
indices = self._backend_utils.resolve_indices(indices, self.count, self._device)
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, self._device)
new_dof_vel = self._physics_view.get_dof_velocities()
new_dof_vel = self._backend_utils.assign(
self._backend_utils.move_data(velocities, device=self._device),
new_dof_vel,
[self._backend_utils.expand_dims(indices, 1) if self._backend != "warp" else indices, joint_indices],
)
self._physics_view.set_dof_velocities(new_dof_vel, indices)
# THIS IS THE FIX: COMMENT OUT THE BELOW LINE AND SET TARGETS INSTEAD
# self._physics_view.set_dof_velocity_targets(new_dof_vel, indices)
self.set_joint_velocity_targets(velocities, indices, joint_indices)
else:
carb.log_warn("Physics Simulation View is not created yet in order to use set_joint_velocities")
return
def set_joint_efforts(
self,
efforts: Optional[Union[np.ndarray, torch.Tensor, wp.array]],
indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
joint_indices: Optional[Union[np.ndarray, List, torch.Tensor, wp.array]] = None,
) -> None:
"""Set the joint efforts of articulations in the view
.. note::
This method can be used for effort control. For this purpose, there must be no joint drive
or the stiffness and damping must be set to zero.
Args:
efforts (Optional[Union[np.ndarray, torch.Tensor, wp.array]]): efforts of articulations in the view to be set to in the next frame.
shape is (M, K).
indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
joint_indices (Optional[Union[np.ndarray, List, torch.Tensor, wp.array]], optional): joint indices to specify which joints
to manipulate. Shape (K,).
Where K <= num of dofs.
Defaults to None (i.e: all dofs).
.. hint::
This method belongs to the methods used to set the articulation kinematic states:
``set_velocities`` (``set_linear_velocities``, ``set_angular_velocities``),
``set_joint_positions``, ``set_joint_velocities``, ``set_joint_efforts``
Example:
.. code-block:: python
>>> # set the efforts for all the articulation joints to the indicated values.
>>> # Since there are 5 envs, the joint efforts are repeated 5 times
>>> efforts = np.tile(np.array([10, 20, 30, 40, 50, 60, 70, 80, 90]), (num_envs, 1))
>>> prims.set_joint_efforts(efforts)
>>>
>>> # set the fingers efforts: panda_finger_joint1 (7) and panda_finger_joint2 (8) to 10
>>> # for the first, middle and last of the 5 envs
>>> efforts = np.tile(np.array([10, 10]), (3, 1))
>>> prims.set_joint_efforts(efforts, indices=np.array([0, 2, 4]), joint_indices=np.array([7, 8]))
"""
if not self._is_initialized:
carb.log_warn("ArticulationView needs to be initialized.")
return
if not omni.timeline.get_timeline_interface().is_stopped() and self._physics_view is not None:
indices = self._backend_utils.resolve_indices(indices, self.count, self._device)
joint_indices = self._backend_utils.resolve_indices(joint_indices, self.num_dof, self._device)
# THIS IS THE FIX: COMMENT OUT THE BELOW LINE AND USE ACTUATION FORCES INSTEAD
# new_dof_efforts = self._backend_utils.create_zeros_tensor(
# shape=[self.count, self.num_dof], dtype="float32", device=self._device
# )
new_dof_efforts = self._physics_view.get_dof_actuation_forces()
new_dof_efforts = self._backend_utils.assign(
self._backend_utils.move_data(efforts, device=self._device),
new_dof_efforts,
[self._backend_utils.expand_dims(indices, 1) if self._backend != "warp" else indices, joint_indices],
)
self._physics_view.set_dof_actuation_forces(new_dof_efforts, indices)
else:
carb.log_warn("Physics Simulation View is not created yet in order to use set_joint_efforts")
return
def _invalidate_physics_handle_callback(self, event):
# Overwrite super method, add additional de-initialization
if event.type == int(omni.timeline.TimelineEventType.STOP):
self._physics_view = None
self._invalidate_physics_handle_event = None
self._is_initialized = False
class RigidPrimView(_RigidPrimView):
def enable_gravities(self, indices: Optional[Union[np.ndarray, list, torch.Tensor, wp.array]] = None) -> None:
"""Enable gravity on rigid bodies (enabled by default).
Args:
            indices (Optional[Union[np.ndarray, list, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
"""
if not omni.timeline.get_timeline_interface().is_stopped() and self._physics_view is not None:
indices = self._backend_utils.resolve_indices(indices, self.count, "cpu")
data = self._physics_view.get_disable_gravities().reshape(self._count)
data = self._backend_utils.assign(
self._backend_utils.create_tensor_from_list([False] * len(indices), dtype="uint8"), data, indices
)
self._physics_view.set_disable_gravities(data, indices)
else:
indices = self._backend_utils.resolve_indices(indices, self.count, self._device)
indices = self._backend_utils.to_list(indices)
for i in indices:
if self._physx_rigid_body_apis[i] is None:
if self._prims[i].HasAPI(PhysxSchema.PhysxRigidBodyAPI):
rigid_api = PhysxSchema.PhysxRigidBodyAPI(self._prims[i])
else:
rigid_api = PhysxSchema.PhysxRigidBodyAPI.Apply(self._prims[i])
self._physx_rigid_body_apis[i] = rigid_api
self._physx_rigid_body_apis[i].GetDisableGravityAttr().Set(False)
def disable_gravities(self, indices: Optional[Union[np.ndarray, list, torch.Tensor, wp.array]] = None) -> None:
"""Disable gravity on rigid bodies (enabled by default).
Args:
            indices (Optional[Union[np.ndarray, list, torch.Tensor, wp.array]], optional): indices to specify which prims
to manipulate. Shape (M,).
Where M <= size of the encapsulated prims in the view.
Defaults to None (i.e: all prims in the view).
"""
indices = self._backend_utils.resolve_indices(indices, self.count, "cpu")
if not omni.timeline.get_timeline_interface().is_stopped() and self._physics_view is not None:
data = self._physics_view.get_disable_gravities().reshape(self._count)
data = self._backend_utils.assign(
self._backend_utils.create_tensor_from_list([True] * len(indices), dtype="uint8"), data, indices
)
self._physics_view.set_disable_gravities(data, indices)
else:
indices = self._backend_utils.resolve_indices(indices, self.count, self._device)
indices = self._backend_utils.to_list(indices)
for i in indices:
if self._physx_rigid_body_apis[i] is None:
if self._prims[i].HasAPI(PhysxSchema.PhysxRigidBodyAPI):
rigid_api = PhysxSchema.PhysxRigidBodyAPI(self._prims[i])
else:
rigid_api = PhysxSchema.PhysxRigidBodyAPI.Apply(self._prims[i])
self._physx_rigid_body_apis[i] = rigid_api
self._physx_rigid_body_apis[i].GetDisableGravityAttr().Set(True)
return
def colorize_bboxes(bboxes_2d_data, bboxes_2d_rgb, num_channels=3):
"""Colorizes 2D bounding box data for visualization.
We are overriding the replicator native version of this function to fix a bug.
In their version of this function, the ordering of the rectangle corners is incorrect and we fix it here.
Args:
bboxes_2d_data (numpy.ndarray): 2D bounding box data from the sensor.
bboxes_2d_rgb (numpy.ndarray): RGB data from the sensor to embed bounding box.
num_channels (int): Specify number of channels i.e. 3 or 4.
"""
semantic_id_list = []
bbox_2d_list = []
rgb_img = Image.fromarray(bboxes_2d_rgb)
rgb_img_draw = ImageDraw.Draw(rgb_img)
for bbox_2d in bboxes_2d_data:
semantic_id_list.append(bbox_2d['semanticId'])
bbox_2d_list.append(bbox_2d)
semantic_id_list_np = np.unique(np.array(semantic_id_list))
color_list = random_colours(len(semantic_id_list_np.tolist()), True, num_channels)
for bbox_2d in bbox_2d_list:
index = np.where(semantic_id_list_np == bbox_2d['semanticId'])[0][0]
bbox_color = color_list[index]
outline = (bbox_color[0], bbox_color[1], bbox_color[2])
if num_channels == 4:
outline = (
bbox_color[0],
bbox_color[1],
bbox_color[2],
bbox_color[3],
)
rgb_img_draw.rectangle([(bbox_2d['x_min'], bbox_2d['y_min']), (bbox_2d['x_max'], bbox_2d['y_max'])], outline=outline, width=2)
bboxes_2d_rgb = np.array(rgb_img)
return bboxes_2d_rgb
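# Hedged sketch of colorize_bboxes on synthetic data. The structured dtype below mirrors
# only the fields the function reads ('semanticId', 'x_min', 'y_min', 'x_max', 'y_max');
# real replicator sensor output carries additional fields.
def _example_colorize_bboxes():
    rgb = np.zeros((64, 64, 3), dtype=np.uint8)
    boxes = np.array(
        [(1, 5, 5, 30, 30), (2, 35, 35, 60, 60)],
        dtype=[("semanticId", "<u4"), ("x_min", "<i4"), ("y_min", "<i4"), ("x_max", "<i4"), ("y_max", "<i4")],
    )
    return colorize_bboxes(boxes, rgb, num_channels=3)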
# StanfordVL/OmniGibson/omnigibson/utils/profiling_utils.py
import gym
import omnigibson as og
import os
import psutil
from pynvml.smi import nvidia_smi
from time import time
class ProfilingEnv(og.Environment):
def step(self, action):
try:
start = time()
# If the action is not a dictionary, convert into a dictionary
if not isinstance(action, dict) and not isinstance(action, gym.spaces.Dict):
action_dict = dict()
idx = 0
for robot in self.robots:
action_dim = robot.action_dim
action_dict[robot.name] = action[idx: idx + action_dim]
idx += action_dim
else:
# Our inputted action is the action dictionary
action_dict = action
# Iterate over all robots and apply actions
for robot in self.robots:
robot.apply_action(action_dict[robot.name])
# Run simulation step
sim_start = time()
if len(og.sim._objects_to_initialize) > 0:
og.sim.render()
super(type(og.sim), og.sim).step(render=True)
omni_time = (time() - sim_start) * 1e3
# Additionally run non physics things
og.sim._non_physics_step()
# Grab observations
obs, obs_info = self.get_obs()
# Step the scene graph builder if necessary
if self._scene_graph_builder is not None:
self._scene_graph_builder.step(self.scene)
# Grab reward, done, and info, and populate with internal info
reward, done, info = self.task.step(self, action)
self._populate_info(info)
if done and self._automatic_reset:
# Add lost observation to our information dict, and reset
info["last_observation"] = obs
info["last_observation_info"] = obs_info
obs, obs_info = self.reset()
# Increment step
self._current_step += 1
# collect profiling data
total_frame_time = (time() - start) * 1e3
og_time = total_frame_time - omni_time
# memory usage in GB
memory_usage = psutil.Process(os.getpid()).memory_info().rss / 1024 ** 3
            # VRAM usage in GB (stays 0 if this process is not found on any GPU)
            vram_usage = 0.0
            for gpu in nvidia_smi.getInstance().DeviceQuery()['gpu']:
found = False
for process in gpu['processes']:
if process['pid'] == os.getpid():
vram_usage = process['used_memory'] / 1024
found = True
break
if found:
break
ret = [total_frame_time, omni_time, og_time, memory_usage, vram_usage]
if self._current_step % 100 == 0:
print("total time: {:.3f} ms, Omni time: {:.3f} ms, OG time: {:.3f} ms, memory: {:.3f} GB, vram: {:.3f} GB.".format(*ret))
return obs, reward, done, info, ret
        except Exception as e:
            raise ValueError(f"Failed to execute environment step {self._current_step} in episode {self._current_episode}") from e
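# Hedged sketch: drives a constructed ProfilingEnv for a few steps and averages the
# profiling data returned as the extra fifth element of step(). Assumes `env` and
# `action` are provided by the caller.
def _example_profiling_loop(env, action, n_steps=10):
    import numpy as np
    samples = []
    for _ in range(n_steps):
        obs, reward, done, info, profile = env.step(action)
        samples.append(profile)  # [total_ms, omni_ms, og_ms, memory_gb, vram_gb]
    return np.mean(samples, axis=0)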
# StanfordVL/OmniGibson/omnigibson/utils/python_utils.py
"""
A set of utility functions for general python usage
"""
import inspect
import re
from abc import ABCMeta
from copy import deepcopy
from collections.abc import Iterable
from functools import wraps, cache
from importlib import import_module
import numpy as np
# Global dictionary storing all unique names
NAMES = set()
CLASS_NAMES = set()
class classproperty:
def __init__(self, fget):
self.fget = fget
def __get__(self, owner_self, owner_cls):
return self.fget(owner_cls)
def subclass_factory(name, base_classes, __init__=None, **kwargs):
"""
Programmatically generates a new class type with name @name, subclassing from base classes @base_classes, with
corresponding __init__ call @__init__.
NOTE: If __init__ is None (default), the __init__ call from @base_classes will be used instead.
cf. https://stackoverflow.com/questions/15247075/how-can-i-dynamically-create-derived-classes-from-a-base-class
Args:
name (str): Generated class name
base_classes (type, or list of type): Base class(es) to use for generating the subclass
__init__ (None or function): Init call to use for the base class when it is instantiated. If None if specified,
the newly generated class will automatically inherit the __init__ call from @base_classes
**kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
the class / instance attribute to modify and the values represent the functions / value to set
"""
# Standardize base_classes
base_classes = tuple(base_classes if isinstance(base_classes, Iterable) else [base_classes])
# Generate the new class
if __init__ is not None:
kwargs["__init__"] = __init__
return type(name, base_classes, kwargs)
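# Hedged sketch of subclass_factory with a toy base class (names are illustrative).
def _example_subclass_factory():
    class Animal:
        sound = "..."
        def speak(self):
            return self.sound
    # Dynamically build a subclass that overrides the class attribute `sound`
    Dog = subclass_factory("Dog", Animal, sound="woof")
    assert Dog().speak() == "woof"
    return Dog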
def save_init_info(func):
"""
Decorator to save the init info of an object to object._init_info.
_init_info contains class name and class constructor's input args.
"""
sig = inspect.signature(func)
@wraps(func) # preserve func name, docstring, arguments list, etc.
def wrapper(self, *args, **kwargs):
values = sig.bind(self, *args, **kwargs)
# Prevent args of super init from being saved.
if hasattr(self, "_init_info"):
func(*values.args, **values.kwargs)
return
# Initialize class's self._init_info.
self._init_info = {}
self._init_info["class_module"] = self.__class__.__module__
self._init_info["class_name"] = self.__class__.__name__
self._init_info["args"] = {}
# Populate class's self._init_info.
for k, p in sig.parameters.items():
if k == 'self':
continue
if k in values.arguments:
val = values.arguments[k]
if p.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY):
self._init_info["args"][k] = val
elif p.kind == inspect.Parameter.VAR_KEYWORD:
for kwarg_k, kwarg_val in values.arguments[k].items():
self._init_info["args"][kwarg_k] = kwarg_val
# Call the original function.
func(*values.args, **values.kwargs)
return wrapper
class RecreatableMeta(type):
"""
Simple metaclass that automatically saves __init__ args of the instances it creates.
"""
def __new__(cls, clsname, bases, clsdict):
if "__init__" in clsdict:
clsdict["__init__"] = save_init_info(clsdict["__init__"])
return super().__new__(cls, clsname, bases, clsdict)
class RecreatableAbcMeta(RecreatableMeta, ABCMeta):
"""
A composite metaclass of both RecreatableMeta and ABCMeta.
Adding in ABCMeta to resolve metadata conflicts.
"""
pass
class Recreatable(metaclass=RecreatableAbcMeta):
"""
Simple class that provides an abstract interface automatically saving __init__ args of
the classes inheriting it.
"""
def get_init_info(self):
"""
Grabs relevant initialization information for this class instance. Useful for directly
reloading an object from this information, using @create_object_from_init_info.
Returns:
dict: Nested dictionary that contains this object's initialization information
"""
# Note: self._init_info is procedurally generated via @save_init_info called in metaclass
return self._init_info
def create_object_from_init_info(init_info):
"""
Create a new object based on given init info.
Args:
init_info (dict): Nested dictionary that contains an object's init information.
Returns:
any: Newly created object.
"""
module = import_module(init_info["class_module"])
cls = getattr(module, init_info["class_name"])
return cls(**init_info["args"], **init_info.get("kwargs", {}))
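# Hedged sketch: round-trips a module-level Recreatable instance through its recorded
# init info. Locally defined classes will not work, since recreation re-imports the
# class by module path and class name.
def _example_recreatable_round_trip(obj):
    info = obj.get_init_info()
    clone = create_object_from_init_info(info)
    assert type(clone) is type(obj)
    return clone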
def merge_nested_dicts(base_dict, extra_dict, inplace=False, verbose=False):
"""
Iteratively updates @base_dict with values from @extra_dict. Note: This generates a new dictionary!
Args:
base_dict (dict): Nested base dictionary, which should be updated with all values from @extra_dict
extra_dict (dict): Nested extra dictionary, whose values will overwrite corresponding ones in @base_dict
inplace (bool): Whether to modify @base_dict in place or not
verbose (bool): If True, will print when keys are mismatched
Returns:
dict: Updated dictionary
"""
# Loop through all keys in @extra_dict and update the corresponding values in @base_dict
base_dict = base_dict if inplace else deepcopy(base_dict)
for k, v in extra_dict.items():
if k not in base_dict:
base_dict[k] = v
else:
if isinstance(v, dict) and isinstance(base_dict[k], dict):
                base_dict[k] = merge_nested_dicts(base_dict[k], v, inplace=inplace, verbose=verbose)
else:
not_equal = base_dict[k] != v
if isinstance(not_equal, np.ndarray):
not_equal = not_equal.any()
if not_equal and verbose:
print(f"Different values for key {k}: {base_dict[k]}, {v}\n")
base_dict[k] = np.array(v) if isinstance(v, list) else v
# Return new dict
return base_dict
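# Hedged sketch of merge_nested_dicts: nested keys from the extra dict overwrite or
# extend the base (config keys below are illustrative).
def _example_merge_nested_dicts():
    base = {"sim": {"dt": 0.01, "gravity": -9.81}, "render": True}
    extra = {"sim": {"dt": 0.005}, "seed": 0}
    merged = merge_nested_dicts(base, extra)
    assert merged == {"sim": {"dt": 0.005, "gravity": -9.81}, "render": True, "seed": 0}
    return merged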
def get_class_init_kwargs(cls):
"""
Helper function to return a list of all valid keyword arguments (excluding "self") for the given @cls class.
Args:
cls (object): Class from which to grab __init__ kwargs
Returns:
list: All keyword arguments (excluding "self") specified by @cls __init__ constructor method
"""
return list(inspect.signature(cls.__init__).parameters.keys())[1:]
def extract_subset_dict(dic, keys, copy=False):
"""
Helper function to extract a subset of dictionary key-values from a current dictionary. Optionally (deep)copies
the values extracted from the original @dic if @copy is True.
Args:
dic (dict): Dictionary containing multiple key-values
keys (Iterable): Specific keys to extract from @dic. If the key doesn't exist in @dic, then the key is skipped
copy (bool): If True, will deepcopy all values corresponding to the specified @keys
Returns:
dict: Extracted subset dictionary containing only the specified @keys and their corresponding values
"""
subset = {k: dic[k] for k in keys if k in dic}
return deepcopy(subset) if copy else subset
def extract_class_init_kwargs_from_dict(cls, dic, copy=False):
"""
Helper function to return a dictionary of key-values that specifically correspond to @cls class's __init__
constructor method, from @dic which may or may not contain additional, irrelevant kwargs.
Note that @dic may possibly be missing certain kwargs as specified by cls.__init__. No error will be raised.
Args:
cls (object): Class from which to grab __init__ kwargs that will be be used as filtering keys for @dic
dic (dict): Dictionary containing multiple key-values
copy (bool): If True, will deepcopy all values corresponding to the specified @keys
Returns:
dict: Extracted subset dictionary possibly containing only the specified keys from cls.__init__ and their
corresponding values
"""
# extract only relevant kwargs for this specific backbone
return extract_subset_dict(
dic=dic,
keys=get_class_init_kwargs(cls),
copy=copy,
)
def assert_valid_key(key, valid_keys, name=None):
"""
Helper function that asserts that @key is in dictionary @valid_keys keys. If not, it will raise an error.
Args:
        key (any): key to check for in @valid_keys
        valid_keys (Iterable): valid keys that @key is checked against
name (str or None): if specified, is the name associated with the key that will be printed out if the
key is not found. If None, default is "value"
"""
if name is None:
name = "value"
assert key in valid_keys, "Invalid {} received! Valid options are: {}, got: {}".format(
name, valid_keys.keys() if isinstance(valid_keys, dict) else valid_keys, key)
def create_class_from_registry_and_config(cls_name, cls_registry, cfg, cls_type_descriptor):
"""
Helper function to create a class with str type @cls_name, which should be a valid entry in @cls_registry, using
kwargs in dictionary form @cfg to pass to the constructor, with @cls_type_name specified for debugging
Args:
cls_name (str): Name of the class to create. This should correspond to the actual class type, in string form
cls_registry (dict): Class registry. This should map string names of valid classes to create to the
actual class type itself
cfg (dict): Any keyword arguments to pass to the class constructor
cls_type_descriptor (str): Description of the class type being created. This can be any string and is used
solely for debugging purposes
Returns:
any: Created class instance
"""
# Make sure the requested class type is valid
assert_valid_key(key=cls_name, valid_keys=cls_registry, name=f"{cls_type_descriptor} type")
# Grab the kwargs relevant for the specific class
cls = cls_registry[cls_name]
cls_kwargs = extract_class_init_kwargs_from_dict(cls=cls, dic=cfg, copy=False)
# Create the class
return cls(**cls_kwargs)
def get_uuid(name, n_digits=8):
"""
Helper function to create a unique @n_digits uuid given a unique @name
Args:
name (str): Name of the object or class
n_digits (int): Number of digits of the uuid, default is 8
Returns:
int: uuid
"""
return abs(hash(name)) % (10 ** n_digits)
def camel_case_to_snake_case(camel_case_text):
"""
Helper function to convert a camel case text to snake case, e.g. "StrawberrySmoothie" -> "strawberry_smoothie"
Args:
camel_case_text (str): Text in camel case
Returns:
str: snake case text
"""
return re.sub(r'(?<!^)(?=[A-Z])', '_', camel_case_text).lower()
def snake_case_to_camel_case(snake_case_text):
"""
Helper function to convert a snake case text to camel case, e.g. "strawberry_smoothie" -> "StrawberrySmoothie"
Args:
snake_case_text (str): Text in snake case
Returns:
str: camel case text
"""
return ''.join(item.title() for item in snake_case_text.split('_'))
def meets_minimum_version(test_version, minimum_version):
"""
Verify that @test_version meets the @minimum_version
Args:
test_version (str): Python package version. Should be, e.g., 0.26.1
minimum_version (str): Python package version to test against. Should be, e.g., 0.27.2
Returns:
bool: Whether @test_version meets @minimum_version
"""
test_nums = [int(num) for num in test_version.split(".")]
minimum_nums = [int(num) for num in minimum_version.split(".")]
assert len(test_nums) == 3
assert len(minimum_nums) == 3
for test_num, minimum_num in zip(test_nums, minimum_nums):
if test_num > minimum_num:
return True
elif test_num < minimum_num:
return False
# Otherwise, we continue through all sub-versions
# If we get here, that means test_version == threshold_version, so this is a success
return True
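# Hedged doctest-style illustration of meets_minimum_version:
#     >>> meets_minimum_version("0.27.2", "0.26.1")
#     True
#     >>> meets_minimum_version("0.26.0", "0.26.1")
#     False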
class UniquelyNamed:
"""
Simple class that implements a name property, that must be implemented by a subclass. Note that any @Named
entity must be UNIQUE!
"""
def __init__(self, *args, **kwargs):
global NAMES
# Register this object, making sure it's name is unique
assert self.name not in NAMES, \
f"UniquelyNamed object with name {self.name} already exists!"
NAMES.add(self.name)
def remove_names(self):
"""
        Checks if self.name exists in the global NAMES registry, and deletes it if so.
        """
# Check for this name, possibly remove it if it exists
if self.name in NAMES:
NAMES.remove(self.name)
@property
def name(self):
"""
Returns:
str: Name of this instance. Must be unique!
"""
raise NotImplementedError
class UniquelyNamedNonInstance:
"""
Identical to UniquelyNamed, but intended for non-instanceable classes
"""
def __init_subclass__(cls, **kwargs):
global CLASS_NAMES
# Register this object, making sure it's name is unique
assert cls.name not in CLASS_NAMES, \
f"UniquelyNamed class with name {cls.name} already exists!"
CLASS_NAMES.add(cls.name)
@classproperty
def name(cls):
"""
Returns:
str: Name of this instance. Must be unique!
"""
raise NotImplementedError
class Registerable:
"""
Simple class template that provides an abstract interface for registering classes.
"""
def __init_subclass__(cls, **kwargs):
"""
Registers all subclasses as part of this registry. This is useful to decouple internal codebase from external
user additions. This way, users can add their custom subclasses by simply extending this class,
and it will automatically be registered internally. This allows users to then specify their classes
directly in string-form in e.g., their config files, without having to manually set the str-to-class mapping
in our code.
"""
cls._register_cls()
@classmethod
def _register_cls(cls):
"""
Register this class. Can be extended by subclass.
"""
# print(f"registering: {cls.__name__}")
# print(f"registry: {cls._cls_registry}", cls.__name__ not in cls._cls_registry)
# print(f"do not register: {cls._do_not_register_classes}", cls.__name__ not in cls._do_not_register_classes)
# input()
if cls.__name__ not in cls._cls_registry and cls.__name__ not in cls._do_not_register_classes:
cls._cls_registry[cls.__name__] = cls
@classproperty
def _do_not_register_classes(cls):
"""
Returns:
set of str: Name(s) of classes that should not be registered. Default is empty set.
Subclasses that shouldn't be added should call super() and then add their own class name to the set
"""
return set()
@classproperty
def _cls_registry(cls):
"""
Returns:
dict: Mapping from all registered class names to their classes. This should be a REFERENCE
to some external, global dictionary that will be filled-in at runtime.
"""
raise NotImplementedError()
class Serializable:
"""
Simple class that provides an abstract interface to dump / load states, optionally with serialized functionality
as well.
"""
@property
def state_size(self):
"""
Returns:
int: Size of this object's serialized state
"""
raise NotImplementedError()
def _dump_state(self):
"""
Dumps the state of this object in dictionary form (can be empty). Should be implemented by subclass.
Returns:
dict: Keyword-mapped states of this object
"""
raise NotImplementedError()
def dump_state(self, serialized=False):
"""
Dumps the state of this object in either dictionary of flattened numerical form.
Args:
            serialized (bool): If True, will return the state of this object as a 1D numpy array. Otherwise, will return
a (potentially nested) dictionary of states for this object
Returns:
dict or n-array: Either:
- Keyword-mapped states of this object, or
- encoded + serialized, 1D numerical np.array capturing this object's state, where n is @self.state_size
"""
state = self._dump_state()
return self.serialize(state=state) if serialized else state
def _load_state(self, state):
"""
Load the internal state to this object as specified by @state. Should be implemented by subclass.
Args:
state (dict): Keyword-mapped states of this object to set
"""
raise NotImplementedError()
def load_state(self, state, serialized=False):
"""
Deserializes and loads this object's state based on @state
Args:
state (dict or n-array): Either:
- Keyword-mapped states of this object, or
- encoded + serialized, 1D numerical np.array capturing this object's state, where n is @self.state_size
            serialized (bool): If True, will interpret @state as a 1D numpy array. Otherwise, will assume the input is
a (potentially nested) dictionary of states for this object
"""
state = self.deserialize(state=state) if serialized else state
self._load_state(state=state)
def _serialize(self, state):
"""
Serializes nested dictionary state @state into a flattened 1D numpy array for encoding efficiency.
Should be implemented by subclass.
Args:
state (dict): Keyword-mapped states of this object to encode. Should match structure of output from
self._dump_state()
Returns:
n-array: encoded + serialized, 1D numerical np.array capturing this object's state
"""
raise NotImplementedError()
def serialize(self, state):
"""
Serializes nested dictionary state @state into a flattened 1D numpy array for encoding efficiency.
Should be implemented by subclass.
Args:
state (dict): Keyword-mapped states of this object to encode. Should match structure of output from
self._dump_state()
Returns:
n-array: encoded + serialized, 1D numerical np.array capturing this object's state
"""
        # Simply returns self._serialize() for now; this is for future-proofing
return self._serialize(state=state)
def _deserialize(self, state):
"""
De-serializes flattened 1D numpy array @state into nested dictionary state.
Should be implemented by subclass.
Args:
state (n-array): encoded + serialized, 1D numerical np.array capturing this object's state
Returns:
2-tuple:
- dict: Keyword-mapped states of this object. Should match structure of output from
self._dump_state()
- int: current index of the flattened state vector that is left off. This is helpful for subclasses
that inherit partial deserializations from parent classes, and need to know where the
deserialization left off before continuing.
"""
raise NotImplementedError
def deserialize(self, state):
"""
De-serializes flattened 1D numpy array @state into nested dictionary state.
Should be implemented by subclass.
Args:
state (n-array): encoded + serialized, 1D numerical np.array capturing this object's state
Returns:
dict: Keyword-mapped states of this object. Should match structure of output from
self._dump_state()
"""
# Sanity check the idx with the expected state size
state_dict, idx = self._deserialize(state=state)
assert idx == self.state_size, f"Invalid state deserialization occurred! Expected {self.state_size} total " \
f"values to be deserialized, only {idx} were."
return state_dict
class SerializableNonInstance:
"""
Identical to Serializable, but intended for non-instanceable classes
"""
@classproperty
def state_size(cls):
"""
Returns:
int: Size of this object's serialized state
"""
raise NotImplementedError()
@classmethod
def _dump_state(cls):
"""
Dumps the state of this object in dictionary form (can be empty). Should be implemented by subclass.
Returns:
dict: Keyword-mapped states of this object
"""
raise NotImplementedError()
@classmethod
def dump_state(cls, serialized=False):
"""
Dumps the state of this object in either dictionary of flattened numerical form.
Args:
            serialized (bool): If True, will return the state of this object as a 1D numpy array. Otherwise, will return
a (potentially nested) dictionary of states for this object
Returns:
dict or n-array: Either:
- Keyword-mapped states of this object, or
- encoded + serialized, 1D numerical np.array capturing this object's state, where n is @self.state_size
"""
state = cls._dump_state()
return cls.serialize(state=state) if serialized else state
@classmethod
def _load_state(cls, state):
"""
Load the internal state to this object as specified by @state. Should be implemented by subclass.
Args:
state (dict): Keyword-mapped states of this object to set
"""
raise NotImplementedError()
@classmethod
def load_state(cls, state, serialized=False):
"""
Deserializes and loads this object's state based on @state
Args:
state (dict or n-array): Either:
- Keyword-mapped states of this object, or
- encoded + serialized, 1D numerical np.array capturing this object's state, where n is @self.state_size
            serialized (bool): If True, will interpret @state as a 1D numpy array. Otherwise, will assume the input is
a (potentially nested) dictionary of states for this object
"""
state = cls.deserialize(state=state) if serialized else state
cls._load_state(state=state)
@classmethod
def _serialize(cls, state):
"""
Serializes nested dictionary state @state into a flattened 1D numpy array for encoding efficiency.
Should be implemented by subclass.
Args:
state (dict): Keyword-mapped states of this object to encode. Should match structure of output from
self._dump_state()
Returns:
n-array: encoded + serialized, 1D numerical np.array capturing this object's state
"""
raise NotImplementedError()
@classmethod
def serialize(cls, state):
"""
Serializes nested dictionary state @state into a flattened 1D numpy array for encoding efficiency.
Should be implemented by subclass.
Args:
state (dict): Keyword-mapped states of this object to encode. Should match structure of output from
self._dump_state()
Returns:
n-array: encoded + serialized, 1D numerical np.array capturing this object's state
"""
        # Simply returns cls._serialize() for now; this is for future-proofing
return cls._serialize(state=state)
@classmethod
def _deserialize(cls, state):
"""
De-serializes flattened 1D numpy array @state into nested dictionary state.
Should be implemented by subclass.
Args:
state (n-array): encoded + serialized, 1D numerical np.array capturing this object's state
Returns:
2-tuple:
- dict: Keyword-mapped states of this object. Should match structure of output from
self._dump_state()
- int: current index of the flattened state vector that is left off. This is helpful for subclasses
that inherit partial deserializations from parent classes, and need to know where the
deserialization left off before continuing.
"""
raise NotImplementedError
@classmethod
def deserialize(cls, state):
"""
De-serializes flattened 1D numpy array @state into nested dictionary state.
Should be implemented by subclass.
Args:
state (n-array): encoded + serialized, 1D numerical np.array capturing this object's state
Returns:
dict: Keyword-mapped states of this object. Should match structure of output from
self._dump_state()
"""
# Sanity check the idx with the expected state size
state_dict, idx = cls._deserialize(state=state)
assert idx == cls.state_size, f"Invalid state deserialization occurred! Expected {cls.state_size} total " \
f"values to be deserialized, only {idx} were."
return state_dict
class CachedFunctions:
"""
Thin object which owns a dictionary in which each entry should be a function -- when a key is queried via get()
and it exists, it will call the function exactly once, and cache the value so that subsequent calls will refer
to the cached value.
    This allows the dictionary to be created with potentially expensive operations, where each entry is
    evaluated at most once, on its first query.
"""
def __init__(self, **kwargs):
# Create internal dict to store functions
self._fcns = dict()
for kwarg in kwargs:
self._fcns[kwarg] = kwargs[kwarg]
def __getitem__(self, item):
return self.get(name=item)
def __setitem__(self, key, value):
self.add_fcn(name=key, fcn=value)
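    # NOTE: functools.cache memoizes on the full call signature (self, name, *args, **kwargs),
    # so all arguments must be hashable, and re-registering a function under a name that has
    # already been queried will not invalidate previously cached results.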
@cache
def get(self, name, *args, **kwargs):
"""
Computes the function referenced by @name with the corresponding @args and @kwargs. Note that for a unique
set of arguments, this value will be internally cached
Args:
name (str): The name of the function to call
*args (tuple): Positional arguments to pass into the function call
            **kwargs (dict): Keyword arguments to pass into the function call
Returns:
any: Output of the function referenced by @name
"""
return self._fcns[name](*args, **kwargs)
def get_fcn(self, name):
"""
Gets the raw stored function referenced by @name
Args:
name (str): The name of the function to grab
Returns:
function: The stored function
"""
return self._fcns[name]
def get_fcn_names(self):
"""
Get all stored function names
Returns:
tuple of str: Names of stored functions
"""
return tuple(self._fcns.keys())
def add_fcn(self, name, fcn):
"""
Adds a function to the internal registry.
Args:
name (str): Name of the function. This is the name that should be queried with self.get()
fcn (function): Function to add. Can be an arbitrary signature
"""
assert callable(fcn), "Only functions can be added via add_fcn!"
self._fcns[name] = fcn
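# Illustrative sketch (editor's example, not part of the original module): CachedFunctions
# defers a potentially expensive computation until first queried, then reuses the cached value.
def _cached_functions_example():
    calls = {"n": 0}

    def expensive():
        calls["n"] += 1
        return 42

    fcns = CachedFunctions(answer=expensive)
    assert fcns["answer"] == 42   # evaluates expensive() exactly once
    assert fcns["answer"] == 42   # served from the internal cache
    assert calls["n"] == 1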
class Wrapper:
"""
Base class for all wrappers in OmniGibson
Args:
obj (any): Arbitrary python object instance to wrap
"""
def __init__(self, obj):
# Set the internal attributes -- store wrapped obj
self.wrapped_obj = obj
@classmethod
def class_name(cls):
return cls.__name__
def _warn_double_wrap(self):
"""
Utility function that checks if we're accidentally trying to double wrap an env
Raises:
Exception: [Double wrapping env]
"""
obj = self.wrapped_obj
while True:
if isinstance(obj, Wrapper):
if obj.class_name() == self.class_name():
raise Exception("Attempted to double wrap with Wrapper: {}".format(self.__class__.__name__))
obj = obj.wrapped_obj
else:
break
@property
def unwrapped(self):
"""
Grabs unwrapped object
Returns:
any: The unwrapped object instance
"""
return self.wrapped_obj.unwrapped if hasattr(self.wrapped_obj, "unwrapped") else self.wrapped_obj
# this method is a fallback option on any methods the original env might support
def __getattr__(self, attr):
# If we're querying wrapped_obj, raise an error
if attr == "wrapped_obj":
raise AttributeError("wrapped_obj attribute not initialized yet!")
# Sanity check to make sure wrapped obj is not None -- if so, raise error
assert self.wrapped_obj is not None, f"Cannot access attribute {attr} since wrapped_obj is None!"
# using getattr ensures that both __getattribute__ and __getattr__ (fallback) get called
# (see https://stackoverflow.com/questions/3278077/difference-between-getattr-vs-getattribute)
orig_attr = getattr(self.wrapped_obj, attr)
if callable(orig_attr):
def hooked(*args, **kwargs):
result = orig_attr(*args, **kwargs)
# prevent wrapped_class from becoming unwrapped
if id(result) == id(self.wrapped_obj):
return self
return result
return hooked
else:
return orig_attr
def __setattr__(self, key, value):
# Call setattr on wrapped obj if it has the attribute, otherwise, operate on this object
if hasattr(self, "wrapped_obj") and self.wrapped_obj is not None and hasattr(self.wrapped_obj, key):
setattr(self.wrapped_obj, key, value)
else:
super().__setattr__(key, value)
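# Illustrative sketch (editor's example, not part of the original module): attribute and
# method accesses fall through to the wrapped object, and methods that return the wrapped
# object are re-wrapped (via the id check in hooked()) so the wrapper chain is preserved.
def _wrapper_example():
    class Env:
        def __init__(self):
            self.steps = 0

        def step(self):
            self.steps += 1
            return self  # returning self would normally "unwrap" the chain

    wrapped = Wrapper(Env())
    assert wrapped.steps == 0          # falls through via __getattr__
    assert wrapped.step() is wrapped   # result is re-wrapped, not the raw Env
    assert wrapped.steps == 1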
def nums2array(nums, dim, dtype=float):
"""
Converts input @nums into numpy array of length @dim. If @nums is a single number, broadcasts input to
corresponding dimension size @dim before converting into numpy array
Args:
        nums (float or array): Numbers to map to numpy array
        dim (int): Size of array to broadcast input to
        dtype (type): dtype of the output numpy array
    Returns:
        np.array: Mapped input numbers
"""
# Make sure the inputted nums isn't a string
assert not isinstance(nums, str), "Only numeric types are supported for this operation!"
out = np.array(nums, dtype=dtype) if isinstance(nums, Iterable) else np.ones(dim, dtype=dtype) * nums
return out
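# Illustrative sketch (editor's example, not part of the original module): a scalar is
# broadcast to the requested dimension, while an iterable is passed through unchanged.
def _nums2array_example():
    assert np.allclose(nums2array(0.5, dim=3), [0.5, 0.5, 0.5])
    assert np.allclose(nums2array([1.0, 2.0, 3.0], dim=3), [1.0, 2.0, 3.0])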
def clear():
"""
Clear state tied to singleton classes
"""
NAMES.clear()
CLASS_NAMES.clear()
StanfordVL/OmniGibson/omnigibson/utils/object_state_utils.py
import cv2
import numpy as np
from IPython import embed
from scipy.spatial.transform import Rotation as R
from scipy.spatial import ConvexHull, distance_matrix
import omnigibson as og
from omnigibson.macros import create_module_macros, Dict, macros
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.contact_bodies import ContactBodies
from omnigibson.utils import sampling_utils
from omnigibson.utils.constants import PrimType
from omnigibson.utils.ui_utils import debug_breakpoint
import omnigibson.utils.transform_utils as T
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEFAULT_HIGH_LEVEL_SAMPLING_ATTEMPTS = 10
m.DEFAULT_LOW_LEVEL_SAMPLING_ATTEMPTS = 10
m.ON_TOP_RAY_CASTING_SAMPLING_PARAMS = Dict({
"bimodal_stdev_fraction": 1e-6,
"bimodal_mean_fraction": 1.0,
"aabb_offset_fraction": 0.02,
"max_sampling_attempts": 50,
})
m.INSIDE_RAY_CASTING_SAMPLING_PARAMS = Dict({
"bimodal_stdev_fraction": 0.4,
"bimodal_mean_fraction": 0.5,
"aabb_offset_fraction": -0.02,
"max_sampling_attempts": 100,
})
m.UNDER_RAY_CASTING_SAMPLING_PARAMS = Dict({
"bimodal_stdev_fraction": 1e-6,
"bimodal_mean_fraction": 0.5,
"aabb_offset_fraction": 0.02,
"max_sampling_attempts": 50,
})
def sample_cuboid_for_predicate(predicate, on_obj, bbox_extent):
if predicate == "onTop":
params = m.ON_TOP_RAY_CASTING_SAMPLING_PARAMS
elif predicate == "inside":
params = m.INSIDE_RAY_CASTING_SAMPLING_PARAMS
elif predicate == "under":
params = m.UNDER_RAY_CASTING_SAMPLING_PARAMS
else:
raise ValueError(f"predicate must be onTop, under or inside in order to use ray casting-based "
f"kinematic sampling, but instead got: {predicate}")
if predicate == "under":
start_points, end_points = sampling_utils.sample_raytest_start_end_symmetric_bimodal_distribution(
obj=on_obj,
num_samples=1,
axis_probabilities=[0, 0, 1],
**params,
)
return sampling_utils.sample_cuboid_on_object(
obj=None,
start_points=start_points,
end_points=end_points,
ignore_objs=[on_obj],
cuboid_dimensions=bbox_extent,
refuse_downwards=True,
undo_cuboid_bottom_padding=True,
max_angle_with_z_axis=0.17,
hit_proportion=0.0, # rays will NOT hit the object itself, but the surface below it.
)
else:
return sampling_utils.sample_cuboid_on_object_symmetric_bimodal_distribution(
on_obj,
num_samples=1,
axis_probabilities=[0, 0, 1],
cuboid_dimensions=bbox_extent,
refuse_downwards=True,
undo_cuboid_bottom_padding=True,
max_angle_with_z_axis=0.17,
**params,
)
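# Illustrative sketch (editor's example, not part of the original module): selecting the
# ray-casting parameters by predicate and sampling a cuboid footprint on a surface object.
# "cabinet" is a hypothetical, already-imported DatasetObject; requires a running simulator.
def _sample_cuboid_for_predicate_example(cabinet):
    bbox_extent = np.array([0.3, 0.2, 0.25])  # (x, y, z) extents of the object to place
    results = sample_cuboid_for_predicate("onTop", cabinet, bbox_extent)
    sampled_position = results[0][0]  # None if sampling failed
    return sampled_position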
def sample_kinematics(
predicate,
objA,
objB,
max_trials=m.DEFAULT_LOW_LEVEL_SAMPLING_ATTEMPTS,
z_offset=0.05,
skip_falling=False,
):
"""
Samples the given @predicate kinematic state for @objA with respect to @objB
Args:
predicate (str): Name of the predicate to sample, e.g.: "onTop"
objA (StatefulObject): Object whose state should be sampled. e.g.: for sampling a microwave
on a cabinet, @objA is the microwave
objB (StatefulObject): Object who is the reference point for @objA's state. e.g.: for sampling
a microwave on a cabinet, @objB is the cabinet
max_trials (int): Number of attempts for sampling
z_offset (float): Z-offset to apply to the sampled pose
skip_falling (bool): Whether to let @objA fall after its position is sampled or not
Returns:
bool: True if successfully sampled, else False
"""
assert z_offset > 0.5 * 9.81 * (og.sim.get_physics_dt() ** 2) + 0.02,\
f"z_offset {z_offset} is too small for the current physics_dt {og.sim.get_physics_dt()}"
# Wake objects accordingly and make sure both are kept still
objA.wake()
objB.wake()
objA.keep_still()
objB.keep_still()
# Save the state of the simulator
state = og.sim.dump_state()
# Attempt sampling
for i in range(max_trials):
pos = None
if hasattr(objA, "orientations") and objA.orientations is not None:
orientation = objA.sample_orientation()
else:
orientation = np.array([0, 0, 0, 1.0])
# Orientation needs to be set for stable_z_on_aabb to work correctly
# Position needs to be set to be very far away because the object's
# original position might be blocking rays (use_ray_casting_method=True)
old_pos = np.array([100, 100, 10])
objA.set_position_orientation(old_pos, orientation)
objA.keep_still()
# We also need to step physics to make sure the pose propagates downstream (e.g.: to Bounding Box computations)
og.sim.step_physics()
# This would slightly change because of the step_physics call.
old_pos, orientation = objA.get_position_orientation()
# Run import here to avoid circular imports
from omnigibson.objects.dataset_object import DatasetObject
if isinstance(objA, DatasetObject) and objA.prim_type == PrimType.RIGID:
# Retrieve base CoM frame-aligned bounding box parallel to the XY plane
parallel_bbox_center, parallel_bbox_orn, parallel_bbox_extents, _ = objA.get_base_aligned_bbox(
xy_aligned=True
)
else:
aabb_lower, aabb_upper = objA.states[AABB].get_value()
parallel_bbox_center = (aabb_lower + aabb_upper) / 2.0
parallel_bbox_orn = np.array([0.0, 0.0, 0.0, 1.0])
parallel_bbox_extents = aabb_upper - aabb_lower
sampling_results = sample_cuboid_for_predicate(predicate, objB, parallel_bbox_extents)
sampled_vector = sampling_results[0][0]
sampled_quaternion = sampling_results[0][2]
sampling_success = sampled_vector is not None
if sampling_success:
# Move the object from the original parallel bbox to the sampled bbox
parallel_bbox_rotation = R.from_quat(parallel_bbox_orn)
sample_rotation = R.from_quat(sampled_quaternion)
original_rotation = R.from_quat(orientation)
# The additional orientation to be applied should be the delta orientation
# between the parallel bbox orientation and the sample orientation
additional_rotation = sample_rotation * parallel_bbox_rotation.inv()
combined_rotation = additional_rotation * original_rotation
orientation = combined_rotation.as_quat()
# The delta vector between the base CoM frame and the parallel bbox center needs to be rotated
# by the same additional orientation
diff = old_pos - parallel_bbox_center
rotated_diff = additional_rotation.apply(diff)
pos = sampled_vector + rotated_diff
if pos is None:
success = False
else:
pos[2] += z_offset
objA.set_position_orientation(pos, orientation)
objA.keep_still()
og.sim.step_physics()
objA.keep_still()
success = len(objA.states[ContactBodies].get_value()) == 0
if macros.utils.sampling_utils.DEBUG_SAMPLING:
debug_breakpoint(f"sample_kinematics: {success}")
if success:
break
else:
og.sim.load_state(state)
# If we didn't succeed, try last-ditch effort
if not success and predicate in {"onTop", "inside"}:
og.sim.step_physics()
# Place objA at center of objB's AABB, offset in z direction such that their AABBs are "stacked", and let fall
# until it settles
aabb_lower_a, aabb_upper_a = objA.states[AABB].get_value()
aabb_lower_b, aabb_upper_b = objB.states[AABB].get_value()
bbox_to_obj = objA.get_position() - (aabb_lower_a + aabb_upper_a) / 2.0
desired_bbox_pos = (aabb_lower_b + aabb_upper_b) / 2.0
desired_bbox_pos[2] = aabb_upper_b[2] + (aabb_upper_a[2] - aabb_lower_a[2]) / 2.0
pos = desired_bbox_pos + bbox_to_obj
success = True
if success and not skip_falling:
objA.set_position_orientation(pos, orientation)
objA.keep_still()
# Step until either (a) max steps is reached (total of 0.5 second in sim time) or (b) contact is made, then
# step until (a) max steps is reached (restarted from 0) or (b) velocity is below some threshold
n_steps_max = int(0.5 / og.sim.get_physics_dt())
i = 0
while len(objA.states[ContactBodies].get_value()) == 0 and i < n_steps_max:
og.sim.step_physics()
i += 1
objA.keep_still()
objB.keep_still()
# Step a few times so velocity can become non-zero if the objects are moving
for i in range(5):
og.sim.step_physics()
i = 0
while np.linalg.norm(objA.get_linear_velocity()) > 1e-3 and i < n_steps_max:
og.sim.step_physics()
i += 1
# Render at the end
og.sim.render()
return success
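# Illustrative sketch (editor's example, not part of the original module): sampling an
# "onTop" placement. "microwave" and "cabinet" are hypothetical StatefulObjects that have
# already been imported into a running og.sim.
def _sample_kinematics_example(microwave, cabinet):
    # Returns True if a stable, collision-free pose was found and applied to the microwave
    return sample_kinematics("onTop", microwave, cabinet, max_trials=10, z_offset=0.05)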
def sample_cloth_on_rigid(obj, other, max_trials=40, z_offset=0.05, randomize_xy=True):
"""
Samples the cloth object @obj on the rigid object @other
Args:
obj (StatefulObject): Object whose state should be sampled. e.g.: for sampling a bed sheet on a rack,
@obj is the bed sheet
other (StatefulObject): Object who is the reference point for @obj's state. e.g.: for sampling a bed sheet
on a rack, @other is the rack
max_trials (int): Number of attempts for sampling
z_offset (float): Z-offset to apply to the sampled pose
randomize_xy (bool): Whether to randomize the XY position of the sampled pose. If False, the center of @other
will always be used.
Returns:
bool: True if successfully sampled, else False
"""
assert z_offset > 0.5 * 9.81 * (og.sim.get_physics_dt() ** 2) + 0.02,\
f"z_offset {z_offset} is too small for the current physics_dt {og.sim.get_physics_dt()}"
if not (obj.prim_type == PrimType.CLOTH and other.prim_type == PrimType.RIGID):
raise ValueError("sample_cloth_on_rigid requires obj1 is cloth and obj2 is rigid.")
state = og.sim.dump_state(serialized=False)
# Reset the cloth
obj.root_link.reset()
obj_aabb_low, obj_aabb_high = obj.states[AABB].get_value()
other_aabb_low, other_aabb_high = other.states[AABB].get_value()
# z value is always the same: the top-z of the other object + half the height of the object to be placed + offset
z_value = other_aabb_high[2] + (obj_aabb_high[2] - obj_aabb_low[2]) / 2.0 + z_offset
if randomize_xy:
# Sample a random position in the x-y plane within the other object's AABB
low = np.array([other_aabb_low[0], other_aabb_low[1], z_value])
high = np.array([other_aabb_high[0], other_aabb_high[1], z_value])
else:
# Always sample the center of the other object's AABB
low = np.array([(other_aabb_low[0] + other_aabb_high[0]) / 2.0,
(other_aabb_low[1] + other_aabb_high[1]) / 2.0,
z_value])
high = low
for _ in range(max_trials):
# Sample a random position
pos = np.random.uniform(low, high)
# Sample a random orientation in the z-axis
orn = T.euler2quat(np.array([0., 0., np.random.uniform(0, np.pi * 2)]))
obj.set_position_orientation(pos, orn)
obj.root_link.reset()
obj.keep_still()
og.sim.step_physics()
success = len(obj.states[ContactBodies].get_value()) == 0
if success:
break
else:
og.sim.load_state(state)
if success:
# Let it fall for 0.2 second always to let the cloth settle
for _ in range(int(0.2 / og.sim.get_physics_dt())):
og.sim.step_physics()
obj.keep_still()
# Render at the end
og.sim.render()
return success
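# Illustrative sketch (editor's example, not part of the original module): draping a cloth
# object on a rigid one. "sheet" (PrimType.CLOTH) and "rack" (PrimType.RIGID) are
# hypothetical StatefulObjects already imported into a running og.sim.
def _sample_cloth_on_rigid_example(sheet, rack):
    # Center the sheet over the rack rather than randomizing its XY position
    return sample_cloth_on_rigid(sheet, rack, max_trials=40, z_offset=0.05, randomize_xy=False)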
StanfordVL/OmniGibson/omnigibson/utils/physx_utils.py
import numpy as np
from omnigibson.macros import gm, create_module_macros
from omnigibson.utils.ui_utils import suppress_omni_log
import omnigibson as og
import omnigibson.lazy as lazy
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.PROTOTYPE_GRAVEYARD_POS = (100.0, 100.0, 100.0)
def create_physx_particle_system(
prim_path,
physics_scene_path,
particle_contact_offset,
visual_only=False,
smoothing=True,
anisotropy=True,
isosurface=True,
):
"""
Creates an Omniverse physx particle system at @prim_path. For post-processing visualization effects (anisotropy,
smoothing, isosurface), see the Omniverse documentation
(https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics.html?highlight=isosurface#post-processing-for-fluid-rendering)
for more info
Args:
prim_path (str): Stage path to where particle system should be created
physics_scene_path (str): Stage path to where active physicsScene prim is defined
particle_contact_offset (float): Distance between particles which triggers a collision (m)
visual_only (bool): If True, will disable collisions between particles and non-particles,
as well as self-collisions
smoothing (bool): Whether to smooth particle positions or not
anisotropy (bool): Whether to apply anisotropy post-processing when visualizing particles. Stretches generated
particles in order to make the particle cluster surface appear smoother. Useful for fluids
isosurface (bool): Whether to apply isosurface mesh to visualize particles. Uses a monolithic surface that
can have materials attached to it, useful for visualizing fluids
Returns:
UsdGeom.PhysxParticleSystem: Generated particle system prim
"""
# TODO: Add sanity check to make sure GPU dynamics are enabled
# Create particle system
stage = lazy.omni.isaac.core.utils.stage.get_current_stage()
particle_system = lazy.pxr.PhysxSchema.PhysxParticleSystem.Define(stage, prim_path)
particle_system.CreateSimulationOwnerRel().SetTargets([physics_scene_path])
# Use a smaller particle size for nicer fluid, and let the sim figure out the other offsets
particle_system.CreateParticleContactOffsetAttr().Set(particle_contact_offset)
# Possibly disable collisions if we're only visual
if visual_only:
particle_system.GetGlobalSelfCollisionEnabledAttr().Set(False)
particle_system.GetNonParticleCollisionEnabledAttr().Set(False)
if anisotropy:
# apply api and use all defaults
lazy.pxr.PhysxSchema.PhysxParticleAnisotropyAPI.Apply(particle_system.GetPrim())
if smoothing:
# apply api and use all defaults
lazy.pxr.PhysxSchema.PhysxParticleSmoothingAPI.Apply(particle_system.GetPrim())
if isosurface:
# apply api and use all defaults
lazy.pxr.PhysxSchema.PhysxParticleIsosurfaceAPI.Apply(particle_system.GetPrim())
# Make sure we're not casting shadows
primVarsApi = lazy.pxr.UsdGeom.PrimvarsAPI(particle_system.GetPrim())
primVarsApi.CreatePrimvar("doNotCastShadows", lazy.pxr.Sdf.ValueTypeNames.Bool).Set(True)
# tweak anisotropy min, max, and scale to work better with isosurface:
if anisotropy:
ani_api = lazy.pxr.PhysxSchema.PhysxParticleAnisotropyAPI.Apply(particle_system.GetPrim())
ani_api.CreateScaleAttr().Set(5.0)
ani_api.CreateMinAttr().Set(1.0) # avoids gaps in surface
ani_api.CreateMaxAttr().Set(2.0)
return particle_system
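# Illustrative sketch (editor's example, not part of the original module): creating a fluid
# particle system with all post-processing effects enabled. The stage paths are hypothetical.
def _create_water_system_example():
    return create_physx_particle_system(
        prim_path="/World/water_system",
        physics_scene_path="/World/physicsScene",
        particle_contact_offset=0.008,  # smaller offset -> finer-grained fluid (m)
        smoothing=True,
        anisotropy=True,
        isosurface=True,
    )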
def bind_material(prim_path, material_path):
"""
Binds material located at @material_path to the prim located at @prim_path.
Args:
prim_path (str): Stage path to prim to bind material to
material_path (str): Stage path to material to be bound
"""
lazy.omni.kit.commands.execute(
"BindMaterialCommand",
prim_path=prim_path,
material_path=material_path,
strength=None,
)
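# Illustrative sketch (editor's example, not part of the original module): binding a
# hypothetical material prim to the particle system created above.
def _bind_water_material_example():
    bind_material(prim_path="/World/water_system", material_path="/World/Looks/water_material")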
def create_physx_particleset_pointinstancer(
name,
particle_system_path,
physx_particle_system_path,
prototype_prim_paths,
particle_group,
positions,
self_collision=True,
fluid=False,
particle_mass=None,
particle_density=None,
orientations=None,
velocities=None,
angular_velocities=None,
scales=None,
prototype_indices=None,
enabled=True,
):
"""
Creates a particle set instancer based on a UsdGeom.PointInstancer at @prim_path on the current stage, with
the specified parameters.
Args:
name (str): Name for this point instancer
particle_system_path (str): Stage path to particle system (Scope)
physx_particle_system_path (str): Stage path to physx particle system (PhysxParticleSystem)
prototype_prim_paths (list of str): Stage path(s) to the prototypes to reference for this particle set.
particle_group (int): ID for this particle set. Particles from different groups will automatically collide
with each other. Particles in the same group will have collision behavior dictated by @self_collision
positions (list of 3-tuple or np.array): Particle (x,y,z) positions either as a list or a (N, 3) numpy array
self_collision (bool): Whether to enable particle-particle collision within the set
(as defined by @particle_group) or not
        fluid (bool): Whether to simulate the particle set as fluid or not
        particle_mass (None or float): If specified, should be per-particle mass. Otherwise, will be
            inferred from @particle_density. Note: Either @particle_mass or @particle_density must be specified!
        particle_density (None or float): If specified, should be per-particle density and is used to compute total
            point set mass. Otherwise, mass will be inferred from @particle_mass. Note: Either @particle_mass or
            @particle_density must be specified!
orientations (None or list of 4-array or np.array): Particle (x,y,z,w) quaternion orientations, either as a
list or a (N, 4) numpy array. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
velocities (None or list of 3-array or np.array): Particle (x,y,z) velocities either as a list or a (N, 3)
numpy array. If not specified, all will be set to 0
angular_velocities (None or list of 3-array or np.array): Particle (x,y,z) angular velocities either as a
list or a (N, 3) numpy array. If not specified, all will be set to 0
scales (None or list of 3-array or np.array): Particle (x,y,z) scales either as a list or a (N, 3)
numpy array. If not specified, all will be set to 1.0
prototype_indices (None or list of int): If specified, should specify which prototype should be used for
each particle. If None, will use all 0s (i.e.: the first prototype created)
enabled (bool): Whether to enable this particle instancer. If not enabled, then no physics will be used
Returns:
UsdGeom.PointInstancer: Created point instancer prim
"""
stage = og.sim.stage
n_particles = len(positions)
particle_system = lazy.omni.isaac.core.utils.prims.get_prim_at_path(physx_particle_system_path)
# Create point instancer scope
prim_path = f"{particle_system_path}/{name}"
assert not stage.GetPrimAtPath(prim_path), f"Cannot create an instancer scope, scope already exists at {prim_path}!"
stage.DefinePrim(prim_path, "Scope")
# Create point instancer
instancer_prim_path = f"{prim_path}/instancer"
assert not stage.GetPrimAtPath(instancer_prim_path), f"Cannot create a PointInstancer prim, prim already exists at {instancer_prim_path}!"
instancer = lazy.pxr.UsdGeom.PointInstancer.Define(stage, instancer_prim_path)
is_isosurface = particle_system.HasAPI(lazy.pxr.PhysxSchema.PhysxParticleIsosurfaceAPI) and \
particle_system.GetAttribute("physxParticleIsosurface:isosurfaceEnabled").Get()
# Add prototype mesh prim paths to the prototypes relationship attribute for this point set
# We need to make copies of prototypes for each instancer currently because particles won't render properly
# if multiple instancers share the same prototypes for some reason
mesh_list = instancer.GetPrototypesRel()
prototype_prims = []
for i, original_path in enumerate(prototype_prim_paths):
prototype_prim_path = f"{prim_path}/prototype{i}"
lazy.omni.kit.commands.execute("CopyPrim", path_from=original_path, path_to=prototype_prim_path)
prototype_prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prototype_prim_path)
# Make sure this prim is invisible if we're using isosurface, and vice versa.
imageable = lazy.pxr.UsdGeom.Imageable(prototype_prim)
if is_isosurface:
imageable.MakeInvisible()
else:
imageable.MakeVisible()
# Move the prototype to the graveyard position so that it won't be visible to the agent
# We can't directly hide the prototype because it will also hide all the generated particles (if not isosurface)
prototype_prim.GetAttribute("xformOp:translate").Set(m.PROTOTYPE_GRAVEYARD_POS)
mesh_list.AddTarget(lazy.pxr.Sdf.Path(prototype_prim_path))
prototype_prims.append(prototype_prim)
# Set particle instance default data
prototype_indices = [0] * n_particles if prototype_indices is None else prototype_indices
if orientations is None:
orientations = np.zeros((n_particles, 4))
orientations[:, -1] = 1.0
orientations = np.array(orientations)[:, [3, 0, 1, 2]] # x,y,z,w --> w,x,y,z
velocities = np.zeros((n_particles, 3)) if velocities is None else velocities
angular_velocities = np.zeros((n_particles, 3)) if angular_velocities is None else angular_velocities
scales = np.ones((n_particles, 3)) if scales is None else scales
assert particle_mass is not None or particle_density is not None, \
"Either particle mass or particle density must be specified when creating particle instancer!"
particle_mass = 0.0 if particle_mass is None else particle_mass
particle_density = 0.0 if particle_density is None else particle_density
# Set particle states
instancer.GetProtoIndicesAttr().Set(prototype_indices)
instancer.GetPositionsAttr().Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(positions))
instancer.GetOrientationsAttr().Set(lazy.pxr.Vt.QuathArray.FromNumpy(orientations))
instancer.GetVelocitiesAttr().Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(velocities))
instancer.GetAngularVelocitiesAttr().Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(angular_velocities))
instancer.GetScalesAttr().Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(scales))
# Take a render step to "lock" the visuals of the prototypes at the graveyard position
# This needs to happen AFTER setting particle states
# We suppress a known warning that we have no control over where omni complains about a prototype
# not being populated yet
with suppress_omni_log(channels=["omni.hydra.scene_delegate.plugin"]):
og.sim.render()
# Then we move the prototypes back to zero offset because otherwise all the generated particles will be offset by
# the graveyard position. At this point, the prototypes themselves no longer appear at the zero offset (locked at
# the graveyard position), which is desirable because we don't want the agent to see the prototypes themselves.
for prototype_prim in prototype_prims:
prototype_prim.GetAttribute("xformOp:translate").Set((0.0, 0.0, 0.0))
instancer_prim = instancer.GetPrim()
lazy.omni.physx.scripts.particleUtils.configure_particle_set(
instancer_prim,
physx_particle_system_path,
self_collision,
fluid,
particle_group,
particle_mass * n_particles,
particle_density,
)
# Set whether the instancer is enabled or not
instancer_prim.GetAttribute("physxParticle:particleEnabled").Set(enabled)
# Render three more times to fully propagate changes
# Omni always complains about a low-level USD thing we have no control over
# so we suppress the warnings
with suppress_omni_log(channels=["omni.usd"]):
for i in range(3):
og.sim.render()
# Isosurfaces require an additional physics timestep before they're actually rendered
if is_isosurface:
og.log.warning(f"Creating an instancer that uses isosurface {instancer_prim_path}. "
f"The rendering of these particles will have a delay of one timestep.")
return instancer_prim
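# Illustrative sketch (editor's example, not part of the original module): instantiating a
# small block of fluid particles. The stage paths and the prototype prim are hypothetical;
# a physx particle system must already exist at @physx_particle_system_path.
def _create_particleset_example():
    # 10 x 10 x 10 grid of particles spaced 1 cm apart
    side = np.linspace(0, 0.09, 10)
    positions = np.stack(np.meshgrid(side, side, side, indexing="ij"), axis=-1).reshape(-1, 3)
    return create_physx_particleset_pointinstancer(
        name="waterInstancer0",
        particle_system_path="/World/water_system_scope",
        physx_particle_system_path="/World/water_system",
        prototype_prim_paths=["/World/water_prototype"],
        particle_group=0,
        positions=positions,
        self_collision=True,
        fluid=True,
        particle_density=1000.0,  # water-like density; per-particle mass is inferred
    )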
def apply_force_at_pos(prim, force, pos):
    """Applies force @force (world frame) to rigid prim @prim at global position @pos."""
    prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(prim.prim_path)
    og.sim.psi.apply_force_at_pos(og.sim.stage_id, prim_id, force, pos)
def apply_torque(prim, forward_vect, roll_torque_scalar):
    """Applies torque @forward_vect * @roll_torque_scalar (world frame) to rigid prim @prim."""
    prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(prim.prim_path)
    og.sim.psi.apply_torque(og.sim.stage_id, prim_id, forward_vect * roll_torque_scalar)
StanfordVL/OmniGibson/omnigibson/utils/sampling_utils.py | import itertools
from collections import Counter, defaultdict
import numpy as np
import time
import trimesh
from scipy.spatial.transform import Rotation as R
from scipy.stats import truncnorm
import omnigibson as og
from omnigibson.macros import create_module_macros, gm
import omnigibson.utils.transform_utils as T
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEBUG_SAMPLING = False
m.DEFAULT_AABB_OFFSET_FRACTION = 0.02
m.DEFAULT_PARALLEL_RAY_NORMAL_ANGLE_TOLERANCE = 1.0  # radians (~57 degrees)
m.DEFAULT_HIT_TO_PLANE_THRESHOLD = 0.05
m.DEFAULT_MAX_ANGLE_WITH_Z_AXIS = 3 * np.pi / 4
m.DEFAULT_MAX_SAMPLING_ATTEMPTS = 10
m.DEFAULT_CUBOID_BOTTOM_PADDING = 0.005
# We will cast an additional parallel ray for each additional this much distance.
m.DEFAULT_NEW_RAY_PER_HORIZONTAL_DISTANCE = 0.1
m.DEFAULT_HIT_PROPORTION = 0.8
def fit_plane(points, refusal_log):
"""
Fits a plane to the given 3D points.
Copied from https://stackoverflow.com/a/18968498
Args:
points ((k, 3)-array): np.array of shape (k, 3)
        refusal_log (list): Debugging list to append error messages to
Returns:
2-tuple:
- 3-array: (x,y,z) points' centroid
- 3-array: (x,y,z) normal of the fitted plane
"""
if points.shape[0] < points.shape[1]:
if m.DEBUG_SAMPLING:
refusal_log.append(f"insufficient points to fit a 3D plane: needs 3, has {points.shape[0]}.")
return None, None
ctr = points.mean(axis=0)
x = points - ctr
normal = np.linalg.svd(np.dot(x.T, x))[0][:, -1]
normal /= np.linalg.norm(normal)
return ctr, normal
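# Illustrative sketch (editor's example, not part of the original module): fitting a plane
# to noiseless points on z = 1 recovers the centroid and a unit normal along +/- z.
def _fit_plane_example():
    points = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 1.0, 1.0]])
    ctr, normal = fit_plane(points, refusal_log=[])
    assert np.allclose(ctr, [0.5, 0.5, 1.0])
    assert np.allclose(np.abs(normal), [0.0, 0.0, 1.0])  # sign of the normal is arbitrary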
def check_distance_to_plane(points, plane_centroid, plane_normal, hit_to_plane_threshold, refusal_log):
"""
Calculates whether points are within @hit_to_plane_threshold distance to plane defined by @plane_centroid
and @plane_normal
Args:
points ((k, 3)-array): np.array of shape (k, 3)
plane_centroid (3-array): (x,y,z) points' centroid
plane_normal (3-array): (x,y,z) normal of the fitted plane
hit_to_plane_threshold (float): Threshold distance to check between @points and plane
        refusal_log (list): Debugging list to append error messages to
Returns:
bool: True if all points are within @hit_to_plane_threshold distance to plane, otherwise False
"""
distances = get_distance_to_plane(points, plane_centroid, plane_normal)
if np.any(distances > hit_to_plane_threshold):
if m.DEBUG_SAMPLING:
refusal_log.append("distances to plane: %r" % distances)
return False
return True
def get_distance_to_plane(points, plane_centroid, plane_normal):
"""
Computes distance from @points to plane defined by @plane_centroid and @plane_normal
Args:
points ((k, 3)-array): np.array of shape (k, 3)
plane_centroid (3-array): (x,y,z) points' centroid
plane_normal (3-array): (x,y,z) normal of the fitted plane
Returns:
k-array: Absolute distances from each point to the plane
"""
return np.abs(np.dot(points - plane_centroid, plane_normal))
def get_projection_onto_plane(points, plane_centroid, plane_normal):
    """
    Computes @points' projection onto the plane defined by @plane_centroid and @plane_normal
    Args:
        points ((k, 3)-array): np.array of shape (k, 3)
        plane_centroid (3-array): (x,y,z) points' centroid
        plane_normal (3-array): (x,y,z) normal of the fitted plane
    Returns:
        (k,3)-array: Points' positions projected onto the plane
    """
    # Use the *signed* distance along the normal so that points on either side of the plane
    # land on it (subtracting the absolute distance would push points on the negative side
    # further away instead of projecting them).
    signed_distances = np.dot(points - plane_centroid, plane_normal)
    return points - np.outer(signed_distances, plane_normal)
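# Illustrative sketch (editor's example, not part of the original module): distances are
# absolute offsets along the normal, and projecting removes exactly that (signed) offset.
def _plane_helpers_example():
    centroid = np.array([0.0, 0.0, 0.0])
    normal = np.array([0.0, 0.0, 1.0])
    points = np.array([[0.0, 0.0, 0.3], [1.0, 2.0, -0.2]])
    assert np.allclose(get_distance_to_plane(points, centroid, normal), [0.3, 0.2])
    projected = get_projection_onto_plane(points, centroid, normal)
    assert np.allclose(projected[:, 2], 0.0)  # both points land on the z = 0 plane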
def draw_debug_markers(hit_positions, radius=0.01):
"""
Helper method to generate and place debug markers at @hit_positions
Args:
hit_positions ((n, 3)-array): Desired positions to place markers at
radius (float): Radius of the generated virtual marker
"""
# Import here to avoid circular imports
from omnigibson.objects.primitive_object import PrimitiveObject
color = np.concatenate([np.random.rand(3), [1]])
for vec in hit_positions:
time_str = str(time.time())
cur_time = time_str[(time_str.index(".") + 1):]
obj = PrimitiveObject(
prim_path=f"/World/debug_marker_{cur_time}",
name=f"debug_marker_{cur_time}",
primitive_type="Sphere",
visual_only=True,
rgba=color,
radius=radius,
)
og.sim.import_object(obj)
obj.set_position(vec)
def get_parallel_rays(
source, destination, offset, new_ray_per_horizontal_distance
):
"""
Given an input ray described by a source and a destination, sample parallel rays around it as the center.
The parallel rays start at the corners of a square of edge length `offset` centered on `source`, with the square
orthogonal to the ray direction. That is, the cast rays are the height edges of a square-base cuboid with bases
centered on `source` and `destination`.
Args:
source (3-array): (x,y,z) source of the ray to sample parallel rays of.
        destination (3-array): (x,y,z) destination of the ray to sample parallel rays of.
offset (float): Orthogonal distance of parallel rays from input ray.
new_ray_per_horizontal_distance (float): Step in offset beyond which an additional split will be applied in the
parallel ray grid (which at minimum is 3x3 at the AABB corners & center).
Returns:
3-tuple:
- list: generated sources from the original ray
- list: generated destinations from the original ray
            - (W, H, 2)-array: unflattened, untransformed grid of the parallel rays' 2D offsets in the plane orthogonal to the ray
"""
ray_direction = destination - source
# Get an orthogonal vector using a random vector.
random_vector = np.random.rand(3)
orthogonal_vector_1 = np.cross(ray_direction, random_vector)
orthogonal_vector_1 /= np.linalg.norm(orthogonal_vector_1)
# Get a second vector orthogonal to both the ray and the first vector.
orthogonal_vector_2 = -np.cross(ray_direction, orthogonal_vector_1)
orthogonal_vector_2 /= np.linalg.norm(orthogonal_vector_2)
orthogonal_vectors = np.array([orthogonal_vector_1, orthogonal_vector_2])
assert np.all(np.isfinite(orthogonal_vectors))
# Convert the offset into a 2-vector if it already isn't one.
offset = np.array([1, 1]) * offset
# Compute the grid of rays
steps = (offset / new_ray_per_horizontal_distance).astype(int) * 2 + 1
steps = np.maximum(steps, 3)
x_range = np.linspace(-offset[0], offset[0], steps[0])
y_range = np.linspace(-offset[1], offset[1], steps[1])
ray_grid = np.dstack(np.meshgrid(x_range, y_range, indexing="ij"))
ray_grid_flattened = ray_grid.reshape(-1, 2)
# Apply the grid onto the orthogonal vectors to obtain the rays in the world frame.
sources = [source + np.dot(offsets, orthogonal_vectors) for offsets in ray_grid_flattened]
destinations = [destination + np.dot(offsets, orthogonal_vectors) for offsets in ray_grid_flattened]
return sources, destinations, ray_grid
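# Illustrative sketch (editor's example, not part of the original module): with an offset of
# 0.15 m and one extra ray per 0.1 m of offset, steps = int(0.15 / 0.1) * 2 + 1 = 3, so a
# 3 x 3 grid of 9 parallel rays is generated around the central ray.
def _parallel_rays_example():
    source = np.array([0.0, 0.0, 1.0])
    destination = np.array([0.0, 0.0, 0.0])
    sources, destinations, ray_grid = get_parallel_rays(
        source, destination, offset=0.15, new_ray_per_horizontal_distance=0.1
    )
    assert len(sources) == len(destinations) == 9
    assert ray_grid.shape == (3, 3, 2)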
def sample_origin_positions(mins, maxes, count, bimodal_mean_fraction, bimodal_stdev_fraction, axis_probabilities):
"""
Sample ray casting origin positions with a given distribution.
The way the sampling works is that for each particle, it will sample two coordinates uniformly and one
using a symmetric, bimodal truncated normal distribution. This way, the particles will mostly be close to the faces
of the AABB (given a correctly parameterized bimodal truncated normal) and will be spread across each face,
but there will still be a small number of particles spawned inside the object if it has an interior.
Args:
mins (3-array): the minimum coordinate along each axis.
maxes (3-array): the maximum coordinate along each axis.
count (int): Number of origins to sample.
bimodal_mean_fraction (float): the mean of one side of the symmetric bimodal distribution as a fraction of the
min-max range.
bimodal_stdev_fraction (float): the standard deviation of one side of the symmetric bimodal distribution as a
fraction of the min-max range.
axis_probabilities (3-array): the probability of ray casting along each axis.
Returns:
list: List where each element is (ray cast axis index, bool whether the axis was sampled from the top side,
[x, y, z]) tuples.
"""
assert len(mins.shape) == 1
assert mins.shape == maxes.shape
results = []
for i in range(count):
# Get the uniform sample first.
position = np.random.rand(3)
# Sample the bimodal normal.
bottom = (0 - bimodal_mean_fraction) / bimodal_stdev_fraction
top = (1 - bimodal_mean_fraction) / bimodal_stdev_fraction
bimodal_sample = truncnorm.rvs(bottom, top, loc=bimodal_mean_fraction, scale=bimodal_stdev_fraction)
# Pick which axis the bimodal normal sample should go to.
bimodal_axis = np.random.choice([0, 1, 2], p=axis_probabilities)
# Choose which side of the axis to sample from. We only sample from the top for the Z axis.
if bimodal_axis == 2:
bimodal_axis_top_side = True
else:
bimodal_axis_top_side = np.random.choice([True, False])
# Move sample based on chosen side.
position[bimodal_axis] = bimodal_sample if bimodal_axis_top_side else 1 - bimodal_sample
# Scale the position from the standard normal range to the min-max range.
scaled_position = mins + (maxes - mins) * position
# Save the result.
results.append((bimodal_axis, bimodal_axis_top_side, scaled_position))
return results
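# Illustrative sketch (editor's example, not part of the original module): sampling 5 origins
# inside a unit AABB, biased toward the faces, with rays cast only along the z axis (which
# is always sampled from the top side).
def _sample_origin_positions_example():
    samples = sample_origin_positions(
        mins=np.zeros(3),
        maxes=np.ones(3),
        count=5,
        bimodal_mean_fraction=1.0,   # concentrate samples near the AABB faces
        bimodal_stdev_fraction=1e-2,
        axis_probabilities=[0, 0, 1],
    )
    for axis, from_top, position in samples:
        assert axis == 2 and from_top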
def raytest_batch(start_points, end_points, only_closest=True, ignore_bodies=None, ignore_collisions=None, callback=None):
"""
Computes raytest collisions for a set of rays cast from @start_points to @end_points.
Args:
start_points (list of 3-array): Array of start locations to cast rays, where each is (x,y,z) global
start location of the ray
end_points (list of 3-array): Array of end locations to cast rays, where each is (x,y,z) global
end location of the ray
only_closest (bool): Whether we report the first (closest) hit from the ray or grab all hits
ignore_bodies (None or list of str): If specified, specifies absolute USD paths to rigid bodies
whose collisions should be ignored
ignore_collisions (None or list of str): If specified, specifies absolute USD paths to collision geoms
whose collisions should be ignored
callback (None or function): If specified and @only_closest is False, the custom callback to use per-hit.
This can be efficient if raytests are meant to terminate early. If None, no custom callback will be used.
Expected signature is callback(hit) -> bool, which returns True if the raycast should continue or not
Returns:
list of dict or list of list of dict: Results for all rays, where each entry corresponds to the result for the
ith ray cast. If @only_closest=True, each entry in the list is the closest hit. Otherwise, each entry is
its own (unordered) list of hits for that ray. Each dict is composed of:
"hit" (bool): Whether an object was hit or not
"position" (3-array): Location of the hit position
"normal" (3-array): normal vector of the face hit
"distance" (float): distance from @start_point the hit occurred
"collision" (str): absolute USD path to the collision body hit
"rigidBody" (str): absolute USD path to the associated rigid body hit
Note that only "hit" = False exists in the dict if no hit was found
"""
# For now, we do a naive for loop over individual raytests until a better API comes out
results = []
for start_point, end_point in zip(start_points, end_points):
results.append(raytest(
start_point=start_point,
end_point=end_point,
only_closest=only_closest,
ignore_bodies=ignore_bodies,
ignore_collisions=ignore_collisions,
callback=callback,
))
return results
def raytest(
start_point,
end_point,
only_closest=True,
ignore_bodies=None,
ignore_collisions=None,
callback=None,
):
"""
Computes raytest collision for ray cast from @start_point to @end_point
Args:
start_point (3-array): (x,y,z) global start location of the ray
end_point (3-array): (x,y,z) global end location of the ray
only_closest (bool): Whether we report the first (closest) hit from the ray or grab all hits
ignore_bodies (None or list of str): If specified, specifies absolute USD paths to rigid bodies
whose collisions should be ignored
ignore_collisions (None or list of str): If specified, specifies absolute USD paths to collision geoms
whose collisions should be ignored
callback (None or function): If specified and @only_closest is False, the custom callback to use per-hit.
This can be efficient if raytests are meant to terminate early. If None, no custom callback will be used.
Expected signature is callback(hit) -> bool, which returns True if the raycast should continue or not
Returns:
dict or list of dict: Results for this raytest. If @only_closest=True, then we only return the information from
the closest hit. Otherwise, we return an (unordered) list of information for all hits encountered.
Each dict is composed of:
"hit" (bool): Whether an object was hit or not
"position" (3-array): Location of the hit position
"normal" (3-array): normal vector of the face hit
"distance" (float): distance from @start_point the hit occurred
"collision" (str): absolute USD path to the collision body hit
"rigidBody" (str): absolute USD path to the associated rigid body hit
Note that only "hit" = False exists in the dict if no hit was found
"""
# Make sure start point, end point are numpy arrays
start_point, end_point = np.array(start_point), np.array(end_point)
point_diff = end_point - start_point
distance = np.linalg.norm(point_diff)
direction = point_diff / distance
# For efficiency's sake, we handle special case of no ignore_bodies, ignore_collisions, and closest_hit
if only_closest and ignore_bodies is None and ignore_collisions is None:
return og.sim.psqi.raycast_closest(
origin=start_point,
dir=direction,
distance=distance,
)
else:
# Compose callback function for finding raycasts
hits = []
ignore_bodies = set() if ignore_bodies is None else set(ignore_bodies)
ignore_collisions = set() if ignore_collisions is None else set(ignore_collisions)
def hit_callback(hit):
# Only add to hits if we're not ignoring this body or collision
if hit.rigid_body not in ignore_bodies and hit.collision not in ignore_collisions:
hits.append({
"hit": True,
"position": np.array(hit.position),
"normal": np.array(hit.normal),
"distance": hit.distance,
"collision": hit.collision,
"rigidBody": hit.rigid_body,
})
# We always want to continue traversing to collect all hits
return True if callback is None else callback(hit)
# Grab all collisions
og.sim.psqi.raycast_all(
origin=start_point,
dir=direction,
distance=distance,
reportFn=hit_callback,
)
# If we only want the closest, we need to sort these hits, otherwise we return them all
if only_closest:
# Return the empty hit dictionary if our ray did not hit anything, otherwise we return the closest
return {"hit": False} if len(hits) == 0 else sorted(hits, key=lambda hit: hit["distance"])[0]
else:
# Return all hits (list)
return hits
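# Illustrative sketch (editor's example, not part of the original module): casting a single
# downward ray while ignoring a hypothetical robot body. Requires a running simulator.
def _raytest_example():
    hit = raytest(
        start_point=[0.0, 0.0, 2.0],
        end_point=[0.0, 0.0, 0.0],
        only_closest=True,
        ignore_bodies=["/World/robot0/base_link"],  # hypothetical rigid body path
    )
    if hit["hit"]:
        return hit["position"], hit["rigidBody"]
    return None, None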
def sample_raytest_start_end_symmetric_bimodal_distribution(
obj,
num_samples,
bimodal_mean_fraction,
bimodal_stdev_fraction,
axis_probabilities,
aabb_offset=None,
aabb_offset_fraction=m.DEFAULT_AABB_OFFSET_FRACTION,
max_sampling_attempts=m.DEFAULT_MAX_SAMPLING_ATTEMPTS,
):
"""
    Sample the start points and end points around a given object by a symmetric bimodal distribution
    Args:
        obj (DatasetObject): The object to sample points on.
num_samples (int): the number of points to try to sample.
bimodal_mean_fraction (float): the mean of one side of the symmetric bimodal distribution as a fraction of the
min-max range.
bimodal_stdev_fraction (float): the standard deviation of one side of the symmetric bimodal distribution as a
fraction of the min-max range.
axis_probabilities (3-array): probability of ray casting along each axis.
aabb_offset (None or float or 3-array): padding for AABB to initiate ray-testing, in absolute units. If specified,
will override @aabb_offset_fraction
aabb_offset_fraction (float or 3-array): padding for AABB to initiate ray-testing, as a fraction of overall AABB.
max_sampling_attempts (int): how many times sampling will be attempted for each requested point.
Returns:
2-tuple:
- (n, s, 3)-array: (num_samples, max_sampling_attempts, 3) shaped array representing the start points for
raycasting defined in the world frame
- (n, s, 3)-array: (num_samples, max_sampling_attempts, 3) shaped array representing the end points for
raycasting defined in the world frame
"""
bbox_center, bbox_orn, bbox_bf_extent, _ = obj.get_base_aligned_bbox(xy_aligned=True)
aabb_offset = aabb_offset_fraction * bbox_bf_extent if aabb_offset is None else aabb_offset
half_extent_with_offset = (bbox_bf_extent / 2) + aabb_offset
start_points = np.zeros((num_samples, max_sampling_attempts, 3))
end_points = np.zeros((num_samples, max_sampling_attempts, 3))
for i in range(num_samples):
# Sample the starting positions in advance.
# TODO: Narrow down the sampling domain so that we don't sample scenarios where the center is in-domain but the
# full extent isn't. Currently a lot of samples are being wasted because of this.
samples = sample_origin_positions(
-half_extent_with_offset,
half_extent_with_offset,
max_sampling_attempts,
bimodal_mean_fraction,
bimodal_stdev_fraction,
axis_probabilities,
)
# Try each sampled position in the AABB.
for j, (axis, is_top, start_point) in enumerate(samples):
# Compute the ray's destination using the sampling & AABB information.
end_point = compute_ray_destination(
axis, is_top, start_point, -half_extent_with_offset, half_extent_with_offset
)
start_points[i][j] = start_point
end_points[i][j] = end_point
# Convert the points into the world frame
orig_shape = start_points.shape
to_wf_transform = T.pose2mat((bbox_center, bbox_orn))
start_points = trimesh.transformations.transform_points(start_points.reshape(-1, 3), to_wf_transform).reshape(orig_shape)
end_points = trimesh.transformations.transform_points(end_points.reshape(-1, 3), to_wf_transform).reshape(orig_shape)
return start_points, end_points
def sample_raytest_start_end_full_grid_topdown(
obj,
ray_spacing,
aabb_offset=None,
aabb_offset_fraction=m.DEFAULT_AABB_OFFSET_FRACTION,
):
"""
Sample the start points and end points around a given object by a dense grid from top down.
Args:
obj (DatasetObject): The object to sample points on.
ray_spacing (float): spacing between the rays, or equivalently, size of the grid cell
aabb_offset (None or float or 3-array): padding for AABB to initiate ray-testing, in absolute units. If specified,
will override @aabb_offset_fraction
aabb_offset_fraction (float or 3-array): padding for AABB to initiate ray-testing, as a fraction of overall AABB.
Returns:
2-tuple:
- (n, s, 3)-array: (num_samples, max_sampling_attempts, 3) shaped array representing the start points for
raycasting defined in the world frame
- (n, s, 3)-array: (num_samples, max_sampling_attempts, 3) shaped array representing the end points for
raycasting defined in the world frame
"""
bbox_center, bbox_orn, bbox_bf_extent, _ = obj.get_base_aligned_bbox(xy_aligned=True)
aabb_offset = aabb_offset_fraction * bbox_bf_extent if aabb_offset is None else aabb_offset
half_extent_with_offset = (bbox_bf_extent / 2) + aabb_offset
x = np.linspace(-half_extent_with_offset[0], half_extent_with_offset[0], int(half_extent_with_offset[0] * 2 / ray_spacing) + 1)
y = np.linspace(-half_extent_with_offset[1], half_extent_with_offset[1], int(half_extent_with_offset[1] * 2 / ray_spacing) + 1)
n_rays = len(x) * len(y)
start_points = np.stack([
np.tile(x, len(y)),
np.repeat(y, len(x)),
np.ones(n_rays) * half_extent_with_offset[2],
]).T
end_points = np.copy(start_points)
end_points[:, 2] = -half_extent_with_offset[2]
# Convert the points into the world frame
to_wf_transform = T.pose2mat((bbox_center, bbox_orn))
start_points = trimesh.transformations.transform_points(start_points, to_wf_transform)
end_points = trimesh.transformations.transform_points(end_points, to_wf_transform)
start_points = np.expand_dims(start_points, axis=1)
end_points = np.expand_dims(end_points, axis=1)
return start_points, end_points
def sample_cuboid_on_object_symmetric_bimodal_distribution(
obj,
num_samples,
cuboid_dimensions,
bimodal_mean_fraction,
bimodal_stdev_fraction,
axis_probabilities,
new_ray_per_horizontal_distance=m.DEFAULT_NEW_RAY_PER_HORIZONTAL_DISTANCE,
hit_proportion=m.DEFAULT_HIT_PROPORTION,
aabb_offset=None,
aabb_offset_fraction=m.DEFAULT_AABB_OFFSET_FRACTION,
max_sampling_attempts=m.DEFAULT_MAX_SAMPLING_ATTEMPTS,
max_angle_with_z_axis=m.DEFAULT_MAX_ANGLE_WITH_Z_AXIS,
parallel_ray_normal_angle_tolerance=m.DEFAULT_PARALLEL_RAY_NORMAL_ANGLE_TOLERANCE,
hit_to_plane_threshold=m.DEFAULT_HIT_TO_PLANE_THRESHOLD,
cuboid_bottom_padding=m.DEFAULT_CUBOID_BOTTOM_PADDING,
undo_cuboid_bottom_padding=True,
verify_cuboid_empty=True,
refuse_downwards=False,
):
"""
Samples points on an object's surface using ray casting.
Rays are sampled with a symmetric bimodal distribution.
Args:
obj (DatasetObject): The object to sample points on.
num_samples (int): the number of points to try to sample.
cuboid_dimensions ((n, 3)-array): Float sequence of len 3, the size of the empty cuboid we are trying to sample.
Can also provide list of cuboid dimension triplets in which case each i'th sample will be sampled using
            the i'th triplet. Alternatively, cuboid_dimensions can be set to all zeros if the user just wants to
sample points (instead of cuboids) for significantly better performance. This applies when the user wants
to sample very small particles.
bimodal_mean_fraction (float): the mean of one side of the symmetric bimodal distribution as a fraction of the
min-max range.
bimodal_stdev_fraction (float): the standard deviation of one side of the symmetric bimodal distribution as a
fraction of the min-max range.
axis_probabilities (3-array): the probability of ray casting along each axis.
new_ray_per_horizontal_distance (float): per this distance of the cuboid dimension, increase the grid size of
the parallel ray-testing by 1. This controls how fine-grained the grid ray-casting should be with respect to
the size of the sampled cuboid.
hit_proportion (float): the minimum percentage of the hits required across the grid.
aabb_offset (None or float or 3-array): padding for AABB to initiate ray-testing, in absolute units. If specified,
will override @aabb_offset_fraction
aabb_offset_fraction (float or 3-array): padding for AABB to initiate ray-testing, as a fraction of overall AABB.
max_sampling_attempts (int): how many times sampling will be attempted for each requested point.
max_angle_with_z_axis (float): maximum angle between hit normal and positive Z axis allowed. Can be used to
disallow downward-facing hits when refuse_downwards=True.
parallel_ray_normal_angle_tolerance (float): maximum angle difference between the normal of the center hit
and the normal of other hits allowed.
hit_to_plane_threshold (float): how far any given hit position can be from the least-squares fit plane to
all of the hit positions before the sample is rejected.
cuboid_bottom_padding (float): additional padding applied to the bottom of the cuboid. This is needed for the
            emptiness check (@check_cuboid_empty) within the cuboid. undo_cuboid_bottom_padding=True can be set if the user wants to remove
the padding after the emptiness check.
undo_cuboid_bottom_padding (bool): Whether the bottom padding that's applied to the cuboid should be removed before return.
Useful when the cuboid needs to be flush with the surface for whatever reason. Note that the padding will still
be applied initially (since it's not possible to do the cuboid emptiness check without doing this - otherwise
the rays will hit the sampled-on object), so the emptiness check still checks a padded cuboid. This flag will
simply make the sampler undo the padding prior to returning.
verify_cuboid_empty (bool): Whether to filter out sampled cuboid locations that are not collision-free. Note
that this check will only potentially occur if nonzero cuboid dimensions are specified.
refuse_downwards (bool): whether downward-facing hits (as defined by max_angle_with_z_axis) are allowed.
Returns:
        list of tuple: list of length num_samples elements where each element is a tuple in the form of
            (cuboid_centroid, cuboid_up_vector, cuboid_rotation, hit_link, {refusal_reason: [refusal_details...]}).
            Cuboid positions are set to None when no successful sampling happens within the max number of attempts.
            Refusal details are only filled if the m.DEBUG_SAMPLING flag is globally set to True.
"""
start_points, end_points = sample_raytest_start_end_symmetric_bimodal_distribution(
obj,
num_samples,
bimodal_mean_fraction,
bimodal_stdev_fraction,
axis_probabilities,
aabb_offset=aabb_offset,
aabb_offset_fraction=aabb_offset_fraction,
max_sampling_attempts=max_sampling_attempts,
)
return sample_cuboid_on_object(
obj,
start_points,
end_points,
cuboid_dimensions,
new_ray_per_horizontal_distance=new_ray_per_horizontal_distance,
hit_proportion=hit_proportion,
max_angle_with_z_axis=max_angle_with_z_axis,
parallel_ray_normal_angle_tolerance=parallel_ray_normal_angle_tolerance,
hit_to_plane_threshold=hit_to_plane_threshold,
cuboid_bottom_padding=cuboid_bottom_padding,
undo_cuboid_bottom_padding=undo_cuboid_bottom_padding,
verify_cuboid_empty=verify_cuboid_empty,
refuse_downwards=refuse_downwards,
)
def sample_cuboid_on_object_full_grid_topdown(
obj,
ray_spacing,
cuboid_dimensions,
new_ray_per_horizontal_distance=m.DEFAULT_NEW_RAY_PER_HORIZONTAL_DISTANCE,
hit_proportion=m.DEFAULT_HIT_PROPORTION,
aabb_offset=None,
aabb_offset_fraction=m.DEFAULT_AABB_OFFSET_FRACTION,
max_angle_with_z_axis=m.DEFAULT_MAX_ANGLE_WITH_Z_AXIS,
parallel_ray_normal_angle_tolerance=m.DEFAULT_PARALLEL_RAY_NORMAL_ANGLE_TOLERANCE,
hit_to_plane_threshold=m.DEFAULT_HIT_TO_PLANE_THRESHOLD,
cuboid_bottom_padding=m.DEFAULT_CUBOID_BOTTOM_PADDING,
undo_cuboid_bottom_padding=True,
verify_cuboid_empty=True,
refuse_downwards=False,
):
"""
Samples points on an object's surface using ray casting.
Rays are sampled with a dense grid from top down.
Args:
obj (DatasetObject): The object to sample points on.
ray_spacing (float): spacing between the rays, or equivalently, size of the grid cell, when sampling the
start and end points. This implicitly determines the number of cuboids that will be sampled.
cuboid_dimensions ((n, 3)-array): Float sequence of len 3, the size of the empty cuboid we are trying to sample.
Can also provide list of cuboid dimension triplets in which case each i'th sample will be sampled using
            the i'th triplet. Alternatively, cuboid_dimensions can be set to all zeros if the user just wants to
sample points (instead of cuboids) for significantly better performance. This applies when the user wants
to sample very small particles.
new_ray_per_horizontal_distance (float): per this distance of the cuboid dimension, increase the grid size of
the parallel ray-testing by 1. This controls how fine-grained the grid ray-casting should be with respect to
the size of the sampled cuboid.
hit_proportion (float): the minimum percentage of the hits required across the grid.
aabb_offset (None or float or 3-array): padding for AABB to initiate ray-testing, in absolute units. If specified,
will override @aabb_offset_fraction
aabb_offset_fraction (float or 3-array): padding for AABB to initiate ray-testing, as a fraction of overall AABB.
max_angle_with_z_axis (float): maximum angle between hit normal and positive Z axis allowed. Can be used to
disallow downward-facing hits when refuse_downwards=True.
parallel_ray_normal_angle_tolerance (float): maximum angle difference between the normal of the center hit
and the normal of other hits allowed.
hit_to_plane_threshold (float): how far any given hit position can be from the least-squares fit plane to
all of the hit positions before the sample is rejected.
cuboid_bottom_padding (float): additional padding applied to the bottom of the cuboid. This is needed for the
            emptiness check (@check_cuboid_empty) within the cuboid. undo_cuboid_bottom_padding=True can be set if the user wants to remove
the padding after the emptiness check.
undo_cuboid_bottom_padding (bool): Whether the bottom padding that's applied to the cuboid should be removed before return.
Useful when the cuboid needs to be flush with the surface for whatever reason. Note that the padding will still
be applied initially (since it's not possible to do the cuboid emptiness check without doing this - otherwise
the rays will hit the sampled-on object), so the emptiness check still checks a padded cuboid. This flag will
simply make the sampler undo the padding prior to returning.
verify_cuboid_empty (bool): Whether to filter out sampled cuboid locations that are not collision-free. Note
that this check will only potentially occur if nonzero cuboid dimensions are specified.
refuse_downwards (bool): whether downward-facing hits (as defined by max_angle_with_z_axis) are allowed.
Returns:
        list of tuple: list of length num_samples elements where each element is a tuple in the form of
            (cuboid_centroid, cuboid_up_vector, cuboid_rotation, hit_link, {refusal_reason: [refusal_details...]}).
            Cuboid positions are set to None when no successful sampling happens within the max number of attempts.
            Refusal details are only filled if the m.DEBUG_SAMPLING flag is globally set to True.
"""
start_points, end_points = sample_raytest_start_end_full_grid_topdown(
obj,
ray_spacing,
aabb_offset=aabb_offset,
aabb_offset_fraction=aabb_offset_fraction,
)
return sample_cuboid_on_object(
obj,
start_points,
end_points,
cuboid_dimensions,
new_ray_per_horizontal_distance=new_ray_per_horizontal_distance,
hit_proportion=hit_proportion,
max_angle_with_z_axis=max_angle_with_z_axis,
parallel_ray_normal_angle_tolerance=parallel_ray_normal_angle_tolerance,
hit_to_plane_threshold=hit_to_plane_threshold,
cuboid_bottom_padding=cuboid_bottom_padding,
undo_cuboid_bottom_padding=undo_cuboid_bottom_padding,
verify_cuboid_empty=verify_cuboid_empty,
refuse_downwards=refuse_downwards,
)
def sample_cuboid_on_object(
obj,
start_points,
end_points,
cuboid_dimensions,
ignore_objs=None,
new_ray_per_horizontal_distance=m.DEFAULT_NEW_RAY_PER_HORIZONTAL_DISTANCE,
hit_proportion=m.DEFAULT_HIT_PROPORTION,
max_angle_with_z_axis=m.DEFAULT_MAX_ANGLE_WITH_Z_AXIS,
parallel_ray_normal_angle_tolerance=m.DEFAULT_PARALLEL_RAY_NORMAL_ANGLE_TOLERANCE,
hit_to_plane_threshold=m.DEFAULT_HIT_TO_PLANE_THRESHOLD,
cuboid_bottom_padding=m.DEFAULT_CUBOID_BOTTOM_PADDING,
undo_cuboid_bottom_padding=True,
verify_cuboid_empty=True,
refuse_downwards=False,
):
"""
Samples points on an object's surface using ray casting.
Args:
obj (DatasetObject): The object to sample points on.
start_points ((n, s, 3)-array): (num_samples, max_sampling_attempts, 3) shaped array representing the start points for
raycasting defined in the world frame
end_points ((n, s, 3)-array): (num_samples, max_sampling_attempts, 3) shaped array representing the end points for
raycasting defined in the world frame
cuboid_dimensions ((n, 3)-array): Float sequence of len 3, the size of the empty cuboid we are trying to sample.
Can also provide list of cuboid dimension triplets in which case each i'th sample will be sampled using
            the i'th triplet. Alternatively, cuboid_dimensions can be set to all zeros if the user just wants to
sample points (instead of cuboids) for significantly better performance. This applies when the user wants
to sample very small particles.
ignore_objs (None or list of EntityPrim): If @obj is None, this can be used to filter objects when checking
for valid cuboid locations. Any sampled rays that hit an object in @ignore_objs will be ignored. If None,
no filtering will be used
new_ray_per_horizontal_distance (float): per this distance of the cuboid dimension, increase the grid size of
the parallel ray-testing by 1. This controls how fine-grained the grid ray-casting should be with respect to
the size of the sampled cuboid.
hit_proportion (float): the minimum percentage of the hits required across the grid.
max_angle_with_z_axis (float): maximum angle between hit normal and positive Z axis allowed. Can be used to
disallow downward-facing hits when refuse_downwards=True.
parallel_ray_normal_angle_tolerance (float): maximum angle difference between the normal of the center hit
and the normal of other hits allowed.
hit_to_plane_threshold (float): how far any given hit position can be from the least-squares fit plane to
all of the hit positions before the sample is rejected.
cuboid_bottom_padding (float): additional padding applied to the bottom of the cuboid. This is needed for the
            emptiness check (@check_cuboid_empty) within the cuboid. undo_cuboid_bottom_padding=True can be set if
            the user wants to remove the padding after the emptiness check.
undo_cuboid_bottom_padding (bool): Whether the bottom padding that's applied to the cuboid should be removed before return.
Useful when the cuboid needs to be flush with the surface for whatever reason. Note that the padding will still
be applied initially (since it's not possible to do the cuboid emptiness check without doing this - otherwise
the rays will hit the sampled-on object), so the emptiness check still checks a padded cuboid. This flag will
simply make the sampler undo the padding prior to returning.
verify_cuboid_empty (bool): Whether to filter out sampled cuboid locations that are not collision-free. Note
that this check will only potentially occur if nonzero cuboid dimensions are specified.
        refuse_downwards (bool): whether to reject downward-facing hits (as defined by max_angle_with_z_axis).
    Returns:
        list of tuple: list of num_samples elements where each element is a tuple in the form of
            (cuboid_centroid, cuboid_up_vector, cuboid_rotation, hit_link, {refusal_reason: [refusal_details...]}).
            Cuboid positions are set to None when no successful sampling happens within the max number of attempts.
            Refusal details are only filled if the m.DEBUG_SAMPLING flag is globally set to True.
"""
assert start_points.shape == end_points.shape, \
"the start and end points of raycasting are expected to have the same shape."
num_samples = start_points.shape[0]
cuboid_dimensions = np.array(cuboid_dimensions)
if np.any(cuboid_dimensions > 50.0):
log.warning("WARNING: Trying to sample for a very large cuboid (at least one dimensions > 50). "
"Terminating immediately, no hits will be registered.")
return [(None, None, None, None, defaultdict(list)) for _ in range(num_samples)]
assert cuboid_dimensions.ndim <= 2
assert cuboid_dimensions.shape[-1] == 3, "Cuboid dimensions need to contain all three dimensions."
if cuboid_dimensions.ndim == 2:
assert cuboid_dimensions.shape[0] == num_samples, "Need as many offsets as samples requested."
results = [(None, None, None, None, defaultdict(list)) for _ in range(num_samples)]
rigid_bodies = None if obj is None else {link.prim_path for link in obj.links.values()}
ignore_rigid_bodies = None if ignore_objs is None else \
{link.prim_path for ignore_obj in ignore_objs for link in ignore_obj.links.values()}
for i in range(num_samples):
refusal_reasons = results[i][4]
# Try each sampled position in the AABB.
for start_pos, end_pos in zip(start_points[i], end_points[i]):
# If we have a list of cuboid dimensions, pick the one that corresponds to this particular sample.
this_cuboid_dimensions = cuboid_dimensions if cuboid_dimensions.ndim == 1 else cuboid_dimensions[i]
zero_cuboid_dimension = (this_cuboid_dimensions == 0.0).all()
if not zero_cuboid_dimension:
# Make sure we have valid (nonzero) x and y values
assert (this_cuboid_dimensions[:-1] > 0).all(), \
f"Cuboid x and y dimensions must not be zero if z dimension is nonzero! Got: {this_cuboid_dimensions}"
# Obtain the parallel rays using the direction sampling method.
sources, destinations, grid = get_parallel_rays(
start_pos, end_pos, this_cuboid_dimensions[:2] / 2.0, new_ray_per_horizontal_distance,
)
sources = np.array(sources)
destinations = np.array(destinations)
else:
sources = np.array([start_pos])
destinations = np.array([end_pos])
# Time to cast the rays.
cast_results = raytest_batch(start_points=sources, end_points=destinations, ignore_bodies=ignore_rigid_bodies)
# Check whether sufficient number of rays hit the object
hits = check_rays_hit_object(
cast_results, hit_proportion, refusal_reasons["missed_object"], rigid_bodies)
if hits is None:
continue
center_idx = int(len(hits) / 2)
# Only consider objects whose center idx has a ray hit
if not hits[center_idx]:
continue
filtered_cast_results = []
filtered_center_idx = None
for idx, hit in enumerate(hits):
if hit:
filtered_cast_results.append(cast_results[idx])
if idx == center_idx:
filtered_center_idx = len(filtered_cast_results) - 1
# Process the hit positions and normals.
hit_positions = np.array([ray_res["position"] for ray_res in filtered_cast_results])
hit_normals = np.array([ray_res["normal"] for ray_res in filtered_cast_results])
hit_normals /= np.linalg.norm(hit_normals, axis=1, keepdims=True)
assert filtered_center_idx is not None
hit_link = filtered_cast_results[filtered_center_idx]["rigidBody"]
center_hit_pos = hit_positions[filtered_center_idx]
center_hit_normal = hit_normals[filtered_center_idx]
# Reject anything facing more than 45deg downwards if requested.
if refuse_downwards:
if not check_hit_max_angle_from_z_axis(
center_hit_normal, max_angle_with_z_axis, refusal_reasons["downward_normal"]
):
continue
# Check that none of the parallel rays' hit normal differs from center ray by more than threshold.
if not zero_cuboid_dimension:
if not check_normal_similarity(center_hit_normal, hit_normals, parallel_ray_normal_angle_tolerance, refusal_reasons["hit_normal_similarity"]):
continue
# Fit a plane to the points.
plane_centroid, plane_normal = fit_plane(hit_positions, refusal_reasons["fit_plane"])
if plane_centroid is None:
continue
# The fit_plane normal can be facing either direction on the normal axis, but we want it to face away from
# the object for purposes of normal checking and padding. To do this:
# We get a vector from the centroid towards the center ray source, and flip the plane normal to match it.
# The cosine has positive sign if the two vectors are similar and a negative one if not.
plane_to_source = sources[center_idx] - plane_centroid
plane_normal *= np.sign(np.dot(plane_to_source, plane_normal))
# Check that the plane normal is similar to the hit normal
if not check_normal_similarity(
center_hit_normal, plane_normal[None, :], parallel_ray_normal_angle_tolerance, refusal_reasons["plane_normal_similarity"]
):
continue
# Check that the points are all within some acceptable distance of the plane.
if not check_distance_to_plane(
hit_positions, plane_centroid, plane_normal, hit_to_plane_threshold, refusal_reasons["dist_to_plane"]
):
continue
# Get projection of the base onto the plane, fit a rotation, and compute the new center hit / corners.
hit_positions = np.array([ray_res.get("position", np.zeros(3)) for ray_res in cast_results])
projected_hits = get_projection_onto_plane(hit_positions, plane_centroid, plane_normal)
padding = cuboid_bottom_padding * plane_normal
projected_hits += padding
center_projected_hit = projected_hits[center_idx]
cuboid_centroid = center_projected_hit + plane_normal * this_cuboid_dimensions[2] / 2.0
rotation = compute_rotation_from_grid_sample(
grid, projected_hits, cuboid_centroid, this_cuboid_dimensions,
hits, refusal_reasons["rotation_not_computable"])
# Make sure there are enough hit points that can be used for alignment to find the rotation
if rotation is None:
continue
corner_positions = cuboid_centroid[None, :] + (
rotation.apply(
0.5
* this_cuboid_dimensions
* np.array(
[
[1, 1, -1],
[-1, 1, -1],
[-1, -1, -1],
[1, -1, -1],
]
)
)
)
# Now we use the cuboid's diagonals to check that the cuboid is actually empty
if verify_cuboid_empty and not check_cuboid_empty(
plane_normal,
corner_positions,
this_cuboid_dimensions,
refusal_reasons["cuboid_not_empty"],
):
continue
if undo_cuboid_bottom_padding:
cuboid_centroid -= padding
else:
cuboid_centroid = center_hit_pos
if not undo_cuboid_bottom_padding:
padding = cuboid_bottom_padding * center_hit_normal
cuboid_centroid += padding
plane_normal = np.zeros(3)
rotation = R.from_quat([0, 0, 0, 1])
# We've found a nice attachment point. Continue onto next point to sample.
results[i] = (cuboid_centroid, plane_normal, rotation.as_quat(), hit_link, refusal_reasons)
break
if m.DEBUG_SAMPLING:
og.log.debug("Sampling rejection reasons:")
counter = Counter()
for instance in results:
for reason, refusals in instance[-1].items():
counter[reason] += len(refusals)
og.log.debug("\n".join("%s: %d" % pair for pair in counter.items()))
return results
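# A hedged usage sketch (not part of the original API): the two samplers above are typically
# chained -- generate top-down start/end rays over an object's AABB, then try to fit a cuboid at
# each candidate. `table_obj` and all numeric values below are hypothetical placeholders.
#
#   start_points, end_points = sample_raytest_start_end_full_grid_topdown(table_obj, ray_spacing=0.1)
#   results = sample_cuboid_on_object(
#       table_obj, start_points, end_points, cuboid_dimensions=np.array([0.1, 0.1, 0.1]),
#   )
#   for centroid, up_vector, rotation_quat, hit_link, refusals in results:
#       if centroid is not None:
#           print(f"Sampled cuboid at {centroid} on link {hit_link}")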
def compute_rotation_from_grid_sample(two_d_grid, projected_hits, cuboid_centroid, this_cuboid_dimensions, hits, refusal_log):
"""
    Computes the relative rotation that aligns the 2D ray-cast grid with the plane fit to the sampled hit points
Args:
two_d_grid (n, 2): (x,y) raycast origin points in the local plane frame
projected_hits ((k,3)-array): Points' positions projected onto the plane generated
cuboid_centroid (3-array): (x,y,z) sampled position of the hit cuboid centroid in the global frame
this_cuboid_dimensions (3-array): (x,y,z) size of cuboid being sampled from the grid
hits (list of bool): whether each point from @two_d_grid is a valid hit or not
refusal_log (dict): Dictionary to write debugging and log information to
Returns:
None or scipy.Rotation: If successfully hit, returns relative rotation from two_d_grid to
generated hit plane. Otherwise, returns None
"""
if np.sum(hits) < 3:
if m.DEBUG_SAMPLING:
refusal_log.append(f"insufficient hits to compute the rotation of the grid: needs 3, has {np.sum(hits)}")
return None
grid_in_planar_coordinates = two_d_grid.reshape(-1, 2)
grid_in_planar_coordinates = grid_in_planar_coordinates[hits]
grid_in_object_coordinates = np.zeros((len(grid_in_planar_coordinates), 3))
grid_in_object_coordinates[:, :2] = grid_in_planar_coordinates
grid_in_object_coordinates[:, 2] = -this_cuboid_dimensions[2] / 2.0
projected_hits = projected_hits[hits]
sampled_grid_relative_vectors = projected_hits - cuboid_centroid
rotation, _ = R.align_vectors(sampled_grid_relative_vectors, grid_in_object_coordinates)
return rotation
def check_normal_similarity(center_hit_normal, hit_normals, tolerance, refusal_log):
"""
Check whether the normals from @hit_normals are within some @tolerance of @center_hit_normal.
Args:
center_hit_normal (3-array): normal of the center hit point
hit_normals ((n, 3)-array): normals of all the hit points
tolerance (float): Acceptable deviation between the center hit normal and all normals
refusal_log (dict): Dictionary to write debugging and log information to
Returns:
bool: Whether the normal similarity is acceptable or not
"""
parallel_hit_main_hit_dot_products = np.clip(
np.dot(hit_normals, center_hit_normal)
/ (np.linalg.norm(hit_normals, axis=1) * np.linalg.norm(center_hit_normal)),
-1.0,
1.0,
)
parallel_hit_normal_angles_to_hit_normal = np.arccos(parallel_hit_main_hit_dot_products)
all_rays_hit_with_similar_normal = np.all(
parallel_hit_normal_angles_to_hit_normal < tolerance
)
if not all_rays_hit_with_similar_normal:
if m.DEBUG_SAMPLING:
refusal_log.append("angles %r" % (np.rad2deg(parallel_hit_normal_angles_to_hit_normal),))
return False
return True
def check_rays_hit_object(cast_results, threshold, refusal_log, body_names=None):
"""
Checks whether rays hit a specific object, as specified by a list of @body_names
Args:
        cast_results (list of dict): Output from raytest_batch.
threshold (float): Relative ratio in [0, 1] specifying proportion of rays from @cast_results are
required to hit @body_names to count as the object being hit
refusal_log (list of str): Logging array for adding debug logs
body_names (None or list or set of str): absolute USD paths to rigid bodies to check for hit. If not
specified, then any valid hit will be accepted
Returns:
None or list of bool: Individual T/F for each ray -- whether it hit the object or not
"""
body_names = None if body_names is None else set(body_names)
ray_hits = [
ray_res["hit"] and
(body_names is None or ray_res["rigidBody"] in body_names)
for ray_res in cast_results
]
if sum(ray_hits) / len(cast_results) < threshold:
if m.DEBUG_SAMPLING:
refusal_log.append(f"{sum(ray_hits)} / {len(cast_results)} < {threshold} hits: {[ray_res['rigidBody'] for ray_res in cast_results if ray_res['hit']]}")
return None
return ray_hits
def check_hit_max_angle_from_z_axis(hit_normal, max_angle_with_z_axis, refusal_log):
"""
Check whether the normal @hit_normal deviates from the global z axis by more than @max_angle_with_z_axis
Args:
hit_normal (3-array): Normal vector to check with respect to global z-axis
max_angle_with_z_axis (float): Maximum acceptable angle between the global z-axis and @hit_normal
refusal_log (list of str): Logging array for adding debug logs
Returns:
bool: True if the angle between @hit_normal and the global z-axis is less than @max_angle_with_z_axis,
otherwise False
"""
hit_angle_with_z = np.arccos(np.clip(np.dot(hit_normal, np.array([0, 0, 1])), -1.0, 1.0))
if hit_angle_with_z > max_angle_with_z_axis:
if m.DEBUG_SAMPLING:
refusal_log.append("normal %r" % hit_normal)
return False
return True
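# Minimal worked example (illustrative values only): a normal tilted 60 degrees away from +Z
# fails a 45-degree threshold, so steep or downward-facing surfaces get rejected.
#
#   n = np.array([np.sin(np.deg2rad(60)), 0.0, np.cos(np.deg2rad(60))])
#   check_hit_max_angle_from_z_axis(n, np.deg2rad(45), refusal_log=[])  # -> False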
def compute_ray_destination(axis, is_top, start_pos, aabb_min, aabb_max):
"""
Compute the point on the AABB defined by @aabb_min and @aabb_max from shooting a ray at @start_pos
in the direction defined by global axis @axis and @is_top
Args:
axis (int): Which direction to compute the ray destination. Valid options are {0, 1, 2} -- the
x, y, or z axes
is_top (bool): Whether to shoot in the positive or negative @axis direction
aabb_min (3-array): (x,y,z) position defining the lower corner of the AABB
aabb_max (3-array): (x,y,z) position defining the upper corner of the AABB
Returns:
3-array: computed (x,y,z) point on the AABB surface
"""
# Get the ray casting direction - we want to do it parallel to the sample axis.
ray_direction = np.array([0, 0, 0])
ray_direction[axis] = 1
ray_direction *= -1 if is_top else 1
# We want to extend our ray until it intersects one of the AABB's faces.
# Start by getting the distances towards the min and max boundaries of the AABB on each axis.
point_to_min = aabb_min - start_pos
point_to_max = aabb_max - start_pos
# Then choose the distance to the point in the correct direction on each axis.
closer_point_on_each_axis = np.where(ray_direction < 0, point_to_min, point_to_max)
# For each axis, find how many times the ray direction should be multiplied to reach the AABB's boundary.
multiple_to_face_on_each_axis = closer_point_on_each_axis / ray_direction
# Choose the minimum of these multiples, e.g. how many times the ray direction should be multiplied
# to reach the nearest boundary.
multiple_to_face = np.min(multiple_to_face_on_each_axis[np.isfinite(multiple_to_face_on_each_axis)])
# Finally, use the multiple we found to calculate the point on the AABB boundary that we want to cast our
# ray until.
point_on_face = start_pos + ray_direction * multiple_to_face
# Make sure that we did not end up with all NaNs or infinities due to division issues.
assert not np.any(np.isnan(point_on_face)) and not np.any(np.isinf(point_on_face))
return point_on_face
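# Minimal worked example (illustrative values only): shooting downward (-Z, i.e. axis=2 with
# is_top=True) from the center of a unit AABB lands on the bottom face.
#
#   compute_ray_destination(
#       axis=2, is_top=True, start_pos=np.array([0.5, 0.5, 0.5]),
#       aabb_min=np.zeros(3), aabb_max=np.ones(3),
#   )  # -> array([0.5, 0.5, 0.0])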
def check_cuboid_empty(hit_normal, bottom_corner_positions, this_cuboid_dimensions, refusal_log):
"""
Check whether the cuboid defined by @this_cuboid_dimensions and @bottom_corner_positions contains
empty space or not
Args:
hit_normal (3-array): (x,y,z) normal
bottom_corner_positions ((4, 3)-array): the positions defining the bottom corners of the cuboid
being sampled
this_cuboid_dimensions (3-array): (x,y,z) size of the sampled cuboid
refusal_log (list of str): Logging array for adding debug logs
Returns:
bool: True if the cuboid is empty, else False
"""
if m.DEBUG_SAMPLING:
draw_debug_markers(bottom_corner_positions)
# Compute top corners.
top_corner_positions = bottom_corner_positions + hit_normal * this_cuboid_dimensions[2]
# We only generate valid rays that have nonzero distances. If the inputted cuboid is flat (i.e.: one dimension
# is zero, i.e.: it is in fact a rectangle), raise an error
assert this_cuboid_dimensions[2] != 0, "Cannot check empty cuboid for cuboid with zero height!"
# Get all the top-to-bottom corner pairs.
# When we cast these rays, we check that the faces & volume of the cuboid are unoccupied.
top_to_bottom_pairs = list(itertools.product(top_corner_positions, bottom_corner_positions))
# Get all the same-height pairs. These also check that the surfaces areas are empty.
bottom_pairs = list(itertools.combinations(bottom_corner_positions, 2))
top_pairs = list(itertools.combinations(top_corner_positions, 2))
# Combine all these pairs, cast the rays, and make sure the rays don't hit anything.
all_pairs = np.array(top_to_bottom_pairs + bottom_pairs + top_pairs)
check_cast_results = raytest_batch(start_points=all_pairs[:, 0, :], end_points=all_pairs[:, 1, :])
if any(ray["hit"] for ray in check_cast_results):
if m.DEBUG_SAMPLING:
refusal_log.append("check ray info: %r" % (check_cast_results))
return False
return True
| 56,403 | Python | 48.60774 | 163 | 0.663511 |
StanfordVL/OmniGibson/omnigibson/utils/usd_utils.py | import math
from collections.abc import Iterable
import os
import omnigibson.lazy as lazy
import numpy as np
import trimesh
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils.constants import JointType, PRIMITIVE_MESH_TYPES, PrimType
from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.utils.ui_utils import suppress_omni_log
import omnigibson.utils.transform_utils as T
def array_to_vtarray(arr, element_type):
"""
Converts array @arr into a Vt-typed array, where each individual element of type @element_type.
Args:
arr (n-array): An array of values. Can be, e.g., a list, or numpy array
element_type (type): Per-element type to convert the elements from @arr into.
Valid options are keys of GF_TO_VT_MAPPING
Returns:
Vt.Array: Vt-typed array, of specified type corresponding to @element_type
"""
GF_TO_VT_MAPPING = {
lazy.pxr.Gf.Vec3d: lazy.pxr.Vt.Vec3dArray,
lazy.pxr.Gf.Vec3f: lazy.pxr.Vt.Vec3fArray,
lazy.pxr.Gf.Vec3h: lazy.pxr.Vt.Vec3hArray,
lazy.pxr.Gf.Quatd: lazy.pxr.Vt.QuatdArray,
lazy.pxr.Gf.Quatf: lazy.pxr.Vt.QuatfArray,
lazy.pxr.Gf.Quath: lazy.pxr.Vt.QuathArray,
int: lazy.pxr.Vt.IntArray,
float: lazy.pxr.Vt.FloatArray,
bool: lazy.pxr.Vt.BoolArray,
str: lazy.pxr.Vt.StringArray,
chr: lazy.pxr.Vt.CharArray,
}
# Make sure array type is valid
assert_valid_key(key=element_type, valid_keys=GF_TO_VT_MAPPING, name="array element type")
# Construct list of values
arr_list = []
# Check first to see if elements are vectors or not. If this is an iterable value that is not a string,
# then this is a vector and we have to map it to the correct type via *
is_vec_element = (isinstance(arr[0], Iterable)) and (not isinstance(arr[0], str))
# Loop over array and set values
for ele in arr:
arr_list.append(element_type(*ele) if is_vec_element else ele)
return GF_TO_VT_MAPPING[element_type](arr_list)
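# A hedged usage sketch (values are illustrative): converting an (N, 3) numpy array of points
# into a Vt.Vec3fArray, e.g. before writing a USD "points" attribute.
#
#   points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
#   vt_points = array_to_vtarray(points, element_type=lazy.pxr.Gf.Vec3f)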
def get_prim_nested_children(prim):
"""
Grabs all nested prims starting from root @prim via depth-first-search
Args:
prim (Usd.Prim): root prim from which to search for nested children prims
Returns:
list of Usd.Prim: nested prims
"""
prims = []
for child in lazy.omni.isaac.core.utils.prims.get_prim_children(prim):
prims.append(child)
prims += get_prim_nested_children(prim=child)
return prims
def create_joint(prim_path, joint_type, body0=None, body1=None, enabled=True,
joint_frame_in_parent_frame_pos=None, joint_frame_in_parent_frame_quat=None,
joint_frame_in_child_frame_pos=None, joint_frame_in_child_frame_quat=None,
break_force=None, break_torque=None):
"""
Creates a joint between @body0 and @body1 of specified type @joint_type
Args:
prim_path (str): absolute path to where the joint will be created
joint_type (str or JointType): type of joint to create. Valid options are:
"FixedJoint", "Joint", "PrismaticJoint", "RevoluteJoint", "SphericalJoint"
(equivalently, one of JointType)
body0 (str or None): absolute path to the first body's prim. At least @body0 or @body1 must be specified.
body1 (str or None): absolute path to the second body's prim. At least @body0 or @body1 must be specified.
enabled (bool): whether to enable this joint or not.
joint_frame_in_parent_frame_pos (np.ndarray or None): relative position of the joint frame to the parent frame (body0).
joint_frame_in_parent_frame_quat (np.ndarray or None): relative orientation of the joint frame to the parent frame (body0).
joint_frame_in_child_frame_pos (np.ndarray or None): relative position of the joint frame to the child frame (body1).
joint_frame_in_child_frame_quat (np.ndarray or None): relative orientation of the joint frame to the child frame (body1).
break_force (float or None): break force for linear dofs, unit is Newton.
break_torque (float or None): break torque for angular dofs, unit is Newton-meter.
Returns:
Usd.Prim: Created joint prim
"""
# Make sure we have valid joint_type
assert JointType.is_valid(joint_type=joint_type), \
f"Invalid joint specified for creation: {joint_type}"
# Make sure at least body0 or body1 is specified
assert body0 is not None or body1 is not None, \
f"At least either body0 or body1 must be specified when creating a joint!"
# Create the joint
joint = getattr(lazy.pxr.UsdPhysics, joint_type).Define(og.sim.stage, prim_path)
# Possibly add body0, body1 targets
if body0 is not None:
assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body0), f"Invalid body0 path specified: {body0}"
joint.GetBody0Rel().SetTargets([lazy.pxr.Sdf.Path(body0)])
if body1 is not None:
assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body1), f"Invalid body1 path specified: {body1}"
joint.GetBody1Rel().SetTargets([lazy.pxr.Sdf.Path(body1)])
# Get the prim pointed to at this path
joint_prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path)
# Apply joint API interface
lazy.pxr.PhysxSchema.PhysxJointAPI.Apply(joint_prim)
# We need to step rendering once to auto-fill the local pose before overwriting it.
# Note that for some reason, if multi_gpu is used, this line will crash if create_joint is called during on_contact
# callback, e.g. when an attachment joint is being created due to contacts.
og.sim.render()
if joint_frame_in_parent_frame_pos is not None:
joint_prim.GetAttribute("physics:localPos0").Set(lazy.pxr.Gf.Vec3f(*joint_frame_in_parent_frame_pos))
if joint_frame_in_parent_frame_quat is not None:
joint_prim.GetAttribute("physics:localRot0").Set(lazy.pxr.Gf.Quatf(*joint_frame_in_parent_frame_quat[[3, 0, 1, 2]]))
if joint_frame_in_child_frame_pos is not None:
joint_prim.GetAttribute("physics:localPos1").Set(lazy.pxr.Gf.Vec3f(*joint_frame_in_child_frame_pos))
if joint_frame_in_child_frame_quat is not None:
joint_prim.GetAttribute("physics:localRot1").Set(lazy.pxr.Gf.Quatf(*joint_frame_in_child_frame_quat[[3, 0, 1, 2]]))
if break_force is not None:
joint_prim.GetAttribute("physics:breakForce").Set(break_force)
if break_torque is not None:
joint_prim.GetAttribute("physics:breakTorque").Set(break_torque)
# Possibly (un-/)enable this joint
joint_prim.GetAttribute("physics:jointEnabled").Set(enabled)
# We update the simulation now without stepping physics if sim is playing so we can bypass the snapping warning from PhysicsUSD
if og.sim.is_playing():
with suppress_omni_log(channels=["omni.physx.plugin"]):
og.sim.pi.update_simulation(elapsedStep=0, currentTime=og.sim.current_time)
# Return this joint
return joint_prim
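# A hedged usage sketch: creating a fixed joint between two rigid links. The prim paths below
# are hypothetical placeholders, not real assets.
#
#   joint_prim = create_joint(
#       prim_path="/World/attachment_joint",
#       joint_type=JointType.JOINT_FIXED,
#       body0="/World/obj_a/base_link",
#       body1="/World/obj_b/base_link",
#   )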
class RigidContactAPI:
"""
Class containing class methods to aggregate rigid body contacts across all rigid bodies in the simulator
"""
# Dictionary mapping rigid body prim path to corresponding index in the contact view matrix
_PATH_TO_ROW_IDX = None
_PATH_TO_COL_IDX = None
# Numpy array of rigid body prim paths where its array index directly corresponds to the corresponding
# index in the contact view matrix
_ROW_IDX_TO_PATH = None
_COL_IDX_TO_PATH = None
# Contact view for generating contact matrices at each timestep
_CONTACT_VIEW = None
# Current aggregated contacts over all rigid bodies at the current timestep. Shape: (N, N, 3)
_CONTACT_MATRIX = None
# Current cache, mapping 2-tuple (prim_paths_a, prim_paths_b) to contact values
_CONTACT_CACHE = None
@classmethod
def initialize_view(cls):
"""
Initializes the rigid contact view. Note: Can only be done when sim is playing!
"""
assert og.sim.is_playing(), "Cannot create rigid contact view while sim is not playing!"
# Compile deterministic mapping from rigid body path to idx
# Note that omni's ordering is based on the top-down object ordering path on the USD stage, which coincidentally
# matches the same ordering we store objects in our registry. So the mapping we generate from our registry
# mapping aligns with omni's ordering!
i = 0
cls._PATH_TO_COL_IDX = dict()
for obj in og.sim.scene.objects:
if obj.prim_type == PrimType.RIGID:
for link in obj.links.values():
if not link.kinematic_only:
cls._PATH_TO_COL_IDX[link.prim_path] = i
i += 1
# If there are no valid objects, clear the view and terminate early
if i == 0:
cls._CONTACT_VIEW = None
return
# Generate rigid body view, making sure to update the simulation first (without physics) so that the physx
# backend is synchronized with any newly added objects
# We also suppress the omni tensor plugin from giving warnings we expect
og.sim.pi.update_simulation(elapsedStep=0, currentTime=og.sim.current_time)
with suppress_omni_log(channels=["omni.physx.tensors.plugin"]):
cls._CONTACT_VIEW = og.sim.physics_sim_view.create_rigid_contact_view(
pattern="/World/*/*",
filter_patterns=list(cls._PATH_TO_COL_IDX.keys()),
)
# Create deterministic mapping from path to row index
cls._PATH_TO_ROW_IDX = {path: i for i, path in enumerate(cls._CONTACT_VIEW.sensor_paths)}
# Store the reverse mappings as well. This can just be a numpy array since the mapping uses integer indices
cls._ROW_IDX_TO_PATH = np.array(list(cls._PATH_TO_ROW_IDX.keys()))
cls._COL_IDX_TO_PATH = np.array(list(cls._PATH_TO_COL_IDX.keys()))
# Sanity check generated view -- this should generate square matrices of shape (N, N, 3)
n_bodies = len(cls._PATH_TO_COL_IDX)
assert cls._CONTACT_VIEW.filter_count == n_bodies, \
f"Got unexpected contact view shape. Expected: (N, {n_bodies}); " \
f"got: (N, {cls._CONTACT_VIEW.filter_count})"
@classmethod
def get_body_row_idx(cls, prim_path):
"""
Returns:
int: row idx assigned to the rigid body defined by @prim_path
"""
return cls._PATH_TO_ROW_IDX[prim_path]
@classmethod
def get_body_col_idx(cls, prim_path):
"""
Returns:
int: col idx assigned to the rigid body defined by @prim_path
"""
return cls._PATH_TO_COL_IDX[prim_path]
@classmethod
def get_row_idx_prim_path(cls, idx):
"""
Returns:
str: @prim_path corresponding to the row idx @idx in the contact matrix
"""
return cls._ROW_IDX_TO_PATH[idx]
@classmethod
def get_col_idx_prim_path(cls, idx):
"""
Returns:
str: @prim_path corresponding to the column idx @idx in the contact matrix
"""
return cls._COL_IDX_TO_PATH[idx]
@classmethod
def get_all_impulses(cls):
"""
Grab all impulses at the current timestep
Returns:
n-array: (N, M, 3) impulse array defining current impulses between all N contact-sensor enabled rigid bodies
in the simulator and M tracked rigid bodies
"""
# Generate the contact matrix if it doesn't already exist
if cls._CONTACT_MATRIX is None:
cls._CONTACT_MATRIX = cls._CONTACT_VIEW.get_contact_force_matrix(dt=1.0)
return cls._CONTACT_MATRIX
@classmethod
def get_impulses(cls, prim_paths_a, prim_paths_b):
"""
Grabs the matrix representing all impulse forces between rigid prims from @prim_paths_a and
rigid prims from @prim_paths_b
Args:
prim_paths_a (list of str): Rigid body prim path(s) with which to grab contact impulses against
any of the rigid body prim path(s) defined by @prim_paths_b
prim_paths_b (list of str): Rigid body prim path(s) with which to grab contact impulses against
any of the rigid body prim path(s) defined by @prim_paths_a
Returns:
n-array: (N, M, 3) impulse array defining current impulses between N bodies from @prim_paths_a and M bodies
from @prim_paths_b
"""
# Compute subset of matrix and return
idxs_a = [cls._PATH_TO_ROW_IDX[path] for path in prim_paths_a]
idxs_b = [cls._PATH_TO_COL_IDX[path] for path in prim_paths_b]
return cls.get_all_impulses()[idxs_a][:, idxs_b]
@classmethod
def in_contact(cls, prim_paths_a, prim_paths_b):
"""
Check if any rigid prim from @prim_paths_a is in contact with any rigid prim from @prim_paths_b
Args:
prim_paths_a (list of str): Rigid body prim path(s) with which to check contact against any of the rigid
body prim path(s) defined by @prim_paths_b
prim_paths_b (list of str): Rigid body prim path(s) with which to check contact against any of the rigid
body prim path(s) defined by @prim_paths_a
Returns:
bool: Whether any body from @prim_paths_a is in contact with any body from @prim_paths_b
"""
# Check if the contact tuple already exists in the cache; if so, return the value
key = (tuple(prim_paths_a), tuple(prim_paths_b))
if key not in cls._CONTACT_CACHE:
# In contact if any of the matrix values representing the interaction between the two groups is non-zero
cls._CONTACT_CACHE[key] = np.any(cls.get_impulses(prim_paths_a=prim_paths_a, prim_paths_b=prim_paths_b))
return cls._CONTACT_CACHE[key]
@classmethod
def clear(cls):
"""
Clears the internal contact matrix and cache
"""
cls._CONTACT_MATRIX = None
cls._CONTACT_CACHE = dict()
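# A hedged usage sketch: querying contact state between two rigid links once the sim is playing
# and the view has been initialized. The prim paths are hypothetical placeholders; clear() is
# expected to be called once per physics step so the cached contact matrix does not go stale.
#
#   RigidContactAPI.initialize_view()
#   touching = RigidContactAPI.in_contact(
#       prim_paths_a=["/World/cup/base_link"],
#       prim_paths_b=["/World/table/base_link"],
#   )
#   RigidContactAPI.clear()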
class CollisionAPI:
"""
Class containing class methods to facilitate collision handling, e.g. collision groups
"""
ACTIVE_COLLISION_GROUPS = dict()
@classmethod
def create_collision_group(cls, col_group, filter_self_collisions=False):
"""
Creates a new collision group with name @col_group
Args:
col_group (str): Name of the collision group to create
filter_self_collisions (bool): Whether to ignore self-collisions within the group. Default is False
"""
# Can only be done when sim is stopped
assert og.sim.is_stopped(), "Cannot create a collision group unless og.sim is stopped!"
# Make sure the group doesn't already exist
assert col_group not in cls.ACTIVE_COLLISION_GROUPS, \
f"Cannot create collision group {col_group} because it already exists!"
# Create the group
col_group_prim_path = f"/World/collision_groups/{col_group}"
group = lazy.pxr.UsdPhysics.CollisionGroup.Define(og.sim.stage, col_group_prim_path)
if filter_self_collisions:
# Do not collide with self
group.GetFilteredGroupsRel().AddTarget(col_group_prim_path)
cls.ACTIVE_COLLISION_GROUPS[col_group] = group
@classmethod
def add_to_collision_group(cls, col_group, prim_path):
"""
        Adds the prim and all nested prims specified by @prim_path to the global collision group @col_group.
        The collision group must already exist (i.e. have been created via create_collision_group), otherwise an
        assertion error is raised.
Args:
col_group (str): Name of the collision group to assign the prim at @prim_path to
prim_path (str): Prim (and all nested prims) to assign to this @col_group
"""
# Make sure collision group exists
assert col_group in cls.ACTIVE_COLLISION_GROUPS, \
f"Cannot add to collision group {col_group} because it does not exist!"
# Add this prim to the collision group
cls.ACTIVE_COLLISION_GROUPS[col_group].GetCollidersCollectionAPI().GetIncludesRel().AddTarget(prim_path)
@classmethod
def add_group_filter(cls, col_group, filter_group):
"""
Adds a new group filter for group @col_group, filtering all collision with group @filter_group
Args:
col_group (str): Name of the collision group which will have a new filter group added
filter_group (str): Name of the group that should be filtered
"""
        # Make sure both groups exist
for group_name in (col_group, filter_group):
assert group_name in cls.ACTIVE_COLLISION_GROUPS, \
(f"Cannot add group filter {filter_group} to collision group {col_group} because at least one group "
f"does not exist!")
# Grab the group, and add the filter
filter_group_prim_path = f"/World/collision_groups/{filter_group}"
group = cls.ACTIVE_COLLISION_GROUPS[col_group]
group.GetFilteredGroupsRel().AddTarget(filter_group_prim_path)
@classmethod
def clear(cls):
"""
Clears the internal state of this CollisionAPI
"""
cls.ACTIVE_COLLISION_GROUPS = {}
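# A hedged usage sketch: a self-filtering collision group plus a cross-group filter. Group names
# and the prim path are hypothetical placeholders; this must run while og.sim is stopped.
#
#   CollisionAPI.create_collision_group("grippers", filter_self_collisions=True)
#   CollisionAPI.create_collision_group("debris")
#   CollisionAPI.add_to_collision_group("grippers", "/World/robot/gripper_link")
#   CollisionAPI.add_group_filter("grippers", "debris")  # grippers no longer collide with debris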
class FlatcacheAPI:
"""
Monolithic class for leveraging functionality meant to be used EXCLUSIVELY with flatcache.
"""
# Modified prims since transition from sim being stopped to sim being played occurred
# This should get cleared every time og.sim.stop() gets called
MODIFIED_PRIMS = set()
@classmethod
def sync_raw_object_transforms_in_usd(cls, prim):
"""
        Manually synchronizes the per-link local raw transforms and per-joint raw states from entity prim @prim using
dynamic control interface as the ground truth.
NOTE: This slightly abuses the dynamic control - usd integration, and should ONLY be used if flatcache
is active, since the USD is not R/W at runtime and so we can write directly to child link poses on the USD
without breaking the simulation!
Args:
prim (EntityPrim): prim whose owned links and joints should have their raw local states updated to match the
"true" values found from the dynamic control interface
"""
# Make sure flatcache is enabled -- this should NEVER be called otherwise!!
assert gm.ENABLE_FLATCACHE, "Syncing raw object transforms should only occur if flatcache is being used!"
# We're somewhat abusing low-level dynamic control - physx - usd integration, but we (supposedly) know
# what we're doing so we suppress logging so we don't see any error messages :D
with suppress_omni_log(["omni.physx.plugin"]):
# Import here to avoid circular imports
from omnigibson.prims.xform_prim import XFormPrim
# 1. For every link, update its xformOp properties based on the delta_tf between object frame and link frame
obj_pos, obj_quat = XFormPrim.get_local_pose(prim)
for link in prim.links.values():
rel_pos, rel_quat = T.relative_pose_transform(*link.get_position_orientation(), obj_pos, obj_quat)
XFormPrim.set_local_pose(link, rel_pos, rel_quat)
# 2. For every joint, update its linear / angular joint state
if prim.n_joints > 0:
joints_pos = prim.get_joint_positions()
for joint, joint_pos in zip(prim.joints.values(), joints_pos):
state_name = "linear" if joint.joint_type == JointType.JOINT_PRISMATIC else "angular"
joint_pos = joint_pos if joint.joint_type == JointType.JOINT_PRISMATIC else joint_pos * 180.0 / np.pi
joint.set_attribute(f"state:{state_name}:physics:position", float(joint_pos))
# Update the simulation without taking any time
# This is needed because physx complains that we're manually writing to child links' poses, and will
# subsequently not respect any additional writes to the object pose before an additional step is taken.
# So we take a "zero" length step so that any additional writes to the object's pose at the current
# timestep are respected
og.sim.pi.update_simulation(elapsedStep=0, currentTime=og.sim.current_time)
# Add this prim to the set of modified prims
cls.MODIFIED_PRIMS.add(prim)
@classmethod
def reset_raw_object_transforms_in_usd(cls, prim):
"""
Manually resets the per-link local raw transforms and per-joint raw states from entity prim @prim to be zero.
NOTE: This slightly abuses the dynamic control - usd integration, and should ONLY be used if flatcache
is active, since the USD is not R/W at runtime and so we can write directly to child link poses on the USD
without breaking the simulation!
Args:
prim (EntityPrim): prim whose owned links and joints should have their local values reset to be zero
"""
# Make sure flatcache is enabled -- this should NEVER be called otherwise!!
assert gm.ENABLE_FLATCACHE, "Resetting raw object transforms should only occur if flatcache is being used!"
# We're somewhat abusing low-level dynamic control - physx - usd integration, but we (supposedly) know
# what we're doing so we suppress logging so we don't see any error messages :D
with suppress_omni_log(["omni.physx.plugin"]):
# Import here to avoid circular imports
from omnigibson.prims.xform_prim import XFormPrim
# 1. For every link, update its xformOp properties to be 0
for link in prim.links.values():
XFormPrim.set_local_pose(link, np.zeros(3), np.array([0, 0, 0, 1.0]))
# 2. For every joint, update its linear / angular joint state to be 0
if prim.n_joints > 0:
for joint in prim.joints.values():
state_name = "linear" if joint.joint_type == JointType.JOINT_PRISMATIC else "angular"
joint.set_attribute(f"state:{state_name}:physics:position", 0.0)
# Update the simulation without taking any time
# This is needed because physx complains that we're manually writing to child links' poses, and will
# subsequently not respect any additional writes to the object pose before an additional step is taken.
# So we take a "zero" length step so that any additional writes to the object's pose at the current
# timestep are respected
og.sim.pi.update_simulation(elapsedStep=0, currentTime=og.sim.current_time)
@classmethod
def reset(cls):
"""
        Resets the internal state of this FlatcacheAPI. This should only occur when the simulator is stopped
"""
# For any prim transforms that were manually updated, we need to restore their original transforms
for prim in cls.MODIFIED_PRIMS:
cls.reset_raw_object_transforms_in_usd(prim)
cls.MODIFIED_PRIMS = set()
class PoseAPI:
"""
This is a singleton class for getting world poses.
Whenever we directly set the pose of a prim, we should call PoseAPI.invalidate().
After that, if we need to access the pose of a prim without stepping physics,
this class will refresh the poses by syncing across USD-fabric-PhysX depending on the flatcache setting.
"""
VALID = False
@classmethod
def invalidate(cls):
cls.VALID = False
@classmethod
def mark_valid(cls):
cls.VALID = True
@classmethod
def _refresh(cls):
if og.sim is not None and not cls.VALID:
# when flatcache is on
if og.sim._physx_fabric_interface:
# no time step is taken here
og.sim._physx_fabric_interface.update(og.sim.get_physics_dt(), og.sim.current_time)
# when flatcache is off
else:
# no time step is taken here
og.sim.psi.fetch_results()
cls.mark_valid()
@classmethod
def get_world_pose(cls, prim_path):
cls._refresh()
position, orientation = lazy.omni.isaac.core.utils.xforms.get_world_pose(prim_path)
return np.array(position), np.array(orientation)[[1, 2, 3, 0]]
@classmethod
def get_world_pose_with_scale(cls, prim_path):
"""
This is used when information about the prim's global scale is needed,
e.g. when converting points in the prim frame to the world frame.
"""
cls._refresh()
return np.array(lazy.omni.isaac.core.utils.xforms._get_world_pose_transform_w_scale(prim_path)).T
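# A hedged usage sketch: transforming a point from a prim's local frame into the world frame
# using the scale-aware 4x4 transform. The prim path and point below are illustrative.
#
#   tf = PoseAPI.get_world_pose_with_scale("/World/obj/base_link")
#   local_pt = np.array([0.1, 0.0, 0.0, 1.0])  # homogeneous coordinates
#   world_pt = (tf @ local_pt)[:3]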
def clear():
"""
Clear state tied to singleton classes
"""
PoseAPI.invalidate()
CollisionAPI.clear()
def create_mesh_prim_with_default_xform(primitive_type, prim_path, u_patches=None, v_patches=None, stage=None):
"""
Creates a mesh prim of the specified @primitive_type at the specified @prim_path
Args:
primitive_type (str): Primitive mesh type, should be one of PRIMITIVE_MESH_TYPES to be valid
prim_path (str): Destination prim path to store the mesh prim
u_patches (int or None): If specified, should be an integer that represents how many segments to create in the
u-direction. E.g. 10 means 10 segments (and therefore 11 vertices) will be created.
v_patches (int or None): If specified, should be an integer that represents how many segments to create in the
v-direction. E.g. 10 means 10 segments (and therefore 11 vertices) will be created.
Both u_patches and v_patches need to be specified for them to be effective.
stage (None or Usd.Stage): If specified, stage on which the primitive mesh should be generated. If None, will
use og.sim.stage
"""
MESH_PRIM_TYPE_TO_EVALUATOR_MAPPING = {
"Sphere": lazy.omni.kit.primitive.mesh.evaluators.sphere.SphereEvaluator,
"Disk": lazy.omni.kit.primitive.mesh.evaluators.disk.DiskEvaluator,
"Plane": lazy.omni.kit.primitive.mesh.evaluators.plane.PlaneEvaluator,
"Cylinder": lazy.omni.kit.primitive.mesh.evaluators.cylinder.CylinderEvaluator,
"Torus": lazy.omni.kit.primitive.mesh.evaluators.torus.TorusEvaluator,
"Cone": lazy.omni.kit.primitive.mesh.evaluators.cone.ConeEvaluator,
"Cube": lazy.omni.kit.primitive.mesh.evaluators.cube.CubeEvaluator,
}
    assert primitive_type in PRIMITIVE_MESH_TYPES, f"Invalid primitive mesh type: {primitive_type}"
evaluator = MESH_PRIM_TYPE_TO_EVALUATOR_MAPPING[primitive_type]
u_backup = lazy.carb.settings.get_settings().get(evaluator.SETTING_U_SCALE)
v_backup = lazy.carb.settings.get_settings().get(evaluator.SETTING_V_SCALE)
hs_backup = lazy.carb.settings.get_settings().get(evaluator.SETTING_OBJECT_HALF_SCALE)
lazy.carb.settings.get_settings().set(evaluator.SETTING_U_SCALE, 1)
lazy.carb.settings.get_settings().set(evaluator.SETTING_V_SCALE, 1)
stage = og.sim.stage if stage is None else stage
# Default half_scale (i.e. half-extent, half_height, radius) is 1.
# TODO (eric): change it to 0.5 once the mesh generator API accepts floating-number HALF_SCALE
# (currently it only accepts integer-number and floors 0.5 into 0).
lazy.carb.settings.get_settings().set(evaluator.SETTING_OBJECT_HALF_SCALE, 1)
kwargs = dict(prim_type=primitive_type, prim_path=prim_path, stage=stage)
if u_patches is not None and v_patches is not None:
kwargs["u_patches"] = u_patches
kwargs["v_patches"] = v_patches
# Import now to avoid too-eager load of Omni classes due to inheritance
from omnigibson.utils.deprecated_utils import CreateMeshPrimWithDefaultXformCommand
CreateMeshPrimWithDefaultXformCommand(**kwargs).do()
lazy.carb.settings.get_settings().set(evaluator.SETTING_U_SCALE, u_backup)
lazy.carb.settings.get_settings().set(evaluator.SETTING_V_SCALE, v_backup)
lazy.carb.settings.get_settings().set(evaluator.SETTING_OBJECT_HALF_SCALE, hs_backup)
def mesh_prim_mesh_to_trimesh_mesh(mesh_prim, include_normals=True, include_texcoord=True):
"""
Generates trimesh mesh from @mesh_prim if mesh_type is "Mesh"
Args:
mesh_prim (Usd.Prim): Mesh prim to convert into trimesh mesh
include_normals (bool): Whether to include the normals in the resulting trimesh or not
include_texcoord (bool): Whether to include the corresponding 2D-texture coordinates in the resulting
trimesh or not
Returns:
trimesh.Trimesh: Generated trimesh mesh
"""
mesh_type = mesh_prim.GetPrimTypeInfo().GetTypeName()
assert mesh_type == "Mesh", f"Expected mesh prim to have type Mesh, got {mesh_type}"
face_vertex_counts = np.array(mesh_prim.GetAttribute("faceVertexCounts").Get())
vertices = np.array(mesh_prim.GetAttribute("points").Get())
face_indices = np.array(mesh_prim.GetAttribute("faceVertexIndices").Get())
faces = []
i = 0
for count in face_vertex_counts:
for j in range(count - 2):
faces.append([face_indices[i], face_indices[i + j + 1], face_indices[i + j + 2]])
i += count
kwargs = dict(vertices=vertices, faces=faces)
if include_normals:
kwargs["vertex_normals"] = np.array(mesh_prim.GetAttribute("normals").Get())
if include_texcoord:
raw_texture = mesh_prim.GetAttribute("primvars:st").Get()
if raw_texture is not None:
kwargs["visual"] = trimesh.visual.TextureVisuals(uv=np.array(raw_texture))
return trimesh.Trimesh(**kwargs)
def mesh_prim_shape_to_trimesh_mesh(mesh_prim):
"""
Generates trimesh mesh from @mesh_prim if mesh_type is "Sphere", "Cube", "Cone" or "Cylinder"
Args:
mesh_prim (Usd.Prim): Mesh prim to convert into trimesh mesh
Returns:
trimesh.Trimesh: Generated trimesh mesh
"""
mesh_type = mesh_prim.GetPrimTypeInfo().GetTypeName()
if mesh_type == "Sphere":
radius = mesh_prim.GetAttribute("radius").Get()
trimesh_mesh = trimesh.creation.icosphere(subdivision=3, radius=radius)
elif mesh_type == "Cube":
extent = mesh_prim.GetAttribute("size").Get()
trimesh_mesh = trimesh.creation.box([extent] * 3)
elif mesh_type == "Cone":
radius = mesh_prim.GetAttribute("radius").Get()
height = mesh_prim.GetAttribute("height").Get()
trimesh_mesh = trimesh.creation.cone(radius=radius, height=height)
        # Trimesh cones have their base at the origin, so shift down by half the height to center the mesh.
transform = trimesh.transformations.translation_matrix([0, 0, -height / 2])
trimesh_mesh.apply_transform(transform)
elif mesh_type == "Cylinder":
radius = mesh_prim.GetAttribute("radius").Get()
height = mesh_prim.GetAttribute("height").Get()
trimesh_mesh = trimesh.creation.cylinder(radius=radius, height=height)
else:
raise ValueError(f"Expected mesh prim to have type Sphere, Cube, Cone or Cylinder, got {mesh_type}")
return trimesh_mesh
def mesh_prim_to_trimesh_mesh(mesh_prim, include_normals=True, include_texcoord=True, world_frame=False):
"""
Generates trimesh mesh from @mesh_prim
Args:
mesh_prim (Usd.Prim): Mesh prim to convert into trimesh mesh
include_normals (bool): Whether to include the normals in the resulting trimesh or not
include_texcoord (bool): Whether to include the corresponding 2D-texture coordinates in the resulting
trimesh or not
world_frame (bool): Whether to convert the mesh to the world frame or not
Returns:
trimesh.Trimesh: Generated trimesh mesh
"""
mesh_type = mesh_prim.GetTypeName()
if mesh_type == "Mesh":
trimesh_mesh = mesh_prim_mesh_to_trimesh_mesh(mesh_prim, include_normals, include_texcoord)
else:
trimesh_mesh = mesh_prim_shape_to_trimesh_mesh(mesh_prim)
if world_frame:
trimesh_mesh.apply_transform(PoseAPI.get_world_pose_with_scale(mesh_prim.GetPath().pathString))
return trimesh_mesh
def sample_mesh_keypoints(mesh_prim, n_keypoints, n_keyfaces, seed=None):
"""
Samples keypoints and keyfaces for mesh @mesh_prim
Args:
mesh_prim (Usd.Prim): Mesh prim to be sampled from
n_keypoints (int): number of (unique) keypoints to randomly sample from @mesh_prim
n_keyfaces (int): number of (unique) keyfaces to randomly sample from @mesh_prim
seed (None or int): If set, sets the random seed for deterministic results
Returns:
2-tuple:
- n-array: (n,) 1D int array representing the randomly sampled point idxs from @mesh_prim.
Note that since this is without replacement, the total length of the array may be less than
@n_keypoints
- None or n-array: 1D int array representing the randomly sampled face idxs from @mesh_prim.
Note that since this is without replacement, the total length of the array may be less than
@n_keyfaces
"""
# Set seed if deterministic
if seed is not None:
np.random.seed(seed)
# Generate trimesh mesh from which to aggregate points
tm = mesh_prim_mesh_to_trimesh_mesh(mesh_prim=mesh_prim, include_normals=False, include_texcoord=False)
n_unique_vertices, n_unique_faces = len(tm.vertices), len(tm.faces)
faces_flat = tm.faces.flatten()
n_vertices = len(faces_flat)
# Sample vertices
unique_vertices = np.unique(faces_flat)
assert len(unique_vertices) == n_unique_vertices
keypoint_idx = np.random.choice(unique_vertices, size=n_keypoints, replace=False) if \
n_unique_vertices > n_keypoints else unique_vertices
# Sample faces
keyface_idx = np.random.choice(n_unique_faces, size=n_keyfaces, replace=False) if \
n_unique_faces > n_keyfaces else np.arange(n_unique_faces)
return keypoint_idx, keyface_idx
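# A hedged usage sketch: deterministically subsampling vertices and faces of a (hypothetical)
# cloth mesh prim, e.g. for keypoint-based state tracking.
#
#   keypoint_idx, keyface_idx = sample_mesh_keypoints(cloth_mesh_prim, n_keypoints=32, n_keyfaces=16, seed=0)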
def get_mesh_volume_and_com(mesh_prim, world_frame=False):
"""
Computes the volume and center of mass for @mesh_prim
Args:
mesh_prim (Usd.Prim): Mesh prim to compute volume and center of mass for
world_frame (bool): Whether to return the volume and CoM in the world frame
Returns:
Tuple[float, np.array]: Tuple containing the (volume, center_of_mass) in the mesh frame or the world frame
"""
trimesh_mesh = mesh_prim_to_trimesh_mesh(mesh_prim, include_normals=False, include_texcoord=False, world_frame=world_frame)
if trimesh_mesh.is_volume:
volume = trimesh_mesh.volume
com = trimesh_mesh.center_mass
else:
# If the mesh is not a volume, we compute its convex hull and use that instead
try:
trimesh_mesh_convex = trimesh_mesh.convex_hull
volume = trimesh_mesh_convex.volume
com = trimesh_mesh_convex.center_mass
        except Exception:
# if convex hull computation fails, it usually means the mesh is degenerated: use trivial values.
volume = 0.0
com = np.zeros(3)
return volume, com
def check_extent_radius_ratio(mesh_prim):
"""
Checks if the min extent in world frame and the extent radius ratio in local frame of @mesh_prim is within the
acceptable range for PhysX GPU acceleration (not too thin, and not too oblong)
Ref: https://github.com/NVIDIA-Omniverse/PhysX/blob/561a0df858d7e48879cdf7eeb54cfe208f660f18/physx/source/geomutils/src/convex/GuConvexMeshData.h#L183-L190
Args:
mesh_prim (Usd.Prim): Mesh prim to check
Returns:
bool: True if the min extent (world) and the extent radius ratio (local frame) is acceptable, False otherwise
"""
mesh_type = mesh_prim.GetPrimTypeInfo().GetTypeName()
# Non-mesh prims are always considered to be within the acceptable range
if mesh_type != "Mesh":
return True
trimesh_mesh_world = mesh_prim_to_trimesh_mesh(mesh_prim, include_normals=False, include_texcoord=False, world_frame=True)
min_extent = trimesh_mesh_world.extents.min()
# If the mesh is too flat in the world frame, omniverse cannot create convex mesh for it
if min_extent < 1e-5:
return False
trimesh_mesh = mesh_prim_to_trimesh_mesh(mesh_prim, include_normals=False, include_texcoord=False, world_frame=False)
if not trimesh_mesh.is_volume:
trimesh_mesh = trimesh_mesh.convex_hull
max_radius = trimesh_mesh.extents.max() / 2.0
min_radius = trimesh.proximity.closest_point(trimesh_mesh, np.array([trimesh_mesh.center_mass]))[1][0]
ratio = max_radius / min_radius
# PhysX requires ratio to be < 100.0. We use 95.0 to be safe.
return ratio < 95.0
def create_primitive_mesh(prim_path, primitive_type, extents=1.0, u_patches=None, v_patches=None, stage=None):
"""
Helper function that generates a UsdGeom.Mesh prim at specified @prim_path of type @primitive_type.
NOTE: Generated mesh prim will, by default, have extents equaling [1, 1, 1]
Args:
prim_path (str): Where the loaded mesh should exist on the stage
primitive_type (str): Type of primitive mesh to create. Should be one of:
{"Cone", "Cube", "Cylinder", "Disk", "Plane", "Sphere", "Torus"}
extents (float or 3-array): Specifies the extents of the generated mesh. Default is 1.0, i.e.:
generated mesh will be in be contained in a [1,1,1] sized bounding box
u_patches (int or None): If specified, should be an integer that represents how many segments to create in the
u-direction. E.g. 10 means 10 segments (and therefore 11 vertices) will be created.
v_patches (int or None): If specified, should be an integer that represents how many segments to create in the
v-direction. E.g. 10 means 10 segments (and therefore 11 vertices) will be created.
Both u_patches and v_patches need to be specified for them to be effective.
stage (None or Usd.Stage): If specified, stage on which the primitive mesh should be generated. If None, will
use og.sim.stage
Returns:
UsdGeom.Mesh: Generated primitive mesh as a prim on the active stage
"""
assert_valid_key(key=primitive_type, valid_keys=PRIMITIVE_MESH_TYPES, name="primitive mesh type")
create_mesh_prim_with_default_xform(primitive_type, prim_path, u_patches=u_patches, v_patches=v_patches, stage=stage)
mesh = lazy.pxr.UsdGeom.Mesh.Define(og.sim.stage if stage is None else stage, prim_path)
# Modify the points and normals attributes so that total extents is the desired
# This means multiplying omni's default by extents * 50.0, as the native mesh generated has extents [-0.01, 0.01]
# -- i.e.: 2cm-wide mesh
extents = np.ones(3) * extents if isinstance(extents, float) else np.array(extents)
for attr in (mesh.GetPointsAttr(), mesh.GetNormalsAttr()):
vals = np.array(attr.Get()).astype(np.float64)
attr.Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*(val * extents * 50.0)) for val in vals]))
mesh.GetExtentAttr().Set(lazy.pxr.Vt.Vec3fArray([lazy.pxr.Gf.Vec3f(*(-extents / 2.0)), lazy.pxr.Gf.Vec3f(*(extents / 2.0))]))
return mesh
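# A hedged usage sketch: generating a 10cm cube mesh prim at a hypothetical prim path.
#
#   mesh = create_primitive_mesh("/World/debug_cube", primitive_type="Cube", extents=0.1)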
def add_asset_to_stage(asset_path, prim_path):
"""
Adds asset file (either USD or OBJ) at @asset_path at the location @prim_path
Args:
asset_path (str): Absolute or relative path to the asset file to load
prim_path (str): Where loaded asset should exist on the stage
Returns:
Usd.Prim: Loaded prim as a USD prim
"""
# Make sure this is actually a supported asset type
assert asset_path[-4:].lower() in {".usd", ".obj"}, f"Cannot load a non-USD or non-OBJ file as a USD prim!"
asset_type = asset_path[-3:]
# Make sure the path exists
assert os.path.exists(asset_path), f"Cannot load {asset_type.upper()} file {asset_path} because it does not exist!"
# Add reference to stage and grab prim
lazy.omni.isaac.core.utils.stage.add_reference_to_stage(usd_path=asset_path, prim_path=prim_path)
prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path)
# Make sure prim was loaded correctly
assert prim, f"Failed to load {asset_type.upper()} object from path: {asset_path}"
return prim
def get_world_prim():
"""
Returns:
Usd.Prim: Active world prim in the current stage
"""
return lazy.omni.isaac.core.utils.prims.get_prim_at_path("/World")
| 40,894 | Python | 45.209039 | 159 | 0.665208 |
StanfordVL/OmniGibson/omnigibson/utils/config_utils.py | import collections.abc
import json
import os
import numpy as np
import yaml
# File I/O related
def parse_config(config):
"""
Parse OmniGibson config file / object
Args:
config (dict or str): Either config dictionary or path to yaml config to load
Returns:
dict: Parsed config
"""
if isinstance(config, collections.abc.Mapping):
return config
else:
assert isinstance(config, str)
if not os.path.exists(config):
raise IOError(
"config path {} does not exist. Please either pass in a dict or a string that represents the file path to the config yaml.".format(
config
)
)
with open(config, "r") as f:
config_data = yaml.load(f, Loader=yaml.FullLoader)
return config_data
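# A hedged usage sketch: both call forms are accepted -- a dict passes through unchanged, while
# a string is treated as a path to a YAML file on disk. The path below is illustrative.
#
#   cfg = parse_config({"scene": {"type": "Scene"}})  # returned as-is
#   cfg = parse_config("configs/my_experiment.yaml")  # loaded from disk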
def parse_str_config(config):
"""
Parse string config
Args:
config (str): Yaml cfg as a string to load
Returns:
dict: Parsed config
"""
return yaml.safe_load(config)
def dump_config(config):
"""
Converts YML config into a string
Args:
config (dict): Config to dump
Returns:
str: Config as a string
"""
return yaml.dump(config)
def load_default_config():
"""
Loads a default configuration to use for OmniGibson
Returns:
dict: Loaded default configuration file
"""
from omnigibson import example_config_path
return parse_config(f"{example_config_path}/default_cfg.yaml")
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
        return json.JSONEncoder.default(self, obj)
 | 1,691 | Python | 20.974026 | 143 | 0.628622
StanfordVL/OmniGibson/omnigibson/utils/constants.py | """
Constant Definitions
"""
from functools import cache
import hashlib
import os
import numpy as np
from enum import Enum, IntEnum
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils.asset_utils import get_og_avg_category_specs, get_all_object_categories
MAX_INSTANCE_COUNT = np.iinfo(np.uint32).max
MAX_CLASS_COUNT = np.iinfo(np.uint32).max
MAX_VIEWER_SIZE = 2048
class ViewerMode(IntEnum):
NAVIGATION = 0
MANIPULATION = 1
PLANNING = 2
class LightingMode(str, Enum):
# See https://stackoverflow.com/a/58608362 for info on string enums
STAGE = "stage"
CAMERA = "camera"
RIG_DEFAULT = "Default"
RIG_GREY = "Grey Studio"
RIG_COLORED = "Colored Lights"
class SimulatorMode(IntEnum):
GUI = 1
HEADLESS = 2
VR = 3
# Specific methods for applying / removing particles
class ParticleModifyMethod(str, Enum):
ADJACENCY = "adjacency"
PROJECTION = "projection"
# Specific condition types for applying / removing particles
class ParticleModifyCondition(str, Enum):
FUNCTION = "function"
SATURATED = "saturated"
TOGGLEDON = "toggled_on"
GRAVITY = "gravity"
# Structure categories that need to always be loaded for stability purposes
STRUCTURE_CATEGORIES = frozenset({"floors", "walls", "ceilings", "lawn", "driveway", "fence", "roof", "background"})
# Joint friction magic values to assign to objects based on their category
DEFAULT_JOINT_FRICTION = 10.0
SPECIAL_JOINT_FRICTIONS = {
"oven": 30.0,
"dishwasher": 30.0,
"toilet": 3.0,
}
class PrimType(IntEnum):
RIGID = 0
CLOTH = 1
class EmitterType(IntEnum):
FIRE = 0
STEAM = 1
# Valid primitive mesh types
PRIMITIVE_MESH_TYPES = {
"Cone",
"Cube",
"Cylinder",
"Disk",
"Plane",
"Sphere",
"Torus",
}
# Valid geom types
GEOM_TYPES = {"Sphere", "Cube", "Cone", "Cylinder", "Mesh"}
# Valid joint axis
JointAxis = ["X", "Y", "Z"]
# TODO: Clean up this class to be better enum with sanity checks
# Joint types
class JointType:
JOINT = "Joint"
JOINT_FIXED = "FixedJoint"
JOINT_PRISMATIC = "PrismaticJoint"
JOINT_REVOLUTE = "RevoluteJoint"
JOINT_SPHERICAL = "SphericalJoint"
_STR_TO_TYPE = {
"Joint": JOINT,
"FixedJoint": JOINT_FIXED,
"PrismaticJoint": JOINT_PRISMATIC,
"RevoluteJoint": JOINT_REVOLUTE,
"SphericalJoint": JOINT_SPHERICAL,
}
_TYPE_TO_STR = {
JOINT: "Joint",
JOINT_FIXED: "FixedJoint",
JOINT_PRISMATIC: "PrismaticJoint",
JOINT_REVOLUTE: "RevoluteJoint",
JOINT_SPHERICAL: "SphericalJoint",
}
@classmethod
def get_type(cls, str_type):
assert str_type in cls._STR_TO_TYPE, f"Invalid string joint type name received: {str_type}"
return cls._STR_TO_TYPE[str_type]
@classmethod
def get_str(cls, joint_type):
assert joint_type in cls._TYPE_TO_STR, f"Invalid joint type name received: {joint_type}"
return cls._TYPE_TO_STR[joint_type]
@classmethod
def is_valid(cls, joint_type):
return joint_type in cls._TYPE_TO_STR if isinstance(joint_type, cls) else joint_type in cls._STR_TO_TYPE
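# Example sketch: round-tripping between USD joint type strings and JointType constants.
def _example_joint_type_roundtrip():
    jt = JointType.get_type("RevoluteJoint")
    assert jt == JointType.JOINT_REVOLUTE
    assert JointType.get_str(jt) == "RevoluteJoint"
    assert JointType.is_valid(jt)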
# Object category specs
AVERAGE_OBJ_DENSITY = 67.0
AVERAGE_CATEGORY_SPECS = get_og_avg_category_specs()
def get_collision_group_mask(groups_to_exclude=()):  # tuple default avoids the mutable-default-argument pitfall
"""Get a collision group mask that has collisions enabled for every group except those in groups_to_exclude."""
collision_mask = ALL_COLLISION_GROUPS_MASK
for group in groups_to_exclude:
collision_mask &= ~(1 << group)
return collision_mask
class OccupancyGridState:
OBSTACLES = 0.0
UNKNOWN = 0.5
FREESPACE = 1.0
MAX_TASK_RELEVANT_OBJS = 50
TASK_RELEVANT_OBJS_OBS_DIM = 9
AGENT_POSE_DIM = 6
# TODO: What the hell is this magic list?? It's not used anywhere
UNDER_OBJECTS = [
"breakfast_table",
"coffee_table",
"console_table",
"desk",
"gaming_table",
"pedestal_table",
"pool_table",
"stand",
"armchair",
"chaise_longue",
"folding_chair",
"highchair",
"rocking_chair",
"straight_chair",
"swivel_chair",
"bench",
]
@cache
def semantic_class_name_to_id():
"""
Get mapping from semantic class name to class id
Returns:
dict: class name to class id
"""
categories = get_all_object_categories()
from omnigibson.systems.system_base import REGISTERED_SYSTEMS
systems = sorted(REGISTERED_SYSTEMS)
all_semantics = sorted(set(categories + systems + ["background", "unlabelled", "object", "light", "agent"]))
# Assign a unique class id to each class name with hashing
class_name_to_class_id = {s: int(hashlib.md5(s.encode()).hexdigest(), 16) % (2 ** 32) for s in all_semantics}
return class_name_to_class_id
@cache
def semantic_class_id_to_name():
"""
Get mapping from semantic class id to class name
Returns:
dict: class id to class name
"""
return {v: k for k, v in semantic_class_name_to_id().items()}
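# Example sketch: the two cached mappings above are inverses of each other (assuming
# no md5 hash collisions among the registered class names).
def _example_semantic_mapping_roundtrip():
    name_to_id = semantic_class_name_to_id()
    id_to_name = semantic_class_id_to_name()
    assert all(id_to_name[idn] == name for name, idn in name_to_id.items())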
StanfordVL/OmniGibson/omnigibson/utils/teleop_utils.py | import numpy as np
import time
from typing import Iterable, Optional, Tuple
import omnigibson as og
import omnigibson.lazy as lazy
import omnigibson.utils.transform_utils as T
from omnigibson.macros import create_module_macros
from omnigibson.objects import USDObject
from omnigibson.robots.robot_base import BaseRobot
try:
from telemoma.human_interface.teleop_core import TeleopAction, TeleopObservation
from telemoma.human_interface.teleop_policy import TeleopPolicy
from telemoma.utils.general_utils import AttrDict
from telemoma.configs.base_config import teleop_config
except ImportError as e:
    raise ImportError("For teleoperation, install telemoma by running 'pip install telemoma'") from e
m = create_module_macros(module_path=__file__)
m.movement_speed = 0.2 # the speed of the robot base movement
class TeleopSystem(TeleopPolicy):
"""
Base class for teleop policy
"""
def __init__(self, config: AttrDict, robot: BaseRobot, show_control_marker: bool = False) -> None:
"""
Initializes the Teleoperation System
Args:
config (AttrDict): configuration dictionary
robot (BaseRobot): the robot that will be controlled.
show_control_marker (bool): whether to show a visual marker that indicates the target pose of the control.
"""
super().__init__(config)
self.teleop_action: TeleopAction = TeleopAction()
self.robot_obs: TeleopObservation = TeleopObservation()
self.robot = robot
self.robot_arms = ["left", "right"] if self.robot.n_arms == 2 else ["right"]
# robot parameters
self.movement_speed = m.movement_speed
self.show_control_marker = show_control_marker
self.control_markers = {}
if show_control_marker:
for arm in robot.arm_names:
arm_name = "right" if arm == robot.default_arm else "left"
self.control_markers[arm_name] = USDObject(name=f"target_{arm_name}", usd_path=robot.eef_usd_path[arm],
visual_only=True)
og.sim.import_object(self.control_markers[arm_name])
def get_obs(self) -> TeleopObservation:
"""
Retrieve observation data from robot
Returns:
TeleopObservation: dataclass containing robot observations
"""
robot_obs = TeleopObservation()
base_pos, base_orn = self.robot.get_position_orientation()
robot_obs.base = np.r_[base_pos[:2], [T.quat2euler(base_orn)[2]]]
for i, arm in enumerate(self.robot_arms):
abs_cur_pos, abs_cur_orn = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position_orientation()
rel_cur_pos, rel_cur_orn = T.relative_pose_transform(abs_cur_pos, abs_cur_orn, base_pos, base_orn)
gripper_pos = np.mean(
self.robot.get_joint_positions(normalized=True)[self.robot.gripper_control_idx[self.robot.arm_names[i]]]
)
# if we are grasping, we manually set the gripper position to be at most 0.5
if self.robot.controllers[f"gripper_{self.robot.arm_names[i]}"].is_grasping():
gripper_pos = min(gripper_pos, 0.5)
robot_obs[arm] = np.r_[
rel_cur_pos,
rel_cur_orn,
gripper_pos
]
return robot_obs
def get_action(self, robot_obs: TeleopObservation) -> np.ndarray:
"""
Generate action data from VR input for robot teleoperation
Args:
robot_obs (TeleopObservation): dataclass containing robot observations
Returns:
np.ndarray: array of action data
"""
# get teleop action
self.teleop_action = super().get_action(robot_obs)
# optionally update control marker
if self.show_control_marker:
for arm_name in self.control_markers:
delta_pos, delta_orn = self.teleop_action[arm_name][:3], T.euler2quat(self.teleop_action[arm_name][3:6])
rel_target_pos = robot_obs[arm_name][:3] + delta_pos
rel_target_orn = T.quat_multiply(delta_orn, robot_obs[arm_name][3:7])
base_pos, base_orn = self.robot.get_position_orientation()
target_pos, target_orn = T.pose_transform(base_pos, base_orn, rel_target_pos, rel_target_orn)
self.control_markers[arm_name].set_position_orientation(target_pos, target_orn)
return self.robot.teleop_data_to_action(self.teleop_action)
def reset(self) -> None:
"""
Reset the teleop policy
"""
self.teleop_action = TeleopAction()
self.robot_obs = TeleopObservation()
for interface in self.interfaces.values():
interface.reset_state()
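# Example sketch of a minimal teleoperation loop built on top of TeleopSystem.
# Assumes a robot and og.sim are already set up; concrete subclasses (e.g. the
# OVXRSystem below) may require additional start()/update() calls.
def _example_teleop_loop(teleop_sys: TeleopSystem, n_steps: int = 100) -> None:
    for _ in range(n_steps):
        obs = teleop_sys.get_obs()
        action = teleop_sys.get_action(obs)
        teleop_sys.robot.apply_action(action)
        og.sim.step()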
class OVXRSystem(TeleopSystem):
"""
VR Teleoperation System build on top of Omniverse XR extension and TeleMoMa's TeleopSystem
"""
def __init__(
self,
robot: BaseRobot,
show_control_marker: bool = False,
system: str = "SteamVR",
disable_display_output: bool = False,
enable_touchpad_movement: bool = False,
align_anchor_to_robot_base: bool = False,
use_hand_tracking: bool = False,
) -> None:
"""
Initializes the VR system
Args:
robot (BaseRobot): the robot that VR will control.
show_control_marker (bool): whether to show a control marker
system (str): the VR system to use, one of ["OpenXR", "SteamVR"], default is "SteamVR".
disable_display_output (bool): whether we will not display output to the VR headset (only use controller tracking), default is False.
enable_touchpad_movement (bool): whether to enable VR system anchor movement by controller, default is False.
align_anchor_to_robot_base (bool): whether to align VR anchor to robot base, default is False.
use_hand_tracking (bool): whether to use hand tracking instead of controllers, default is False.
        NOTE: enable_touchpad_movement and align_anchor_to_robot_base cannot be enabled at the same time.
            The former enables free movement of the VR system (i.e. the user), while the latter constrains the VR system to the robot pose.
"""
self.raw_data = {}
# enable xr extension
lazy.omni.isaac.core.utils.extensions.enable_extension("omni.kit.xr.profile.vr")
self.xr_device_class = lazy.omni.kit.xr.core.XRDeviceClass
# run super method
super().__init__(teleop_config, robot, show_control_marker)
# we want to further slow down the movement speed if we are using touchpad movement
if enable_touchpad_movement:
self.movement_speed *= 0.3
# get xr core and profile
self.xr_core = lazy.omni.kit.xr.core.XRCore.get_singleton()
self.vr_profile = self.xr_core.get_profile("vr")
self.disable_display_output = disable_display_output
self.enable_touchpad_movement = enable_touchpad_movement
self.align_anchor_to_robot_base = align_anchor_to_robot_base
assert not (self.enable_touchpad_movement and self.align_anchor_to_robot_base), \
"enable_touchpad_movement and align_anchor_to_robot_base cannot be True at the same time!"
# set avatar
if self.show_control_marker:
self.vr_profile.set_avatar(lazy.omni.kit.xr.ui.stage.common.XRAvatarManager.get_singleton().create_avatar("basic_avatar", {}))
else:
self.vr_profile.set_avatar(lazy.omni.kit.xr.ui.stage.common.XRAvatarManager.get_singleton().create_avatar("empty_avatar", {}))
        # set the anchor mode to the scene origin
lazy.carb.settings.get_settings().set(self.vr_profile.get_scene_persistent_path() + "anchorMode", "scene origin")
# set vr system
lazy.carb.settings.get_settings().set(self.vr_profile.get_persistent_path() + "system/display", system)
# set display mode
lazy.carb.settings.get_settings().set(
self.vr_profile.get_persistent_path() + "disableDisplayOutput", disable_display_output
)
lazy.carb.settings.get_settings().set('/rtx/rendermode', "RaytracedLighting")
# devices info
self.hmd = None
self.controllers = {}
self.trackers = {}
self.xr2og_orn_offset = np.array([0.5, -0.5, -0.5, -0.5])
self.og2xr_orn_offset = np.array([-0.5, 0.5, 0.5, -0.5])
# setup event subscriptions
self.reset()
self.use_hand_tracking = use_hand_tracking
if use_hand_tracking:
self.raw_data["hand_data"] = {}
self.teleop_action.hand_data = {}
self._hand_tracking_subscription = self.xr_core.get_event_stream().create_subscription_to_pop_by_type(
lazy.omni.kit.xr.core.XRCoreEventType.hand_joints, self._update_hand_tracking_data, name="hand tracking"
)
def xr2og(self, transform: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
Apply the orientation offset from the Omniverse XR coordinate system to the OmniGibson coordinate system
Note that we have to transpose the transform matrix because Omniverse uses row-major matrices
while OmniGibson uses column-major matrices
Args:
transform (np.ndarray): the transform matrix in the Omniverse XR coordinate system
Returns:
tuple(np.ndarray, np.ndarray): the position and orientation in the OmniGibson coordinate system
"""
pos, orn = T.mat2pose(np.array(transform).T)
orn = T.quat_multiply(orn, self.xr2og_orn_offset)
return pos, orn
def og2xr(self, pos: np.ndarray, orn: np.ndarray) -> np.ndarray:
"""
Apply the orientation offset from the OmniGibson coordinate system to the Omniverse XR coordinate system
Args:
pos (np.ndarray): the position in the OmniGibson coordinate system
orn (np.ndarray): the orientation in the OmniGibson coordinate system
Returns:
np.ndarray: the transform matrix in the Omniverse XR coordinate system
"""
orn = T.quat_multiply(self.og2xr_orn_offset, orn)
return T.pose2mat((pos, orn)).T.astype(np.float64)
def reset(self) -> None:
"""
Reset the teleop policy
"""
super().reset()
self.raw_data = {}
self.teleop_action.is_valid = {"left": False, "right": False, "head": False}
self.teleop_action.reset = {"left": False, "right": False}
self.teleop_action.head = np.zeros(6)
@property
def is_enabled(self) -> bool:
"""
Checks whether the VR system is enabled
Returns:
bool: whether the VR system is enabled
"""
return self.vr_profile.is_enabled()
def start(self) -> None:
"""
        Enables the VR profile
"""
self.vr_profile.request_enable_profile()
og.sim.step()
assert self.vr_profile.is_enabled(), "[VRSys] VR profile not enabled!"
# We want to make sure the hmd is tracking so that the whole system is ready to go
while True:
print("[VRSys] Waiting for VR headset to become active...")
self._update_devices()
if self.hmd is not None:
break
time.sleep(1)
og.sim.step()
def stop(self) -> None:
"""
        Disables the VR profile
"""
self.xr_core.request_disable_profile()
og.sim.step()
assert not self.vr_profile.is_enabled(), "[VRSys] VR profile not disabled!"
def update(self) -> None:
"""
Steps the VR system and update self.teleop_action
"""
# update raw data
self._update_devices()
self._update_device_transforms()
self._update_button_data()
# Update teleop data based on controller input if not using hand tracking
if not self.use_hand_tracking:
self.teleop_action.base = np.zeros(3)
self.teleop_action.torso = 0.0
# update right hand related info
for arm_name, arm in zip(["left", "right"], self.robot_arms):
if arm in self.controllers:
self.teleop_action[arm_name] = np.concatenate((
self.raw_data["transforms"]["controllers"][arm][0],
T.quat2euler(T.quat_multiply(
self.raw_data["transforms"]["controllers"][arm][1],
self.robot.teleop_rotation_offset[self.robot.arm_names[self.robot_arms.index(arm)]]
)),
[self.raw_data["button_data"][arm]["axis"]["trigger"]]
))
self.teleop_action.is_valid[arm_name] = self._is_valid_transform(self.raw_data["transforms"]["controllers"][arm])
else:
self.teleop_action.is_valid[arm_name] = False
# update base and reset info
if "right" in self.controllers:
self.teleop_action.reset["right"] = self.raw_data["button_data"]["right"]["press"]["grip"]
right_axis = self.raw_data["button_data"]["right"]["axis"]
self.teleop_action.base[0] = right_axis["touchpad_y"] * self.movement_speed
self.teleop_action.torso = -right_axis["touchpad_x"] * self.movement_speed
if "left" in self.controllers:
self.teleop_action.reset["left"] = self.raw_data["button_data"]["left"]["press"]["grip"]
left_axis = self.raw_data["button_data"]["left"]["axis"]
self.teleop_action.base[1] = -left_axis["touchpad_x"] * self.movement_speed
self.teleop_action.base[2] = left_axis["touchpad_y"] * self.movement_speed
# update head related info
self.teleop_action.head = np.r_[self.raw_data["transforms"]["head"][0], T.quat2euler(self.raw_data["transforms"]["head"][1])]
self.teleop_action.is_valid["head"] = self._is_valid_transform(self.raw_data["transforms"]["head"])
# Optionally move anchor
if self.enable_touchpad_movement:
# we use x, y from right controller for 2d movement and y from left controller for z movement
self._move_anchor(pos_offset=np.r_[[self.teleop_action.torso], self.teleop_action.base[[0, 2]]])
if self.align_anchor_to_robot_base:
robot_base_pos, robot_base_orn = self.robot.get_position_orientation()
self.vr_profile.set_virtual_world_anchor_transform(self.og2xr(robot_base_pos, robot_base_orn[[0, 2, 1, 3]]))
def teleop_data_to_action(self) -> np.ndarray:
"""
Generate action data from VR input for robot teleoperation
Returns:
np.ndarray: array of action data
"""
# optionally update control marker
if self.show_control_marker:
self.update_control_marker()
return self.robot.teleop_data_to_action(self.teleop_action)
def reset_transform_mapping(self, arm: str = "right") -> None:
"""
Snap device to the robot end effector (ManipulationRobot only)
Args:
arm(str): name of the arm, one of "left" or "right". Default is "right".
"""
robot_base_orn = self.robot.get_orientation()
robot_eef_pos = self.robot.eef_links[self.robot.arm_names[self.robot_arms.index(arm)]].get_position()
target_transform = self.og2xr(pos=robot_eef_pos, orn=robot_base_orn)
self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device(target_transform, self.controllers[arm])
    def set_initial_transform(self, pos: Iterable[float], orn: Iterable[float] = (0, 0, 0, 1)) -> None:  # tuple default avoids a mutable default
"""
        Sets the initial transform of the VR system anchor w.r.t. the head
Note that stepping the vr system multiple times is necessary here due to a bug in OVXR plugin
Args:
pos(Iterable[float]): initial position of the vr system
orn(Iterable[float]): initial orientation of the vr system
"""
for _ in range(10):
self.update()
og.sim.step()
self.vr_profile.set_physical_world_to_world_anchor_transform_to_match_xr_device(self.og2xr(pos, orn), self.hmd)
def _move_anchor(
self,
pos_offset: Optional[Iterable[float]] = None,
rot_offset: Optional[Iterable[float]] = None
) -> None:
"""
Updates the anchor of the xr system in the virtual world
Args:
pos_offset (Iterable[float]): the position offset to apply to the anchor *in hmd frame*.
rot_offset (Iterable[float]): the rotation offset to apply to the anchor *in hmd frame*.
"""
if pos_offset is not None:
# note that x is forward, y is down, z is left for ovxr, but x is forward, y is left, z is up for og
pos_offset = np.array([-pos_offset[0], pos_offset[2], -pos_offset[1]]).astype(np.float64)
self.vr_profile.add_move_physical_world_relative_to_device(pos_offset)
if rot_offset is not None:
rot_offset = np.array(rot_offset).astype(np.float64)
self.vr_profile.add_rotate_physical_world_around_device(rot_offset)
def _is_valid_transform(self, transform: Tuple[np.ndarray, np.ndarray]) -> bool:
"""
Determine whether the transform is valid (ovxr plugin will return a zero position and rotation if not valid)
"""
return np.any(np.not_equal(transform[0], np.zeros(3))) \
and np.any(np.not_equal(transform[1], self.og2xr_orn_offset))
def _update_devices(self) -> None:
"""
Update the VR device list
"""
for device in self.vr_profile.get_device_list():
if device.get_class() == self.xr_device_class.xrdisplaydevice:
self.hmd = device
elif device.get_class() == self.xr_device_class.xrcontroller:
# we want the first 2 controllers to be corresponding to the left and right hand
d_idx = device.get_index()
controller_name = ["left", "right"][d_idx] if d_idx < 2 else f"controller_{d_idx+1}"
self.controllers[controller_name] = device
elif device.get_class() == self.xr_device_class.xrtracker:
self.trackers[device.get_index()] = device
def _update_device_transforms(self) -> None:
"""
Get the transform matrix of each VR device *in world frame* and store in self.raw_data
"""
transforms = {}
transforms["head"] = self.xr2og(self.hmd.get_virtual_world_pose())
transforms["controllers"] = {}
transforms["trackers"] = {}
for controller_name in self.controllers:
transforms["controllers"][controller_name] = self.xr2og(
self.controllers[controller_name].get_virtual_world_pose())
for tracker_index in self.trackers:
transforms["trackers"][tracker_index] = self.xr2og(self.trackers[tracker_index].get_virtual_world_pose())
self.raw_data["transforms"] = transforms
def _update_button_data(self):
"""
        Get the button data (press/touch states and axis values) for each controller and store it in self.raw_data
        """
button_data = {}
for controller_name in self.controllers:
button_data[controller_name] = {}
button_data[controller_name]["press"] = self.controllers[controller_name].get_button_press_state()
button_data[controller_name]["touch"] = self.controllers[controller_name].get_button_touch_state()
button_data[controller_name]["axis"] = self.controllers[controller_name].get_axis_state()
self.raw_data["button_data"] = button_data
def _update_hand_tracking_data(self, e) -> None:
"""
Get hand tracking data, see https://registry.khronos.org/OpenXR/specs/1.0/html/xrspec.html#convention-of-hand-joints for joint indices
Args:
e (carb.events.IEvent): event that contains hand tracking data as payload
"""
e.consume()
data_dict = e.payload
for hand_name, hand in zip(["left, right"], self.robot_arms):
if data_dict[f"joint_count_{hand}"] != 0:
self.teleop_action.is_valid[hand_name] = True
self.raw_data["hand_data"][hand] = {"pos": [], "orn": []}
# hand_joint_matrices is an array of flattened 4x4 transform matrices for the 26 hand markers
hand_joint_matrices = data_dict[f"joint_matrices_{hand}"]
for i in range(26):
# extract the pose from the flattened transform matrix
pos, orn = self.xr2og(np.reshape(hand_joint_matrices[16 * i: 16 * (i + 1)], (4, 4)))
self.raw_data["hand_data"][hand]["pos"].append(pos)
self.raw_data["hand_data"][hand]["orn"].append(orn)
self.teleop_action[hand_name] = np.r_[
self.raw_data["hand_data"][hand]["pos"][0],
T.quat2euler(T.quat_multiply(
self.raw_data["hand_data"][hand]["orn"][0],
self.robot.teleop_rotation_offset[self.robot.arm_names[self.robot_arms.index(hand)]]
)),
[0]
]
# Get each finger joint's rotation angle from hand tracking data
# joint_angles is a 5 x 3 array of joint rotations (from thumb to pinky, from base to tip)
joint_angles = np.zeros((5, 3))
raw_hand_data = self.raw_data["hand_data"][hand]["pos"]
for i in range(5):
for j in range(3):
# get the 3 related joints indices
prev_joint_idx, cur_joint_idx, next_joint_idx = i * 5 + j + 1, i * 5 + j + 2, i * 5 + j + 3
# get the 3 related joints' positions
prev_joint_pos = raw_hand_data[prev_joint_idx]
cur_joint_pos = raw_hand_data[cur_joint_idx]
next_joint_pos = raw_hand_data[next_joint_idx]
# calculate the angle formed by 3 points
v1 = cur_joint_pos - prev_joint_pos
v2 = next_joint_pos - cur_joint_pos
v1 /= np.linalg.norm(v1)
v2 /= np.linalg.norm(v2)
joint_angles[i, j] = np.arccos(v1 @ v2)
self.teleop_action.hand_data[hand_name] = joint_angles
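# Example sketch: end-to-end VR teleoperation with OVXRSystem (hypothetical setup;
# requires a VR-capable machine with SteamVR available and og.sim already running).
def _example_vr_teleop(robot: BaseRobot, n_steps: int = 1000) -> None:
    vr_sys = OVXRSystem(robot=robot, show_control_marker=True, system="SteamVR")
    vr_sys.start()
    # Place the VR anchor roughly at standing head height (illustrative value)
    vr_sys.set_initial_transform(pos=[0.0, 0.0, 1.5])
    for _ in range(n_steps):
        vr_sys.update()
        action = vr_sys.teleop_data_to_action()
        robot.apply_action(action)
        og.sim.step()
    vr_sys.stop()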
StanfordVL/OmniGibson/omnigibson/utils/render_utils.py | """
Set of rendering utility functions when working with Omni
"""
import numpy as np
import omnigibson as og
from omnigibson.prims import EntityPrim, RigidPrim, VisualGeomPrim
from omnigibson.utils.physx_utils import bind_material
import omnigibson.utils.transform_utils as T
import omnigibson.lazy as lazy
def make_glass(prim):
"""
    Links the OmniGlass material with the EntityPrim, RigidPrim, or VisualGeomPrim @prim, and procedurally generates
the necessary OmniGlass material prim if necessary.
Args:
prim (EntityPrim or RigidPrim or VisualGeomPrim): Desired prim to convert into glass
"""
# Generate the set of visual meshes we'll convert into glass
if isinstance(prim, EntityPrim):
# Grab all visual meshes from all links
visual_meshes = [vm for link in prim.links.values() for vm in link.visual_meshes.values()]
elif isinstance(prim, RigidPrim):
# Grab all visual meshes from the link
visual_meshes = [vm for vm in prim.visual_meshes.values()]
elif isinstance(prim, VisualGeomPrim):
# Just use this visual mesh
visual_meshes = [prim]
else:
raise ValueError(f"Inputted prim must an instance of EntityPrim, RigidPrim, or VisualGeomPrim "
f"in order to be converted into glass!")
# Grab the glass material prim; if it doesn't exist, we create it on the fly
glass_prim_path = "/Looks/OmniGlass"
if not lazy.omni.isaac.core.utils.prims.get_prim_at_path(glass_prim_path):
mtl_created = []
lazy.omni.kit.commands.execute(
"CreateAndBindMdlMaterialFromLibrary",
mdl_name="OmniGlass.mdl",
mtl_name="OmniGlass",
mtl_created_list=mtl_created,
)
# Iterate over all meshes and bind the glass material to the mesh
for vm in visual_meshes:
bind_material(vm.prim_path, material_path=glass_prim_path)
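# Example sketch: converting an imported object's visual meshes into glass. The
# object name used for the registry lookup below is hypothetical.
def _example_make_glass():
    obj = og.sim.scene.object_registry("name", "wine_glass")
    make_glass(obj)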
def create_pbr_material(prim_path):
"""
Creates an omni pbr material prim at the specified @prim_path
Args:
prim_path (str): Prim path where the PBR material should be generated
Returns:
Usd.Prim: Generated PBR material prim
"""
    # Create the material from the built-in OmniPBR MDL preset
mtl_created = []
lazy.omni.kit.commands.execute(
"CreateAndBindMdlMaterialFromLibrary",
mdl_name="OmniPBR.mdl",
mtl_name="OmniPBR",
mtl_created_list=mtl_created,
)
material_path = mtl_created[0]
# Move prim to desired location
lazy.omni.kit.commands.execute("MovePrim", path_from=material_path, path_to=prim_path)
# Return generated material
return lazy.omni.isaac.core.utils.prims.get_prim_at_path(material_path)
def create_skylight(intensity=500, color=(1.0, 1.0, 1.0)):
"""
Creates a skylight object with the requested @color
Args:
intensity (float): Intensity of the generated skylight
color (3-array): Desired (R,G,B) color to assign to the skylight
Returns:
LightObject: Generated skylight object
"""
# Avoid circular imports
from omnigibson.objects.light_object import LightObject
light = LightObject(prim_path="/World/skylight", name="skylight", light_type="Dome", intensity=intensity)
og.sim.import_object(light)
light.set_orientation(T.euler2quat([0, 0, -np.pi / 4]))
light_prim = light.light_link.prim
light_prim.GetAttribute("color").Set(lazy.pxr.Gf.Vec3f(*color))
return light
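# Example sketch: add a slightly warm skylight to the current scene (assumes og.sim
# is running; the intensity/color values are illustrative).
def _example_skylight():
    return create_skylight(intensity=800, color=(1.0, 0.95, 0.9))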
StanfordVL/OmniGibson/omnigibson/utils/control_utils.py | """
Set of utilities for helping to execute robot control
"""
import omnigibson.lazy as lazy
import numpy as np
from numba import jit
import omnigibson.utils.transform_utils as T
class FKSolver:
"""
Class for thinly wrapping Lula Forward Kinematics solver
"""
def __init__(
self,
robot_description_path,
robot_urdf_path,
):
# Create robot description and kinematics
self.robot_description = lazy.lula.load_robot(robot_description_path, robot_urdf_path)
self.kinematics = self.robot_description.kinematics()
def get_link_poses(
self,
joint_positions,
link_names,
):
"""
Given @joint_positions, get poses of the desired links (specified by @link_names)
Args:
            joint_positions (n-array): Joint positions in configuration space
link_names (list): List of robot link names we want to specify (e.g. "gripper_link")
Returns:
link_poses (dict): Dictionary mapping each robot link name to its pose
"""
# TODO: Refactor this to go over all links at once
link_poses = {}
for link_name in link_names:
pose3_lula = self.kinematics.pose(joint_positions, link_name)
# get position
link_position = pose3_lula.translation
# get orientation
rotation_lula = pose3_lula.rotation
link_orientation = (
rotation_lula.x(),
rotation_lula.y(),
rotation_lula.z(),
rotation_lula.w(),
)
link_poses[link_name] = (link_position, link_orientation)
return link_poses
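# Example sketch: querying the end-effector pose for a given joint configuration.
# The descriptor/URDF accessors on `robot` mirror OmniGibson's manipulation robots
# and are assumptions for illustration.
def _example_fk(robot, joint_positions):
    fk_solver = FKSolver(
        robot_description_path=robot.robot_arm_descriptor_yamls[robot.default_arm],
        robot_urdf_path=robot.urdf_path,
    )
    eef_link = robot.eef_link_names[robot.default_arm]
    return fk_solver.get_link_poses(joint_positions, [eef_link])[eef_link]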
class IKSolver:
"""
Class for thinly wrapping Lula IK solver
"""
def __init__(
self,
robot_description_path,
robot_urdf_path,
eef_name,
reset_joint_pos,
):
# Create robot description, kinematics, and config
self.robot_description = lazy.lula.load_robot(robot_description_path, robot_urdf_path)
self.kinematics = self.robot_description.kinematics()
self.config = lazy.lula.CyclicCoordDescentIkConfig()
self.eef_name = eef_name
self.reset_joint_pos = reset_joint_pos
def solve(
self,
target_pos,
target_quat=None,
tolerance_pos=0.002,
tolerance_quat=0.01,
weight_pos=1.0,
weight_quat=0.05,
max_iterations=150,
initial_joint_pos=None,
):
"""
Backs out joint positions to achieve desired @target_pos and @target_quat
Args:
target_pos (3-array): desired (x,y,z) local target cartesian position in robot's base coordinate frame
target_quat (4-array or None): If specified, desired (x,y,z,w) local target quaternion orientation in
robot's base coordinate frame. If None, IK will be position-only (will override settings such that
orientation's tolerance is very high and weight is 0)
tolerance_pos (float): Maximum position error (L2-norm) for a successful IK solution
tolerance_quat (float): Maximum orientation error (per-axis L2-norm) for a successful IK solution
weight_pos (float): Weight for the relative importance of position error during CCD
            weight_quat (float): Weight for the relative importance of orientation error during CCD
max_iterations (int): Number of iterations used for each cyclic coordinate descent.
initial_joint_pos (None or n-array): If specified, will set the initial cspace seed when solving for joint
positions. Otherwise, will use self.reset_joint_pos
Returns:
None or n-array: Joint positions for reaching desired target_pos and target_quat, otherwise None if no
solution was found
"""
pos = np.array(target_pos, dtype=np.float64).reshape(3, 1)
rot = np.array(T.quat2mat(np.array([0, 0, 0, 1.0]) if target_quat is None else target_quat), dtype=np.float64)
ik_target_pose = lazy.lula.Pose3(lazy.lula.Rotation3(rot), pos)
# Set the cspace seed and tolerance
initial_joint_pos = self.reset_joint_pos if initial_joint_pos is None else np.array(initial_joint_pos)
self.config.cspace_seeds = [initial_joint_pos]
self.config.position_tolerance = tolerance_pos
self.config.orientation_tolerance = 100.0 if target_quat is None else tolerance_quat
self.config.ccd_position_weight = weight_pos
self.config.ccd_orientation_weight = 0.0 if target_quat is None else weight_quat
self.config.max_num_descents = max_iterations
# Compute target joint positions
ik_results = lazy.lula.compute_ik_ccd(self.kinematics, ik_target_pose, self.eef_name, self.config)
if ik_results.success:
return np.array(ik_results.cspace_position)
else:
return None
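# Example sketch: solving IK for a target end-effector pose expressed in the robot's
# base frame. Accessors on `robot` mirror OmniGibson's manipulation robots and are
# assumptions for illustration; solve() returns None when no solution is found.
def _example_ik(robot, target_pos, target_quat):
    ik_solver = IKSolver(
        robot_description_path=robot.robot_arm_descriptor_yamls[robot.default_arm],
        robot_urdf_path=robot.urdf_path,
        eef_name=robot.eef_link_names[robot.default_arm],
        reset_joint_pos=robot.reset_joint_pos[robot.arm_control_idx[robot.default_arm]],
    )
    return ik_solver.solve(target_pos=target_pos, target_quat=target_quat)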
@jit(nopython=True)
def orientation_error(desired, current):
"""
This function calculates a 3-dimensional orientation error vector for use in the
impedance controller. It does this by computing the delta rotation between the
inputs and converting that rotation to exponential coordinates (axis-angle
representation, where the 3d vector is axis * angle).
See https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation for more information.
Optimized function to determine orientation error from matrices
Args:
desired (tensor): (..., 3, 3) where final two dims are 2d array representing target orientation matrix
current (tensor): (..., 3, 3) where final two dims are 2d array representing current orientation matrix
Returns:
tensor: (..., 3) where final dim is (ax, ay, az) axis-angle representing orientation error
"""
# convert input shapes
input_shape = desired.shape[:-2]
desired = desired.reshape(-1, 3, 3)
current = current.reshape(-1, 3, 3)
# grab relevant info
rc1 = current[:, :, 0]
rc2 = current[:, :, 1]
rc3 = current[:, :, 2]
rd1 = desired[:, :, 0]
rd2 = desired[:, :, 1]
rd3 = desired[:, :, 2]
error = 0.5 * (np.cross(rc1, rd1) + np.cross(rc2, rd2) + np.cross(rc3, rd3))
# Reshape
error = error.reshape(*input_shape, 3)
return error
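# Worked example (sanity check): with the current frame at identity and the desired
# frame rotated by theta about z, the cross-product formula above reduces to
# [0, 0, sin(theta)], which approximates the axis-angle error [0, 0, theta] for
# small angles.
def _example_orientation_error():
    theta = 0.1
    desired = np.array([
        [np.cos(theta), -np.sin(theta), 0.0],
        [np.sin(theta), np.cos(theta), 0.0],
        [0.0, 0.0, 1.0],
    ])
    current = np.eye(3)
    err = orientation_error(desired, current)
    assert np.allclose(err, [0.0, 0.0, np.sin(theta)])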
StanfordVL/OmniGibson/omnigibson/utils/ui_utils.py | """
Helper classes and functions for streamlining user interactions
"""
import contextlib
import logging
import numpy as np
import sys
import datetime
from pathlib import Path
from PIL import Image
from termcolor import colored
import omnigibson as og
from omnigibson.macros import gm
import omnigibson.utils.transform_utils as T
import omnigibson.lazy as lazy
from scipy.spatial.transform import Rotation as R
from scipy.interpolate import CubicSpline
from scipy.integrate import quad
import imageio
from IPython import embed
def print_icon():
raw_texts = [
# Lgrey, grey, lgrey, grey, red, lgrey, red
(" ___________", "", "", "", "", "", "_"),
(" / ", "", "", "", "", "", "/ \\"),
(" / ", "", "", "", "/ /", "__", ""),
(" / ", "", "", "", "", "", "/ / /\\"),
(" /", "__________", "", "", "/ /", "__", "/ \\"),
(" ", "\\ _____ ", "", "", "\\ \\", "__", "\\ /"),
(" ", "\\ \\ ", "/ ", "\\ ", "", "", "\\ \\_/ /"),
(" ", "\\ \\", "/", "___\\ ", "", "", "\\ /"),
(" ", "\\__________", "", "", "", "", "\\_/ "),
]
for (lgrey_text0, grey_text0, lgrey_text1, grey_text1, red_text0, lgrey_text2, red_text1) in raw_texts:
lgrey_text0 = colored(lgrey_text0, "light_grey", attrs=["bold"])
grey_text0 = colored(grey_text0, "light_grey", attrs=["bold", "dark"])
lgrey_text1 = colored(lgrey_text1, "light_grey", attrs=["bold"])
grey_text1 = colored(grey_text1, "light_grey", attrs=["bold", "dark"])
red_text0 = colored(red_text0, "light_red", attrs=["bold"])
lgrey_text2 = colored(lgrey_text2, "light_grey", attrs=["bold"])
red_text1 = colored(red_text1, "light_red", attrs=["bold"])
print(lgrey_text0 + grey_text0 + lgrey_text1 + grey_text1 + red_text0 + lgrey_text2 + red_text1)
def print_logo():
raw_texts = [
(" ___ _", " ____ _ _ "),
(" / _ \ _ __ ___ _ __ (_)", "/ ___(_) |__ ___ ___ _ __ "),
(" | | | | '_ ` _ \| '_ \| |", " | _| | '_ \/ __|/ _ \| '_ \ "),
(" | |_| | | | | | | | | | |", " |_| | | |_) \__ \ (_) | | | |"),
(" \___/|_| |_| |_|_| |_|_|", "\____|_|_.__/|___/\___/|_| |_|"),
]
for (grey_text, red_text) in raw_texts:
grey_text = colored(grey_text, "light_grey", attrs=["bold", "dark"])
red_text = colored(red_text, "light_red", attrs=["bold"])
print(grey_text + red_text)
def logo_small():
grey_text = colored("Omni", "light_grey", attrs=["bold", "dark"])
red_text = colored("Gibson", "light_red", attrs=["bold"])
return grey_text + red_text
def dock_window(space, name, location, ratio=0.5):
"""
Method for docking a specific GUI window in a specified location within the workspace
Args:
space (WindowHandle): Handle to the docking space to dock the window specified by @name
name (str): Name of a window to dock
location (omni.ui.DockPosition): docking position for placing the window specified by @name
ratio (float): Ratio when splitting the docking space between the pre-existing and newly added window
Returns:
WindowHandle: Handle to the docking space that the window specified by @name was placed in
"""
window = lazy.omni.ui.Workspace.get_window(name)
if window and space:
window.dock_in(space, location, ratio=ratio)
return window
class KeyboardEventHandler:
"""
Simple singleton class for handing keyboard events
"""
# Global keyboard callbacks
KEYBOARD_CALLBACKS = dict()
# ID assigned to meta callback method for this class
_CALLBACK_ID = None
def __init__(self):
raise ValueError("Cannot create an instance of keyboard event handler!")
@classmethod
def initialize(cls):
"""
Hook up a meta function callback to the omni backend
"""
appwindow = lazy.omni.appwindow.get_default_app_window()
input_interface = lazy.carb.input.acquire_input_interface()
keyboard = appwindow.get_keyboard()
cls._CALLBACK_ID = input_interface.subscribe_to_keyboard_events(keyboard, cls._meta_callback)
@classmethod
def reset(cls):
"""
Resets this callback interface by removing all current callback functions
"""
appwindow = lazy.omni.appwindow.get_default_app_window()
input_interface = lazy.carb.input.acquire_input_interface()
keyboard = appwindow.get_keyboard()
input_interface.unsubscribe_to_keyboard_events(keyboard, cls._CALLBACK_ID)
cls.KEYBOARD_CALLBACKS = dict()
cls._CALLBACK_ID = None
@classmethod
def add_keyboard_callback(cls, key, callback_fn):
"""
Registers a keyboard callback function with omni, mapping a keypress from @key to run the callback_function
@callback_fn
Args:
key (carb.input.KeyboardInput): key to associate with the callback
callback_fn (function): Callback function to call if the key @key is pressed or repeated. Note that this
function's signature should be:
callback_fn() --> None
"""
# Initialize the interface if not initialized yet
if cls._CALLBACK_ID is None:
cls.initialize()
# Add the callback
cls.KEYBOARD_CALLBACKS[key] = callback_fn
@classmethod
def _meta_callback(cls, event, *args, **kwargs):
"""
Meta callback function that is hooked up to omni's backend
"""
# Check if we've received a key press or repeat
if event.type == lazy.carb.input.KeyboardEventType.KEY_PRESS \
or event.type == lazy.carb.input.KeyboardEventType.KEY_REPEAT:
# Run the specific callback
cls.KEYBOARD_CALLBACKS.get(event.input, lambda: None)()
# Always return True
return True
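# Example sketch: bind a key to a custom callback. The Z-key binding and the
# callback body are purely illustrative.
def _example_keyboard_binding():
    KeyboardEventHandler.add_keyboard_callback(
        key=lazy.carb.input.KeyboardInput.Z,
        callback_fn=lambda: print("Z was pressed!"),
    )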
@contextlib.contextmanager
def suppress_omni_log(channels):
"""
A context scope for temporarily suppressing logging for certain omni channels.
Args:
channels (None or list of str): Logging channel(s) to suppress. If None, will globally disable logger
"""
# Record the state to restore to after the context exists
log = lazy.omni.log.get_log()
if gm.DEBUG:
# Do nothing
pass
elif channels is None:
# Globally disable log
log.enabled = False
else:
# For some reason, all enabled states always return False even if the logging is clearly enabled for the
# given channel, so we assume all channels are enabled
# We do, however, check what behavior was assigned to this channel, since we force an override during this context
channel_behavior = {channel: log.get_channel_enabled(channel)[2] for channel in channels}
# Suppress the channels
for channel in channels:
log.set_channel_enabled(channel, False, lazy.omni.log.SettingBehavior.OVERRIDE)
yield
if gm.DEBUG:
# Do nothing
pass
elif channels is None:
# Globally re-enable log
log.enabled = True
else:
# Unsuppress the channels
for channel in channels:
log.set_channel_enabled(channel, True, channel_behavior[channel])
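# Example sketch: silence an (illustrative) physics logging channel while importing
# an object into the running simulation.
def _example_suppressed_import(obj):
    with suppress_omni_log(channels=["omni.physx.plugin"]):
        og.sim.import_object(obj)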
@contextlib.contextmanager
def suppress_loggers(logger_names):
"""
    A context scope for temporarily suppressing logging for certain loggers.
Args:
logger_names (list of str): Logger name(s) whose corresponding loggers should be suppressed
"""
if not gm.DEBUG:
# Store prior states so we can restore them after this context exits
logger_levels = {name: logging.getLogger(name).getEffectiveLevel() for name in logger_names}
# Suppress the loggers (only output fatal messages)
for name in logger_names:
logging.getLogger(name).setLevel(logging.FATAL)
yield
if not gm.DEBUG:
# Unsuppress the loggers
for name in logger_names:
logging.getLogger(name).setLevel(logger_levels[name])
def create_module_logger(module_name):
"""
Creates and returns a logger for logging statements from the module represented by @module_name
Args:
module_name (str): Module to create the logger for. Should be the module's `__name__` variable
Returns:
Logger: Created logger for the module
"""
return logging.getLogger(module_name)
def disclaimer(msg):
"""
Prints a disclaimer message, i.e.: "We know this doesn't work; it's an omni issue; we expect it to be fixed in the
    next release!"
"""
if gm.SHOW_DISCLAIMERS:
print("****** DISCLAIMER ******")
print("Isaac Sim / Omniverse has some significant limitations and bugs in its current release.")
print("This message has popped up because a potential feature in OmniGibson relies upon a feature in Omniverse that "
"is yet to be released publically. Currently, the expected behavior may not be fully functional, but "
"should be resolved by the next Isaac Sim release.")
print(f"Exact Limitation: {msg}")
print("************************")
def debug_breakpoint(msg):
og.log.error(msg)
embed()
def choose_from_options(options, name, random_selection=False):
"""
Prints out options from a list, and returns the requested option.
Args:
options (dict or list): options to choose from. If dict, the value entries are assumed to be docstrings
explaining the individual options
name (str): name of the options
random_selection (bool): if the selection is random (for automatic demo execution). Default False
Returns:
str: Requested option
"""
# Select robot
print("\nHere is a list of available {}s:\n".format(name))
for k, option in enumerate(options):
docstring = ": {}".format(options[option]) if isinstance(options, dict) else ""
print("[{}] {}{}".format(k + 1, option, docstring))
print()
if not random_selection:
try:
s = input("Choose a {} (enter a number from 1 to {}): ".format(name, len(options)))
# parse input into a number within range
k = min(max(int(s), 1), len(options)) - 1
except ValueError:
k = 0
print("Input is not valid. Use {} by default.".format(list(options)[k]))
else:
k = np.random.choice(range(len(options)))
# Return requested option
return list(options)[k]
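# Example sketch: prompting the user to pick a robot (the option names and
# docstrings below are illustrative).
def _example_choose_robot():
    options = {
        "Turtlebot": "differential-drive base, no arm",
        "Fetch": "mobile manipulator with a 7-DOF arm",
    }
    return choose_from_options(options=options, name="robot", random_selection=False)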
class CameraMover:
"""
A helper class for manipulating a camera via the keyboard. Utilizes carb keyboard callbacks to move
the camera around.
Args:
cam (VisionSensor): The camera vision sensor to manipulate via the keyboard
delta (float): Change (m) per keypress when moving the camera
        save_dir (str): Absolute path to where recorded images should be stored. Default is <OMNIGIBSON_PATH>/../images
"""
def __init__(self, cam, delta=0.25, save_dir=None):
if save_dir is None:
save_dir = f"{og.root_path}/../images"
self.cam = cam
self.delta = delta
self.light_val = gm.FORCE_LIGHT_INTENSITY
self.save_dir = save_dir
self._appwindow = lazy.omni.appwindow.get_default_app_window()
self._input = lazy.carb.input.acquire_input_interface()
self._keyboard = self._appwindow.get_keyboard()
self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._sub_keyboard_event)
def clear(self):
"""
Clears this camera mover. After this is called, the camera mover cannot be used.
"""
self._input.unsubscribe_to_keyboard_events(self._keyboard, self._sub_keyboard)
def set_save_dir(self, save_dir):
"""
Sets the absolute path corresponding to the image directory where recorded images from this CameraMover
should be saved
Args:
save_dir (str): Absolute path to where recorded images should be stored
"""
self.save_dir = save_dir
def change_light(self, delta):
self.light_val += delta
self.set_lights(self.light_val)
def set_lights(self, intensity):
world = lazy.omni.isaac.core.utils.prims.get_prim_at_path("/World")
for prim in world.GetChildren():
for prim_child in prim.GetChildren():
for prim_child_child in prim_child.GetChildren():
if "Light" in prim_child_child.GetPrimTypeInfo().GetTypeName():
prim_child_child.GetAttribute("intensity").Set(intensity)
def print_info(self):
"""
Prints keyboard command info out to the user
"""
print("*" * 40)
print("CameraMover! Commands:")
print()
print(f"\t Right Click + Drag: Rotate camera")
print(f"\t W / S : Move camera forward / backward")
print(f"\t A / D : Move camera left / right")
print(f"\t T / G : Move camera up / down")
print(f"\t 9 / 0 : Increase / decrease the lights")
print(f"\t P : Print current camera pose")
print(f"\t O: Save the current camera view as an image")
def print_cam_pose(self):
"""
Prints out the camera pose as (position, quaternion) in the world frame
"""
print(f"cam pose: {self.cam.get_position_orientation()}")
def get_image(self):
"""
Helper function for quickly grabbing the currently viewed RGB image
Returns:
np.array: (H, W, 3) sized RGB image array
"""
return self.cam.get_obs()[0]["rgb"][:, :, :-1]
def record_image(self, fpath=None):
"""
Saves the currently viewed image and writes it to disk
Args:
fpath (None or str): If specified, the absolute fpath to the image save location. Default is located in
self.save_dir
"""
og.log.info("Recording image...")
# Use default fpath if not specified
if fpath is None:
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
fpath = f"{self.save_dir}/og_{timestamp}.png"
# Make sure save path directory exists, and then save the image to that location
Path(Path(fpath).parent).mkdir(parents=True, exist_ok=True)
Image.fromarray(self.get_image()).save(fpath)
og.log.info(f"Saved current viewer camera image to {fpath}.")
def record_trajectory(self, poses, fps, steps_per_frame=1, fpath=None):
"""
Moves the viewer camera through the poses specified by @poses and records the resulting trajectory to an mp4
video file on disk.
Args:
poses (list of 2-tuple): List of global (position, quaternion) values to set the viewer camera to defining
this trajectory
fps (int): Frames per second when recording this video
steps_per_frame (int): How many sim steps should occur between each frame being recorded. Minimum and
default is 1.
fpath (None or str): If specified, the absolute fpath to the video save location. Default is located in
self.save_dir
"""
og.log.info("Recording trajectory...")
# Use default fpath if not specified
if fpath is None:
timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
fpath = f"{self.save_dir}/og_{timestamp}.mp4"
# Make sure save path directory exists, and then create the video writer
Path(Path(fpath).parent).mkdir(parents=True, exist_ok=True)
video_writer = imageio.get_writer(fpath, fps=fps)
# Iterate through all desired poses, and record the trajectory
for i, (pos, quat) in enumerate(poses):
self.cam.set_position_orientation(position=pos, orientation=quat)
og.sim.step()
if i % steps_per_frame == 0:
video_writer.append_data(self.get_image())
# Close writer
video_writer.close()
og.log.info(f"Saved camera trajectory video to {fpath}.")
def record_trajectory_from_waypoints(self, waypoints, per_step_distance, fps, steps_per_frame=1, fpath=None):
"""
Moves the viewer camera through the waypoints specified by @waypoints and records the resulting trajectory to
an mp4 video file on disk.
Args:
waypoints (np.array): (n, 3) global position waypoint values to set the viewer camera to defining this trajectory
per_step_distance (float): How much distance (in m) should be approximately covered per trajectory step.
This will determine the path length between individual waypoints
fps (int): Frames per second when recording this video
steps_per_frame (int): How many sim steps should occur between each frame being recorded. Minimum and
default is 1.
fpath (None or str): If specified, the absolute fpath to the video save location. Default is located in
self.save_dir
"""
# Create splines and their derivatives
n_waypoints = len(waypoints)
if n_waypoints < 3:
og.log.error("Cannot generate trajectory from waypoints with less than 3 waypoints!")
return
splines = [CubicSpline(range(n_waypoints), waypoints[:, i], bc_type='clamped') for i in range(3)]
dsplines = [spline.derivative() for spline in splines]
        # Helper function to compute the derivative of arc length w.r.t. the spline parameter
def arc_derivative(u):
return np.sqrt(np.sum([dspline(u) ** 2 for dspline in dsplines]))
# Function to help get interpolated positions
def get_interpolated_positions(step):
assert step < n_waypoints - 1
dist = quad(func=arc_derivative, a=step, b=step + 1)[0]
path_length = int(dist / per_step_distance)
interpolated_points = np.zeros((path_length, 3))
for i in range(path_length):
curr_step = step + (i / path_length)
interpolated_points[i, :] = np.array([spline(curr_step) for spline in splines])
return interpolated_points
# Iterate over all waypoints and infer the resulting trajectory, recording the resulting poses
poses = []
for i in range(n_waypoints - 1):
positions = get_interpolated_positions(step=i)
for j in range(len(positions) - 1):
# Get direction vector from the current to the following point
direction = positions[j + 1] - positions[j]
direction = direction / np.linalg.norm(direction)
# Infer tilt and pan angles from this direction
xy_direction = direction[:2] / np.linalg.norm(direction[:2])
z = direction[2]
pan_angle = np.arctan2(-xy_direction[0], xy_direction[1])
tilt_angle = np.arcsin(z)
# Infer global quat orientation from these angles
quat = T.euler2quat([np.pi / 2 - tilt_angle, 0.0, pan_angle])
poses.append([positions[j], quat])
# Record the generated trajectory
self.record_trajectory(poses=poses, fps=fps, steps_per_frame=steps_per_frame, fpath=fpath)
def set_delta(self, delta):
"""
Sets the delta value (how much the camera moves with each keypress) for this CameraMover
Args:
delta (float): Change (m) per keypress when moving the camera
"""
self.delta = delta
def set_cam(self, cam):
"""
Sets the active camera sensor for this CameraMover
Args:
cam (VisionSensor): The camera vision sensor to manipulate via the keyboard
"""
self.cam = cam
@property
def input_to_function(self):
"""
Returns:
dict: Mapping from relevant keypresses to corresponding function call to use
"""
return {
lazy.carb.input.KeyboardInput.O: lambda: self.record_image(fpath=None),
lazy.carb.input.KeyboardInput.P: lambda: self.print_cam_pose(),
lazy.carb.input.KeyboardInput.KEY_9: lambda: self.change_light(delta=-2e4),
lazy.carb.input.KeyboardInput.KEY_0: lambda: self.change_light(delta=2e4),
}
@property
def input_to_command(self):
"""
Returns:
dict: Mapping from relevant keypresses to corresponding delta command to apply to the camera pose
"""
return {
lazy.carb.input.KeyboardInput.D: np.array([self.delta, 0, 0]),
lazy.carb.input.KeyboardInput.A: np.array([-self.delta, 0, 0]),
lazy.carb.input.KeyboardInput.W: np.array([0, 0, -self.delta]),
lazy.carb.input.KeyboardInput.S: np.array([0, 0, self.delta]),
lazy.carb.input.KeyboardInput.T: np.array([0, self.delta, 0]),
lazy.carb.input.KeyboardInput.G: np.array([0, -self.delta, 0]),
}
def _sub_keyboard_event(self, event, *args, **kwargs):
"""
Handle keyboard events. Note: The signature is pulled directly from omni.
Args:
event (int): keyboard event type
"""
if event.type == lazy.carb.input.KeyboardEventType.KEY_PRESS \
or event.type == lazy.carb.input.KeyboardEventType.KEY_REPEAT:
if event.type == lazy.carb.input.KeyboardEventType.KEY_PRESS and event.input in self.input_to_function:
self.input_to_function[event.input]()
else:
command = self.input_to_command.get(event.input, None)
if command is not None:
# Convert to world frame to move the camera
transform = T.quat2mat(self.cam.get_orientation())
delta_pos_global = transform @ command
self.cam.set_position(self.cam.get_position() + delta_pos_global)
return True
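# Example sketch: fly the viewer camera through a few (hypothetical) waypoints and
# record the result to an mp4 (assumes og.sim and its viewer camera exist).
def _example_camera_flythrough():
    cam_mover = CameraMover(cam=og.sim.viewer_camera)
    waypoints = np.array([
        [1.0, 0.0, 1.5],
        [0.0, 1.0, 1.5],
        [-1.0, 0.0, 1.5],
        [0.0, -1.0, 1.5],
    ])
    cam_mover.record_trajectory_from_waypoints(
        waypoints=waypoints, per_step_distance=0.02, fps=30,
    )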
class KeyboardRobotController:
"""
Simple class for controlling OmniGibson robots using keyboard commands
"""
def __init__(self, robot):
"""
Args:
robot (BaseRobot): robot to control
"""
# Store relevant info from robot
self.robot = robot
self.action_dim = robot.action_dim
self.controller_info = dict()
self.joint_idx_to_controller = dict()
idx = 0
for name, controller in robot._controllers.items():
self.controller_info[name] = {
"name": type(controller).__name__,
"start_idx": idx,
"dofs": controller.dof_idx,
"command_dim": controller.command_dim,
}
idx += controller.command_dim
for i in controller.dof_idx:
self.joint_idx_to_controller[i] = controller
# Other persistent variables we need to keep track of
self.joint_names = [name for name in robot.joints.keys()] # Ordered list of joint names belonging to the robot
self.joint_types = [joint.joint_type for joint in robot.joints.values()] # Ordered list of joint types
self.joint_command_idx = None # Indices of joints being directly controlled in the action array
self.joint_control_idx = None # Indices of joints being directly controlled in the actual joint array
self.active_joint_command_idx_idx = 0 # Which index within the joint_command_idx variable is being controlled by the user
self.current_joint = -1 # Active joint being controlled for joint control
self.ik_arms = [] # List of arm controller names to be controlled by IK
self.active_arm_idx = 0 # Which index within self.ik_arms is actively being controlled (only relevant for IK)
self.binary_grippers = [] # Grippers being controlled using multi-finger binary controller
self.active_gripper_idx = 0 # Which index within self.binary_grippers is actively being controlled
self.gripper_direction = None # Flips between -1 and 1, per arm controlled by multi-finger binary control
self.persistent_gripper_action = None # Persistent gripper commands, per arm controlled by multi-finger binary control
        # i.e.: if using binary gripper control and no keypress is active, the gripper action should still be the last executed gripper action
self.keypress_mapping = None # Maps omni keybindings to information for controlling various parts of the robot
self.current_keypress = None # Current key that is being pressed
self.active_action = None # Current action information based on the current keypress
self.toggling_gripper = False # Whether we should toggle the gripper during the next action
self.custom_keymapping = None # Dictionary mapping custom keys to custom callback functions / info
# Populate the keypress mapping dictionary
self.populate_keypress_mapping()
# Register the keyboard callback function
self.register_keyboard_handler()
def register_keyboard_handler(self):
"""
Sets up the keyboard callback functionality with omniverse
"""
appwindow = lazy.omni.appwindow.get_default_app_window()
input_interface = lazy.carb.input.acquire_input_interface()
keyboard = appwindow.get_keyboard()
        # Store the subscription handle so the callback can be unsubscribed later if needed
        self._sub_keyboard = input_interface.subscribe_to_keyboard_events(keyboard, self.keyboard_event_handler)
def register_custom_keymapping(self, key, description, callback_fn):
"""
Register a custom keymapping with corresponding callback function for this keyboard controller.
Note that this will automatically override any pre-existing callback that existed for that key.
Args:
key (carb.input.KeyboardInput): Key to map to callback function
description (str): Description for the callback function
callback_fn (function): Callback function, should have signature:
callback_fn() -> None
"""
self.custom_keymapping[key] = {"description": description, "callback": callback_fn}
def generate_ik_keypress_mapping(self, controller_info):
"""
Generates a dictionary for keypress mappings for IK control, based on the inputted @controller_info
Args:
controller_info (dict): Dictionary of controller information for the specific robot arm to control
with IK
Returns:
dict: Populated keypress mappings for IK to control the specified controller
"""
mapping = {}
mapping[lazy.carb.input.KeyboardInput.UP] = {"idx": controller_info["start_idx"] + 0, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.DOWN] = {"idx": controller_info["start_idx"] + 0, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.RIGHT] = {"idx": controller_info["start_idx"] + 1, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.LEFT] = {"idx": controller_info["start_idx"] + 1, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.P] = {"idx": controller_info["start_idx"] + 2, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.SEMICOLON] = {"idx": controller_info["start_idx"] + 2, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.N] = {"idx": controller_info["start_idx"] + 3, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.B] = {"idx": controller_info["start_idx"] + 3, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.O] = {"idx": controller_info["start_idx"] + 4, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.U] = {"idx": controller_info["start_idx"] + 4, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.V] = {"idx": controller_info["start_idx"] + 5, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.C] = {"idx": controller_info["start_idx"] + 5, "val": -0.5}
return mapping
def generate_osc_keypress_mapping(self, controller_info):
"""
Generates a dictionary for keypress mappings for OSC control, based on the inputted @controller_info
Args:
controller_info (dict): Dictionary of controller information for the specific robot arm to control
with OSC
Returns:
dict: Populated keypress mappings for IK to control the specified controller
"""
mapping = {}
mapping[lazy.carb.input.KeyboardInput.UP] = {"idx": controller_info["start_idx"] + 0, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.DOWN] = {"idx": controller_info["start_idx"] + 0, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.RIGHT] = {"idx": controller_info["start_idx"] + 1, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.LEFT] = {"idx": controller_info["start_idx"] + 1, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.P] = {"idx": controller_info["start_idx"] + 2, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.SEMICOLON] = {"idx": controller_info["start_idx"] + 2, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.N] = {"idx": controller_info["start_idx"] + 3, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.B] = {"idx": controller_info["start_idx"] + 3, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.O] = {"idx": controller_info["start_idx"] + 4, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.U] = {"idx": controller_info["start_idx"] + 4, "val": -0.5}
mapping[lazy.carb.input.KeyboardInput.V] = {"idx": controller_info["start_idx"] + 5, "val": 0.5}
mapping[lazy.carb.input.KeyboardInput.C] = {"idx": controller_info["start_idx"] + 5, "val": -0.5}
return mapping
def populate_keypress_mapping(self):
"""
Populates the mapping @self.keypress_mapping, which maps keypresses to action info:
keypress:
idx: <int>
val: <float>
"""
self.keypress_mapping = {}
self.joint_command_idx = []
self.joint_control_idx = []
self.gripper_direction = {}
self.persistent_gripper_action = {}
self.custom_keymapping = {}
# Add mapping for joint control directions (no index because these are inferred at runtime)
self.keypress_mapping[lazy.carb.input.KeyboardInput.RIGHT_BRACKET] = {"idx": None, "val": 0.1}
self.keypress_mapping[lazy.carb.input.KeyboardInput.LEFT_BRACKET] = {"idx": None, "val": -0.1}
# Iterate over all controller info and populate mapping
for component, info in self.controller_info.items():
if info["name"] == "JointController":
for i in range(info["command_dim"]):
cmd_idx = info["start_idx"] + i
self.joint_command_idx.append(cmd_idx)
self.joint_control_idx += info["dofs"].tolist()
elif info["name"] == "DifferentialDriveController":
self.keypress_mapping[lazy.carb.input.KeyboardInput.I] = {"idx": info["start_idx"] + 0, "val": 0.4}
self.keypress_mapping[lazy.carb.input.KeyboardInput.K] = {"idx": info["start_idx"] + 0, "val": -0.4}
self.keypress_mapping[lazy.carb.input.KeyboardInput.L] = {"idx": info["start_idx"] + 1, "val": -0.2}
self.keypress_mapping[lazy.carb.input.KeyboardInput.J] = {"idx": info["start_idx"] + 1, "val": 0.2}
elif info["name"] == "InverseKinematicsController":
self.ik_arms.append(component)
self.keypress_mapping.update(self.generate_ik_keypress_mapping(controller_info=info))
elif info["name"] == "OperationalSpaceController":
self.ik_arms.append(component)
self.keypress_mapping.update(self.generate_osc_keypress_mapping(controller_info=info))
elif info["name"] == "MultiFingerGripperController":
if info["command_dim"] > 1:
for i in range(info["command_dim"]):
cmd_idx = info["start_idx"] + i
self.joint_command_idx.append(cmd_idx)
self.joint_control_idx += info["dofs"].tolist()
else:
self.keypress_mapping[lazy.carb.input.KeyboardInput.T] = {"idx": info["start_idx"], "val": 1.0}
self.gripper_direction[component] = 1.0
self.persistent_gripper_action[component] = 1.0
self.binary_grippers.append(component)
elif info["name"] == "NullJointController":
# We won't send actions if using a null gripper controller
self.keypress_mapping[lazy.carb.input.KeyboardInput.T] = {"idx": None, "val": None}
else:
raise ValueError("Unknown controller name received: {}".format(info["name"]))
def keyboard_event_handler(self, event, *args, **kwargs):
# Check if we've received a key press or repeat
if event.type == lazy.carb.input.KeyboardEventType.KEY_PRESS \
or event.type == lazy.carb.input.KeyboardEventType.KEY_REPEAT:
# Handle special cases
if event.input in {lazy.carb.input.KeyboardInput.KEY_1, lazy.carb.input.KeyboardInput.KEY_2} and len(self.joint_control_idx) > 1:
# Update joint and print out new joint being controlled
self.active_joint_command_idx_idx = max(0, self.active_joint_command_idx_idx - 1) \
if event.input == lazy.carb.input.KeyboardInput.KEY_1 \
else min(len(self.joint_control_idx) - 1, self.active_joint_command_idx_idx + 1)
print(f"Now controlling joint {self.joint_names[self.joint_control_idx[self.active_joint_command_idx_idx]]}")
elif event.input in {lazy.carb.input.KeyboardInput.KEY_3, lazy.carb.input.KeyboardInput.KEY_4} and len(self.ik_arms) > 1:
# Update arm, update keypress mapping, and print out new arm being controlled
self.active_arm_idx = max(0, self.active_arm_idx - 1) \
if event.input == lazy.carb.input.KeyboardInput.KEY_3 \
else min(len(self.ik_arms) - 1, self.active_arm_idx + 1)
                new_arm = self.ik_arms[self.active_arm_idx]
                # Use the mapping generator that matches the new arm's controller type (IK vs. OSC)
                if self.controller_info[new_arm]["name"] == "OperationalSpaceController":
                    self.keypress_mapping.update(self.generate_osc_keypress_mapping(self.controller_info[new_arm]))
                else:
                    self.keypress_mapping.update(self.generate_ik_keypress_mapping(self.controller_info[new_arm]))
                print(f"Now controlling arm {new_arm}")
elif event.input in {lazy.carb.input.KeyboardInput.KEY_5, lazy.carb.input.KeyboardInput.KEY_6} and len(self.binary_grippers) > 1:
# Update gripper, update keypress mapping, and print out new gripper being controlled
self.active_gripper_idx = max(0, self.active_gripper_idx - 1) \
if event.input == lazy.carb.input.KeyboardInput.KEY_5 \
else min(len(self.binary_grippers) - 1, self.active_gripper_idx + 1)
print(f"Now controlling gripper {self.binary_grippers[self.active_gripper_idx]} with binary toggling")
elif event.input == lazy.carb.input.KeyboardInput.M:
# Render the sensor modalities from the robot's camera and lidar
self.robot.visualize_sensors()
elif event.input in self.custom_keymapping:
# Run custom press
self.custom_keymapping[event.input]["callback"]()
elif event.input == lazy.carb.input.KeyboardInput.ESCAPE:
# Terminate immediately
og.shutdown()
else:
# Handle all other actions and update accordingly
self.active_action = self.keypress_mapping.get(event.input, None)
if event.type == lazy.carb.input.KeyboardEventType.KEY_PRESS:
# Store the current keypress
self.current_keypress = event.input
# Also store whether we pressed the key for toggling gripper actions
if event.input == lazy.carb.input.KeyboardInput.T:
self.toggling_gripper = True
# If we release a key, clear the active action and keypress
elif event.type == lazy.carb.input.KeyboardEventType.KEY_RELEASE:
self.active_action = None
self.current_keypress = None
# Callback always needs to return True
return True
def get_random_action(self):
"""
Returns:
n-array: Generated random action vector (normalized)
"""
return np.random.uniform(-1, 1, self.action_dim)
def get_teleop_action(self):
"""
Returns:
n-array: Generated action vector based on received user inputs from the keyboard
"""
action = np.zeros(self.action_dim)
# Handle the action if any key is actively being pressed
if self.active_action is not None:
idx, val = self.active_action["idx"], self.active_action["val"]
# Only handle the action if the value is specified
if val is not None:
# If there is no index, the user is controlling a joint with "[" and "]"
if idx is None and len(self.joint_command_idx) != 0:
idx = self.joint_command_idx[self.active_joint_command_idx_idx]
                    # Also potentially modify the value being deployed if we're controlling a prismatic joint
                    # Lower the commanded value for prismatic joints, since a 0.1m delta is very different from a 0.1rad delta!
joint_idx = self.joint_control_idx[self.active_joint_command_idx_idx]
# Import here to avoid circular imports
from omnigibson.utils.constants import JointType
controller = self.joint_idx_to_controller[joint_idx]
if (self.joint_types[joint_idx] == JointType.JOINT_PRISMATIC and
controller.use_delta_commands and controller.motor_type == "position"):
val *= 0.2
# Set the action
if idx is not None:
action[idx] = val
# Possibly set the persistent gripper action
if len(self.binary_grippers) > 0 and self.keypress_mapping[lazy.carb.input.KeyboardInput.T]["val"] is not None:
for i, binary_gripper in enumerate(self.binary_grippers):
# Possibly update the stored value if the toggle gripper key has been pressed and
# it's the active gripper being controlled
if self.toggling_gripper and i == self.active_gripper_idx:
                    # We toggle the gripper direction for this gripper
self.gripper_direction[binary_gripper] *= -1.0
self.persistent_gripper_action[binary_gripper] = \
self.keypress_mapping[lazy.carb.input.KeyboardInput.T]["val"] * self.gripper_direction[binary_gripper]
# Clear the toggling gripper flag
self.toggling_gripper = False
# Set the persistent action
action[self.controller_info[binary_gripper]["start_idx"]] = self.persistent_gripper_action[binary_gripper]
        # Print out to the user what is being pressed / controlled
sys.stdout.write("\033[K")
        keypress_str = str(self.current_keypress).split(".")[-1]
print("Pressed {}. Action: {}".format(keypress_str, action))
sys.stdout.write("\033[F")
# Return action
return action
def print_keyboard_teleop_info(self):
"""
Prints out relevant information for teleop controlling a robot
"""
def print_command(char, info):
char += " " * (10 - len(char))
print("{}\t{}".format(char, info))
print()
print("*" * 30)
print("Controlling the Robot Using the Keyboard")
print("*" * 30)
print()
print("Joint Control")
print_command("1, 2", "decrement / increment the joint to control")
print_command("[, ]", "move the joint backwards, forwards, respectively")
print()
print("Differential Drive Control")
print_command("i, k", "turn left, right")
print_command("l, j", "move forward, backwards")
print()
print("Inverse Kinematics Control")
print_command("3, 4", "toggle between the different arm(s) to control")
print_command(u"\u2190, \u2192", "translate arm eef along x-axis")
print_command(u"\u2191, \u2193", "translate arm eef along y-axis")
print_command("p, ;", "translate arm eef along z-axis")
print_command("n, b", "rotate arm eef about x-axis")
print_command("o, u", "rotate arm eef about y-axis")
print_command("v, c", "rotate arm eef about z-axis")
print()
print("Boolean Gripper Control")
print_command("5, 6", "toggle between the different gripper(s) using binary control")
print_command("t", "toggle gripper (open/close)")
print()
print("Sensor Rendering")
print_command("m", "render the onboard sensor modalities (RGB, Depth, Normals, Instance Segmentation, Occupancy Map)")
print()
if len(self.custom_keymapping) > 0:
print("Custom Keymappings")
for key, info in self.custom_keymapping.items():
key_str = key.__str__().split(".")[-1].lower()
print_command(key_str, info["description"])
print()
print("*" * 30)
print()
def generate_box_edges(center, extents):
"""
Generate the edges of a box given its center and extents.
Parameters:
- center: Tuple of (x, y, z) coordinates for the box's center
    - extents: Tuple of (x, y, z) half-extents of the box (corners are placed at center ± extents)
Returns:
- A list of tuples, each containing two points (each a tuple of x, y, z) representing an edge of the box
"""
x_c, y_c, z_c = center
w, h, d = extents
# Calculate the corner points of the box
corners = [
(x_c - w, y_c - h, z_c - d),
(x_c - w, y_c - h, z_c + d),
(x_c - w, y_c + h, z_c - d),
(x_c - w, y_c + h, z_c + d),
(x_c + w, y_c - h, z_c - d),
(x_c + w, y_c - h, z_c + d),
(x_c + w, y_c + h, z_c - d),
(x_c + w, y_c + h, z_c + d)
]
# Define the edges by connecting the corners
edges = [
(corners[0], corners[1]), (corners[0], corners[2]), (corners[1], corners[3]),
(corners[2], corners[3]), (corners[4], corners[5]), (corners[4], corners[6]),
(corners[5], corners[7]), (corners[6], corners[7]), (corners[0], corners[4]),
(corners[1], corners[5]), (corners[2], corners[6]), (corners[3], corners[7])
]
return edges
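# Hedged usage sketch (illustrative, not part of the original module): a box
# centered at the origin with half-extents of 0.5 yields the 12 edges of a unit cube.
def _example_generate_box_edges():
    edges = generate_box_edges((0.0, 0.0, 0.0), (0.5, 0.5, 0.5))
    assert len(edges) == 12
    # Each edge is a pair of (x, y, z) corner points
    start, end = edges[0]
    assert len(start) == 3 and len(end) == 3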
def draw_line(start, end, color=(1., 0., 0., 1.), size=1.):
"""
Draws a single line between two points.
"""
from omni.isaac.debug_draw import _debug_draw
draw = _debug_draw.acquire_debug_draw_interface()
draw.draw_lines([start], [end], [color], [size])
def draw_box(center, extents, color=(1., 0., 0., 1.), size=1.):
"""
Draws a box defined by its center and extents.
"""
edges = generate_box_edges(center, extents)
for start, end in edges:
draw_line(start, end, color, size)
def draw_aabb(obj):
"""
Draws the axis-aligned bounding box of a given object.
"""
ctr = obj.aabb_center
ext = obj.aabb_extent / 2.0
draw_box(ctr, ext)
def clear_debug_drawing():
"""
Clears all debug drawings.
"""
from omni.isaac.debug_draw import _debug_draw
draw = _debug_draw.acquire_debug_draw_interface()
draw.clear_lines()
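# Hedged usage sketch (illustrative, not part of the original module): assumes a
# running OmniGibson simulation and a scene object `obj` exposing `aabb_center`
# and `aabb_extent`. Never executed at import time.
def _example_debug_draw(obj):
    draw_aabb(obj)          # outline the object's axis-aligned bounding box
    clear_debug_drawing()   # remove all debug lines once finished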
StanfordVL/OmniGibson/omnigibson/utils/transform_utils.py
"""
Utility functions of matrix and vector transformations.
NOTE: convention for quaternions is (x, y, z, w)
"""
import math
import numpy as np
from scipy.spatial.transform import Rotation as R
PI = np.pi
EPS = np.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
"sxyz": (0, 0, 0, 0),
"sxyx": (0, 0, 1, 0),
"sxzy": (0, 1, 0, 0),
"sxzx": (0, 1, 1, 0),
"syzx": (1, 0, 0, 0),
"syzy": (1, 0, 1, 0),
"syxz": (1, 1, 0, 0),
"syxy": (1, 1, 1, 0),
"szxy": (2, 0, 0, 0),
"szxz": (2, 0, 1, 0),
"szyx": (2, 1, 0, 0),
"szyz": (2, 1, 1, 0),
"rzyx": (0, 0, 0, 1),
"rxyx": (0, 0, 1, 1),
"ryzx": (0, 1, 0, 1),
"rxzx": (0, 1, 1, 1),
"rxzy": (1, 0, 0, 1),
"ryzy": (1, 0, 1, 1),
"rzxy": (1, 1, 0, 1),
"ryxy": (1, 1, 1, 1),
"ryxz": (2, 0, 0, 1),
"rzxz": (2, 0, 1, 1),
"rxyz": (2, 1, 0, 1),
"rzyz": (2, 1, 1, 1),
}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
def ewma_vectorized(data, alpha, offset=None, dtype=None, order="C", out=None):
"""
Calculates the exponential moving average over a vector.
    Will fail for large inputs, since the (1 - alpha)^n scaling factors underflow to zero,
    leading to divide-by-zero errors (see the note in the implementation below).
Args:
data (Iterable): Input data
alpha (float): scalar in range (0,1)
The alpha parameter for the moving average.
offset (None or float): If specified, the offset for the moving average. None defaults to data[0].
dtype (None or type): Data type used for calculations. If None, defaults to float64 unless
data.dtype is float32, then it will use float32.
order (None or str): Order to use when flattening the data. Valid options are {'C', 'F', 'A'}.
None defaults to 'C'.
out (None or np.array): If specified, the location into which the result is stored. If provided, it must have
the same shape as the input. If not provided or `None`,
a freshly-allocated array is returned.
Returns:
np.array: Exponential moving average from @data
"""
data = np.array(data, copy=False)
if dtype is None:
if data.dtype == np.float32:
dtype = np.float32
else:
dtype = np.float64
else:
dtype = np.dtype(dtype)
if data.ndim > 1:
# flatten input
data = data.reshape(-1, order)
if out is None:
out = np.empty_like(data, dtype=dtype)
else:
assert out.shape == data.shape
assert out.dtype == dtype
if data.size < 1:
# empty input, return empty array
return out
if offset is None:
offset = data[0]
alpha = np.array(alpha, copy=False).astype(dtype, copy=False)
# scaling_factors -> 0 as len(data) gets large
# this leads to divide-by-zeros below
scaling_factors = np.power(1.0 - alpha, np.arange(data.size + 1, dtype=dtype), dtype=dtype)
# create cumulative sum array
np.multiply(data, (alpha * scaling_factors[-2]) / scaling_factors[:-1], dtype=dtype, out=out)
np.cumsum(out, dtype=dtype, out=out)
# cumsums / scaling
out /= scaling_factors[-2::-1]
if offset != 0:
offset = np.array(offset, copy=False).astype(dtype, copy=False)
# add offsets
out += offset * scaling_factors[1:]
return out
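# Hedged correctness sketch (illustrative, not part of the original module): the
# vectorized EWMA should match the naive recursion y[t] = alpha*x[t] + (1-alpha)*y[t-1]
# seeded with y[0] = x[0] (the default offset).
def _example_ewma_vectorized():
    data = np.array([1.0, 2.0, 3.0, 4.0])
    alpha = 0.5
    expected = [data[0]]
    for x in data[1:]:
        expected.append(alpha * x + (1.0 - alpha) * expected[-1])
    assert np.allclose(ewma_vectorized(data, alpha), expected)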
def convert_quat(q, to="xyzw"):
"""
Converts quaternion from one convention to another.
The convention to convert TO is specified as an optional argument.
If to == 'xyzw', then the input is in 'wxyz' format, and vice-versa.
Args:
q (np.array): a 4-dim array corresponding to a quaternion
to (str): either 'xyzw' or 'wxyz', determining which convention to convert to.
"""
if to == "xyzw":
return q[[1, 2, 3, 0]]
if to == "wxyz":
return q[[3, 0, 1, 2]]
raise Exception("convert_quat: choose a valid `to` argument (xyzw or wxyz)")
def quat_multiply(quaternion1, quaternion0):
"""
Return multiplication of two quaternions (q1 * q0).
E.g.:
>>> q = quat_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
>>> np.allclose(q, [-44, -14, 48, 28])
True
Args:
quaternion1 (np.array): (x,y,z,w) quaternion
quaternion0 (np.array): (x,y,z,w) quaternion
Returns:
np.array: (x,y,z,w) multiplied quaternion
"""
x0, y0, z0, w0 = quaternion0
x1, y1, z1, w1 = quaternion1
return np.array(
(
x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0,
-x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0,
x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0,
-x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0,
),
dtype=np.float32,
)
def quat_conjugate(quaternion):
"""
Return conjugate of quaternion.
E.g.:
>>> q0 = random_quaternion()
>>> q1 = quat_conjugate(q0)
>>> q1[3] == q0[3] and all(q1[:3] == -q0[:3])
True
Args:
quaternion (np.array): (x,y,z,w) quaternion
Returns:
np.array: (x,y,z,w) quaternion conjugate
"""
return np.array(
(-quaternion[0], -quaternion[1], -quaternion[2], quaternion[3]),
dtype=np.float32,
)
def quat_inverse(quaternion):
"""
Return inverse of quaternion.
E.g.:
>>> q0 = random_quaternion()
>>> q1 = quat_inverse(q0)
>>> np.allclose(quat_multiply(q0, q1), [0, 0, 0, 1])
True
Args:
quaternion (np.array): (x,y,z,w) quaternion
Returns:
np.array: (x,y,z,w) quaternion inverse
"""
return quat_conjugate(quaternion) / np.dot(quaternion, quaternion)
def quat_distance(quaternion1, quaternion0):
"""
Returns distance between two quaternions, such that distance * quaternion0 = quaternion1
Args:
quaternion1 (np.array): (x,y,z,w) quaternion
quaternion0 (np.array): (x,y,z,w) quaternion
Returns:
np.array: (x,y,z,w) quaternion distance
"""
return quat_multiply(quaternion1, quat_inverse(quaternion0))
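# Hedged correctness sketch (illustrative, not part of the original module):
# quat_distance(q1, q0) composed with q0 should recover q1.
def _example_quat_distance():
    q0 = np.array([0.0, 0.0, 0.0, 1.0])                              # identity
    q1 = np.array([0.0, 0.0, np.sin(np.pi / 8), np.cos(np.pi / 8)])  # 45 deg about z
    dist = quat_distance(q1, q0)
    assert np.allclose(quat_multiply(dist, q0), q1, atol=1e-5)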
def quat_slerp(quat0, quat1, fraction, shortestpath=True):
"""
Return spherical linear interpolation between two quaternions.
E.g.:
>>> q0 = random_quat()
>>> q1 = random_quat()
>>> q = quat_slerp(q0, q1, 0.0)
>>> np.allclose(q, q0)
True
>>> q = quat_slerp(q0, q1, 1.0)
>>> np.allclose(q, q1)
True
>>> q = quat_slerp(q0, q1, 0.5)
>>> angle = math.acos(np.dot(q0, q))
>>> np.allclose(2.0, math.acos(np.dot(q0, q1)) / angle) or \
np.allclose(2.0, math.acos(-np.dot(q0, q1)) / angle)
True
Args:
quat0 (np.array): (x,y,z,w) quaternion startpoint
quat1 (np.array): (x,y,z,w) quaternion endpoint
fraction (float): fraction of interpolation to calculate
shortestpath (bool): If True, will calculate the shortest path
Returns:
np.array: (x,y,z,w) quaternion distance
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = np.dot(q0, q1)
if abs(abs(d) - 1.0) < EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
q1 *= -1.0
angle = math.acos(np.clip(d, -1, 1))
if abs(angle) < EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
def random_quat(rand=None):
"""
Return uniform random unit quaternion.
E.g.:
>>> q = random_quat()
>>> np.allclose(1.0, vector_norm(q))
True
>>> q = random_quat(np.random.random(3))
>>> q.shape
(4,)
Args:
rand (3-array or None): If specified, must be three independent random variables that are uniformly distributed
between 0 and 1.
Returns:
np.array: (x,y,z,w) random quaternion
"""
if rand is None:
rand = np.random.rand(3)
else:
assert len(rand) == 3
r1 = np.sqrt(1.0 - rand[0])
r2 = np.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return np.array(
(np.sin(t1) * r1, np.cos(t1) * r1, np.sin(t2) * r2, np.cos(t2) * r2),
dtype=np.float32,
)
def random_axis_angle(angle_limit=None, random_state=None):
"""
Samples an axis-angle rotation by first sampling a random axis
and then sampling an angle. If @angle_limit is provided, the size
of the rotation angle is constrained.
If @random_state is provided (instance of np.random.RandomState), it
will be used to generate random numbers.
Args:
angle_limit (None or float): If set, determines magnitude limit of angles to generate
        random_state (None or RandomState): RNG to use if specified
    Returns:
        2-tuple:
            - (np.array) (ax,ay,az) sampled unit axis of rotation
            - (float) sampled rotation angle
    Raises:
        AssertionError: [Invalid RNG]
    """
if angle_limit is None:
angle_limit = 2.0 * np.pi
if random_state is not None:
assert isinstance(random_state, np.random.RandomState)
npr = random_state
else:
npr = np.random
# sample random axis using a normalized sample from spherical Gaussian.
# see (http://extremelearning.com.au/how-to-generate-uniformly-random-points-on-n-spheres-and-n-balls/)
# for why it works.
random_axis = npr.randn(3)
random_axis /= np.linalg.norm(random_axis)
random_angle = npr.uniform(low=0.0, high=angle_limit)
return random_axis, random_angle
def vec(values):
"""
Converts value tuple into a numpy vector.
Args:
values (n-array): a tuple of numbers
Returns:
np.array: vector of given values
"""
return np.array(values, dtype=np.float32)
def mat4(array):
"""
Converts an array to 4x4 matrix.
Args:
array (n-array): the array in form of vec, list, or tuple
Returns:
np.array: a 4x4 numpy matrix
"""
return np.array(array, dtype=np.float32).reshape((4, 4))
def mat2pose(hmat):
"""
Converts a homogeneous 4x4 matrix into pose.
Args:
hmat (np.array): a 4x4 homogeneous matrix
Returns:
2-tuple:
- (np.array) (x,y,z) position array in cartesian coordinates
- (np.array) (x,y,z,w) orientation array in quaternion form
"""
pos = hmat[:3, 3]
orn = mat2quat(hmat[:3, :3])
return pos, orn
def mat2quat(rmat):
"""
Converts given rotation matrix to quaternion.
Args:
rmat (np.array): (..., 3, 3) rotation matrix
Returns:
np.array: (..., 4) (x,y,z,w) float quaternion angles
"""
return R.from_matrix(rmat).as_quat()
def vec2quat(vec, up=(0, 0, 1.0)):
"""
Converts given 3d-direction vector @vec to quaternion orientation with respect to another direction vector @up
Args:
        vec (3-array): (x,y,z) direction vector (possibly non-normalized)
        up (3-array): (x,y,z) direction vector representing the canonical up direction (possibly non-normalized)
    Returns:
        np.array: (x,y,z,w) quaternion orientation whose x-axis is aligned with @vec
    """
# See https://stackoverflow.com/questions/15873996/converting-a-direction-vector-to-a-quaternion-rotation
# Take cross product of @up and @vec to get @s_n, and then cross @vec and @s_n to get @u_n
# Then compose 3x3 rotation matrix and convert into quaternion
vec_n = vec / np.linalg.norm(vec) # x
up_n = up / np.linalg.norm(up)
s_n = np.cross(up_n, vec_n) # y
u_n = np.cross(vec_n, s_n) # z
return mat2quat(np.array([vec_n, s_n, u_n]).T)
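# Hedged correctness sketch (illustrative, not part of the original module): a
# direction vector already aligned with +x (with the default +z up) yields the
# identity rotation.
def _example_vec2quat():
    q = vec2quat(np.array([1.0, 0.0, 0.0]))
    assert np.allclose(q, [0.0, 0.0, 0.0, 1.0], atol=1e-6)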
def euler2mat(euler):
"""
Converts euler angles into rotation matrix form
Args:
euler (np.array): (r,p,y) angles
Returns:
np.array: 3x3 rotation matrix
Raises:
AssertionError: [Invalid input shape]
"""
euler = np.asarray(euler, dtype=np.float64)
assert euler.shape[-1] == 3, "Invalid shaped euler {}".format(euler)
return R.from_euler("xyz", euler).as_matrix()
def mat2euler(rmat):
"""
Converts given rotation matrix to euler angles in radian.
Args:
rmat (np.array): 3x3 rotation matrix
Returns:
np.array: (r,p,y) converted euler angles in radian vec3 float
"""
M = np.array(rmat, dtype=np.float32, copy=False)[:3, :3]
return R.from_matrix(M).as_euler("xyz")
def pose2mat(pose):
"""
Converts pose to homogeneous matrix.
Args:
pose (2-tuple): a (pos, orn) tuple where pos is vec3 float cartesian, and
orn is vec4 float quaternion.
Returns:
np.array: 4x4 homogeneous matrix
"""
homo_pose_mat = np.zeros((4, 4), dtype=np.float32)
homo_pose_mat[:3, :3] = quat2mat(pose[1])
homo_pose_mat[:3, 3] = np.array(pose[0], dtype=np.float32)
homo_pose_mat[3, 3] = 1.0
return homo_pose_mat
def quat2mat(quaternion):
"""
Converts given quaternion to matrix.
Args:
quaternion (np.array): (..., 4) (x,y,z,w) float quaternion angles
Returns:
np.array: (..., 3, 3) rotation matrix
"""
return R.from_quat(quaternion).as_matrix()
def quat2axisangle(quat):
"""
Converts quaternion to axis-angle format.
Returns a unit vector direction scaled by its angle in radians.
Args:
quat (np.array): (x,y,z,w) vec4 float angles
Returns:
np.array: (ax,ay,az) axis-angle exponential coordinates
"""
return R.from_quat(quat).as_rotvec()
def axisangle2quat(vec):
"""
Converts scaled axis-angle to quat.
Args:
vec (np.array): (ax,ay,az) axis-angle exponential coordinates
Returns:
np.array: (x,y,z,w) vec4 float angles
"""
return R.from_rotvec(vec).as_quat()
def euler2quat(euler):
"""
Converts euler angles into quaternion form
Args:
euler (np.array): (r,p,y) angles
Returns:
np.array: (x,y,z,w) float quaternion angles
Raises:
AssertionError: [Invalid input shape]
"""
return R.from_euler("xyz", euler).as_quat()
def quat2euler(quat):
"""
    Converts quaternion into euler angle form
Args:
quat (np.array): (x,y,z,w) float quaternion angles
Returns:
np.array: (r,p,y) angles
Raises:
AssertionError: [Invalid input shape]
"""
return R.from_quat(quat).as_euler("xyz")
def pose_in_A_to_pose_in_B(pose_A, pose_A_in_B):
"""
    Converts a homogeneous matrix corresponding to a point C in frame A
    to a homogeneous matrix corresponding to the same point C in frame B.
Args:
pose_A (np.array): 4x4 matrix corresponding to the pose of C in frame A
pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B
Returns:
np.array: 4x4 matrix corresponding to the pose of C in frame B
"""
    # pose of A in B takes a point in A and transforms it to a point in B.
    # pose of C in B = pose of A in B * pose of C in A
    # take a point in C, transform it to A, then to B
    # T_C^B = T_A^B * T_C^A
return pose_A_in_B.dot(pose_A)
def pose_inv(pose_mat):
"""
Computes the inverse of a homogeneous matrix corresponding to the pose of some
frame B in frame A. The inverse is the pose of frame A in frame B.
Args:
pose_mat (np.array): 4x4 matrix for the pose to inverse
Returns:
np.array: 4x4 matrix for the inverse pose
"""
# Note, the inverse of a pose matrix is the following
# [R t; 0 1]^-1 = [R.T -R.T*t; 0 1]
# Intuitively, this makes sense.
# The original pose matrix translates by t, then rotates by R.
# We just invert the rotation by applying R-1 = R.T, and also translate back.
# Since we apply translation first before rotation, we need to translate by
# -t in the original frame, which is -R-1*t in the new frame, and then rotate back by
# R-1 to align the axis again.
pose_inv = np.zeros((4, 4))
pose_inv[:3, :3] = pose_mat[:3, :3].T
pose_inv[:3, 3] = -pose_inv[:3, :3].dot(pose_mat[:3, 3])
pose_inv[3, 3] = 1.0
return pose_inv
def pose_transform(pos1, quat1, pos0, quat0):
"""
Conducts forward transform from pose (pos0, quat0) to pose (pos1, quat1):
pose1 @ pose0, NOT pose0 @ pose1
Args:
pos1: (x,y,z) position to transform
quat1: (x,y,z,w) orientation to transform
pos0: (x,y,z) initial position
quat0: (x,y,z,w) initial orientation
Returns:
2-tuple:
- (np.array) (x,y,z) position array in cartesian coordinates
- (np.array) (x,y,z,w) orientation array in quaternion form
"""
# Get poses
mat0 = pose2mat((pos0, quat0))
mat1 = pose2mat((pos1, quat1))
# Multiply and convert back to pos, quat
return mat2pose(mat1 @ mat0)
def invert_pose_transform(pos, quat):
"""
Inverts a pose transform
Args:
pos: (x,y,z) position to transform
quat: (x,y,z,w) orientation to transform
Returns:
2-tuple:
- (np.array) (x,y,z) position array in cartesian coordinates
- (np.array) (x,y,z,w) orientation array in quaternion form
"""
# Get pose
mat = pose2mat((pos, quat))
# Invert pose and convert back to pos, quat
return mat2pose(pose_inv(mat))
def relative_pose_transform(pos1, quat1, pos0, quat0):
"""
Computes relative forward transform from pose (pos0, quat0) to pose (pos1, quat1), i.e.: solves:
pose1 = pose0 @ transform
Args:
pos1: (x,y,z) position to transform
quat1: (x,y,z,w) orientation to transform
pos0: (x,y,z) initial position
quat0: (x,y,z,w) initial orientation
Returns:
2-tuple:
- (np.array) (x,y,z) position array in cartesian coordinates
- (np.array) (x,y,z,w) orientation array in quaternion form
"""
# Get poses
mat0 = pose2mat((pos0, quat0))
mat1 = pose2mat((pos1, quat1))
# Invert pose0 and calculate transform
return mat2pose(pose_inv(mat0) @ mat1)
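# Hedged correctness sketch (illustrative, not part of the original module):
# composing pose0 with the relative transform recovers pose1, i.e. this solves
# pose1 = pose0 @ transform.
def _example_relative_pose_transform():
    pos0, quat0 = np.array([1.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0])
    pos1, quat1 = np.array([1.0, 2.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0])
    rel_pos, rel_quat = relative_pose_transform(pos1, quat1, pos0, quat0)
    recovered = pose2mat((pos0, quat0)) @ pose2mat((rel_pos, rel_quat))
    assert np.allclose(recovered, pose2mat((pos1, quat1)), atol=1e-6)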
def _skew_symmetric_translation(pos_A_in_B):
"""
Helper function to get a skew symmetric translation matrix for converting quantities
between frames.
Args:
pos_A_in_B (np.array): (x,y,z) position of A in frame B
Returns:
np.array: 3x3 skew symmetric translation matrix
"""
return np.array(
[
0.0,
-pos_A_in_B[2],
pos_A_in_B[1],
pos_A_in_B[2],
0.0,
-pos_A_in_B[0],
-pos_A_in_B[1],
pos_A_in_B[0],
0.0,
]
).reshape((3, 3))
def vel_in_A_to_vel_in_B(vel_A, ang_vel_A, pose_A_in_B):
"""
Converts linear and angular velocity of a point in frame A to the equivalent in frame B.
Args:
vel_A (np.array): (vx,vy,vz) linear velocity in A
ang_vel_A (np.array): (wx,wy,wz) angular velocity in A
pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B
Returns:
2-tuple:
- (np.array) (vx,vy,vz) linear velocities in frame B
- (np.array) (wx,wy,wz) angular velocities in frame B
"""
pos_A_in_B = pose_A_in_B[:3, 3]
rot_A_in_B = pose_A_in_B[:3, :3]
skew_symm = _skew_symmetric_translation(pos_A_in_B)
vel_B = rot_A_in_B.dot(vel_A) + skew_symm.dot(rot_A_in_B.dot(ang_vel_A))
ang_vel_B = rot_A_in_B.dot(ang_vel_A)
return vel_B, ang_vel_B
def force_in_A_to_force_in_B(force_A, torque_A, pose_A_in_B):
"""
Converts linear and rotational force at a point in frame A to the equivalent in frame B.
Args:
force_A (np.array): (fx,fy,fz) linear force in A
torque_A (np.array): (tx,ty,tz) rotational force (moment) in A
pose_A_in_B (np.array): 4x4 matrix corresponding to the pose of A in frame B
Returns:
2-tuple:
- (np.array) (fx,fy,fz) linear forces in frame B
- (np.array) (tx,ty,tz) moments in frame B
"""
pos_A_in_B = pose_A_in_B[:3, 3]
rot_A_in_B = pose_A_in_B[:3, :3]
skew_symm = _skew_symmetric_translation(pos_A_in_B)
force_B = rot_A_in_B.T.dot(force_A)
torque_B = -rot_A_in_B.T.dot(skew_symm.dot(force_A)) + rot_A_in_B.T.dot(torque_A)
return force_B, torque_B
def rotation_matrix(angle, direction, point=None):
"""
Returns matrix to rotate about axis defined by point and direction.
E.g.:
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float32)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
Args:
angle (float): Magnitude of rotation
direction (np.array): (ax,ay,az) axis about which to rotate
point (None or np.array): If specified, is the (x,y,z) point about which the rotation will occur
Returns:
np.array: 4x4 homogeneous matrix that includes the desired rotation
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = np.array(((cosa, 0.0, 0.0), (0.0, cosa, 0.0), (0.0, 0.0, cosa)), dtype=np.float32)
R += np.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += np.array(
(
(0.0, -direction[2], direction[1]),
(direction[2], 0.0, -direction[0]),
(-direction[1], direction[0], 0.0),
),
dtype=np.float32,
)
M = np.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = np.array(point[:3], dtype=np.float32, copy=False)
M[:3, 3] = point - np.dot(R, point)
return M
def clip_translation(dpos, limit):
"""
Limits a translation (delta position) to a specified limit
Scales down the norm of the dpos to 'limit' if norm(dpos) > limit, else returns immediately
Args:
        dpos (n-array): n-dim Translation being clipped (e.g. (x, y, z)) -- numpy array
limit (float): Value to limit translation by -- magnitude (scalar, in same units as input)
Returns:
2-tuple:
- (np.array) Clipped translation (same dimension as inputs)
- (bool) whether the value was clipped or not
"""
input_norm = np.linalg.norm(dpos)
return (dpos * limit / input_norm, True) if input_norm > limit else (dpos, False)
def clip_rotation(quat, limit):
"""
Limits a (delta) rotation to a specified limit
Converts rotation to axis-angle, clips, then re-converts back into quaternion
Args:
quat (np.array): (x,y,z,w) rotation being clipped
limit (float): Value to limit rotation by -- magnitude (scalar, in radians)
Returns:
2-tuple:
- (np.array) Clipped rotation quaternion (x, y, z, w)
- (bool) whether the value was clipped or not
"""
clipped = False
# First, normalize the quaternion
quat = quat / np.linalg.norm(quat)
den = np.sqrt(max(1 - quat[3] * quat[3], 0))
if den == 0:
# This is a zero degree rotation, immediately return
return quat, clipped
else:
# This is all other cases
x = quat[0] / den
y = quat[1] / den
z = quat[2] / den
a = 2 * math.acos(quat[3])
# Clip rotation if necessary and return clipped quat
if abs(a) > limit:
a = limit * np.sign(a) / 2
sa = math.sin(a)
ca = math.cos(a)
quat = np.array([x * sa, y * sa, z * sa, ca])
clipped = True
return quat, clipped
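# Hedged correctness sketch (illustrative, not part of the original module): a
# 90-degree rotation about z clipped to a 45-degree limit has magnitude pi/4.
def _example_clip_rotation():
    quat_90 = np.array([0.0, 0.0, np.sin(np.pi / 4), np.cos(np.pi / 4)])
    clipped_quat, was_clipped = clip_rotation(quat_90, np.pi / 4)
    assert was_clipped
    assert np.isclose(np.linalg.norm(quat2axisangle(clipped_quat)), np.pi / 4, atol=1e-6)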
def make_pose(translation, rotation):
"""
Makes a homogeneous pose matrix from a translation vector and a rotation matrix.
Args:
translation (np.array): (x,y,z) translation value
rotation (np.array): a 3x3 matrix representing rotation
Returns:
pose (np.array): a 4x4 homogeneous matrix
"""
pose = np.zeros((4, 4))
pose[:3, :3] = rotation
pose[:3, 3] = translation
pose[3, 3] = 1.0
return pose
def unit_vector(data, axis=None, out=None):
"""
    Returns ndarray normalized by length, i.e. Euclidean norm, along axis.
E.g.:
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float32)
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1.0]))
[1.0]
Args:
data (np.array): data to normalize
axis (None or int): If specified, determines specific axis along data to normalize
out (None or np.array): If specified, will store computation in this variable
Returns:
None or np.array: If @out is not specified, will return normalized vector. Otherwise, stores the output in @out
"""
if out is None:
data = np.array(data, dtype=np.float32, copy=True)
if data.ndim == 1:
data /= math.sqrt(np.dot(data, data))
return data
else:
if out is not data:
out[:] = np.array(data, copy=False)
data = out
length = np.atleast_1d(np.sum(data * data, axis))
np.sqrt(length, length)
if axis is not None:
length = np.expand_dims(length, axis)
data /= length
if out is None:
return data
def get_orientation_error(target_orn, current_orn):
"""
Returns the difference between two quaternion orientations as a 3 DOF numpy array.
For use in an impedance controller / task-space PD controller.
Args:
target_orn (np.array): (x, y, z, w) desired quaternion orientation
current_orn (np.array): (x, y, z, w) current quaternion orientation
Returns:
orn_error (np.array): (ax,ay,az) current orientation error, corresponds to
(target_orn - current_orn)
"""
current_orn = np.array([current_orn[3], current_orn[0], current_orn[1], current_orn[2]])
target_orn = np.array([target_orn[3], target_orn[0], target_orn[1], target_orn[2]])
pinv = np.zeros((3, 4))
pinv[0, :] = [-current_orn[1], current_orn[0], -current_orn[3], current_orn[2]]
pinv[1, :] = [-current_orn[2], current_orn[3], current_orn[0], -current_orn[1]]
pinv[2, :] = [-current_orn[3], -current_orn[2], current_orn[1], current_orn[0]]
orn_error = 2.0 * pinv.dot(np.array(target_orn))
return orn_error
def get_orientation_diff_in_radian(orn0, orn1):
"""
Returns the difference between two quaternion orientations in radian
Args:
orn0 (np.array): (x, y, z, w)
orn1 (np.array): (x, y, z, w)
Returns:
orn_diff (float): orientation difference in radian
"""
    # Compute the relative rotation between the two orientations, then return the
    # magnitude of its axis-angle representation
    return np.linalg.norm(quat2axisangle(quat_distance(orn1, orn0)))
def get_pose_error(target_pose, current_pose):
"""
Computes the error corresponding to target pose - current pose as a 6-dim vector.
The first 3 components correspond to translational error while the last 3 components
correspond to the rotational error.
Args:
        target_pose (np.array): a 4x4 homogeneous matrix for the target pose
        current_pose (np.array): a 4x4 homogeneous matrix for the current pose
Returns:
np.array: 6-dim pose error.
"""
error = np.zeros(6)
# compute translational error
target_pos = target_pose[:3, 3]
current_pos = current_pose[:3, 3]
pos_err = target_pos - current_pos
# compute rotational error
r1 = current_pose[:3, 0]
r2 = current_pose[:3, 1]
r3 = current_pose[:3, 2]
r1d = target_pose[:3, 0]
r2d = target_pose[:3, 1]
r3d = target_pose[:3, 2]
rot_err = 0.5 * (np.cross(r1, r1d) + np.cross(r2, r2d) + np.cross(r3, r3d))
error[:3] = pos_err
error[3:] = rot_err
return error
def matrix_inverse(matrix):
"""
Helper function to have an efficient matrix inversion function.
Args:
matrix (np.array): 2d-array representing a matrix
Returns:
np.array: 2d-array representing the matrix inverse
"""
return np.linalg.inv(matrix)
def vecs2axisangle(vec0, vec1):
"""
Converts the angle from unnormalized 3D vectors @vec0 to @vec1 into an axis-angle representation of the angle
Args:
vec0 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized
        vec1 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized
    Returns:
        np.array: (..., 3) axis-angle representation of the rotation from @vec0 to @vec1
    """
# Normalize vectors
vec0 = normalize(vec0, axis=-1)
vec1 = normalize(vec1, axis=-1)
    # Get cross product for direction of angle, and multiply by arccos of the dot product, which is the angle
return np.cross(vec0, vec1) * np.arccos((vec0 * vec1).sum(-1, keepdims=True))
def vecs2quat(vec0, vec1, normalized=False):
"""
Converts the angle from unnormalized 3D vectors @vec0 to @vec1 into a quaternion representation of the angle
Args:
vec0 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized
vec1 (np.array): (..., 3) (x,y,z) 3D vector, possibly unnormalized
        normalized (bool): If True, @vec0 and @vec1 are assumed to already be normalized and we will skip the
            normalization step (more efficient)
    Returns:
        np.array: (..., 4) (x,y,z,w) quaternion rotating @vec0 onto @vec1
    """
# Normalize vectors if requested
if not normalized:
vec0 = normalize(vec0, axis=-1)
vec1 = normalize(vec1, axis=-1)
# Half-way Quaternion Solution -- see https://stackoverflow.com/a/11741520
cos_theta = np.sum(vec0 * vec1, axis=-1, keepdims=True)
quat_unnormalized = np.where(cos_theta == -1, np.array([1.0, 0, 0, 0]), np.concatenate([np.cross(vec0, vec1), 1 + cos_theta], axis=-1))
return quat_unnormalized / np.linalg.norm(quat_unnormalized, axis=-1, keepdims=True)
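# Hedged correctness sketch (illustrative, not part of the original module): the
# half-way quaternion from +x to +y is a 90-degree rotation about z, so applying
# it to +x should yield +y.
def _example_vecs2quat():
    q = vecs2quat(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
    assert np.allclose(quat2mat(q) @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0], atol=1e-6)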
def l2_distance(v1, v2):
"""Returns the L2 distance between vector v1 and v2."""
return np.linalg.norm(np.array(v1) - np.array(v2))
def frustum(left, right, bottom, top, znear, zfar):
"""Create view frustum matrix."""
assert right != left
assert bottom != top
assert znear != zfar
M = np.zeros((4, 4), dtype=np.float32)
M[0, 0] = +2.0 * znear / (right - left)
M[2, 0] = (right + left) / (right - left)
M[1, 1] = +2.0 * znear / (top - bottom)
# TODO: Put this back to 3,1
# M[3, 1] = (top + bottom) / (top - bottom)
M[2, 1] = (top + bottom) / (top - bottom)
M[2, 2] = -(zfar + znear) / (zfar - znear)
M[3, 2] = -2.0 * znear * zfar / (zfar - znear)
M[2, 3] = -1.0
return M
def ortho(left, right, bottom, top, znear, zfar):
"""Create orthonormal projection matrix."""
assert right != left
assert bottom != top
assert znear != zfar
M = np.zeros((4, 4), dtype=np.float32)
M[0, 0] = 2.0 / (right - left)
M[1, 1] = 2.0 / (top - bottom)
M[2, 2] = -2.0 / (zfar - znear)
M[3, 0] = -(right + left) / (right - left)
M[3, 1] = -(top + bottom) / (top - bottom)
M[3, 2] = -(zfar + znear) / (zfar - znear)
M[3, 3] = 1.0
return M
def perspective(fovy, aspect, znear, zfar):
"""Create perspective projection matrix."""
# fovy is in degree
assert znear != zfar
h = np.tan(fovy / 360.0 * np.pi) * znear
w = h * aspect
return frustum(-w, w, -h, h, znear, zfar)
def anorm(x, axis=None, keepdims=False):
"""Compute L2 norms alogn specified axes."""
return np.linalg.norm(x, axis=axis, keepdims=keepdims)
def normalize(v, axis=None, eps=1e-10):
"""L2 Normalize along specified axes."""
norm = anorm(v, axis=axis, keepdims=True)
return v / np.where(norm < eps, eps, norm)
def cartesian_to_polar(x, y):
"""Convert cartesian coordinate to polar coordinate"""
rho = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(y, x)
return rho, phi
def deg2rad(deg):
return deg * np.pi / 180.
def rad2deg(rad):
return rad * 180. / np.pi
def check_quat_right_angle(quat, atol=5e-2):
"""
    Checks whether the quaternion represents a right-angle orientation, by making sure it is some permutation of +/- (1, 0, 0, 0),
+/- (0.707, 0.707, 0, 0), or +/- (0.5, 0.5, 0.5, 0.5)
Because orientations are all normalized (same L2-norm), every orientation should have a unique L1-norm
So we check the L1-norm of the absolute value of the orientation as a proxy for verifying these values
Args:
quat (4-array): (x,y,z,w) quaternion orientation to check
atol (float): Absolute tolerance permitted
Returns:
bool: Whether the quaternion is a right angle or not
"""
return np.any(np.isclose(np.abs(quat).sum(), np.array([1.0, 1.414, 2.0]), atol=atol))
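# Hedged correctness sketch (illustrative, not part of the original module): the
# identity orientation is a right angle, while a 45-degree rotation about z is not.
def _example_check_quat_right_angle():
    assert check_quat_right_angle(np.array([0.0, 0.0, 0.0, 1.0]))
    assert not check_quat_right_angle(euler2quat([0.0, 0.0, np.pi / 4]))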
def z_angle_from_quat(quat):
"""Get the angle around the Z axis produced by the quaternion."""
rotated_X_axis = R.from_quat(quat).apply([1, 0, 0])
return np.arctan2(rotated_X_axis[1], rotated_X_axis[0])
def z_rotation_from_quat(quat):
"""Get the quaternion for the rotation around the Z axis produced by the quaternion."""
return R.from_euler("z", z_angle_from_quat(quat)).as_quat()
StanfordVL/OmniGibson/omnigibson/utils/vision_utils.py
import colorsys
import numpy as np
from PIL import Image, ImageDraw
try:
import accimage
except ImportError:
accimage = None
class RandomScale:
"""Rescale the input PIL.Image to the given size.
Args:
minsize (sequence or int): Desired min output size. If size is a sequence like
(w, h), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
maxsize (sequence or int): Desired max output size. If size is a sequence like
(w, h), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR``
"""
def __init__(self, minsize, maxsize, interpolation=Image.BILINEAR):
assert isinstance(minsize, int)
assert isinstance(maxsize, int)
self.minsize = minsize
self.maxsize = maxsize
self.interpolation = interpolation
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be scaled.
Returns:
PIL.Image: Rescaled image.
"""
        size = int(np.random.randint(self.minsize, self.maxsize + 1))  # cast: np.random.randint returns a numpy integer, not a plain int
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), self.interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), self.interpolation)
else:
raise NotImplementedError()
class Remapper:
"""
Remaps values in an image from old_mapping to new_mapping using an efficient key_array.
See more details in the remap method.
"""
def __init__(self):
self.key_array = np.array([], dtype=np.uint32) # Initialize the key_array as empty
self.known_ids = set()
def clear(self):
"""Resets the key_array to empty."""
self.key_array = np.array([], dtype=np.uint32)
self.known_ids = set()
def remap(self, old_mapping, new_mapping, image):
"""
Remaps values in the given image from old_mapping to new_mapping using an efficient key_array.
If the image contains values that are not in old_mapping, they are remapped to the value in new_mapping
that corresponds to 'unlabelled'.
Args:
old_mapping (dict): The old mapping dictionary that maps a set of image values to labels
e.g. {1: 'desk', 2: 'chair'}.
new_mapping (dict): The new mapping dictionary that maps another set of image values to labels,
e.g. {5: 'desk', 7: 'chair', 100: 'unlabelled'}.
image (np.ndarray): The 2D image to remap, e.g. [[1, 3], [1, 2]].
Returns:
np.ndarray: The remapped image, e.g. [[5,100],[5,7]].
dict: The remapped labels dictionary, e.g. {5: 'desk', 7: 'chair', 100: 'unlabelled'}.
"""
# Make sure that max uint32 doesn't match any value in the new mapping
assert np.all(np.array(list(new_mapping.keys())) != np.iinfo(np.uint32).max), "New mapping contains default unmapped value!"
image_max_key = np.max(image)
key_array_max_key = len(self.key_array) - 1
if image_max_key > key_array_max_key:
prev_key_array = self.key_array.copy()
# We build a new key array and use max uint32 as the default value.
self.key_array = np.full(image_max_key + 1, np.iinfo(np.uint32).max, dtype=np.uint32)
# Copy the previous key array into the new key array
self.key_array[:len(prev_key_array)] = prev_key_array
new_keys = old_mapping.keys() - self.known_ids
if new_keys:
self.known_ids.update(new_keys)
# Populate key_array with new keys
for key in new_keys:
label = old_mapping[key]
new_key = next((k for k, v in new_mapping.items() if v == label), None)
assert new_key is not None, f"Could not find a new key for label {label} in new_mapping!"
self.key_array[key] = new_key
# For all the values that exist in the image but not in old_mapping.keys(), we map them to whichever key in
# new_mapping that equals to 'unlabelled'. This is needed because some values in the image don't necessarily
# show up in the old_mapping, i.e. particle systems.
for key in np.unique(image):
if key not in old_mapping.keys():
new_key = next((k for k, v in new_mapping.items() if v == 'unlabelled'), None)
assert new_key is not None, f"Could not find a new key for label 'unlabelled' in new_mapping!"
self.key_array[key] = new_key
# Apply remapping
remapped_img = self.key_array[image]
# Make sure all values are correctly remapped and not equal to the default value
assert np.all(remapped_img != np.iinfo(np.uint32).max), "Not all keys in the image are in the key array!"
remapped_labels = {}
for key in np.unique(remapped_img):
remapped_labels[key] = new_mapping[key]
return remapped_img, remapped_labels
def remap_bbox(self, semantic_id):
"""
Remaps a semantic id to a new id using the key_array.
Args:
semantic_id (int): The semantic id to remap.
Returns:
int: The remapped id.
"""
assert semantic_id < len(self.key_array), f"Semantic id {semantic_id} is out of range!"
return self.key_array[semantic_id]
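# Hedged usage sketch (illustrative, not part of the original module): mirrors the
# example in the remap docstring; image value 3 is absent from old_mapping and is
# therefore remapped to the 'unlabelled' key.
def _example_remapper():
    remapper = Remapper()
    old_mapping = {1: "desk", 2: "chair"}
    new_mapping = {5: "desk", 7: "chair", 100: "unlabelled"}
    image = np.array([[1, 3], [1, 2]])
    remapped_img, remapped_labels = remapper.remap(old_mapping, new_mapping, image)
    assert np.array_equal(remapped_img, [[5, 100], [5, 7]])
    assert remapped_labels == {5: "desk", 7: "chair", 100: "unlabelled"}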
def randomize_colors(N, bright=True):
"""
Modified from https://github.com/matterport/Mask_RCNN/blob/master/mrcnn/visualize.py#L59
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
Args:
        N (int): Number of colors to generate
        bright (bool): whether to increase the brightness of the colors or not
    Returns:
        np.array: (N, 3) array of generated RGB colors, with the first color forced to black
    """
brightness = 1.0 if bright else 0.5
hsv = [(1.0 * i / N, 1, brightness) for i in range(N)]
colors = np.array(list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)))
    rstate = np.random.RandomState(seed=20)
    rstate.shuffle(colors)  # shuffle with the seeded RNG so the palette is deterministic across runs
colors[0] = [0, 0, 0] # First color is black
return colors
def segmentation_to_rgb(seg_im, N, colors=None):
"""
Helper function to visualize segmentations as RGB frames.
NOTE: assumes that geom IDs go up to N at most - if not,
multiple geoms might be assigned to the same color.
Args:
seg_im ((W, H)-array): Segmentation image
N (int): Maximum segmentation ID from @seg_im
colors (None or list of 3-array): If specified, colors to apply
to different segmentation IDs. Otherwise, will be generated randomly
"""
# ensure all values lie within [0, N]
seg_im = np.mod(seg_im, N)
if colors is None:
use_colors = randomize_colors(N=N, bright=True)
else:
use_colors = colors
if N <= 256:
return (255.0 * use_colors[seg_im]).astype(np.uint8)
else:
        return (use_colors[seg_im]).astype(float)  # np.float was removed in NumPy 1.24
def colorize_bboxes_3d(bbox_3d_data, rgb_image, camera_params):
"""
Project 3D bounding box data onto 2D and colorize the bounding boxes for visualization.
Reference: https://forums.developer.nvidia.com/t/mathematical-definition-of-3d-bounding-boxes-annotator-nvidia-omniverse-isaac-sim/223416
Args:
bbox_3d_data (np.ndarray): 3D bounding box data
rgb_image (np.ndarray): RGB image
camera_params (dict): Camera parameters
Returns:
np.ndarray: RGB image with 3D bounding boxes drawn
"""
def world_to_image_pinhole(world_points, camera_params):
# Project corners to image space (assumes pinhole camera model)
proj_mat = camera_params["cameraProjection"].reshape(4, 4)
view_mat = camera_params["cameraViewTransform"].reshape(4, 4)
view_proj_mat = np.dot(view_mat, proj_mat)
world_points_homo = np.pad(world_points, ((0, 0), (0, 1)), constant_values=1.0)
tf_points = np.dot(world_points_homo, view_proj_mat)
tf_points = tf_points / (tf_points[..., -1:])
return 0.5 * (tf_points[..., :2] + 1)
def draw_lines_and_points_for_boxes(img, all_image_points):
width, height = img.size
draw = ImageDraw.Draw(img)
# Define connections between the corners of the bounding box
connections = [
(0, 1), (1, 3), (3, 2), (2, 0), # Front face
(4, 5), (5, 7), (7, 6), (6, 4), # Back face
(0, 4), (1, 5), (2, 6), (3, 7) # Side edges connecting front and back faces
]
# Calculate the number of bounding boxes
num_boxes = len(all_image_points) // 8
# Generate random colors for each bounding box
from omni.replicator.core import random_colours
box_colors = random_colours(num_boxes, enable_random=True, num_channels=3)
# Ensure colors are in the correct format for drawing (255 scale)
box_colors = [(int(r), int(g), int(b)) for r, g, b in box_colors]
# Iterate over each set of 8 points (each bounding box)
for i in range(0, len(all_image_points), 8):
image_points = all_image_points[i:i+8]
image_points[:, 1] = height - image_points[:, 1] # Flip Y-axis to match image coordinates
# Use a distinct color for each bounding box
line_color = box_colors[i // 8]
# Draw lines for each connection
for start, end in connections:
draw.line((image_points[start][0], image_points[start][1],
image_points[end][0], image_points[end][1]),
fill=line_color, width=2)
rgb = Image.fromarray(rgb_image)
# Get 3D corners
from omni.syntheticdata.scripts.helpers import get_bbox_3d_corners
corners_3d = get_bbox_3d_corners(bbox_3d_data)
corners_3d = corners_3d.reshape(-1, 3)
# Project to image space
corners_2d = world_to_image_pinhole(corners_3d, camera_params)
width, height = rgb.size
corners_2d *= np.array([[width, height]])
# Now, draw all bounding boxes
draw_lines_and_points_for_boxes(rgb, corners_2d)
return np.array(rgb)
StanfordVL/OmniGibson/omnigibson/utils/lazy_import_utils.py
import importlib
from types import ModuleType
class LazyImporter(ModuleType):
"""Replace a module's global namespace with me to support lazy imports of submodules and members."""
def __init__(self, module_name, module):
super().__init__("lazy_" + module_name)
self._module_path = module_name
self._module = module
self._not_module = set()
self._submodules = {}
def __getattr__(self, name: str):
# First, try the argument as a module name.
if name not in self._not_module:
submodule = self._get_module(name)
if submodule:
return submodule
else:
# Record module not found so that we don't keep looking.
self._not_module.add(name)
# If it's not a module name, try it as a member of this module.
try:
return getattr(self._module, name)
        except AttributeError:
raise AttributeError(
f"module {self.__name__} has no attribute {name}"
) from None
def _get_module(self, module_name: str):
"""Recursively create and return a LazyImporter for the given module name."""
# Get the fully qualified module name by prepending self._module_path
if self._module_path:
module_name = f"{self._module_path}.{module_name}"
if module_name in self._submodules:
return self._submodules[module_name]
try:
wrapper = LazyImporter(module_name, importlib.import_module(module_name))
self._submodules[module_name] = wrapper
return wrapper
except ModuleNotFoundError:
            return None
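# Hedged usage sketch (illustrative, not part of the original module): wrapping the
# standard-library `os` package defers `os.path` resolution until first access.
def _example_lazy_importer():
    import os
    lazy_os = LazyImporter("os", os)
    # Submodule access triggers importlib.import_module("os.path") on demand
    assert lazy_os.path.join("a", "b") == os.path.join("a", "b")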
StanfordVL/OmniGibson/omnigibson/utils/sim_utils.py
import numpy as np
from collections import namedtuple
from collections.abc import Iterable
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils import python_utils
import omnigibson.utils.transform_utils as T
import omnigibson.lazy as lazy
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Raw Body Contact Information
# See https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.contact_sensor/docs/index.html?highlight=contact%20sensor#omni.isaac.contact_sensor._contact_sensor.CsRawData for more info.
CsRawData = namedtuple("RawBodyData", ["time", "dt", "body0", "body1", "position", "normal", "impulse"])
def set_carb_setting(carb_settings, setting, value):
"""
Convenience function to set settings.
Args:
        carb_settings (carb.settings.ISettings): Settings interface instance to modify.
        setting (str): Name of setting to change.
        value (Any): New value for the setting.
Raises:
TypeError: If the type of value does not match setting type.
"""
if isinstance(value, str):
carb_settings.set_string(setting, value)
elif isinstance(value, bool):
carb_settings.set_bool(setting, value)
elif isinstance(value, int):
carb_settings.set_int(setting, value)
elif isinstance(value, float):
carb_settings.set_float(setting, value)
elif isinstance(value, Iterable) and not isinstance(value, dict):
if len(value) == 0:
raise TypeError(f"Array of type {type(value)} must be nonzero.")
if isinstance(value[0], str):
carb_settings.set_string_array(setting, value)
elif isinstance(value[0], bool):
carb_settings.set_bool_array(setting, value)
elif isinstance(value[0], int):
carb_settings.set_int_array(setting, value)
elif isinstance(value[0], float):
carb_settings.set_float_array(setting, value)
else:
raise TypeError(f"Value of type {type(value)} is not supported.")
else:
raise TypeError(f"Value of type {type(value)} is not supported.")
def check_deletable_prim(prim_path):
"""
Checks whether the prim defined at @prim_path can be deleted.
Args:
prim_path (str): Path defining which prim should be checked for deletion
Returns:
bool: Whether the prim can be deleted or not
"""
if not lazy.omni.isaac.core.utils.prims.is_prim_path_valid(prim_path):
return False
if lazy.omni.isaac.core.utils.prims.is_prim_no_delete(prim_path):
return False
if lazy.omni.isaac.core.utils.prims.is_prim_ancestral(prim_path):
return False
if lazy.omni.isaac.core.utils.prims.get_prim_type_name(prim_path=prim_path) == "PhysicsScene":
return False
if prim_path == "/World":
return False
if prim_path == "/":
return False
# Don't remove any /Render prims as that can cause crashes
if prim_path.startswith("/Render"):
return False
return True
def prims_to_rigid_prim_set(inp_prims):
"""
Converts prims @inp_prims into its corresponding set of rigid prims
Args:
inp_prims (list of RigidPrim or EntityPrim): Arbitrary prims
Returns:
set of RigidPrim: Aggregated set of RigidPrims from @inp_prims
"""
# Avoid circular imports
from omnigibson.prims.entity_prim import EntityPrim
from omnigibson.prims.rigid_prim import RigidPrim
out = set()
for prim in inp_prims:
if isinstance(prim, EntityPrim):
out.update({link for link in prim.links.values()})
elif isinstance(prim, RigidPrim):
out.add(prim)
else:
raise ValueError(f"Inputted prims must be either EntityPrim or RigidPrim instances "
f"when getting collisions! Type: {type(prim)}")
return out
def get_collisions(prims=None, prims_check=None, prims_exclude=None, step_physics=False):
"""
Grab collisions that occurred during the most recent physics timestep associated with prims @prims
Args:
prims (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): Prim(s) to check for collision.
If None, will check against all objects currently in the scene.
prims_check (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): If specified, will
only check for collisions with these specific prim(s)
prims_exclude (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): If specified, will
explicitly ignore any collisions with these specific prim(s)
step_physics (bool): Whether to step the physics first before checking collisions. Default is False
Returns:
set of 2-tuple: Unique collision pairs occurring in the simulation at the current timestep between the
specified prim(s), represented by their prim_paths
"""
# Make sure sim is playing
assert og.sim.is_playing(), "Cannot get collisions while sim is not playing!"
# Optionally step physics and then update contacts
if step_physics:
og.sim.step_physics()
# Standardize inputs
prims = og.sim.scene.objects if prims is None else prims if isinstance(prims, Iterable) else [prims]
prims_check = [] if prims_check is None else prims_check if isinstance(prims_check, Iterable) else [prims_check]
prims_exclude = [] if prims_exclude is None else prims_exclude if isinstance(prims_exclude, Iterable) else [prims_exclude]
# Convert into prim paths to check for collision
def get_paths_from_rigid_prims(inp_prims):
return {prim.prim_path for prim in inp_prims}
def get_contacts(inp_prims):
return {(c.body0, c.body1) for prim in inp_prims for c in prim.contact_list()}
rprims = prims_to_rigid_prim_set(prims)
rprims_check = prims_to_rigid_prim_set(prims_check)
rprims_exclude = prims_to_rigid_prim_set(prims_exclude)
paths = get_paths_from_rigid_prims(rprims)
paths_check = get_paths_from_rigid_prims(rprims_check)
paths_exclude = get_paths_from_rigid_prims(rprims_exclude)
# Run sanity checks
assert paths_check.isdisjoint(paths_exclude), \
f"Paths to check and paths to ignore collisions for should be mutually exclusive! " \
f"paths_check: {paths_check}, paths_exclude: {paths_exclude}"
# Determine whether we're checking / filtering any collision from collision set A
should_check_collisions = len(paths_check) > 0
should_filter_collisions = len(paths_exclude) > 0
# Get all collisions from the objects set
collisions = get_contacts(rprims)
# Only run the following (expensive) code if we are actively using filtering criteria
if should_check_collisions or should_filter_collisions:
# First filter out unnecessary collisions
if should_filter_collisions:
# First filter pass, remove the intersection of the main contacts and the contacts from the exclusion set minus
# the intersection between the exclusion and normal set
# This filters out any matching collisions in the exclusion set that are NOT an overlap
# between @rprims and @rprims_exclude
rprims_exclude_intersect = rprims_exclude.intersection(rprims)
exclude_disjoint_collisions = get_contacts(rprims_exclude - rprims_exclude_intersect)
collisions.difference_update(exclude_disjoint_collisions)
# Second filter pass, we remove collisions that may include self-collisions
# This is a bit more tricky because we need to actually look at the individual contact pairs to determine
# whether it's a collision (which may include a self-collision) that should be filtered
# We do this by grabbing the contacts of the intersection between the exclusion and normal rprims sets,
# and then making sure the resulting contact pair sets are completely disjoint from the paths intersection
exclude_intersect_collisions = get_contacts(rprims_exclude_intersect)
collisions.difference_update({pair for pair in exclude_intersect_collisions if paths.issuperset(set(pair))})
# Now, we additionally check for explicit collisions, filtering out any that do not meet this criteria
# This is essentially the inverse of the filter collision process, where we do two passes again, but for each
# case we look at the union rather than the subtraction of the two sets
if should_check_collisions:
# First check pass, keep the intersection of the main contacts and the contacts from the check set minus
# the intersection between the check and normal set
# This keeps any matching collisions in the check set that overlap between @rprims and @rprims_check
rprims_check_intersect = rprims_check.intersection(rprims)
check_disjoint_collisions = get_contacts(rprims_check - rprims_check_intersect)
valid_other_collisions = collisions.intersection(check_disjoint_collisions)
# Second check pass, we additionally keep collisions that may include self-collisions
# This is a bit more tricky because we need to actually look at the individual contact pairs to determine
# whether it's a collision (which may include a self-collision) that should be kept
# We do this by grabbing the contacts of the intersection between the check and normal rprims sets,
# and then making sure the resulting contact pair sets is strictly a subset of the original set
# Lastly, we only keep the intersection of this resulting set with the original collision set, so that
# any previously filtered collisions are respected
check_intersect_collisions = get_contacts(rprims_check_intersect)
valid_intersect_collisions = collisions.intersection({pair for pair in check_intersect_collisions if paths.issuperset(set(pair))})
# Collisions is union of valid other and valid self collisions
collisions = valid_other_collisions.union(valid_intersect_collisions)
    # Only iterate over the collisions when debug logging is enabled, for efficiency
if gm.DEBUG:
for item in collisions:
log.debug("linkA:{}, linkB:{}".format(item[0], item[1]))
return collisions
def check_collision(prims=None, prims_check=None, prims_exclude=None, step_physics=False):
"""
Checks if any valid collisions occurred during the most recent physics timestep associated with prims @prims
Args:
prims (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): Prim(s) to check for collision.
If None, will check against all objects currently in the scene.
prims_check (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): If specified, will
only check for collisions with these specific prim(s)
prims_exclude (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): If specified, will
explicitly ignore any collisions with these specific prim(s)
step_physics (bool): Whether to step the physics first before checking collisions. Default is False
Returns:
bool: True if a valid collision has occurred, else False
"""
return len(get_collisions(
prims=prims,
prims_check=prims_check,
prims_exclude=prims_exclude,
step_physics=step_physics)) > 0
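# A minimal usage sketch (object names are hypothetical; assumes og.sim is playing):
#
#   robot = og.sim.scene.object_registry("name", "robot0")
#   floor = og.sim.scene.object_registry("name", "floor0")
#   # True if the robot touched anything other than the floor during the last physics step
#   in_contact = check_collision(prims=robot, prims_exclude=floor, step_physics=True)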
def filter_collisions(collisions, filter_prims):
"""
Filters collision pairs @collisions based on a set of prims @filter_prims.
Args:
collisions (set of 2-tuple): Collision pairs that should be filtered
filter_prims (EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): Prim(s) specifying which
collisions to filter for. Any collisions that include prims from this filter
set will be removed
Returns:
set of 2-tuple: Filtered collision pairs
"""
paths = prims_to_rigid_prim_set(filter_prims)
filtered_collisions = set()
for pair in collisions:
if set(pair).isdisjoint(paths):
filtered_collisions.add(pair)
return filtered_collisions
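# Example (a sketch): drop any collision pair that involves the robot's own links,
# leaving only collisions between other objects:
#
#   external_collisions = filter_collisions(collisions, filter_prims=robot)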
def place_base_pose(obj, pos, quat=None, z_offset=None):
"""
Place the object so that its base (z-min) rests at the location of @pos
Args:
obj (BaseObject): Object to place in the environment
pos (3-array): Global (x,y,z) location to place the base of the robot
quat (None or 4-array): Optional (x,y,z,w) quaternion orientation when placing the object.
If None, the object's current orientation will be used
z_offset (None or float): Optional additional z_offset to apply
"""
# avoid circular dependency
from omnigibson.object_states import AABB
lower, _ = obj.states[AABB].get_value()
cur_pos = obj.get_position()
z_diff = cur_pos[2] - lower[2]
obj.set_position_orientation(pos + np.array([0, 0, z_diff if z_offset is None else z_diff + z_offset]), quat)
def test_valid_pose(obj, pos, quat=None, z_offset=None):
"""
Test if the object can be placed with no collision.
Args:
obj (BaseObject): Object to place in the environment
pos (3-array): Global (x,y,z) location to place the object
quat (None or 4-array): Optional (x,y,z,w) quaternion orientation when placing the object.
If None, the object's current orientation will be used
z_offset (None or float): Optional additional z_offset to apply
Returns:
bool: Whether the placed object position is valid
"""
# Make sure sim is playing
assert og.sim.is_playing(), "Cannot test valid pose while sim is not playing!"
# Store state before checking object position
state = og.sim.scene.dump_state(serialized=False)
# Set the pose of the object
place_base_pose(obj, pos, quat, z_offset)
obj.keep_still()
# Check whether we're in collision after taking a single physics step
in_collision = check_collision(prims=obj, step_physics=True)
# Restore state after checking the collision
og.sim.load_state(state, serialized=False)
# Valid if there are no collisions
return not in_collision
def land_object(obj, pos, quat=None, z_offset=None):
"""
Land the object at the specified position @pos, given a valid position and orientation.
Args:
obj (BaseObject): Object to place in the environment
pos (3-array): Global (x,y,z) location to place the object
quat (None or 4-array): Optional (x,y,z,w) quaternion orientation when placing the object.
If None, a random orientation about the z-axis will be sampled
z_offset (None or float): Optional additional z_offset to apply
"""
# Make sure sim is playing
assert og.sim.is_playing(), "Cannot land object while sim is not playing!"
# Set the object's pose
quat = T.euler2quat([0, 0, np.random.uniform(0, np.pi * 2)]) if quat is None else quat
place_base_pose(obj, pos, quat, z_offset)
obj.keep_still()
# Check to make sure we landed successfully
# land for maximum 1 second, should fall down ~5 meters
land_success = False
max_simulator_step = int(1.0 / og.sim.get_rendering_dt())
for _ in range(max_simulator_step):
# Run a sim step and see if we have any contacts
og.sim.step()
land_success = check_collision(prims=obj)
if land_success:
# Once we're successful, we can break immediately
log.info(f"Landed object {obj.name} successfully!")
break
# Print out warning in case we failed to land the object successfully
if not land_success:
log.warning(f"Object {obj.name} failed to land.")
obj.keep_still()
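# A typical placement workflow built from the helpers above (a sketch; `obj` and the
# candidate position are hypothetical):
#
#   candidate_pos = np.array([1.0, 0.5, 0.0])
#   if test_valid_pose(obj, candidate_pos):   # collision-free at this location?
#       land_object(obj, candidate_pos)       # drop it and wait for contact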
def meets_minimum_isaac_version(minimum_version):
    """Returns True if the installed Isaac Sim version is at least @minimum_version"""
    return python_utils.meets_minimum_version(lazy.omni.isaac.version.get_version()[0], minimum_version)
StanfordVL/OmniGibson/omnigibson/utils/motion_planning_utils.py
import numpy as np
from math import ceil
import heapq
import omnigibson as og
from omnigibson.macros import create_module_macros
from omnigibson.object_states import ContactBodies
import omnigibson.utils.transform_utils as T
from omnigibson.utils.control_utils import IKSolver
import omnigibson.lazy as lazy
m = create_module_macros(module_path=__file__)
m.ANGLE_DIFF = 0.3
m.DIST_DIFF = 0.1
def _wrap_angle(theta):
""""
Converts an angle to the range [-pi, pi).
Args:
theta (float): angle in radians
Returns:
float: angle in radians in range [-pi, pi)
"""
return (theta + np.pi) % (2 * np.pi) - np.pi
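# Example values:
#   _wrap_angle(3 * np.pi / 2)   # -> -np.pi / 2
#   _wrap_angle(-3 * np.pi / 2)  # ->  np.pi / 2
#   _wrap_angle(np.pi)           # -> -np.pi (pi itself wraps to -pi, since the range is half-open)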
def plan_base_motion(
robot,
end_conf,
context,
planning_time=15.0,
):
"""
Plans a base motion to a 2d pose
Args:
        robot (BaseRobot): Robot object to plan for
end_conf (Iterable): [x, y, yaw] 2d pose to plan to
context (PlanningContext): Context to plan in that includes the robot copy
planning_time (float): Time to plan for
Returns:
Array of arrays: Array of 2d poses that the robot should navigate to
"""
from ompl import base as ob
from ompl import geometric as ompl_geo
class CustomMotionValidator(ob.MotionValidator):
def __init__(self, si, space):
super(CustomMotionValidator, self).__init__(si)
self.si = si
self.space = space
def checkMotion(self, s1, s2):
if not self.si.isValid(s2):
return False
start = np.array([s1.getX(), s1.getY(), s1.getYaw()])
goal = np.array([s2.getX(), s2.getY(), s2.getYaw()])
segment_theta = self.get_angle_between_poses(start, goal)
# Start rotation
if not self.is_valid_rotation(self.si, start, segment_theta):
return False
# Navigation
dist = np.linalg.norm(goal[:2] - start[:2])
num_points = ceil(dist / m.DIST_DIFF) + 1
nav_x = np.linspace(start[0], goal[0], num_points).tolist()
nav_y = np.linspace(start[1], goal[1], num_points).tolist()
for i in range(num_points):
state = create_state(self.si, nav_x[i], nav_y[i], segment_theta)
if not self.si.isValid(state()):
return False
# Goal rotation
if not self.is_valid_rotation(self.si, [goal[0], goal[1], segment_theta], goal[2]):
return False
return True
@staticmethod
def is_valid_rotation(si, start_conf, final_orientation):
diff = _wrap_angle(final_orientation - start_conf[2])
direction = np.sign(diff)
diff = abs(diff)
num_points = ceil(diff / m.ANGLE_DIFF) + 1
nav_angle = np.linspace(0.0, diff, num_points) * direction
angles = nav_angle + start_conf[2]
for i in range(num_points):
state = create_state(si.getStateSpace(), start_conf[0], start_conf[1], angles[i])
if not si.isValid(state()):
return False
return True
        @staticmethod
        def get_angle_between_poses(p1, p2):
            """Returns the yaw (radians) of the vector pointing from 2d pose @p1 to @p2"""
segment = []
segment.append(p2[0] - p1[0])
segment.append(p2[1] - p1[1])
return np.arctan2(segment[1], segment[0])
def create_state(space, x, y, yaw):
x = float(x)
y = float(y)
yaw = float(yaw)
state = ob.State(space)
state().setX(x)
state().setY(y)
state().setYaw(_wrap_angle(yaw))
return state
def state_valid_fn(q):
x = q.getX()
y = q.getY()
yaw = q.getYaw()
pose = ([x, y, 0.0], T.euler2quat((0, 0, yaw)))
return not set_base_and_detect_collision(context, pose)
def remove_unnecessary_rotations(path):
"""
        Removes unnecessary rotations from a base path: wherever possible, the yaw of each
        intermediate pose is set to point toward the position of the next pose in the path
Args:
path (Array of arrays): Array of 2d poses
Returns:
Array of numpy arrays: Array of 2d poses with unnecessary rotations removed
"""
# Start at the same starting pose
new_path = [path[0]]
# Process every intermediate waypoint
for i in range(1, len(path) - 1):
# compute the yaw you'd be at when arriving into path[i] and departing from it
arriving_yaw = CustomMotionValidator.get_angle_between_poses(path[i-1], path[i])
departing_yaw = CustomMotionValidator.get_angle_between_poses(path[i], path[i+1])
# check if you are able to make that rotation directly.
arriving_state = (path[i][0], path[i][1], arriving_yaw)
if CustomMotionValidator.is_valid_rotation(si, arriving_state, departing_yaw):
# Then use the arriving yaw directly
new_path.append(arriving_state)
else:
# Otherwise, keep the waypoint
new_path.append(path[i])
# Don't forget to add back the same ending pose
new_path.append(path[-1])
return new_path
pos = robot.get_position()
yaw = T.quat2euler(robot.get_orientation())[2]
start_conf = (pos[0], pos[1], yaw)
# create an SE(2) state space
space = ob.SE2StateSpace()
# set lower and upper bounds
bbox_vals = []
for floor in filter(lambda o: o.category == "floors", og.sim.scene.objects):
bbox_vals += floor.aabb[0][:2].tolist()
bbox_vals += floor.aabb[1][:2].tolist()
bounds = ob.RealVectorBounds(2)
bounds.setLow(min(bbox_vals))
bounds.setHigh(max(bbox_vals))
space.setBounds(bounds)
# create a simple setup object
ss = ompl_geo.SimpleSetup(space)
ss.setStateValidityChecker(ob.StateValidityCheckerFn(state_valid_fn))
si = ss.getSpaceInformation()
si.setMotionValidator(CustomMotionValidator(si, space))
# TODO: Try changing to RRTConnect in the future. Currently using RRT because movement is not direction invariant. Can change to RRTConnect
# possibly if hasSymmetricInterpolate is set to False for the state space. Doc here https://ompl.kavrakilab.org/classompl_1_1base_1_1StateSpace.html
planner = ompl_geo.RRT(si)
ss.setPlanner(planner)
start = create_state(space, start_conf[0], start_conf[1], start_conf[2])
goal = create_state(space, end_conf[0], end_conf[1], end_conf[2])
ss.setStartAndGoalStates(start, goal)
if not state_valid_fn(start()) or not state_valid_fn(goal()):
return
solved = ss.solve(planning_time)
if solved:
# try to shorten the path
ss.simplifySolution()
sol_path = ss.getSolutionPath()
return_path = []
for i in range(sol_path.getStateCount()):
x = sol_path.getState(i).getX()
y = sol_path.getState(i).getY()
yaw = sol_path.getState(i).getYaw()
return_path.append([x, y, yaw])
return remove_unnecessary_rotations(return_path)
return None
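# A minimal usage sketch (the goal pose and the PlanningContext `context` are hypothetical):
#
#   end_conf = [1.0, 2.0, np.pi / 2]                  # target (x, y, yaw) in the world frame
#   path = plan_base_motion(robot, end_conf, context)
#   if path is not None:
#       for x, y, yaw in path:
#           ...                                       # navigate the base through each 2d waypoint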
def plan_arm_motion(
robot,
end_conf,
context,
planning_time=15.0,
torso_fixed=True,
):
"""
Plans an arm motion to a final joint position
Args:
robot (BaseRobot): Robot object to plan for
end_conf (Iterable): Final joint position to plan to
context (PlanningContext): Context to plan in that includes the robot copy
planning_time (float): Time to plan for
Returns:
Array of arrays: Array of joint positions that the robot should navigate to
"""
from ompl import base as ob
from ompl import geometric as ompl_geo
if torso_fixed:
joint_control_idx = robot.arm_control_idx[robot.default_arm]
dim = len(joint_control_idx)
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
else:
joint_control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
dim = len(joint_control_idx)
if "combined" in robot.robot_arm_descriptor_yamls:
joint_combined_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
initial_joint_pos = np.array(robot.get_joint_positions()[joint_combined_idx])
control_idx_in_joint_pos = np.where(np.in1d(joint_combined_idx, joint_control_idx))[0]
else:
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
def state_valid_fn(q):
        joint_pos = np.copy(initial_joint_pos)  # copy so repeated validity checks don't mutate the initial pose
joint_pos[control_idx_in_joint_pos] = [q[i] for i in range(dim)]
return not set_arm_and_detect_collision(context, joint_pos)
    # create a real vector state space over the controlled joints
space = ob.RealVectorStateSpace(dim)
# set lower and upper bounds
bounds = ob.RealVectorBounds(dim)
joints = np.array([joint for joint in robot.joints.values()])
arm_joints = joints[joint_control_idx]
for i, joint in enumerate(arm_joints):
if end_conf[i] > joint.upper_limit:
end_conf[i] = joint.upper_limit
if end_conf[i] < joint.lower_limit:
end_conf[i] = joint.lower_limit
bounds.setLow(i, float(joint.lower_limit))
bounds.setHigh(i, float(joint.upper_limit))
space.setBounds(bounds)
# create a simple setup object
ss = ompl_geo.SimpleSetup(space)
ss.setStateValidityChecker(ob.StateValidityCheckerFn(state_valid_fn))
si = ss.getSpaceInformation()
planner = ompl_geo.BITstar(si)
ss.setPlanner(planner)
start_conf = robot.get_joint_positions()[joint_control_idx]
start = ob.State(space)
for i in range(dim):
start[i] = float(start_conf[i])
goal = ob.State(space)
for i in range(dim):
goal[i] = float(end_conf[i])
ss.setStartAndGoalStates(start, goal)
if not state_valid_fn(start) or not state_valid_fn(goal):
return
    # Attempt to solve within the allotted planning time
solved = ss.solve(planning_time)
if solved:
# try to shorten the path
# ss.simplifySolution()
sol_path = ss.getSolutionPath()
return_path = []
for i in range(sol_path.getStateCount()):
joint_pos = [sol_path.getState(i)[j] for j in range(dim)]
return_path.append(joint_pos)
return return_path
return None
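# A minimal usage sketch (hypothetical goal; assumes a PlanningContext `context` as above):
#
#   arm_idx = robot.arm_control_idx[robot.default_arm]
#   goal_joint_pos = robot.get_joint_positions()[arm_idx]
#   goal_joint_pos[0] += 0.3                          # perturb the first arm joint
#   path = plan_arm_motion(robot, goal_joint_pos, context)
#   # path is a list of joint configurations to interpolate through, or None on failure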
def plan_arm_motion_ik(
robot,
end_conf,
context,
planning_time=15.0,
torso_fixed=True,
):
"""
Plans an arm motion to a final end effector pose
Args:
robot (BaseRobot): Robot object to plan for
end_conf (Iterable): Final end effector pose to plan to
context (PlanningContext): Context to plan in that includes the robot copy
planning_time (float): Time to plan for
Returns:
Array of arrays: Array of end effector pose that the robot should navigate to
"""
from ompl import base as ob
from ompl import geometric as ompl_geo
DOF = 6
if torso_fixed:
joint_control_idx = robot.arm_control_idx[robot.default_arm]
dim = len(joint_control_idx)
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
robot_description_path = robot.robot_arm_descriptor_yamls["left_fixed"]
else:
joint_control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
dim = len(joint_control_idx)
if "combined" in robot.robot_arm_descriptor_yamls:
joint_combined_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
initial_joint_pos = np.array(robot.get_joint_positions()[joint_combined_idx])
control_idx_in_joint_pos = np.where(np.in1d(joint_combined_idx, joint_control_idx))[0]
else:
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
robot_description_path = robot.robot_arm_descriptor_yamls[robot.default_arm]
ik_solver = IKSolver(
robot_description_path=robot_description_path,
robot_urdf_path=robot.urdf_path,
reset_joint_pos=robot.reset_joint_pos[joint_control_idx],
eef_name=robot.eef_link_names[robot.default_arm],
)
def state_valid_fn(q):
        joint_pos = np.copy(initial_joint_pos)  # copy so repeated validity checks don't mutate the initial pose
eef_pose = [q[i] for i in range(6)]
control_joint_pos = ik_solver.solve(
target_pos=eef_pose[:3],
target_quat=T.axisangle2quat(eef_pose[3:]),
max_iterations=1000,
)
if control_joint_pos is None:
return False
joint_pos[control_idx_in_joint_pos] = control_joint_pos
return not set_arm_and_detect_collision(context, joint_pos)
    # create a 6D real vector state space for the end effector pose
space = ob.RealVectorStateSpace(DOF)
# set lower and upper bounds for eef position
bounds = ob.RealVectorBounds(DOF)
EEF_X_LIM = [-0.8, 0.8]
EEF_Y_LIM = [-0.8, 0.8]
EEF_Z_LIM = [-2.0, 2.0]
bounds.setLow(0, EEF_X_LIM[0])
bounds.setHigh(0, EEF_X_LIM[1])
bounds.setLow(1, EEF_Y_LIM[0])
bounds.setHigh(1, EEF_Y_LIM[1])
bounds.setLow(2, EEF_Z_LIM[0])
bounds.setHigh(2, EEF_Z_LIM[1])
    # set lower and upper bounds for eef orientation (axis-angle bounds)
for i in range(3, 6):
bounds.setLow(i, -np.pi)
bounds.setHigh(i, np.pi)
space.setBounds(bounds)
# create a simple setup object
ss = ompl_geo.SimpleSetup(space)
ss.setStateValidityChecker(ob.StateValidityCheckerFn(state_valid_fn))
si = ss.getSpaceInformation()
planner = ompl_geo.BITstar(si)
ss.setPlanner(planner)
start_conf = np.append(robot.get_relative_eef_position(), T.quat2axisangle(robot.get_relative_eef_orientation()))
    # Set the start state from the robot's current end effector pose
start = ob.State(space)
for i in range(DOF):
start[i] = float(start_conf[i])
goal = ob.State(space)
for i in range(DOF):
goal[i] = float(end_conf[i])
ss.setStartAndGoalStates(start, goal)
if not state_valid_fn(start) or not state_valid_fn(goal):
return
    # Attempt to solve within the allotted planning time
solved = ss.solve(planning_time)
if solved:
# try to shorten the path
# ss.simplifySolution()
sol_path = ss.getSolutionPath()
return_path = []
for i in range(sol_path.getStateCount()):
eef_pose = [sol_path.getState(i)[j] for j in range(DOF)]
return_path.append(eef_pose)
return return_path
return None
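# A minimal usage sketch: here the goal is a 6D end effector pose, i.e. (x, y, z) position
# plus an axis-angle orientation, expressed in the robot frame (values are hypothetical):
#
#   eef_goal = np.concatenate([[0.5, 0.0, 0.8], T.quat2axisangle(T.euler2quat([0, np.pi / 2, 0]))])
#   path = plan_arm_motion_ik(robot, eef_goal, context)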
def set_base_and_detect_collision(context, pose):
"""
Moves the robot and detects robot collisions with the environment and itself
Args:
context (PlanningContext): Context to plan in that includes the robot copy
pose (Array): Pose in the world frame to check for collisions at
Returns:
bool: Whether the robot is in collision
"""
robot_copy = context.robot_copy
robot_copy_type = context.robot_copy_type
translation = lazy.pxr.Gf.Vec3d(*np.array(pose[0], dtype=float))
robot_copy.prims[robot_copy_type].GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(pose[1], dtype=float)[[3, 0, 1, 2]]
robot_copy.prims[robot_copy_type].GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
return detect_robot_collision(context)
def set_arm_and_detect_collision(context, joint_pos):
"""
Sets joint positions of the robot and detects robot collisions with the environment and itself
Args:
context (PlanningContext): Context to plan in that includes the robot copy
joint_pos (Array): Joint positions to set the robot to
Returns:
bool: Whether the robot is in a valid state i.e. not in collision
"""
robot_copy = context.robot_copy
robot_copy_type = context.robot_copy_type
arm_links = context.robot.manipulation_link_names
link_poses = context.fk_solver.get_link_poses(joint_pos, arm_links)
for link in arm_links:
pose = link_poses[link]
if link in robot_copy.meshes[robot_copy_type].keys():
for mesh_name, mesh in robot_copy.meshes[robot_copy_type][link].items():
relative_pose = robot_copy.relative_poses[robot_copy_type][link][mesh_name]
mesh_pose = T.pose_transform(*pose, *relative_pose)
translation = lazy.pxr.Gf.Vec3d(*np.array(mesh_pose[0], dtype=float))
mesh.GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(mesh_pose[1], dtype=float)[[3, 0, 1, 2]]
mesh.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
return detect_robot_collision(context)
def detect_robot_collision(context):
"""
Detects robot collisions
Args:
context (PlanningContext): Context to plan in that includes the robot copy
Returns:
bool: Whether the robot is in collision
"""
robot_copy = context.robot_copy
robot_copy_type = context.robot_copy_type
# Define function for checking overlap
valid_hit = False
mesh_path = None
def overlap_callback(hit):
nonlocal valid_hit
valid_hit = hit.rigid_body not in context.disabled_collision_pairs_dict[mesh_path]
return not valid_hit
for meshes in robot_copy.meshes[robot_copy_type].values():
for mesh in meshes.values():
if valid_hit:
return valid_hit
mesh_path = mesh.GetPrimPath().pathString
mesh_id = lazy.pxr.PhysicsSchemaTools.encodeSdfPath(mesh_path)
if mesh.GetTypeName() == "Mesh":
og.sim.psqi.overlap_mesh(*mesh_id, reportFn=overlap_callback)
else:
og.sim.psqi.overlap_shape(*mesh_id, reportFn=overlap_callback)
return valid_hit
def detect_robot_collision_in_sim(robot, filter_objs=None, ignore_obj_in_hand=True):
"""
Detects robot collisions with the environment, but not with itself using the ContactBodies API
Args:
robot (BaseRobot): Robot object to detect collisions for
        filter_objs (None or Array of StatefulObject): Objects to ignore collisions with
ignore_obj_in_hand (bool): Whether to ignore collisions with the object in the robot's hand
Returns:
bool: Whether the robot is in collision
"""
    # Copy to avoid mutating the caller's list (or a shared mutable default argument)
    filter_objs = [] if filter_objs is None else list(filter_objs)
    filter_categories = ["floors"]
obj_in_hand = robot._ag_obj_in_hand[robot.default_arm]
if obj_in_hand is not None and ignore_obj_in_hand:
filter_objs.append(obj_in_hand)
collision_prims = list(robot.states[ContactBodies].get_value(ignore_objs=tuple(filter_objs)))
    # Iterate over a copy, since removing from a list while iterating over it skips elements
    for col_prim in list(collision_prims):
tokens = col_prim.prim_path.split("/")
obj_prim_path = "/".join(tokens[:-1])
col_obj = og.sim.scene.object_registry("prim_path", obj_prim_path)
if col_obj.category in filter_categories:
collision_prims.remove(col_prim)
return len(collision_prims) > 0
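# Example (a sketch): True if the robot currently touches anything besides the floor or
# the object it is holding:
#
#   colliding = detect_robot_collision_in_sim(robot, ignore_obj_in_hand=True)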
def astar(search_map, start, goal, eight_connected=True):
"""
A* search algorithm for finding a path from start to goal on a grid map
Args:
search_map (Array): 2D Grid map to search on
start (Array): Start position on the map
goal (Array): Goal position on the map
eight_connected (bool): Whether we consider the sides and diagonals of a cell as neighbors or just the sides
Returns:
2D numpy array or None: Array of shape (N, 2) where N is the number of steps in the path.
Each row represents the (x, y) coordinates of a step on the path.
If no path is found, returns None.
"""
def heuristic(node):
# Calculate the Euclidean distance from node to goal
return np.sqrt((node[0] - goal[0])**2 + (node[1] - goal[1])**2)
def get_neighbors(cell):
if eight_connected:
# 8-connected grid
return [(cell[0] + 1, cell[1]), (cell[0] - 1, cell[1]), (cell[0], cell[1] + 1), (cell[0], cell[1] - 1),
(cell[0] + 1, cell[1] + 1), (cell[0] - 1, cell[1] - 1), (cell[0] + 1, cell[1] - 1), (cell[0] - 1, cell[1] + 1)]
else:
# 4-connected grid
return [(cell[0] + 1, cell[1]), (cell[0] - 1, cell[1]), (cell[0], cell[1] + 1), (cell[0], cell[1] - 1)]
def is_valid(cell):
# Check if cell is within the map and traversable
return (0 <= cell[0] < search_map.shape[0] and
0 <= cell[1] < search_map.shape[1] and
search_map[cell] != 0)
def cost(cell1, cell2):
# Define the cost of moving from cell1 to cell2
# Return 1 for adjacent cells and square root of 2 for diagonal cells in an 8-connected grid.
if cell1[0] == cell2[0] or cell1[1] == cell2[1]:
return 1
else:
return np.sqrt(2)
open_set = [(0, start)]
came_from = {}
visited = set()
g_score = {cell: float('inf') for cell in np.ndindex(search_map.shape)}
g_score[start] = 0
while open_set:
_, current = heapq.heappop(open_set)
visited.add(current)
if current == goal:
# Reconstruct path
path = []
while current in came_from:
path.insert(0, current)
current = came_from[current]
path.insert(0, start)
return np.array(path)
for neighbor in get_neighbors(current):
# Skip neighbors that are not valid or have already been visited
if not is_valid(neighbor) or neighbor in visited:
continue
tentative_g_score = g_score[current] + cost(current, neighbor)
if tentative_g_score < g_score[neighbor]:
came_from[neighbor] = current
g_score[neighbor] = tentative_g_score
f_score = tentative_g_score + heuristic(neighbor)
heapq.heappush(open_set, (f_score, neighbor))
# Return None if no path is found
return None
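# A worked example (a sketch): on a 3x3 map where 0 marks an obstacle, A* routes around
# the blocked center cell:
#
#   search_map = np.array([[1, 1, 1],
#                          [1, 0, 1],
#                          [1, 1, 1]])
#   path = astar(search_map, start=(0, 0), goal=(2, 2))
#   # Returns one of the cost-optimal paths, e.g. [[0, 0], [1, 0], [2, 1], [2, 2]],
#   # with total cost 1 + sqrt(2) + 1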
StanfordVL/OmniGibson/omnigibson/utils/registry_utils.py
"""
A set of utility functions for registering and tracking objects
"""
from inspect import isclass
import numpy as np
from collections.abc import Iterable
from omnigibson.macros import create_module_macros
from omnigibson.utils.python_utils import Serializable, SerializableNonInstance, UniquelyNamed
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Token identifier for default values if a key doesn't exist in a given object
m.DOES_NOT_EXIST = "DOES_NOT_EXIST"
class Registry(UniquelyNamed):
"""
Simple class for easily registering and tracking arbitrary objects of the same (or very similar) class types.
Elements added are automatically organized by attributes specified by @unique_keys and @group_keys, and
can be accessed at runtime by specifying the desired key and indexing value to grab the object(s).
Default_key is a 1-to-1 mapping: i.e.: a single indexing value will return a single object.
default: "name" -- indexing by object.name (i.e.: every object's name should be unique)
Unique_keys are other 1-to-1 mappings: i.e.: a single indexing value will return a single object.
example: indexing by object.name (every object's name should be unique)
Group_keys are 1-to-many mappings: i.e.: a single indexing value will return a set of objects.
example: indexing by object.in_rooms (many objects can be in a single room)
    Note that if an object's attribute is an array of values, then it will be stored under ALL of its values.
example: object.in_rooms = ["kitchen", "living_room"], indexing by in_rooms with a value of either kitchen OR
living room will return this object as part of its set!
You can also easily check for membership in this registry, via either the object's name OR the object itself,
e.g.:
> object.name in registry
> object in registry
If the latter, note that default_key attribute will automatically be used to search for the object
"""
def __init__(
self,
name,
class_types=object,
default_key="name",
unique_keys=None,
group_keys=None,
default_value=m.DOES_NOT_EXIST,
):
"""
Args:
name (str): name of this registry
class_types (class or list of class): class expected for all entries in this registry. Default is `object`,
meaning any object entered will be accepted. This is used to sanity check added entries using add()
to make sure their type is correct (either that the entry itself is a valid class, or that they are an
object of the valid class). Note that if a list of classes are passed, any one of the classes are
considered a valid type for added objects
default_key (str): default key by which to reference a given object. This key should be a
                publicly accessible attribute in a given object (e.g.: object.name) and uniquely identify
any entries
unique_keys (None or list of str): keys by which to reference a given object. Any key should be a
                publicly accessible attribute in a given object (e.g.: object.name)
i.e.: these keys should map to a single object
group_keys (None or list of str): keys by which to reference a group of objects, based on the key
(e.g.: object.room)
i.e.: these keys can map to multiple objects
e.g.: default is "name" key only, so we will store objects by their object.name attribute
default_value (any): Default value to use if the attribute @key does not exist in the object
"""
self._name = name
self.class_types = class_types if isinstance(class_types, Iterable) else [class_types]
self.default_key = default_key
self.unique_keys = set([] if unique_keys is None else unique_keys)
self.group_keys = set([] if group_keys is None else group_keys)
self.default_value = default_value
# We always add in the "name" attribute as well
self.unique_keys.add(self.default_key)
# Make sure there's no overlap between the unique and group keys
assert len(self.unique_keys.intersection(self.group_keys)) == 0,\
f"Cannot create registry with unique and group object keys that are the same! " \
f"Unique keys: {self.unique_keys}, group keys: {self.group_keys}"
# Create the dicts programmatically
for k in self.unique_keys.union(self.group_keys):
self.__setattr__(f"_objects_by_{k}", dict())
# Run super init
super().__init__()
@property
def name(self):
return self._name
def add(self, obj):
"""
Adds Instance @obj to this registry
Args:
obj (any): Instance to add to this registry
"""
# Make sure that obj is of the correct class type
        validate_class = issubclass if isclass(obj) else isinstance
        assert any([validate_class(obj, class_type) for class_type in self.class_types]), \
            f"Added object must be either an instance or subclass of one of the following classes: {self.class_types}!"
self._add(obj=obj, keys=self.all_keys)
def _add(self, obj, keys=None):
"""
Same as self.add, but allows for selective @keys for adding this object to. Useful for internal things,
such as internal updating of mappings
Args:
obj (any): Instance to add to this registry
keys (None or set or list of str): Which object keys to use for adding the object to mappings.
None is default, which corresponds to all keys
"""
keys = self.all_keys if keys is None else keys
for k in keys:
obj_attr = self._get_obj_attr(obj=obj, attr=k)
# Standardize input as a list
obj_attr = obj_attr if \
isinstance(obj_attr, Iterable) and not isinstance(obj_attr, str) else [obj_attr]
# Loop over all values in this attribute and add to all mappings
for attr in obj_attr:
mapping = self.get_dict(k)
if k in self.unique_keys:
# Handle unique case
if attr in mapping:
log.warning(f"Instance identifier '{k}' should be unique for adding to this registry mapping! Existing {k}: {attr}")
# Special case for "name" attribute, which should ALWAYS be unique
assert k != "name", "For name attribute, objects MUST be unique."
mapping[attr] = obj
else:
# Not unique case
# Possibly initialize list
if attr not in mapping:
mapping[attr] = set()
mapping[attr].add(obj)
def remove(self, obj):
"""
        Removes object @obj from this registry
Args:
obj (any): Instance to remove from this registry
"""
# Iterate over all keys
for k in self.all_keys:
# Grab the attribute from the object
obj_attr = self._get_obj_attr(obj=obj, attr=k)
# Standardize input as a list
obj_attr = obj_attr if \
isinstance(obj_attr, Iterable) and not isinstance(obj_attr, str) else [obj_attr]
# Loop over all values in this attribute and remove them from all mappings
for attr in obj_attr:
mapping = self.get_dict(k)
if k in self.unique_keys:
# Handle unique case -- in this case, we just directly pop the value from the dictionary
mapping.pop(attr)
else:
# Not unique case
# We remove a value from the resulting set
mapping[attr].remove(obj)
def clear(self):
"""
Removes all owned objects from this registry
"""
# Re-create the owned dicts programmatically
for k in self.unique_keys.union(self.group_keys):
self.__setattr__(f"_objects_by_{k}", dict())
def update(self, keys=None):
"""
Updates this registry, refreshing all internal mappings in case an object's value was updated
Args:
keys (None or str or set or list of str): Which object keys to update. None is default, which corresponds
to all keys
"""
objects = self.objects
keys = self.all_keys if keys is None else \
(keys if type(keys) in {tuple, list} else [keys])
# Delete and re-create all keys mappings
for k in keys:
self.__delattr__(f"_objects_by_{k}")
self.__setattr__(f"_objects_by_{k}", dict())
# Iterate over all objects and re-populate the mappings
for obj in objects:
self._add(obj=obj, keys=[k])
def object_is_registered(self, obj):
"""
        Check if a given object @obj is registered
        Args:
            obj (any): Instance to check if it is internally registered
        Returns:
            bool: True if @obj is registered internally, else False
        """
return obj in self.objects
def get_dict(self, key):
"""
Specific mapping dictionary within this registry corresponding to the mappings of @key.
e.g.: if key = "name", this will return the dictionary mapping object.name to objects
Args:
key (str): Key with which to grab mapping dict from
Returns:
dict: Mapping from identifiers to object(s) based on @key
"""
return getattr(self, f"_objects_by_{key}")
def get_ids(self, key):
"""
All identifiers within this registry corresponding to the mappings of @key.
e.g.: if key = "name", this will return all "names" stored internally that index into a object
Args:
key (str): Key with which to grab all identifiers from
Returns:
set: All identifiers within this registry corresponding to the mappings of @key.
"""
return set(self.get_dict(key=key).keys())
def _get_obj_attr(self, obj, attr):
"""
Grabs object's @obj's attribute @attr. Additionally checks to see if @obj is a class or a class instance, and
uses the correct logic
Args:
obj (any): Object to grab attribute from
attr (str): String name of the attribute to grab
Return:
any: Attribute @k of @obj
"""
# We try to grab the object's attribute, and if it fails we fallback to the default value
try:
val = getattr(obj, attr)
except:
val = self.default_value
return val
@property
def objects(self):
"""
Get the objects in this registry
Returns:
list of any: Instances owned by this registry
"""
return list(self.get_dict(self.default_key).values())
@property
def all_keys(self):
"""
Returns:
set of str: All object keys that are valid identification methods to index object(s)
"""
return self.unique_keys.union(self.group_keys)
def __call__(self, key, value, default_val=None):
"""
Grab the object in this registry based on @key and @value
Args:
key (str): What identification type to use to grab the requested object(s).
Should be one of @self.all_keys.
value (any): Value to grab. Should be the value of your requested object.<key> attribute
default_val (any): Default value to return if @value is not found
Returns:
any or set of any: requested unique object if @key is one of unique_keys, else a set if
@key is one of group_keys
"""
assert key in self.all_keys,\
f"Invalid key requested! Valid options are: {self.all_keys}, got: {key}"
return self.get_dict(key).get(value, default_val)
def __contains__(self, obj):
# Instance can be either a string (default key) OR the object itself
if isinstance(obj, str):
obj = self(self.default_key, obj)
return self.object_is_registered(obj=obj)
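# A minimal usage sketch (the `apple` object and its attributes are hypothetical; any
# object exposing the registered keys as attributes works):
#
#   registry = Registry(name="demo_registry", group_keys=["in_rooms"])
#   registry.add(apple)                     # requires apple.name and apple.in_rooms
#   registry("name", "apple0")              # -> the apple instance (1-to-1 lookup)
#   registry("in_rooms", "kitchen", set())  # -> set of objects in the kitchen (1-to-many)
#   "apple0" in registry                    # -> True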
class SerializableRegistry(Registry, Serializable):
"""
Registry that is serializable, i.e.: entries contain states that can themselves be serialized /deserialized.
Note that this assumes that any objects added to this registry are themselves of @Serializable type!
"""
def add(self, obj):
# In addition to any other class types, we make sure that the object is a serializable instance / class
validate_class = issubclass if isclass(obj) else isinstance
assert any([validate_class(obj, class_type) for class_type in (Serializable, SerializableNonInstance)]), \
f"Added object must be either an instance or subclass of Serializable or SerializableNonInstance!"
# Run super like normal
super().add(obj=obj)
@property
def state_size(self):
return sum(obj.state_size for obj in self.objects)
def _dump_state(self):
# Iterate over all objects and grab their states
state = dict()
for obj in self.objects:
state[obj.name] = obj.dump_state(serialized=False)
return state
def _load_state(self, state):
# Iterate over all objects and load their states. Currently the objects and the state don't have to match, i.e.
# there might be objects in the scene that do not appear in the state dict (a warning will be printed), or
# the state might contain additional information about objects that are NOT in the scene. For both cases, state
# loading will be skipped.
for obj in self.objects:
if obj.name not in state:
log.warning(f"Object '{obj.name}' is not in the state dict to load from. Skip loading its state.")
continue
obj.load_state(state[obj.name], serialized=False)
def _serialize(self, state):
# Iterate over the entire dict and flatten
return np.concatenate([obj.serialize(state[obj.name]) for obj in self.objects]) if \
len(self.objects) > 0 else np.array([])
def _deserialize(self, state):
state_dict = dict()
# Iterate over all the objects and deserialize their individual states, incrementing the index counter
# along the way
idx = 0
for obj in self.objects:
log.debug(f"obj: {obj.name}, state size: {obj.state_size}, idx: {idx}, passing in state length: {len(state[idx:])}")
# We pass in the entire remaining state vector, assuming the object only parses the relevant states
# at the beginning
state_dict[obj.name] = obj.deserialize(state[idx:])
idx += obj.state_size
return state_dict, idx
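# A serialization round-trip sketch using the hooks defined above (in practice these are
# invoked through the public Serializable wrappers; shown with the private methods since
# those are the signatures defined in this file):
#
#   state = registry._dump_state()             # nested dict keyed by object name
#   flat = registry._serialize(state)          # flattened 1D numpy vector
#   restored, _ = registry._deserialize(flat)  # back to the nested dict form
#   registry._load_state(restored)             # push the states back into the objects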