# StanfordVL/OmniGibson/omnigibson/utils/vision_utils.py
import colorsys
import numpy as np
from PIL import Image, ImageDraw
try:
import accimage
except ImportError:
accimage = None
class RandomScale:
"""Rescale the input PIL.Image to the given size.
Args:
minsize (sequence or int): Desired min output size. If size is a sequence like
(w, h), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
maxsize (sequence or int): Desired max output size. If size is a sequence like
(w, h), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR``
"""
def __init__(self, minsize, maxsize, interpolation=Image.BILINEAR):
assert isinstance(minsize, int)
assert isinstance(maxsize, int)
self.minsize = minsize
self.maxsize = maxsize
self.interpolation = interpolation
def __call__(self, img):
"""
Args:
img (PIL.Image): Image to be scaled.
Returns:
PIL.Image: Rescaled image.
"""
        size = int(np.random.randint(self.minsize, self.maxsize + 1))
if isinstance(size, int):
w, h = img.size
if (w <= h and w == size) or (h <= w and h == size):
return img
if w < h:
ow = size
oh = int(size * h / w)
return img.resize((ow, oh), self.interpolation)
else:
oh = size
ow = int(size * w / h)
return img.resize((ow, oh), self.interpolation)
else:
raise NotImplementedError()
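# Illustrative usage (added for exposition, not part of the original module): a minimal
# sketch showing RandomScale resizing a synthetic PIL image so that its smaller edge
# lands somewhere in [256, 512]. The image dimensions here are arbitrary.
def _example_random_scale():
    img = Image.new("RGB", (640, 480))
    transform = RandomScale(minsize=256, maxsize=512)
    out = transform(img)
    # The smaller edge of `out` equals the sampled size; the aspect ratio is preserved
    return out.size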
class Remapper:
"""
Remaps values in an image from old_mapping to new_mapping using an efficient key_array.
See more details in the remap method.
"""
def __init__(self):
self.key_array = np.array([], dtype=np.uint32) # Initialize the key_array as empty
self.known_ids = set()
def clear(self):
"""Resets the key_array to empty."""
self.key_array = np.array([], dtype=np.uint32)
self.known_ids = set()
def remap(self, old_mapping, new_mapping, image):
"""
Remaps values in the given image from old_mapping to new_mapping using an efficient key_array.
If the image contains values that are not in old_mapping, they are remapped to the value in new_mapping
that corresponds to 'unlabelled'.
Args:
old_mapping (dict): The old mapping dictionary that maps a set of image values to labels
e.g. {1: 'desk', 2: 'chair'}.
new_mapping (dict): The new mapping dictionary that maps another set of image values to labels,
e.g. {5: 'desk', 7: 'chair', 100: 'unlabelled'}.
image (np.ndarray): The 2D image to remap, e.g. [[1, 3], [1, 2]].
Returns:
np.ndarray: The remapped image, e.g. [[5,100],[5,7]].
dict: The remapped labels dictionary, e.g. {5: 'desk', 7: 'chair', 100: 'unlabelled'}.
"""
# Make sure that max uint32 doesn't match any value in the new mapping
assert np.all(np.array(list(new_mapping.keys())) != np.iinfo(np.uint32).max), "New mapping contains default unmapped value!"
image_max_key = np.max(image)
key_array_max_key = len(self.key_array) - 1
if image_max_key > key_array_max_key:
prev_key_array = self.key_array.copy()
# We build a new key array and use max uint32 as the default value.
self.key_array = np.full(image_max_key + 1, np.iinfo(np.uint32).max, dtype=np.uint32)
# Copy the previous key array into the new key array
self.key_array[:len(prev_key_array)] = prev_key_array
new_keys = old_mapping.keys() - self.known_ids
if new_keys:
self.known_ids.update(new_keys)
# Populate key_array with new keys
for key in new_keys:
label = old_mapping[key]
new_key = next((k for k, v in new_mapping.items() if v == label), None)
assert new_key is not None, f"Could not find a new key for label {label} in new_mapping!"
self.key_array[key] = new_key
# For all the values that exist in the image but not in old_mapping.keys(), we map them to whichever key in
# new_mapping that equals to 'unlabelled'. This is needed because some values in the image don't necessarily
# show up in the old_mapping, i.e. particle systems.
for key in np.unique(image):
if key not in old_mapping.keys():
new_key = next((k for k, v in new_mapping.items() if v == 'unlabelled'), None)
assert new_key is not None, f"Could not find a new key for label 'unlabelled' in new_mapping!"
self.key_array[key] = new_key
# Apply remapping
remapped_img = self.key_array[image]
# Make sure all values are correctly remapped and not equal to the default value
assert np.all(remapped_img != np.iinfo(np.uint32).max), "Not all keys in the image are in the key array!"
remapped_labels = {}
for key in np.unique(remapped_img):
remapped_labels[key] = new_mapping[key]
return remapped_img, remapped_labels
def remap_bbox(self, semantic_id):
"""
Remaps a semantic id to a new id using the key_array.
Args:
semantic_id (int): The semantic id to remap.
Returns:
int: The remapped id.
"""
assert semantic_id < len(self.key_array), f"Semantic id {semantic_id} is out of range!"
return self.key_array[semantic_id]
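# Illustrative usage (added for exposition): a toy remapping mirroring the docstring
# example above. The mappings and image values are made up.
def _example_remapper():
    remapper = Remapper()
    old_mapping = {1: 'desk', 2: 'chair'}
    new_mapping = {5: 'desk', 7: 'chair', 100: 'unlabelled'}
    image = np.array([[1, 3], [1, 2]], dtype=np.uint32)
    remapped_img, remapped_labels = remapper.remap(old_mapping, new_mapping, image)
    # remapped_img -> [[5, 100], [5, 7]]; the unknown value 3 falls back to 'unlabelled' (100)
    return remapped_img, remapped_labels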
def randomize_colors(N, bright=True):
"""
Modified from https://github.com/matterport/Mask_RCNN/blob/master/mrcnn/visualize.py#L59
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
Args:
N (int): Number of colors to generate
Returns:
bright (bool): whether to increase the brightness of the colors or not
"""
brightness = 1.0 if bright else 0.5
hsv = [(1.0 * i / N, 1, brightness) for i in range(N)]
colors = np.array(list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv)))
    # Shuffle with the seeded RandomState so that the color assignment is deterministic
    rstate = np.random.RandomState(seed=20)
    rstate.shuffle(colors)
colors[0] = [0, 0, 0] # First color is black
return colors
def segmentation_to_rgb(seg_im, N, colors=None):
"""
Helper function to visualize segmentations as RGB frames.
NOTE: assumes that geom IDs go up to N at most - if not,
multiple geoms might be assigned to the same color.
Args:
seg_im ((W, H)-array): Segmentation image
N (int): Maximum segmentation ID from @seg_im
        colors (None or list of 3-array): If specified, colors to apply
            to different segmentation IDs. Otherwise, will be generated randomly
    Returns:
        (W, H, 3)-array: Colorized RGB frame
    """
    # ensure all values lie within [0, N)
seg_im = np.mod(seg_im, N)
if colors is None:
use_colors = randomize_colors(N=N, bright=True)
else:
use_colors = colors
if N <= 256:
return (255.0 * use_colors[seg_im]).astype(np.uint8)
else:
        return (use_colors[seg_im]).astype(float)
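# Illustrative usage (added for exposition): colorize a tiny 2x2 segmentation map with
# 4 possible IDs. The values are arbitrary.
def _example_segmentation_to_rgb():
    seg = np.array([[0, 1], [2, 3]])
    rgb = segmentation_to_rgb(seg, N=4)
    # rgb has shape (2, 2, 3) and dtype uint8 since N <= 256; ID 0 maps to black
    return rgb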
def colorize_bboxes_3d(bbox_3d_data, rgb_image, camera_params):
"""
Project 3D bounding box data onto 2D and colorize the bounding boxes for visualization.
Reference: https://forums.developer.nvidia.com/t/mathematical-definition-of-3d-bounding-boxes-annotator-nvidia-omniverse-isaac-sim/223416
Args:
bbox_3d_data (np.ndarray): 3D bounding box data
rgb_image (np.ndarray): RGB image
camera_params (dict): Camera parameters
Returns:
np.ndarray: RGB image with 3D bounding boxes drawn
"""
def world_to_image_pinhole(world_points, camera_params):
# Project corners to image space (assumes pinhole camera model)
proj_mat = camera_params["cameraProjection"].reshape(4, 4)
view_mat = camera_params["cameraViewTransform"].reshape(4, 4)
view_proj_mat = np.dot(view_mat, proj_mat)
world_points_homo = np.pad(world_points, ((0, 0), (0, 1)), constant_values=1.0)
tf_points = np.dot(world_points_homo, view_proj_mat)
tf_points = tf_points / (tf_points[..., -1:])
return 0.5 * (tf_points[..., :2] + 1)
def draw_lines_and_points_for_boxes(img, all_image_points):
width, height = img.size
draw = ImageDraw.Draw(img)
# Define connections between the corners of the bounding box
connections = [
(0, 1), (1, 3), (3, 2), (2, 0), # Front face
(4, 5), (5, 7), (7, 6), (6, 4), # Back face
(0, 4), (1, 5), (2, 6), (3, 7) # Side edges connecting front and back faces
]
# Calculate the number of bounding boxes
num_boxes = len(all_image_points) // 8
# Generate random colors for each bounding box
from omni.replicator.core import random_colours
box_colors = random_colours(num_boxes, enable_random=True, num_channels=3)
# Ensure colors are in the correct format for drawing (255 scale)
box_colors = [(int(r), int(g), int(b)) for r, g, b in box_colors]
# Iterate over each set of 8 points (each bounding box)
for i in range(0, len(all_image_points), 8):
image_points = all_image_points[i:i+8]
image_points[:, 1] = height - image_points[:, 1] # Flip Y-axis to match image coordinates
# Use a distinct color for each bounding box
line_color = box_colors[i // 8]
# Draw lines for each connection
for start, end in connections:
draw.line((image_points[start][0], image_points[start][1],
image_points[end][0], image_points[end][1]),
fill=line_color, width=2)
rgb = Image.fromarray(rgb_image)
# Get 3D corners
from omni.syntheticdata.scripts.helpers import get_bbox_3d_corners
corners_3d = get_bbox_3d_corners(bbox_3d_data)
corners_3d = corners_3d.reshape(-1, 3)
# Project to image space
corners_2d = world_to_image_pinhole(corners_3d, camera_params)
width, height = rgb.size
corners_2d *= np.array([[width, height]])
# Now, draw all bounding boxes
draw_lines_and_points_for_boxes(rgb, corners_2d)
return np.array(rgb)
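# Illustrative sketch (added for exposition) of the pinhole projection math used by
# world_to_image_pinhole above: homogeneous points are multiplied by a combined
# view-projection matrix, perspective-divided, then mapped from NDC [-1, 1] into [0, 1].
# The identity matrix below is a stand-in for a real view_mat @ proj_mat product.
def _example_pinhole_projection():
    view_proj_mat = np.eye(4)
    world_points = np.array([[0.0, 0.0, 1.0], [0.5, -0.5, 1.0]])
    points_homo = np.pad(world_points, ((0, 0), (0, 1)), constant_values=1.0)
    tf_points = points_homo @ view_proj_mat
    tf_points = tf_points / tf_points[..., -1:]
    return 0.5 * (tf_points[..., :2] + 1)  # -> [[0.5, 0.5], [0.75, 0.25]]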
# StanfordVL/OmniGibson/omnigibson/utils/lazy_import_utils.py
import importlib
from types import ModuleType
class LazyImporter(ModuleType):
"""Replace a module's global namespace with me to support lazy imports of submodules and members."""
def __init__(self, module_name, module):
super().__init__("lazy_" + module_name)
self._module_path = module_name
self._module = module
self._not_module = set()
self._submodules = {}
def __getattr__(self, name: str):
# First, try the argument as a module name.
if name not in self._not_module:
submodule = self._get_module(name)
if submodule:
return submodule
else:
# Record module not found so that we don't keep looking.
self._not_module.add(name)
# If it's not a module name, try it as a member of this module.
try:
return getattr(self._module, name)
        except AttributeError:
raise AttributeError(
f"module {self.__name__} has no attribute {name}"
) from None
def _get_module(self, module_name: str):
"""Recursively create and return a LazyImporter for the given module name."""
# Get the fully qualified module name by prepending self._module_path
if self._module_path:
module_name = f"{self._module_path}.{module_name}"
if module_name in self._submodules:
return self._submodules[module_name]
try:
wrapper = LazyImporter(module_name, importlib.import_module(module_name))
self._submodules[module_name] = wrapper
return wrapper
except ModuleNotFoundError:
            return None
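# Illustrative usage (added for exposition): wrap a package so that submodules and
# members resolve lazily on first attribute access. `os` is used here only because
# it ships with Python.
def _example_lazy_importer():
    lazy_os = LazyImporter("os", importlib.import_module("os"))
    joined = lazy_os.path.join("a", "b")  # os.path is imported and wrapped on first access
    sep = lazy_os.sep                     # non-module members fall through to the wrapped module
    return joined, sep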
# StanfordVL/OmniGibson/omnigibson/utils/sim_utils.py
import numpy as np
from collections import namedtuple
from collections.abc import Iterable
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils import python_utils
import omnigibson.utils.transform_utils as T
import omnigibson.lazy as lazy
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Raw Body Contact Information
# See https://docs.omniverse.nvidia.com/py/isaacsim/source/extensions/omni.isaac.contact_sensor/docs/index.html?highlight=contact%20sensor#omni.isaac.contact_sensor._contact_sensor.CsRawData for more info.
CsRawData = namedtuple("RawBodyData", ["time", "dt", "body0", "body1", "position", "normal", "impulse"])
def set_carb_setting(carb_settings, setting, value):
"""
Convenience function to set settings.
    Args:
        carb_settings (carb.settings.ISettings): Settings instance to update.
        setting (str): Name of setting to change.
        value (Any): New value for the setting.
Raises:
TypeError: If the type of value does not match setting type.
"""
if isinstance(value, str):
carb_settings.set_string(setting, value)
elif isinstance(value, bool):
carb_settings.set_bool(setting, value)
elif isinstance(value, int):
carb_settings.set_int(setting, value)
elif isinstance(value, float):
carb_settings.set_float(setting, value)
elif isinstance(value, Iterable) and not isinstance(value, dict):
if len(value) == 0:
raise TypeError(f"Array of type {type(value)} must be nonzero.")
if isinstance(value[0], str):
carb_settings.set_string_array(setting, value)
elif isinstance(value[0], bool):
carb_settings.set_bool_array(setting, value)
elif isinstance(value[0], int):
carb_settings.set_int_array(setting, value)
elif isinstance(value[0], float):
carb_settings.set_float_array(setting, value)
else:
raise TypeError(f"Value of type {type(value)} is not supported.")
else:
raise TypeError(f"Value of type {type(value)} is not supported.")
def check_deletable_prim(prim_path):
"""
Checks whether the prim defined at @prim_path can be deleted.
Args:
prim_path (str): Path defining which prim should be checked for deletion
Returns:
bool: Whether the prim can be deleted or not
"""
if not lazy.omni.isaac.core.utils.prims.is_prim_path_valid(prim_path):
return False
if lazy.omni.isaac.core.utils.prims.is_prim_no_delete(prim_path):
return False
if lazy.omni.isaac.core.utils.prims.is_prim_ancestral(prim_path):
return False
if lazy.omni.isaac.core.utils.prims.get_prim_type_name(prim_path=prim_path) == "PhysicsScene":
return False
if prim_path == "/World":
return False
if prim_path == "/":
return False
# Don't remove any /Render prims as that can cause crashes
if prim_path.startswith("/Render"):
return False
return True
def prims_to_rigid_prim_set(inp_prims):
"""
Converts prims @inp_prims into its corresponding set of rigid prims
Args:
inp_prims (list of RigidPrim or EntityPrim): Arbitrary prims
Returns:
set of RigidPrim: Aggregated set of RigidPrims from @inp_prims
"""
# Avoid circular imports
from omnigibson.prims.entity_prim import EntityPrim
from omnigibson.prims.rigid_prim import RigidPrim
out = set()
for prim in inp_prims:
if isinstance(prim, EntityPrim):
out.update({link for link in prim.links.values()})
elif isinstance(prim, RigidPrim):
out.add(prim)
else:
raise ValueError(f"Inputted prims must be either EntityPrim or RigidPrim instances "
f"when getting collisions! Type: {type(prim)}")
return out
def get_collisions(prims=None, prims_check=None, prims_exclude=None, step_physics=False):
"""
Grab collisions that occurred during the most recent physics timestep associated with prims @prims
Args:
prims (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): Prim(s) to check for collision.
If None, will check against all objects currently in the scene.
prims_check (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): If specified, will
only check for collisions with these specific prim(s)
prims_exclude (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): If specified, will
explicitly ignore any collisions with these specific prim(s)
step_physics (bool): Whether to step the physics first before checking collisions. Default is False
Returns:
set of 2-tuple: Unique collision pairs occurring in the simulation at the current timestep between the
specified prim(s), represented by their prim_paths
"""
# Make sure sim is playing
assert og.sim.is_playing(), "Cannot get collisions while sim is not playing!"
# Optionally step physics and then update contacts
if step_physics:
og.sim.step_physics()
# Standardize inputs
prims = og.sim.scene.objects if prims is None else prims if isinstance(prims, Iterable) else [prims]
prims_check = [] if prims_check is None else prims_check if isinstance(prims_check, Iterable) else [prims_check]
prims_exclude = [] if prims_exclude is None else prims_exclude if isinstance(prims_exclude, Iterable) else [prims_exclude]
# Convert into prim paths to check for collision
def get_paths_from_rigid_prims(inp_prims):
return {prim.prim_path for prim in inp_prims}
def get_contacts(inp_prims):
return {(c.body0, c.body1) for prim in inp_prims for c in prim.contact_list()}
rprims = prims_to_rigid_prim_set(prims)
rprims_check = prims_to_rigid_prim_set(prims_check)
rprims_exclude = prims_to_rigid_prim_set(prims_exclude)
paths = get_paths_from_rigid_prims(rprims)
paths_check = get_paths_from_rigid_prims(rprims_check)
paths_exclude = get_paths_from_rigid_prims(rprims_exclude)
# Run sanity checks
assert paths_check.isdisjoint(paths_exclude), \
f"Paths to check and paths to ignore collisions for should be mutually exclusive! " \
f"paths_check: {paths_check}, paths_exclude: {paths_exclude}"
# Determine whether we're checking / filtering any collision from collision set A
should_check_collisions = len(paths_check) > 0
should_filter_collisions = len(paths_exclude) > 0
# Get all collisions from the objects set
collisions = get_contacts(rprims)
# Only run the following (expensive) code if we are actively using filtering criteria
if should_check_collisions or should_filter_collisions:
# First filter out unnecessary collisions
if should_filter_collisions:
# First filter pass, remove the intersection of the main contacts and the contacts from the exclusion set minus
# the intersection between the exclusion and normal set
# This filters out any matching collisions in the exclusion set that are NOT an overlap
# between @rprims and @rprims_exclude
rprims_exclude_intersect = rprims_exclude.intersection(rprims)
exclude_disjoint_collisions = get_contacts(rprims_exclude - rprims_exclude_intersect)
collisions.difference_update(exclude_disjoint_collisions)
# Second filter pass, we remove collisions that may include self-collisions
# This is a bit more tricky because we need to actually look at the individual contact pairs to determine
# whether it's a collision (which may include a self-collision) that should be filtered
# We do this by grabbing the contacts of the intersection between the exclusion and normal rprims sets,
# and then making sure the resulting contact pair sets are completely disjoint from the paths intersection
exclude_intersect_collisions = get_contacts(rprims_exclude_intersect)
collisions.difference_update({pair for pair in exclude_intersect_collisions if paths.issuperset(set(pair))})
# Now, we additionally check for explicit collisions, filtering out any that do not meet this criteria
# This is essentially the inverse of the filter collision process, where we do two passes again, but for each
# case we look at the union rather than the subtraction of the two sets
if should_check_collisions:
# First check pass, keep the intersection of the main contacts and the contacts from the check set minus
# the intersection between the check and normal set
# This keeps any matching collisions in the check set that overlap between @rprims and @rprims_check
rprims_check_intersect = rprims_check.intersection(rprims)
check_disjoint_collisions = get_contacts(rprims_check - rprims_check_intersect)
valid_other_collisions = collisions.intersection(check_disjoint_collisions)
# Second check pass, we additionally keep collisions that may include self-collisions
# This is a bit more tricky because we need to actually look at the individual contact pairs to determine
# whether it's a collision (which may include a self-collision) that should be kept
# We do this by grabbing the contacts of the intersection between the check and normal rprims sets,
# and then making sure the resulting contact pair sets is strictly a subset of the original set
# Lastly, we only keep the intersection of this resulting set with the original collision set, so that
# any previously filtered collisions are respected
check_intersect_collisions = get_contacts(rprims_check_intersect)
valid_intersect_collisions = collisions.intersection({pair for pair in check_intersect_collisions if paths.issuperset(set(pair))})
# Collisions is union of valid other and valid self collisions
collisions = valid_other_collisions.union(valid_intersect_collisions)
# Only going into this if it is for logging --> efficiency
if gm.DEBUG:
for item in collisions:
log.debug("linkA:{}, linkB:{}".format(item[0], item[1]))
return collisions
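# Illustrative sketch (added for exposition) of the set operations used above, with plain
# strings standing in for prim paths and 2-tuples for contact pairs. All values are made up.
def _example_collision_filtering():
    collisions = {("robot/arm", "table/top"), ("robot/arm", "robot/base"), ("cup/body", "table/top")}
    paths = {"robot/arm", "robot/base"}
    # Analogue of the exclusion pass: drop pairs fully contained in @paths (self-collisions)
    collisions = {pair for pair in collisions if not paths.issuperset(set(pair))}
    # Analogue of the check pass: keep only pairs that touch a prim we explicitly care about
    paths_check = {"table/top"}
    collisions = {pair for pair in collisions if paths_check.intersection(set(pair))}
    return collisions  # -> {("robot/arm", "table/top"), ("cup/body", "table/top")}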
def check_collision(prims=None, prims_check=None, prims_exclude=None, step_physics=False):
"""
Checks if any valid collisions occurred during the most recent physics timestep associated with prims @prims
Args:
prims (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): Prim(s) to check for collision.
If None, will check against all objects currently in the scene.
prims_check (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): If specified, will
only check for collisions with these specific prim(s)
prims_exclude (None or EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): If specified, will
explicitly ignore any collisions with these specific prim(s)
step_physics (bool): Whether to step the physics first before checking collisions. Default is False
Returns:
bool: True if a valid collision has occurred, else False
"""
return len(get_collisions(
prims=prims,
prims_check=prims_check,
prims_exclude=prims_exclude,
step_physics=step_physics)) > 0
def filter_collisions(collisions, filter_prims):
"""
Filters collision pairs @collisions based on a set of prims @filter_prims.
Args:
collisions (set of 2-tuple): Collision pairs that should be filtered
filter_prims (EntityPrim or RigidPrim or tuple of EntityPrim or RigidPrim): Prim(s) specifying which
collisions to filter for. Any collisions that include prims from this filter
set will be removed
Returns:
set of 2-tuple: Filtered collision pairs
"""
paths = prims_to_rigid_prim_set(filter_prims)
filtered_collisions = set()
for pair in collisions:
if set(pair).isdisjoint(paths):
filtered_collisions.add(pair)
return filtered_collisions
def place_base_pose(obj, pos, quat=None, z_offset=None):
"""
Place the object so that its base (z-min) rests at the location of @pos
Args:
obj (BaseObject): Object to place in the environment
pos (3-array): Global (x,y,z) location to place the base of the robot
quat (None or 4-array): Optional (x,y,z,w) quaternion orientation when placing the object.
If None, the object's current orientation will be used
z_offset (None or float): Optional additional z_offset to apply
"""
# avoid circular dependency
from omnigibson.object_states import AABB
lower, _ = obj.states[AABB].get_value()
cur_pos = obj.get_position()
z_diff = cur_pos[2] - lower[2]
obj.set_position_orientation(pos + np.array([0, 0, z_diff if z_offset is None else z_diff + z_offset]), quat)
def test_valid_pose(obj, pos, quat=None, z_offset=None):
"""
Test if the object can be placed with no collision.
Args:
obj (BaseObject): Object to place in the environment
pos (3-array): Global (x,y,z) location to place the object
quat (None or 4-array): Optional (x,y,z,w) quaternion orientation when placing the object.
If None, the object's current orientation will be used
z_offset (None or float): Optional additional z_offset to apply
Returns:
bool: Whether the placed object position is valid
"""
# Make sure sim is playing
assert og.sim.is_playing(), "Cannot test valid pose while sim is not playing!"
# Store state before checking object position
state = og.sim.scene.dump_state(serialized=False)
# Set the pose of the object
place_base_pose(obj, pos, quat, z_offset)
obj.keep_still()
# Check whether we're in collision after taking a single physics step
in_collision = check_collision(prims=obj, step_physics=True)
# Restore state after checking the collision
og.sim.load_state(state, serialized=False)
# Valid if there are no collisions
return not in_collision
def land_object(obj, pos, quat=None, z_offset=None):
"""
Land the object at the specified position @pos, given a valid position and orientation.
Args:
obj (BaseObject): Object to place in the environment
pos (3-array): Global (x,y,z) location to place the object
quat (None or 4-array): Optional (x,y,z,w) quaternion orientation when placing the object.
If None, a random orientation about the z-axis will be sampled
z_offset (None or float): Optional additional z_offset to apply
"""
# Make sure sim is playing
assert og.sim.is_playing(), "Cannot land object while sim is not playing!"
# Set the object's pose
quat = T.euler2quat([0, 0, np.random.uniform(0, np.pi * 2)]) if quat is None else quat
place_base_pose(obj, pos, quat, z_offset)
obj.keep_still()
# Check to make sure we landed successfully
# land for maximum 1 second, should fall down ~5 meters
land_success = False
max_simulator_step = int(1.0 / og.sim.get_rendering_dt())
for _ in range(max_simulator_step):
# Run a sim step and see if we have any contacts
og.sim.step()
land_success = check_collision(prims=obj)
if land_success:
# Once we're successful, we can break immediately
log.info(f"Landed object {obj.name} successfully!")
break
# Print out warning in case we failed to land the object successfully
if not land_success:
log.warning(f"Object {obj.name} failed to land.")
obj.keep_still()
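# Illustrative usage (added for exposition): a hypothetical placement flow, assuming a
# playing OmniGibson simulation and an already-imported BaseObject `obj`. The target
# location is arbitrary.
def _example_place_and_land(obj):
    target = np.array([0.5, 0.5, 0.0])
    if test_valid_pose(obj, target):
        # quat=None samples a random yaw, then the object settles under physics
        land_object(obj, target)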
def meets_minimum_isaac_version(minimum_version):
return python_utils.meets_minimum_version(lazy.omni.isaac.version.get_version()[0], minimum_version)
# StanfordVL/OmniGibson/omnigibson/utils/motion_planning_utils.py
import numpy as np
from math import ceil
import heapq
import omnigibson as og
from omnigibson.macros import create_module_macros
from omnigibson.object_states import ContactBodies
import omnigibson.utils.transform_utils as T
from omnigibson.utils.control_utils import IKSolver
import omnigibson.lazy as lazy
m = create_module_macros(module_path=__file__)
m.ANGLE_DIFF = 0.3
m.DIST_DIFF = 0.1
def _wrap_angle(theta):
""""
Converts an angle to the range [-pi, pi).
Args:
theta (float): angle in radians
Returns:
float: angle in radians in range [-pi, pi)
"""
return (theta + np.pi) % (2 * np.pi) - np.pi
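# Illustrative check (added for exposition) of the wrapping behavior: 3*pi/2 wraps to
# -pi/2, and pi itself maps to -pi because the output range is half-open.
def _example_wrap_angle():
    assert np.isclose(_wrap_angle(3 * np.pi / 2), -np.pi / 2)
    assert np.isclose(_wrap_angle(np.pi), -np.pi)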
def plan_base_motion(
robot,
end_conf,
context,
planning_time=15.0,
):
"""
Plans a base motion to a 2d pose
Args:
        robot (BaseRobot): Robot object to plan for
end_conf (Iterable): [x, y, yaw] 2d pose to plan to
context (PlanningContext): Context to plan in that includes the robot copy
planning_time (float): Time to plan for
Returns:
Array of arrays: Array of 2d poses that the robot should navigate to
"""
from ompl import base as ob
from ompl import geometric as ompl_geo
class CustomMotionValidator(ob.MotionValidator):
def __init__(self, si, space):
super(CustomMotionValidator, self).__init__(si)
self.si = si
self.space = space
def checkMotion(self, s1, s2):
if not self.si.isValid(s2):
return False
start = np.array([s1.getX(), s1.getY(), s1.getYaw()])
goal = np.array([s2.getX(), s2.getY(), s2.getYaw()])
segment_theta = self.get_angle_between_poses(start, goal)
# Start rotation
if not self.is_valid_rotation(self.si, start, segment_theta):
return False
# Navigation
dist = np.linalg.norm(goal[:2] - start[:2])
num_points = ceil(dist / m.DIST_DIFF) + 1
nav_x = np.linspace(start[0], goal[0], num_points).tolist()
nav_y = np.linspace(start[1], goal[1], num_points).tolist()
for i in range(num_points):
                state = create_state(self.si.getStateSpace(), nav_x[i], nav_y[i], segment_theta)
if not self.si.isValid(state()):
return False
# Goal rotation
if not self.is_valid_rotation(self.si, [goal[0], goal[1], segment_theta], goal[2]):
return False
return True
@staticmethod
def is_valid_rotation(si, start_conf, final_orientation):
diff = _wrap_angle(final_orientation - start_conf[2])
direction = np.sign(diff)
diff = abs(diff)
num_points = ceil(diff / m.ANGLE_DIFF) + 1
nav_angle = np.linspace(0.0, diff, num_points) * direction
angles = nav_angle + start_conf[2]
for i in range(num_points):
state = create_state(si.getStateSpace(), start_conf[0], start_conf[1], angles[i])
if not si.isValid(state()):
return False
return True
        @staticmethod
        def get_angle_between_poses(p1, p2):
            # Angle of the direction of travel from 2d pose p1 to 2d pose p2
            return np.arctan2(p2[1] - p1[1], p2[0] - p1[0])
def create_state(space, x, y, yaw):
x = float(x)
y = float(y)
yaw = float(yaw)
state = ob.State(space)
state().setX(x)
state().setY(y)
state().setYaw(_wrap_angle(yaw))
return state
def state_valid_fn(q):
x = q.getX()
y = q.getY()
yaw = q.getYaw()
pose = ([x, y, 0.0], T.euler2quat((0, 0, yaw)))
return not set_base_and_detect_collision(context, pose)
def remove_unnecessary_rotations(path):
"""
        Removes unnecessary rotations from a path when possible, so that the yaw of each pose in the
        path points in the direction of the position of the next pose in the path
Args:
path (Array of arrays): Array of 2d poses
Returns:
Array of numpy arrays: Array of 2d poses with unnecessary rotations removed
"""
# Start at the same starting pose
new_path = [path[0]]
# Process every intermediate waypoint
for i in range(1, len(path) - 1):
# compute the yaw you'd be at when arriving into path[i] and departing from it
arriving_yaw = CustomMotionValidator.get_angle_between_poses(path[i-1], path[i])
departing_yaw = CustomMotionValidator.get_angle_between_poses(path[i], path[i+1])
# check if you are able to make that rotation directly.
arriving_state = (path[i][0], path[i][1], arriving_yaw)
if CustomMotionValidator.is_valid_rotation(si, arriving_state, departing_yaw):
# Then use the arriving yaw directly
new_path.append(arriving_state)
else:
# Otherwise, keep the waypoint
new_path.append(path[i])
# Don't forget to add back the same ending pose
new_path.append(path[-1])
return new_path
pos = robot.get_position()
yaw = T.quat2euler(robot.get_orientation())[2]
start_conf = (pos[0], pos[1], yaw)
# create an SE(2) state space
space = ob.SE2StateSpace()
# set lower and upper bounds
bbox_vals = []
for floor in filter(lambda o: o.category == "floors", og.sim.scene.objects):
bbox_vals += floor.aabb[0][:2].tolist()
bbox_vals += floor.aabb[1][:2].tolist()
bounds = ob.RealVectorBounds(2)
bounds.setLow(min(bbox_vals))
bounds.setHigh(max(bbox_vals))
space.setBounds(bounds)
# create a simple setup object
ss = ompl_geo.SimpleSetup(space)
ss.setStateValidityChecker(ob.StateValidityCheckerFn(state_valid_fn))
si = ss.getSpaceInformation()
si.setMotionValidator(CustomMotionValidator(si, space))
# TODO: Try changing to RRTConnect in the future. Currently using RRT because movement is not direction invariant. Can change to RRTConnect
# possibly if hasSymmetricInterpolate is set to False for the state space. Doc here https://ompl.kavrakilab.org/classompl_1_1base_1_1StateSpace.html
planner = ompl_geo.RRT(si)
ss.setPlanner(planner)
    start = create_state(space, start_conf[0], start_conf[1], start_conf[2])
    goal = create_state(space, end_conf[0], end_conf[1], end_conf[2])
ss.setStartAndGoalStates(start, goal)
if not state_valid_fn(start()) or not state_valid_fn(goal()):
return
solved = ss.solve(planning_time)
if solved:
# try to shorten the path
ss.simplifySolution()
sol_path = ss.getSolutionPath()
return_path = []
for i in range(sol_path.getStateCount()):
x = sol_path.getState(i).getX()
y = sol_path.getState(i).getY()
yaw = sol_path.getState(i).getYaw()
return_path.append([x, y, yaw])
return remove_unnecessary_rotations(return_path)
return None
def plan_arm_motion(
robot,
end_conf,
context,
planning_time=15.0,
torso_fixed=True,
):
"""
Plans an arm motion to a final joint position
Args:
robot (BaseRobot): Robot object to plan for
end_conf (Iterable): Final joint position to plan to
context (PlanningContext): Context to plan in that includes the robot copy
planning_time (float): Time to plan for
Returns:
Array of arrays: Array of joint positions that the robot should navigate to
"""
from ompl import base as ob
from ompl import geometric as ompl_geo
if torso_fixed:
joint_control_idx = robot.arm_control_idx[robot.default_arm]
dim = len(joint_control_idx)
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
else:
joint_control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
dim = len(joint_control_idx)
if "combined" in robot.robot_arm_descriptor_yamls:
joint_combined_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
initial_joint_pos = np.array(robot.get_joint_positions()[joint_combined_idx])
control_idx_in_joint_pos = np.where(np.in1d(joint_combined_idx, joint_control_idx))[0]
else:
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
    def state_valid_fn(q):
        # Copy so repeated validity checks don't mutate the shared initial configuration
        joint_pos = initial_joint_pos.copy()
        joint_pos[control_idx_in_joint_pos] = [q[i] for i in range(dim)]
return not set_arm_and_detect_collision(context, joint_pos)
    # create a real vector state space over the controlled arm joints
space = ob.RealVectorStateSpace(dim)
# set lower and upper bounds
bounds = ob.RealVectorBounds(dim)
joints = np.array([joint for joint in robot.joints.values()])
arm_joints = joints[joint_control_idx]
for i, joint in enumerate(arm_joints):
if end_conf[i] > joint.upper_limit:
end_conf[i] = joint.upper_limit
if end_conf[i] < joint.lower_limit:
end_conf[i] = joint.lower_limit
bounds.setLow(i, float(joint.lower_limit))
bounds.setHigh(i, float(joint.upper_limit))
space.setBounds(bounds)
# create a simple setup object
ss = ompl_geo.SimpleSetup(space)
ss.setStateValidityChecker(ob.StateValidityCheckerFn(state_valid_fn))
si = ss.getSpaceInformation()
planner = ompl_geo.BITstar(si)
ss.setPlanner(planner)
start_conf = robot.get_joint_positions()[joint_control_idx]
start = ob.State(space)
for i in range(dim):
start[i] = float(start_conf[i])
goal = ob.State(space)
for i in range(dim):
goal[i] = float(end_conf[i])
ss.setStartAndGoalStates(start, goal)
if not state_valid_fn(start) or not state_valid_fn(goal):
return
# this will automatically choose a default planner with
# default parameters
solved = ss.solve(planning_time)
if solved:
# try to shorten the path
# ss.simplifySolution()
sol_path = ss.getSolutionPath()
return_path = []
for i in range(sol_path.getStateCount()):
joint_pos = [sol_path.getState(i)[j] for j in range(dim)]
return_path.append(joint_pos)
return return_path
return None
def plan_arm_motion_ik(
robot,
end_conf,
context,
planning_time=15.0,
torso_fixed=True,
):
"""
Plans an arm motion to a final end effector pose
Args:
robot (BaseRobot): Robot object to plan for
end_conf (Iterable): Final end effector pose to plan to
context (PlanningContext): Context to plan in that includes the robot copy
planning_time (float): Time to plan for
Returns:
Array of arrays: Array of end effector pose that the robot should navigate to
"""
from ompl import base as ob
from ompl import geometric as ompl_geo
DOF = 6
if torso_fixed:
joint_control_idx = robot.arm_control_idx[robot.default_arm]
dim = len(joint_control_idx)
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
robot_description_path = robot.robot_arm_descriptor_yamls["left_fixed"]
else:
joint_control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
dim = len(joint_control_idx)
if "combined" in robot.robot_arm_descriptor_yamls:
joint_combined_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx["combined"]])
initial_joint_pos = np.array(robot.get_joint_positions()[joint_combined_idx])
control_idx_in_joint_pos = np.where(np.in1d(joint_combined_idx, joint_control_idx))[0]
else:
initial_joint_pos = np.array(robot.get_joint_positions()[joint_control_idx])
control_idx_in_joint_pos = np.arange(dim)
robot_description_path = robot.robot_arm_descriptor_yamls[robot.default_arm]
ik_solver = IKSolver(
robot_description_path=robot_description_path,
robot_urdf_path=robot.urdf_path,
reset_joint_pos=robot.reset_joint_pos[joint_control_idx],
eef_name=robot.eef_link_names[robot.default_arm],
)
    def state_valid_fn(q):
        # Copy so repeated validity checks don't mutate the shared initial configuration
        joint_pos = initial_joint_pos.copy()
        eef_pose = [q[i] for i in range(DOF)]
control_joint_pos = ik_solver.solve(
target_pos=eef_pose[:3],
target_quat=T.axisangle2quat(eef_pose[3:]),
max_iterations=1000,
)
if control_joint_pos is None:
return False
joint_pos[control_idx_in_joint_pos] = control_joint_pos
return not set_arm_and_detect_collision(context, joint_pos)
    # create a real vector state space over the 6-DOF end effector pose
space = ob.RealVectorStateSpace(DOF)
# set lower and upper bounds for eef position
bounds = ob.RealVectorBounds(DOF)
EEF_X_LIM = [-0.8, 0.8]
EEF_Y_LIM = [-0.8, 0.8]
EEF_Z_LIM = [-2.0, 2.0]
bounds.setLow(0, EEF_X_LIM[0])
bounds.setHigh(0, EEF_X_LIM[1])
bounds.setLow(1, EEF_Y_LIM[0])
bounds.setHigh(1, EEF_Y_LIM[1])
bounds.setLow(2, EEF_Z_LIM[0])
bounds.setHigh(2, EEF_Z_LIM[1])
    # set lower and upper bounds for eef orientation (axis-angle bounds)
for i in range(3, 6):
bounds.setLow(i, -np.pi)
bounds.setHigh(i, np.pi)
space.setBounds(bounds)
# create a simple setup object
ss = ompl_geo.SimpleSetup(space)
ss.setStateValidityChecker(ob.StateValidityCheckerFn(state_valid_fn))
si = ss.getSpaceInformation()
planner = ompl_geo.BITstar(si)
ss.setPlanner(planner)
    # Start from the current end effector pose (position + axis-angle orientation) in the robot frame
    start_conf = np.append(robot.get_relative_eef_position(), T.quat2axisangle(robot.get_relative_eef_orientation()))
start = ob.State(space)
for i in range(DOF):
start[i] = float(start_conf[i])
goal = ob.State(space)
for i in range(DOF):
goal[i] = float(end_conf[i])
ss.setStartAndGoalStates(start, goal)
if not state_valid_fn(start) or not state_valid_fn(goal):
return
# this will automatically choose a default planner with
# default parameters
solved = ss.solve(planning_time)
if solved:
# try to shorten the path
# ss.simplifySolution()
sol_path = ss.getSolutionPath()
return_path = []
for i in range(sol_path.getStateCount()):
eef_pose = [sol_path.getState(i)[j] for j in range(DOF)]
return_path.append(eef_pose)
return return_path
return None
def set_base_and_detect_collision(context, pose):
"""
Moves the robot and detects robot collisions with the environment and itself
Args:
context (PlanningContext): Context to plan in that includes the robot copy
pose (Array): Pose in the world frame to check for collisions at
Returns:
bool: Whether the robot is in collision
"""
robot_copy = context.robot_copy
robot_copy_type = context.robot_copy_type
translation = lazy.pxr.Gf.Vec3d(*np.array(pose[0], dtype=float))
robot_copy.prims[robot_copy_type].GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(pose[1], dtype=float)[[3, 0, 1, 2]]
robot_copy.prims[robot_copy_type].GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
return detect_robot_collision(context)
def set_arm_and_detect_collision(context, joint_pos):
"""
Sets joint positions of the robot and detects robot collisions with the environment and itself
Args:
context (PlanningContext): Context to plan in that includes the robot copy
joint_pos (Array): Joint positions to set the robot to
Returns:
bool: Whether the robot is in a valid state i.e. not in collision
"""
robot_copy = context.robot_copy
robot_copy_type = context.robot_copy_type
arm_links = context.robot.manipulation_link_names
link_poses = context.fk_solver.get_link_poses(joint_pos, arm_links)
for link in arm_links:
pose = link_poses[link]
if link in robot_copy.meshes[robot_copy_type].keys():
for mesh_name, mesh in robot_copy.meshes[robot_copy_type][link].items():
relative_pose = robot_copy.relative_poses[robot_copy_type][link][mesh_name]
mesh_pose = T.pose_transform(*pose, *relative_pose)
translation = lazy.pxr.Gf.Vec3d(*np.array(mesh_pose[0], dtype=float))
mesh.GetAttribute("xformOp:translate").Set(translation)
orientation = np.array(mesh_pose[1], dtype=float)[[3, 0, 1, 2]]
mesh.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
return detect_robot_collision(context)
def detect_robot_collision(context):
"""
Detects robot collisions
Args:
context (PlanningContext): Context to plan in that includes the robot copy
Returns:
bool: Whether the robot is in collision
"""
robot_copy = context.robot_copy
robot_copy_type = context.robot_copy_type
# Define function for checking overlap
valid_hit = False
mesh_path = None
def overlap_callback(hit):
nonlocal valid_hit
valid_hit = hit.rigid_body not in context.disabled_collision_pairs_dict[mesh_path]
return not valid_hit
for meshes in robot_copy.meshes[robot_copy_type].values():
for mesh in meshes.values():
if valid_hit:
return valid_hit
mesh_path = mesh.GetPrimPath().pathString
mesh_id = lazy.pxr.PhysicsSchemaTools.encodeSdfPath(mesh_path)
if mesh.GetTypeName() == "Mesh":
og.sim.psqi.overlap_mesh(*mesh_id, reportFn=overlap_callback)
else:
og.sim.psqi.overlap_shape(*mesh_id, reportFn=overlap_callback)
return valid_hit
def detect_robot_collision_in_sim(robot, filter_objs=None, ignore_obj_in_hand=True):
"""
Detects robot collisions with the environment, but not with itself using the ContactBodies API
Args:
robot (BaseRobot): Robot object to detect collisions for
        filter_objs (None or list of StatefulObject): Objects to ignore collisions with
ignore_obj_in_hand (bool): Whether to ignore collisions with the object in the robot's hand
Returns:
bool: Whether the robot is in collision
"""
filter_categories = ["floors"]
obj_in_hand = robot._ag_obj_in_hand[robot.default_arm]
if obj_in_hand is not None and ignore_obj_in_hand:
filter_objs.append(obj_in_hand)
    collision_prims = list(robot.states[ContactBodies].get_value(ignore_objs=tuple(filter_objs)))
    # Build a filtered list instead of removing elements from collision_prims while iterating
    # over it, which would skip entries
    valid_collision_prims = []
    for col_prim in collision_prims:
        tokens = col_prim.prim_path.split("/")
        obj_prim_path = "/".join(tokens[:-1])
        col_obj = og.sim.scene.object_registry("prim_path", obj_prim_path)
        if col_obj.category not in filter_categories:
            valid_collision_prims.append(col_prim)
    return len(valid_collision_prims) > 0
def astar(search_map, start, goal, eight_connected=True):
"""
A* search algorithm for finding a path from start to goal on a grid map
Args:
search_map (Array): 2D Grid map to search on
start (Array): Start position on the map
goal (Array): Goal position on the map
eight_connected (bool): Whether we consider the sides and diagonals of a cell as neighbors or just the sides
Returns:
2D numpy array or None: Array of shape (N, 2) where N is the number of steps in the path.
Each row represents the (x, y) coordinates of a step on the path.
If no path is found, returns None.
"""
def heuristic(node):
# Calculate the Euclidean distance from node to goal
return np.sqrt((node[0] - goal[0])**2 + (node[1] - goal[1])**2)
def get_neighbors(cell):
if eight_connected:
# 8-connected grid
return [(cell[0] + 1, cell[1]), (cell[0] - 1, cell[1]), (cell[0], cell[1] + 1), (cell[0], cell[1] - 1),
(cell[0] + 1, cell[1] + 1), (cell[0] - 1, cell[1] - 1), (cell[0] + 1, cell[1] - 1), (cell[0] - 1, cell[1] + 1)]
else:
# 4-connected grid
return [(cell[0] + 1, cell[1]), (cell[0] - 1, cell[1]), (cell[0], cell[1] + 1), (cell[0], cell[1] - 1)]
def is_valid(cell):
# Check if cell is within the map and traversable
return (0 <= cell[0] < search_map.shape[0] and
0 <= cell[1] < search_map.shape[1] and
search_map[cell] != 0)
def cost(cell1, cell2):
# Define the cost of moving from cell1 to cell2
# Return 1 for adjacent cells and square root of 2 for diagonal cells in an 8-connected grid.
if cell1[0] == cell2[0] or cell1[1] == cell2[1]:
return 1
else:
return np.sqrt(2)
open_set = [(0, start)]
came_from = {}
visited = set()
g_score = {cell: float('inf') for cell in np.ndindex(search_map.shape)}
g_score[start] = 0
while open_set:
_, current = heapq.heappop(open_set)
visited.add(current)
if current == goal:
# Reconstruct path
path = []
while current in came_from:
path.insert(0, current)
current = came_from[current]
path.insert(0, start)
return np.array(path)
for neighbor in get_neighbors(current):
# Skip neighbors that are not valid or have already been visited
if not is_valid(neighbor) or neighbor in visited:
continue
tentative_g_score = g_score[current] + cost(current, neighbor)
if tentative_g_score < g_score[neighbor]:
came_from[neighbor] = current
g_score[neighbor] = tentative_g_score
f_score = tentative_g_score + heuristic(neighbor)
heapq.heappush(open_set, (f_score, neighbor))
# Return None if no path is found
return None
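# Illustrative usage (added for exposition): plan across a small synthetic grid where
# nonzero cells are traversable. The map and endpoints are made up.
def _example_astar():
    grid = np.ones((5, 5), dtype=int)
    grid[1:4, 2] = 0  # a vertical wall, passable only through the top and bottom rows
    path = astar(grid, start=(0, 0), goal=(4, 4))
    # path is an (N, 2) array of grid coordinates from (0, 0) to (4, 4), or None if blocked
    return path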
# StanfordVL/OmniGibson/omnigibson/utils/registry_utils.py
"""
A set of utility functions for registering and tracking objects
"""
from inspect import isclass
import numpy as np
from collections.abc import Iterable
from omnigibson.macros import create_module_macros
from omnigibson.utils.python_utils import Serializable, SerializableNonInstance, UniquelyNamed
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Token identifier for default values if a key doesn't exist in a given object
m.DOES_NOT_EXIST = "DOES_NOT_EXIST"
class Registry(UniquelyNamed):
"""
Simple class for easily registering and tracking arbitrary objects of the same (or very similar) class types.
Elements added are automatically organized by attributes specified by @unique_keys and @group_keys, and
can be accessed at runtime by specifying the desired key and indexing value to grab the object(s).
Default_key is a 1-to-1 mapping: i.e.: a single indexing value will return a single object.
default: "name" -- indexing by object.name (i.e.: every object's name should be unique)
Unique_keys are other 1-to-1 mappings: i.e.: a single indexing value will return a single object.
example: indexing by object.name (every object's name should be unique)
Group_keys are 1-to-many mappings: i.e.: a single indexing value will return a set of objects.
example: indexing by object.in_rooms (many objects can be in a single room)
Note that if a object's attribute is an array of values, then it will be stored under ALL of its values.
example: object.in_rooms = ["kitchen", "living_room"], indexing by in_rooms with a value of either kitchen OR
living room will return this object as part of its set!
You can also easily check for membership in this registry, via either the object's name OR the object itself,
e.g.:
> object.name in registry
> object in registry
If the latter, note that default_key attribute will automatically be used to search for the object
"""
def __init__(
self,
name,
class_types=object,
default_key="name",
unique_keys=None,
group_keys=None,
default_value=m.DOES_NOT_EXIST,
):
"""
Args:
name (str): name of this registry
class_types (class or list of class): class expected for all entries in this registry. Default is `object`,
meaning any object entered will be accepted. This is used to sanity check added entries using add()
to make sure their type is correct (either that the entry itself is a valid class, or that they are an
object of the valid class). Note that if a list of classes are passed, any one of the classes are
considered a valid type for added objects
default_key (str): default key by which to reference a given object. This key should be a
publically accessible attribute in a given object (e.g.: object.name) and uniquely identify
any entries
unique_keys (None or list of str): keys by which to reference a given object. Any key should be a
publically accessible attribute in a given object (e.g.: object.name)
i.e.: these keys should map to a single object
group_keys (None or list of str): keys by which to reference a group of objects, based on the key
(e.g.: object.room)
i.e.: these keys can map to multiple objects
e.g.: default is "name" key only, so we will store objects by their object.name attribute
default_value (any): Default value to use if the attribute @key does not exist in the object
"""
self._name = name
self.class_types = class_types if isinstance(class_types, Iterable) else [class_types]
self.default_key = default_key
self.unique_keys = set([] if unique_keys is None else unique_keys)
self.group_keys = set([] if group_keys is None else group_keys)
self.default_value = default_value
# We always add in the "name" attribute as well
self.unique_keys.add(self.default_key)
# Make sure there's no overlap between the unique and group keys
assert len(self.unique_keys.intersection(self.group_keys)) == 0,\
f"Cannot create registry with unique and group object keys that are the same! " \
f"Unique keys: {self.unique_keys}, group keys: {self.group_keys}"
# Create the dicts programmatically
for k in self.unique_keys.union(self.group_keys):
self.__setattr__(f"_objects_by_{k}", dict())
# Run super init
super().__init__()
@property
def name(self):
return self._name
def add(self, obj):
"""
Adds Instance @obj to this registry
Args:
obj (any): Instance to add to this registry
"""
        # Make sure that obj is of the correct class type. Guard issubclass with isclass, since
        # issubclass raises a TypeError when its first argument is not a class
        assert any(isinstance(obj, class_type) or (isclass(obj) and issubclass(obj, class_type))
                   for class_type in self.class_types), \
            f"Added object must be either an instance or subclass of one of the following classes: {self.class_types}!"
self._add(obj=obj, keys=self.all_keys)
def _add(self, obj, keys=None):
"""
Same as self.add, but allows for selective @keys for adding this object to. Useful for internal things,
such as internal updating of mappings
Args:
obj (any): Instance to add to this registry
keys (None or set or list of str): Which object keys to use for adding the object to mappings.
None is default, which corresponds to all keys
"""
keys = self.all_keys if keys is None else keys
for k in keys:
obj_attr = self._get_obj_attr(obj=obj, attr=k)
# Standardize input as a list
obj_attr = obj_attr if \
isinstance(obj_attr, Iterable) and not isinstance(obj_attr, str) else [obj_attr]
# Loop over all values in this attribute and add to all mappings
for attr in obj_attr:
mapping = self.get_dict(k)
if k in self.unique_keys:
# Handle unique case
if attr in mapping:
log.warning(f"Instance identifier '{k}' should be unique for adding to this registry mapping! Existing {k}: {attr}")
# Special case for "name" attribute, which should ALWAYS be unique
assert k != "name", "For name attribute, objects MUST be unique."
mapping[attr] = obj
else:
# Not unique case
# Possibly initialize list
if attr not in mapping:
mapping[attr] = set()
mapping[attr].add(obj)
def remove(self, obj):
"""
        Removes object @obj from this registry
Args:
obj (any): Instance to remove from this registry
"""
# Iterate over all keys
for k in self.all_keys:
# Grab the attribute from the object
obj_attr = self._get_obj_attr(obj=obj, attr=k)
# Standardize input as a list
obj_attr = obj_attr if \
isinstance(obj_attr, Iterable) and not isinstance(obj_attr, str) else [obj_attr]
# Loop over all values in this attribute and remove them from all mappings
for attr in obj_attr:
mapping = self.get_dict(k)
if k in self.unique_keys:
# Handle unique case -- in this case, we just directly pop the value from the dictionary
mapping.pop(attr)
else:
# Not unique case
# We remove a value from the resulting set
mapping[attr].remove(obj)
def clear(self):
"""
Removes all owned objects from this registry
"""
# Re-create the owned dicts programmatically
for k in self.unique_keys.union(self.group_keys):
self.__setattr__(f"_objects_by_{k}", dict())
def update(self, keys=None):
"""
Updates this registry, refreshing all internal mappings in case an object's value was updated
Args:
keys (None or str or set or list of str): Which object keys to update. None is default, which corresponds
to all keys
"""
objects = self.objects
keys = self.all_keys if keys is None else \
(keys if type(keys) in {tuple, list} else [keys])
# Delete and re-create all keys mappings
for k in keys:
self.__delattr__(f"_objects_by_{k}")
self.__setattr__(f"_objects_by_{k}", dict())
# Iterate over all objects and re-populate the mappings
for obj in objects:
self._add(obj=obj, keys=[k])
def object_is_registered(self, obj):
"""
        Check if a given object @obj is registered
Args:
obj (any): Instance to check if it is internally registered
"""
return obj in self.objects
def get_dict(self, key):
"""
Specific mapping dictionary within this registry corresponding to the mappings of @key.
e.g.: if key = "name", this will return the dictionary mapping object.name to objects
Args:
key (str): Key with which to grab mapping dict from
Returns:
dict: Mapping from identifiers to object(s) based on @key
"""
return getattr(self, f"_objects_by_{key}")
def get_ids(self, key):
"""
All identifiers within this registry corresponding to the mappings of @key.
e.g.: if key = "name", this will return all "names" stored internally that index into a object
Args:
key (str): Key with which to grab all identifiers from
Returns:
set: All identifiers within this registry corresponding to the mappings of @key.
"""
return set(self.get_dict(key=key).keys())
def _get_obj_attr(self, obj, attr):
"""
Grabs object's @obj's attribute @attr. Additionally checks to see if @obj is a class or a class instance, and
uses the correct logic
Args:
obj (any): Object to grab attribute from
attr (str): String name of the attribute to grab
Return:
any: Attribute @k of @obj
"""
# We try to grab the object's attribute, and if it fails we fallback to the default value
try:
val = getattr(obj, attr)
        except AttributeError:
val = self.default_value
return val
@property
def objects(self):
"""
Get the objects in this registry
Returns:
list of any: Instances owned by this registry
"""
return list(self.get_dict(self.default_key).values())
@property
def all_keys(self):
"""
Returns:
set of str: All object keys that are valid identification methods to index object(s)
"""
return self.unique_keys.union(self.group_keys)
def __call__(self, key, value, default_val=None):
"""
Grab the object in this registry based on @key and @value
Args:
key (str): What identification type to use to grab the requested object(s).
Should be one of @self.all_keys.
value (any): Value to grab. Should be the value of your requested object.<key> attribute
default_val (any): Default value to return if @value is not found
Returns:
any or set of any: requested unique object if @key is one of unique_keys, else a set if
@key is one of group_keys
"""
assert key in self.all_keys,\
f"Invalid key requested! Valid options are: {self.all_keys}, got: {key}"
return self.get_dict(key).get(value, default_val)
def __contains__(self, obj):
# Instance can be either a string (default key) OR the object itself
if isinstance(obj, str):
obj = self(self.default_key, obj)
return self.object_is_registered(obj=obj)
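# Illustrative usage (added for exposition): a toy registry over a made-up Widget class,
# indexing uniquely by name and grouping by room.
def _example_registry():
    class Widget:
        def __init__(self, name, room):
            self.name = name
            self.room = room
    registry = Registry(name="widget_registry", class_types=Widget, group_keys=["room"])
    desk, chair = Widget("desk0", "office"), Widget("chair0", "office")
    registry.add(desk)
    registry.add(chair)
    assert registry("name", "desk0") is desk            # unique key -> a single object
    assert registry("room", "office") == {desk, chair}  # group key -> a set of objects
    assert "desk0" in registry and chair in registry
    return registry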
class SerializableRegistry(Registry, Serializable):
"""
Registry that is serializable, i.e.: entries contain states that can themselves be serialized /deserialized.
Note that this assumes that any objects added to this registry are themselves of @Serializable type!
"""
def add(self, obj):
# In addition to any other class types, we make sure that the object is a serializable instance / class
validate_class = issubclass if isclass(obj) else isinstance
assert any([validate_class(obj, class_type) for class_type in (Serializable, SerializableNonInstance)]), \
f"Added object must be either an instance or subclass of Serializable or SerializableNonInstance!"
# Run super like normal
super().add(obj=obj)
@property
def state_size(self):
return sum(obj.state_size for obj in self.objects)
def _dump_state(self):
# Iterate over all objects and grab their states
state = dict()
for obj in self.objects:
state[obj.name] = obj.dump_state(serialized=False)
return state
def _load_state(self, state):
# Iterate over all objects and load their states. Currently the objects and the state don't have to match, i.e.
# there might be objects in the scene that do not appear in the state dict (a warning will be printed), or
# the state might contain additional information about objects that are NOT in the scene. For both cases, state
# loading will be skipped.
for obj in self.objects:
if obj.name not in state:
log.warning(f"Object '{obj.name}' is not in the state dict to load from. Skip loading its state.")
continue
obj.load_state(state[obj.name], serialized=False)
def _serialize(self, state):
# Iterate over the entire dict and flatten
return np.concatenate([obj.serialize(state[obj.name]) for obj in self.objects]) if \
len(self.objects) > 0 else np.array([])
def _deserialize(self, state):
state_dict = dict()
# Iterate over all the objects and deserialize their individual states, incrementing the index counter
# along the way
idx = 0
for obj in self.objects:
log.debug(f"obj: {obj.name}, state size: {obj.state_size}, idx: {idx}, passing in state length: {len(state[idx:])}")
# We pass in the entire remaining state vector, assuming the object only parses the relevant states
# at the beginning
state_dict[obj.name] = obj.deserialize(state[idx:])
idx += obj.state_size
return state_dict, idx
# StanfordVL/OmniGibson/omnigibson/utils/git_utils.py
from pathlib import Path
import bddl
import git
import omnigibson as og
def git_info(directory):
repo = git.Repo(directory)
try:
branch_name = repo.active_branch.name
except TypeError:
branch_name = "[DETACHED]"
return {
"directory": str(directory),
"code_diff": repo.git.diff(None),
"code_diff_staged": repo.git.diff("--staged"),
"commit_hash": repo.head.commit.hexsha,
"branch_name": branch_name,
}
def project_git_info():
return {
"OmniGibson": git_info(Path(og.root_path).parent),
"bddl": git_info(Path(bddl.__file__).parent.parent),
}
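# Illustrative usage (sketch): snapshot repo state when logging an experiment, e.g.
#   info = project_git_info()
#   print(info["OmniGibson"]["commit_hash"], info["OmniGibson"]["branch_name"])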
| 646 | Python | 21.310344 | 60 | 0.605263 |
StanfordVL/OmniGibson/omnigibson/utils/geometry_utils.py | """
A set of helper utility functions for dealing with 3D geometry
"""
import numpy as np
import omnigibson.utils.transform_utils as T
from omnigibson.utils.usd_utils import mesh_prim_mesh_to_trimesh_mesh
def get_particle_positions_in_frame(pos, quat, scale, particle_positions):
"""
    Transforms particle positions @particle_positions into the frame specified by @pos and @quat with new scale @scale,
where @pos and @quat are assumed to be specified in the same coordinate frame that @particle_positions is specified
Args:
pos (3-array): (x,y,z) pos of the new frame
quat (4-array): (x,y,z,w) quaternion orientation of the new frame
scale (3-array): (x,y,z) local scale of the new frame
particle_positions ((N, 3) array): positions
Returns:
        (N, 3) array: updated particle positions in the new coordinate frame
"""
# Get pose of origin (global frame) in new_frame
origin_in_new_frame = T.pose_inv(T.pose2mat((pos, quat)))
# Batch the transforms to get all particle points in the local link frame
positions_tensor = np.tile(np.eye(4).reshape(1, 4, 4), (len(particle_positions), 1, 1)) # (N, 4, 4)
    # Write the particle positions into the translation component of each transform
positions_tensor[:, :3, 3] = particle_positions
particle_positions = (origin_in_new_frame @ positions_tensor)[:, :3, 3] # (N, 3)
# Scale by the new scale
return particle_positions / scale.reshape(1, 3)
def get_particle_positions_from_frame(pos, quat, scale, particle_positions):
"""
    Transforms particle positions @particle_positions from the frame specified by @pos and @quat with new scale @scale.
This is similar to @get_particle_positions_in_frame, but does the reverse operation, inverting @pos and @quat
Args:
pos (3-array): (x,y,z) pos of the local frame
quat (4-array): (x,y,z,w) quaternion orientation of the local frame
scale (3-array): (x,y,z) local scale of the local frame
particle_positions ((N, 3) array): positions
Returns:
        (N, 3) array: updated particle positions in the parent coordinate frame
"""
# Scale by the new scale
particle_positions = particle_positions * scale.reshape(1, 3)
# Get pose of origin (global frame) in new_frame
origin_in_new_frame = T.pose2mat((pos, quat))
# Batch the transforms to get all particle points in the local link frame
positions_tensor = np.tile(np.eye(4).reshape(1, 4, 4), (len(particle_positions), 1, 1)) # (N, 4, 4)
    # Write the particle positions into the translation component of each transform
positions_tensor[:, :3, 3] = particle_positions
return (origin_in_new_frame @ positions_tensor)[:, :3, 3] # (N, 3)
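# Illustrative round-trip sketch (not part of the original module): transforming points
# into a frame and back out of it should recover the originals. All pose values below
# are arbitrary test inputs.
def _example_frame_round_trip():
    pos = np.array([0.2, -0.1, 0.5])
    quat = np.array([0.0, 0.0, np.sin(np.pi / 8), np.cos(np.pi / 8)])  # 45 deg about z
    scale = np.array([1.0, 2.0, 0.5])
    pts = np.array([[0.0, 0.0, 0.0], [0.1, 0.2, 0.3]])
    local_pts = get_particle_positions_in_frame(pos, quat, scale, pts)
    recovered = get_particle_positions_from_frame(pos, quat, scale, local_pts)
    assert np.allclose(pts, recovered)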
def check_points_in_cube(size, pos, quat, scale, particle_positions):
"""
Checks which points are within a cube with specified size @size.
NOTE: Assumes the cube and positions are expressed
in the same coordinate frame such that the cube's dimensions are axis-aligned with (x,y,z)
Args:
        size (float): length of each side of the cube, specified in its local frame
pos (3-array): (x,y,z) local location of the cube
quat (4-array): (x,y,z,w) local orientation of the cube
scale (3-array): (x,y,z) local scale of the cube, specified in its local frame
particle_positions ((N, 3) array): positions to check for whether it is in the cube
Returns:
(N,) array: boolean numpy array specifying whether each point lies in the cube.
"""
particle_positions = get_particle_positions_in_frame(
pos=pos,
quat=quat,
scale=scale,
particle_positions=particle_positions,
)
return ((-size / 2.0 < particle_positions) & (particle_positions < size / 2.0)).sum(axis=-1) == 3
def check_points_in_cone(size, pos, quat, scale, particle_positions):
"""
Checks which points are within a cone with specified size @size.
NOTE: Assumes the cone and positions are
expressed in the same coordinate frame such that the cone's height is aligned with the z-axis
Args:
size (2-array): (radius, height) dimensions of the cone, specified in its local frame
pos (3-array): (x,y,z) local location of the cone
quat (4-array): (x,y,z,w) local orientation of the cone
scale (3-array): (x,y,z) local scale of the cone, specified in its local frame
particle_positions ((N, 3) array): positions to check for whether it is in the cone
Returns:
(N,) array: boolean numpy array specifying whether each point lies in the cone.
"""
particle_positions = get_particle_positions_in_frame(
pos=pos,
quat=quat,
scale=scale,
particle_positions=particle_positions,
)
radius, height = size
in_height = (-height / 2.0 < particle_positions[:, -1]) & (particle_positions[:, -1] < height / 2.0)
in_radius = np.linalg.norm(particle_positions[:, :-1], axis=-1) < \
(radius * (1 - (particle_positions[:, -1] + height / 2.0) / height ))
return in_height & in_radius
def check_points_in_cylinder(size, pos, quat, scale, particle_positions):
"""
Checks which points are within a cylinder with specified size @size.
NOTE: Assumes the cylinder and positions are
expressed in the same coordinate frame such that the cylinder's height is aligned with the z-axis
Args:
size (2-array): (radius, height) dimensions of the cylinder, specified in its local frame
pos (3-array): (x,y,z) local location of the cylinder
quat (4-array): (x,y,z,w) local orientation of the cylinder
        scale (3-array): (x,y,z) local scale of the cylinder, specified in its local frame
particle_positions ((N, 3) array): positions to check for whether it is in the cylinder
Returns:
(N,) array: boolean numpy array specifying whether each point lies in the cylinder.
"""
particle_positions = get_particle_positions_in_frame(
pos=pos,
quat=quat,
scale=scale,
particle_positions=particle_positions,
)
radius, height = size
in_height = (-height / 2.0 < particle_positions[:, -1]) & (particle_positions[:, -1] < height / 2.0)
in_radius = np.linalg.norm(particle_positions[:, :-1], axis=-1) < radius
return in_height & in_radius
def check_points_in_sphere(size, pos, quat, scale, particle_positions):
"""
Checks which points are within a sphere with specified size @size.
NOTE: Assumes the sphere and positions are expressed in the same coordinate frame
Args:
size (float): radius dimensions of the sphere
pos (3-array): (x,y,z) local location of the sphere
quat (4-array): (x,y,z,w) local orientation of the sphere
scale (3-array): (x,y,z) local scale of the sphere, specified in its local frame
particle_positions ((N, 3) array): positions to check for whether it is in the sphere
Returns:
(N,) array: boolean numpy array specifying whether each point lies in the sphere
"""
particle_positions = get_particle_positions_in_frame(
pos=pos,
quat=quat,
scale=scale,
particle_positions=particle_positions,
)
return np.linalg.norm(particle_positions, axis=-1) < size
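# Illustrative sketch (not part of the original module): with an identity frame, the
# checkers reduce to plain axis-aligned / radial containment tests.
def _example_point_volume_checks():
    pts = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    identity = dict(pos=np.zeros(3), quat=np.array([0.0, 0.0, 0.0, 1.0]), scale=np.ones(3))
    # Unit cube centered at the origin: only the origin is inside
    assert check_points_in_cube(size=1.0, particle_positions=pts, **identity).tolist() == [True, False]
    # Sphere of radius 0.5: same result
    assert check_points_in_sphere(size=0.5, particle_positions=pts, **identity).tolist() == [True, False]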
def check_points_in_convex_hull_mesh(mesh_face_centroids, mesh_face_normals, pos, quat, scale, particle_positions):
"""
    Checks which points are within the convex hull defined by face centroids @mesh_face_centroids and face
    normals @mesh_face_normals.
NOTE: Assumes the mesh and positions are expressed in the same coordinate frame
Args:
mesh_face_centroids (D, 3): (x,y,z) location of the centroid of each mesh face, expressed in its local frame
mesh_face_normals (D, 3): (x,y,z) normalized direction vector of each mesh face, expressed in its local frame
pos (3-array): (x,y,z) local location of the mesh
quat (4-array): (x,y,z,w) local orientation of the mesh
        scale (3-array): (x,y,z) local scale of the mesh, specified in its local frame
particle_positions ((N, 3) array): positions to check for whether it is in the mesh
Returns:
(N,) array: boolean numpy array specifying whether each point lies in the mesh
"""
particle_positions = get_particle_positions_in_frame(
pos=pos,
quat=quat,
scale=scale,
particle_positions=particle_positions,
)
# For every mesh point / normal and particle position pair, we check whether it is "inside" (i.e.: the point lies
# BEHIND the normal plane -- this is easily done by taking the dot product with the vector from the point to the
# particle position with the normal, and validating that the value is < 0)
D, _ = mesh_face_centroids.shape
N, _ = particle_positions.shape
mesh_points = np.tile(mesh_face_centroids.reshape(1, D, 3), (N, 1, 1))
mesh_normals = np.tile(mesh_face_normals.reshape(1, D, 3), (N, 1, 1))
particle_positions = np.tile(particle_positions.reshape(N, 1, 3), (1, D, 1))
# All arrays are now (N, D, 3) shape -- efficient for batching
in_range = ((particle_positions - mesh_points) * mesh_normals).sum(axis=-1) < 0 # shape (N, D)
# All D normals must be satisfied for a single point to be considered inside the hull
in_range = in_range.sum(axis=-1) == D
return in_range
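# Illustrative sketch (not part of the original module): a unit cube centered at the
# origin, described by its 6 face centroids and outward normals, exercises the
# half-space test above.
def _example_halfspace_check():
    normals = np.vstack([np.eye(3), -np.eye(3)])
    centroids = normals * 0.5
    pts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
    identity = dict(pos=np.zeros(3), quat=np.array([0.0, 0.0, 0.0, 1.0]), scale=np.ones(3))
    in_hull = check_points_in_convex_hull_mesh(centroids, normals, particle_positions=pts, **identity)
    # The origin lies behind all 6 face planes; [1, 0, 0] violates the +x face
    assert in_hull.tolist() == [True, False]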
def _generate_convex_hull_volume_checker_functions(convex_hull_mesh):
"""
    An internal helper function used to programmatically generate lambda functions to check for particle
points within a convex hull mesh defined by face centroids @mesh_face_centroids and @mesh_face_normals.
Note that this is needed as an EXTERNAL helper function to @generate_points_in_volume_checker_function
because we "bake" certain arguments as part of the lambda internal scope, and
directly generating functions in a for loop results in these local variables being overwritten each time
(meaning that all the generated lambda functions reference the SAME variables!!)
Args:
convex_hull_mesh (Usd.Prim): Raw USD convex hull mesh to generate the volume checker functions
Returns:
2-tuple:
- function: Generated lambda function with signature:
in_range = check_in_volume(mesh, particle_positions)
where @in_range is a N-array boolean numpy array, (True where the particle is in the convex hull mesh
volume), @mesh is the raw USD mesh, and @particle_positions is a (N, 3) array specifying the particle
positions in the SAME coordinate frame as @mesh
- function: Function for grabbing real-time LOCAL scale volume of the container. Signature:
vol = calc_volume(mesh)
where @vol is the total volume being checked (expressed in the mesh's LOCAL scale), and @mesh is the raw
USD mesh
"""
# For efficiency, we pre-compute the mesh using trimesh and find its corresponding faces and normals
trimesh_mesh = mesh_prim_mesh_to_trimesh_mesh(convex_hull_mesh, include_normals=False, include_texcoord=False).convex_hull
assert trimesh_mesh.is_convex, \
f"Trying to generate a volume checker function for a non-convex mesh {convex_hull_mesh.GetPath().pathString}"
face_centroids = trimesh_mesh.vertices[trimesh_mesh.faces].mean(axis=1)
face_normals = trimesh_mesh.face_normals
# This function assumes that:
# 1. @particle_positions are in the local container_link frame
# 2. the @check_points_in_[...] function will convert them into the local @mesh frame
in_volume = lambda mesh, particle_positions: check_points_in_convex_hull_mesh(
mesh_face_centroids=face_centroids,
mesh_face_normals=face_normals,
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array(
[*(mesh.GetAttribute("xformOp:orient").Get().imaginary), mesh.GetAttribute("xformOp:orient").Get().real]),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
calc_volume = lambda mesh: trimesh_mesh.volume if trimesh_mesh.is_volume else trimesh_mesh.convex_hull.volume
return in_volume, calc_volume
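# Illustrative sketch of the closure pitfall motivating the helper above: lambdas
# created in a loop capture variables by reference, so loop-varying values must be
# "baked" (e.g. via a default argument or a dedicated helper function such as the one
# above) to freeze them.
def _example_late_binding_pitfall():
    late = [lambda: i for i in range(3)]        # all three see the final i == 2
    baked = [lambda i=i: i for i in range(3)]   # default argument freezes each i
    assert [f() for f in late] == [2, 2, 2]
    assert [f() for f in baked] == [0, 1, 2]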
def generate_points_in_volume_checker_function(obj, volume_link, use_visual_meshes=True, mesh_name_prefixes=None):
"""
Generates a function for quickly checking which of a group of points are contained within any container volumes.
Four volume types are supported:
"Cylinder" - Cylinder volume
"Cube" - Cube volume
"Sphere" - Sphere volume
"Mesh" - Convex hull volume
@volume_link should have any number of nested, visual-only meshes of types {Sphere, Cylinder, Cube, Mesh} with
naming prefix "container[...]"
Args:
obj (EntityPrim): Object which contains @volume_link as one of its links
volume_link (RigidPrim): Link to use to grab container volumes composing the values for checking the points
use_visual_meshes (bool): Whether to use @volume_link's visual or collision meshes to generate points fcn
mesh_name_prefixes (None or str): If specified, specifies the substring that must exist in @volume_link's
mesh names in order for that mesh to be included in the volume checker function. If None, no filtering
will be used.
Returns:
2-tuple:
- function: Function with signature:
in_range = check_in_volumes(particle_positions)
where @in_range is a N-array boolean numpy array, (True where the particle is in the volume), and
@particle_positions is a (N, 3) array specifying the particle positions in global coordinates
- function: Function for grabbing real-time global scale volume of the container. Signature:
vol = total_volume()
where @vol is the total volume being checked (expressed in global scale) aggregated across
all container sub-volumes
"""
# Iterate through all visual meshes and keep track of any that are prefixed with container
container_meshes = []
meshes = volume_link.visual_meshes if use_visual_meshes else volume_link.collision_meshes
for container_mesh_name, container_mesh in meshes.items():
if mesh_name_prefixes is None or mesh_name_prefixes in container_mesh_name:
container_meshes.append(container_mesh)
# Programmatically define the volume checker functions based on each container found
volume_checker_fcns = []
for sub_container_mesh in container_meshes:
mesh_type = sub_container_mesh.prim.GetTypeName()
if mesh_type == "Mesh":
fcn, vol_fcn = _generate_convex_hull_volume_checker_functions(convex_hull_mesh=sub_container_mesh.prim)
elif mesh_type == "Sphere":
fcn = lambda mesh, particle_positions: check_points_in_sphere(
size=mesh.GetAttribute("radius").Get(),
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array([*(mesh.GetAttribute("xformOp:orient").Get().imaginary), mesh.GetAttribute("xformOp:orient").Get().real]),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
elif mesh_type == "Cylinder":
fcn = lambda mesh, particle_positions: check_points_in_cylinder(
size=[mesh.GetAttribute("radius").Get(), mesh.GetAttribute("height").Get()],
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array([*(mesh.GetAttribute("xformOp:orient").Get().imaginary), mesh.GetAttribute("xformOp:orient").Get().real]),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
elif mesh_type == "Cone":
fcn = lambda mesh, particle_positions: check_points_in_cone(
size=[mesh.GetAttribute("radius").Get(), mesh.GetAttribute("height").Get()],
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array([*(mesh.GetAttribute("xformOp:orient").Get().imaginary), mesh.GetAttribute("xformOp:orient").Get().real]),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
elif mesh_type == "Cube":
fcn = lambda mesh, particle_positions: check_points_in_cube(
size=mesh.GetAttribute("size").Get(),
pos=np.array(mesh.GetAttribute("xformOp:translate").Get()),
quat=np.array([*(mesh.GetAttribute("xformOp:orient").Get().imaginary), mesh.GetAttribute("xformOp:orient").Get().real]),
scale=np.array(mesh.GetAttribute("xformOp:scale").Get()),
particle_positions=particle_positions,
)
else:
raise ValueError(f"Cannot create volume checker function for mesh of type: {mesh_type}")
volume_checker_fcns.append(fcn)
# Define the actual volume checker function
def check_points_in_volumes(particle_positions):
# Algo
# 1. Particles in global frame --> particles in volume link frame (including scaling)
# 2. For each volume checker function, apply volume checking
# 3. Aggregate across all functions with OR condition (any volume satisfied for that point)
######
n_particles = len(particle_positions)
# Get pose of origin (global frame) in frame of volume link
# NOTE: This assumes there is no relative scaling between obj and volume link
volume_link_pos, volume_link_quat = volume_link.get_position_orientation()
particle_positions = get_particle_positions_in_frame(
pos=volume_link_pos,
quat=volume_link_quat,
scale=obj.scale,
particle_positions=particle_positions,
)
in_volumes = np.zeros(n_particles).astype(bool)
for checker_fcn, mesh in zip(volume_checker_fcns, container_meshes):
in_volumes |= checker_fcn(mesh.prim, particle_positions)
return in_volumes
# Define the actual volume calculator function
def calculate_volume(precision=1e-5):
        # We use monte-carlo sampling to approximate the volume up to @precision
# NOTE: precision defines the RELATIVE precision of the volume computation -- i.e.: the relative error with
# respect to the volume link's global AABB
# Convert precision to minimum number of particles to sample
min_n_particles = int(np.ceil(1. / precision))
# Make sure container meshes are visible so AABB computation is correct
for mesh in container_meshes:
mesh.visible = True
# Determine equally-spaced sampling distance to achieve this minimum particle count
aabb_volume = np.product(volume_link.visual_aabb_extent)
sampling_distance = np.cbrt(aabb_volume / min_n_particles)
low, high = volume_link.visual_aabb
n_particles_per_axis = ((high - low) / sampling_distance).astype(int) + 1
assert np.all(n_particles_per_axis), "Must increase precision for calculate_volume -- too coarse for sampling!"
        # Generate evenly-spaced sample coordinates spanning the AABB along each axis
        arrs = [np.arange(l, h, sampling_distance)
                for l, h in zip(low, high)]
# Generate 3D-rectangular grid of points, and only keep the ones inside the mesh
points = np.stack([arr.flatten() for arr in np.meshgrid(*arrs)]).T
# Re-hide container meshes
for mesh in container_meshes:
mesh.visible = False
# Return the fraction of the link AABB's volume based on fraction of points enclosed within it
return aabb_volume * np.mean(check_points_in_volumes(points))
return check_points_in_volumes, calculate_volume
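# Illustrative usage (sketch; "container_link" and the prefix are hypothetical names):
#   check_in_volumes, calc_volume = generate_points_in_volume_checker_function(
#       obj=obj, volume_link=obj.links["container_link"], mesh_name_prefixes="container")
#   inside = check_in_volumes(points_in_world)  # (N,) boolean mask
#   vol = calc_volume()                         # approximate global-scale volume via sampling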
| 19,995 | Python | 48.130221 | 136 | 0.663766 |
StanfordVL/OmniGibson/omnigibson/utils/gym_utils.py | import gym
from abc import ABCMeta, abstractmethod
import numpy as np
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
def recursively_generate_flat_dict(dic, prefix=None):
"""
Helper function to recursively iterate through dictionary / gym.spaces.Dict @dic and flatten any nested elements,
such that the result is a flat dictionary mapping keys to values
Args:
dic (dict or gym.spaces.Dict): (Potentially nested) dictionary to convert into a flattened dictionary
prefix (None or str): Prefix to append to the beginning of all strings in the flattened dictionary. None results
in no prefix being applied
Returns:
dict: Flattened version of @dic
"""
out = dict()
prefix = "" if prefix is None else f"{prefix}::"
for k, v in dic.items():
if isinstance(v, gym.spaces.Dict) or isinstance(v, dict):
out.update(recursively_generate_flat_dict(dic=v, prefix=f"{prefix}{k}"))
elif isinstance(v, gym.spaces.Tuple) or isinstance(v, tuple):
for i, vv in enumerate(v):
# Assume no dicts are nested within tuples
out[f"{prefix}{k}::{i}"] = vv
else:
# Add to out dict
out[f"{prefix}{k}"] = v
return out
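# Illustrative sketch (not part of the original module): nested keys are joined with
# "::" and tuple entries are enumerated.
def _example_flatten_dict():
    nested = {"a": {"b": 1}, "c": (2, 3)}
    assert recursively_generate_flat_dict(nested) == {"a::b": 1, "c::0": 2, "c::1": 3}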
def recursively_generate_compatible_dict(dic):
"""
    Helper function to recursively iterate through dictionary and cast values to necessary types to be compatible with
Gym spaces -- in particular, the Sequence and Tuple types for np.ndarray / np.void values in @dic
Args:
dic (dict or gym.spaces.Dict): (Potentially nested) dictionary to convert into a flattened dictionary
Returns:
dict: Gym-compatible version of @dic
"""
out = dict()
for k, v in dic.items():
if isinstance(v, dict):
out[k] = recursively_generate_compatible_dict(dic=v)
elif isinstance(v, np.ndarray) and len(v.dtype) > 0:
# Map to list of tuples
out[k] = list(map(tuple, v))
else:
# Preserve the key-value pair
out[k] = v
return out
class GymObservable(metaclass=ABCMeta):
"""
Simple class interface for observable objects. These objects should implement a way to grab observations,
(get_obs()), and should define an observation space that is created when load_observation_space() is called
Args:
kwargs: dict, does nothing, used to sink any extraneous arguments during initialization
"""
def __init__(self, *args, **kwargs):
# Initialize variables that we will fill in later
self.observation_space = None
# Call any super methods
super().__init__(*args, **kwargs)
@abstractmethod
def get_obs(self, **kwargs):
"""
Get observations for the object. Note that the shape / nested structure should match that
of @self.observation_space!
Args:
kwargs (dict): Any keyword args necessary for grabbing observations
Returns:
2-tuple:
dict: Keyword-mapped observations mapping observation names to nested observations
dict: Additional information about the observations
"""
raise NotImplementedError()
@staticmethod
def _build_obs_box_space(shape, low, high, dtype=np.float32):
"""
Helper function that builds individual observation box spaces.
Args:
shape (n-array): Shape of the space
low (float): Lower bound of the space
high (float): Upper bound of the space
Returns:
gym.spaces.Box: Generated gym box observation space
"""
return gym.spaces.Box(low=low, high=high, shape=shape, dtype=dtype)
@abstractmethod
def _load_observation_space(self):
"""
Create the observation space for this object. Should be implemented by subclass
Returns:
dict: Keyword-mapped observation space for this object mapping observation name to observation space
"""
raise NotImplementedError()
def load_observation_space(self):
"""
Load the observation space internally, and also return this value
Returns:
gym.spaces.Dict: Loaded observation space for this object
"""
# Load the observation space and convert it into a gym-compatible dictionary
self.observation_space = gym.spaces.Dict(self._load_observation_space())
log.debug(f"Loaded obs space dictionary for: {self.__class__.__name__}")
return self.observation_space
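# Minimal illustrative subclass (a sketch, not part of the library): shows the two
# abstract hooks a GymObservable implementer must provide.
class _ExampleObservable(GymObservable):
    def get_obs(self, **kwargs):
        # Observation structure must match the loaded observation space
        return {"pos": np.zeros(3, dtype=np.float32)}, dict()
    def _load_observation_space(self):
        return {"pos": self._build_obs_box_space(shape=(3,), low=-1.0, high=1.0)}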
| 4,700 | Python | 34.345864 | 120 | 0.640851 |
StanfordVL/OmniGibson/omnigibson/utils/object_utils.py | """
Helper utility functions for computing relevant object information
"""
import omnigibson as og
import numpy as np
import omnigibson.utils.transform_utils as T
from scipy.spatial.transform import Rotation as R
from omnigibson.utils.geometry_utils import get_particle_positions_from_frame
def sample_stable_orientations(obj, n_samples=10, drop_aabb_offset=0.1):
"""
Samples random stable orientations for obj @obj by stochastically dropping the object and recording its
resulting orientations
Args:
obj (BaseObject): Object whose orientations will be sampled
n_samples (int): How many sampled orientations will be recorded
drop_aabb_offset (float): Offset to apply in the z-direction when dropping the object
Returns:
n-array: (N, 4) array, where each of the N rows are sampled (x,y,z,w) stable orientations
"""
og.sim.play()
assert np.all(obj.scale == 1.0)
aabb_extent = obj.aabb_extent
radius = np.linalg.norm(aabb_extent) / 2.0
drop_pos = np.array([0, 0, radius + drop_aabb_offset])
center_offset = obj.get_position() - obj.aabb_center
drop_orientations = R.random(n_samples).as_quat()
stable_orientations = np.zeros_like(drop_orientations)
for i, drop_orientation in enumerate(drop_orientations):
# Sample orientation, drop, wait to stabilize, then record
pos = drop_pos + T.quat2mat(drop_orientation) @ center_offset
obj.set_position_orientation(pos, drop_orientation)
obj.keep_still()
for j in range(25):
og.sim.step()
stable_orientations[i] = obj.get_orientation()
return stable_orientations
def compute_bbox_offset(obj):
"""
Computes the base link offset of @obj, specifying the relative position of the object's bounding box center wrt to
its root link frame, expressed in the world frame
Args:
obj (BaseObject): Object whose bbox offset will be computed
Returns:
n-array: (x,y,z) offset specifying the relative position from the root link to @obj's bounding box center
"""
og.sim.stop()
assert np.all(obj.scale == 1.0)
obj.set_position_orientation(np.zeros(3), np.array([0, 0, 0, 1.0]))
return obj.aabb_center - obj.get_position()
def compute_native_bbox_extent(obj):
"""
Computes the native bounding box extent for @obj, which is the extent with the obj placed at (0, 0, 0) with
orientation (0, 0, 0, 1) and scale (1, 1, 1)
Args:
obj (BaseObject): Object whose native bbox extent will be computed
Returns:
n-array: (x,y,z) native bounding box extent
"""
og.sim.stop()
assert np.all(obj.scale == 1.0)
obj.set_position_orientation(np.zeros(3), np.array([0, 0, 0, 1.0]))
return obj.aabb_extent
def compute_base_aligned_bboxes(obj):
link_bounding_boxes = {}
for link_name, link in obj.links.items():
link_bounding_boxes[link_name] = {}
for mesh_type, mesh_list in zip(("collision", "visual"), (link.collision_meshes, link.visual_meshes)):
pts_in_link_frame = []
for mesh_name, mesh in mesh_list.items():
pts = mesh.get_attribute("points")
local_pos, local_orn = mesh.get_local_pose()
pts_in_link_frame.append(get_particle_positions_from_frame(local_pos, local_orn, mesh.scale, pts))
pts_in_link_frame = np.concatenate(pts_in_link_frame, axis=0)
max_pt = np.max(pts_in_link_frame, axis=0)
min_pt = np.min(pts_in_link_frame, axis=0)
extent = max_pt - min_pt
center = (max_pt + min_pt) / 2.0
transform = T.pose2mat((center, np.array([0, 0, 0, 1.0])))
print(pts_in_link_frame.shape)
link_bounding_boxes[link_name][mesh_type] = {
"extent": extent,
"transform": transform,
}
return link_bounding_boxes
def compute_obj_kinematic_metadata(obj):
"""
Computes relevant kinematic metadata for @obj, such as stable_orientations, bounding box offsets,
bounding box extents, and base_aligned_bboxes
Args:
obj (BaseObject): Object whose metadata will be computed
Returns:
dict: Relevant metadata, with the following keys:
- "stable_orientations": 2D (N, 4)-array of sampled stable (x,y,z,w) quaternion orientations
- "bbox_offset": (x,y,z) relative position from the root link to @obj's bounding box center
- "native_bbox_extent": (x,y,z) native bounding box extent
- "base_aligned_bboxes": TODO
"""
assert og.sim.scene is not None
assert og.sim.scene.floor_plane is not None, "An empty scene must be used in order to compute kinematic metadata!"
assert np.all(obj.scale == 1.0), "Object must have scale [1, 1, 1] in order to compute kinematic metadata!"
og.sim.stop()
return {
"stable_orientations": sample_stable_orientations(obj=obj),
"bbox_offset": compute_bbox_offset(obj=obj),
"native_bbox_extent": compute_native_bbox_extent(obj=obj),
"base_aligned_bboxes": compute_base_aligned_bboxes(obj=obj),
}
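# Illustrative usage (sketch; requires a running simulator with an empty scene and a
# floor plane, per the assertions above):
#   metadata = compute_obj_kinematic_metadata(obj)
#   print(metadata["native_bbox_extent"], metadata["bbox_offset"])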
| 5,156 | Python | 38.976744 | 118 | 0.651668 |
StanfordVL/OmniGibson/omnigibson/utils/processing_utils.py | import numpy as np
from omnigibson.utils.python_utils import Serializable
class Filter(Serializable):
"""
A base class for filtering a noisy data stream in an online fashion.
"""
def estimate(self, observation):
"""
Takes an observation and returns a de-noised estimate.
Args:
observation (n-array): A current observation.
Returns:
n-array: De-noised estimate.
"""
raise NotImplementedError
def reset(self):
"""
Resets this filter. Default is no-op.
"""
pass
@property
def state_size(self):
# No state by default
return 0
def _dump_state(self):
# Default is no state (empty dict)
return dict()
def _load_state(self, state):
# Default is no state (empty dict), so this is a no-op
pass
def _serialize(self, state):
# Default is no state, so do nothing
return np.array([])
def _deserialize(self, state):
# Default is no state, so do nothing
return dict(), 0
class MovingAverageFilter(Filter):
"""
This class uses a moving average to de-noise a noisy data stream in an online fashion.
This is a FIR filter.
"""
def __init__(self, obs_dim, filter_width):
"""
Args:
obs_dim (int): The dimension of the points to filter.
filter_width (int): The number of past samples to take the moving average over.
"""
self.obs_dim = obs_dim
assert filter_width > 0, f"MovingAverageFilter must have a non-zero size! Got: {filter_width}"
self.filter_width = filter_width
self.past_samples = np.zeros((filter_width, obs_dim))
self.current_idx = 0
self.fully_filled = False # Whether the entire filter buffer is filled or not
super().__init__()
def estimate(self, observation):
"""
        Compute an online de-noised estimate of the state given a recent observation.
        Args:
            observation (n-array): New observation used to update the internal estimate of the state.
Returns:
n-array: New estimate of state.
"""
# Write the newest observation at the appropriate index
self.past_samples[self.current_idx, :] = np.array(observation)
# Compute value based on whether we're fully filled or not
if not self.fully_filled:
val = self.past_samples[:self.current_idx + 1, :].mean(axis=0)
# Denote that we're fully filled if we're at the end of the buffer
if self.current_idx == self.filter_width - 1:
self.fully_filled = True
else:
val = self.past_samples.mean(axis=0)
# Increment the index to write the next sample to
self.current_idx = (self.current_idx + 1) % self.filter_width
return val
def reset(self):
# Clear internal state
self.past_samples *= 0.0
self.current_idx = 0
self.fully_filled = False
@property
def state_size(self):
return super().state_size + self.filter_width * self.obs_dim + 2
def _dump_state(self):
        # Run super first
state = super()._dump_state()
# Add info from this filter
state["past_samples"] = np.array(self.past_samples)
state["current_idx"] = self.current_idx
state["fully_filled"] = self.fully_filled
return state
def _load_state(self, state):
# Run super first
super()._load_state(state=state)
# Load relevant info for this filter
self.past_samples = np.array(state["past_samples"])
self.current_idx = state["current_idx"]
self.fully_filled = state["fully_filled"]
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
# Serialize state for this filter
return np.concatenate([
state_flat,
state["past_samples"].flatten(),
[state["current_idx"]],
[state["fully_filled"]],
]).astype(float)
def _deserialize(self, state):
# Run super first
state_dict, idx = super()._deserialize(state=state)
# Deserialize state for this filter
samples_len = self.filter_width * self.obs_dim
state_dict["past_samples"] = state[idx: idx + samples_len]
state_dict["current_idx"] = int(state[idx + samples_len])
state_dict["fully_filled"] = bool(state[idx + samples_len + 1])
return state_dict, idx + samples_len + 2
class ExponentialAverageFilter(Filter):
"""
This class uses an exponential average of the form y_n = alpha * x_n + (1 - alpha) * y_{n - 1}.
This is an IIR filter.
"""
def __init__(self, obs_dim, alpha=0.9):
"""
Args:
obs_dim (int): The dimension of the points to filter.
alpha (float): The relative weighting of new samples relative to older samples
"""
self.obs_dim = obs_dim
self.avg = np.zeros(obs_dim)
self.num_samples = 0
self.alpha = alpha
super().__init__()
def estimate(self, observation):
"""
        Compute an online de-noised estimate of the state given a recent observation.
        Args:
            observation (n-array): New observation used to update the internal estimate of the state.
Returns:
n-array: New estimate of state.
"""
self.avg = self.alpha * observation + (1.0 - self.alpha) * self.avg
self.num_samples += 1
return np.array(self.avg)
def reset(self):
# Clear internal state
self.avg *= 0.0
self.num_samples = 0
@property
def state_size(self):
return super().state_size + self.obs_dim + 1
def _dump_state(self):
        # Run super first
state = super()._dump_state()
# Add info from this filter
state["avg"] = np.array(self.avg)
state["num_samples"] = self.num_samples
return state
def _load_state(self, state):
# Run super first
super()._load_state(state=state)
# Load relevant info for this filter
self.avg = np.array(state["avg"])
self.num_samples = state["num_samples"]
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
# Serialize state for this filter
return np.concatenate([
state_flat,
state["avg"],
[state["num_samples"]],
]).astype(float)
def _deserialize(self, state):
# Run super first
state_dict, idx = super()._deserialize(state=state)
# Deserialize state for this filter
state_dict["avg"] = state[idx: idx + self.obs_dim]
state_dict["num_samples"] = int(state[idx + self.obs_dim])
return state_dict, idx + self.obs_dim + 1
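# Illustrative sketch (not part of the original module): the IIR update
# y_n = alpha * x_n + (1 - alpha) * y_{n-1} worked out for two steps.
def _example_exponential_filter():
    f = ExponentialAverageFilter(obs_dim=1, alpha=0.5)
    # y_1 = 0.5 * 1 + 0.5 * 0 = 0.5 ; y_2 = 0.5 * 1 + 0.5 * 0.5 = 0.75
    assert np.allclose(f.estimate(np.array([1.0])), 0.5)
    assert np.allclose(f.estimate(np.array([1.0])), 0.75)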
class Subsampler:
"""
A base class for subsampling a data stream in an online fashion.
"""
def subsample(self, observation):
"""
Takes an observation and returns the observation, or None, which
corresponds to deleting the observation.
Args:
observation (n-array): A current observation.
Returns:
None or n-array: No observation if subsampled, otherwise the observation
"""
raise NotImplementedError
class UniformSubsampler(Subsampler):
"""
A class for subsampling a data stream uniformly in time in an online fashion.
"""
def __init__(self, T):
"""
Args:
T (int): Pick one every T observations.
"""
self.T = T
self.counter = 0
super(UniformSubsampler, self).__init__()
def subsample(self, observation):
"""
Returns an observation once every T observations, None otherwise.
Args:
observation (n-array): A current observation.
Returns:
None or n-array: The observation, or None.
"""
self.counter += 1
if self.counter == self.T:
self.counter = 0
return observation
return None
if __name__ == "__main__":
f = MovingAverageFilter(3, 10)
a = np.array([1, 1, 1])
for i in range(500):
        print(f.estimate(a + np.random.normal(scale=0.1, size=3)))
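    # Illustrative subsampler usage (sketch): keep one observation out of every 3
    s = UniformSubsampler(T=3)
    print([s.subsample(i) for i in range(9)])  # [None, None, 2, None, None, 5, None, None, 8]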
| 8,427 | Python | 27.962199 | 102 | 0.576718 |
StanfordVL/OmniGibson/omnigibson/utils/grasping_planning_utils.py | import numpy as np
from scipy.spatial.transform import Rotation as R, Slerp
from math import ceil
from omnigibson.macros import create_module_macros
import omnigibson.utils.transform_utils as T
from omnigibson.object_states.open_state import _get_relevant_joints
from omnigibson.utils.constants import JointType, JointAxis
import omnigibson.lazy as lazy
m = create_module_macros(module_path=__file__)
m.REVOLUTE_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS = (0.4, 0.6)
m.PRISMATIC_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS = (0.2, 0.8)
m.ROTATION_ARC_SEGMENT_LENGTHS = 0.05
m.OPENNESS_THRESHOLD_TO_OPEN = 0.8
m.OPENNESS_THRESHOLD_TO_CLOSE = 0.05
def get_grasp_poses_for_object_sticky(target_obj):
"""
Obtain a grasp pose for an object from top down, to be used with sticky grasping.
Args:
        target_obj (StatefulObject): Object to get a grasp pose for
Returns:
List of grasp candidates, where each grasp candidate is a tuple containing the grasp pose and the approach direction.
"""
bbox_center_in_world, bbox_quat_in_world, bbox_extent_in_base_frame, _ = target_obj.get_base_aligned_bbox(
visual=False
)
grasp_center_pos = bbox_center_in_world + np.array([0, 0, np.max(bbox_extent_in_base_frame) + 0.05])
towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos
towards_object_in_world_frame /= np.linalg.norm(towards_object_in_world_frame)
grasp_quat = T.euler2quat([0, np.pi/2, 0])
grasp_pose = (grasp_center_pos, grasp_quat)
grasp_candidate = [(grasp_pose, towards_object_in_world_frame)]
return grasp_candidate
def get_grasp_poses_for_object_sticky_from_arbitrary_direction(target_obj):
"""
Obtain a grasp pose for an object from an arbitrary direction to be used with sticky grasping.
Args:
        target_obj (StatefulObject): Object to get a grasp pose for
Returns:
List of grasp candidates, where each grasp candidate is a tuple containing the grasp pose and the approach direction.
"""
bbox_center_in_world, bbox_quat_in_world, bbox_extent_in_base_frame, _ = target_obj.get_base_aligned_bbox(
visual=False
)
# Pick an axis and a direction.
approach_axis = np.random.choice([0, 1, 2])
approach_direction = np.random.choice([-1, 1]) if approach_axis != 2 else 1
constant_dimension_in_base_frame = approach_direction * bbox_extent_in_base_frame * np.eye(3)[approach_axis]
randomizable_dimensions_in_base_frame = bbox_extent_in_base_frame - np.abs(constant_dimension_in_base_frame)
random_dimensions_in_base_frame = np.random.uniform([-1, -1, 0], [1, 1, 1]) # note that we don't allow going below center
grasp_center_in_base_frame = random_dimensions_in_base_frame * randomizable_dimensions_in_base_frame + constant_dimension_in_base_frame
grasp_center_pos = T.mat2pose(
T.pose2mat((bbox_center_in_world, bbox_quat_in_world)) @ # base frame to world frame
T.pose2mat((grasp_center_in_base_frame, [0, 0, 0, 1])) # grasp pose in base frame
)[0] + np.array([0, 0, 0.02])
towards_object_in_world_frame = bbox_center_in_world - grasp_center_pos
towards_object_in_world_frame /= np.linalg.norm(towards_object_in_world_frame)
# For the grasp, we want the X+ direction to be the direction of the object's surface.
# The other two directions can be randomized.
rand_vec = np.random.rand(3)
rand_vec /= np.linalg.norm(rand_vec)
grasp_x = towards_object_in_world_frame
grasp_y = np.cross(rand_vec, grasp_x)
grasp_y /= np.linalg.norm(grasp_y)
grasp_z = np.cross(grasp_x, grasp_y)
grasp_z /= np.linalg.norm(grasp_z)
grasp_mat = np.array([grasp_x, grasp_y, grasp_z]).T
grasp_quat = R.from_matrix(grasp_mat).as_quat()
grasp_pose = (grasp_center_pos, grasp_quat)
grasp_candidate = [(grasp_pose, towards_object_in_world_frame)]
return grasp_candidate
def get_grasp_position_for_open(robot, target_obj, should_open, relevant_joint=None, num_waypoints="default"):
"""
Computes the grasp position for opening or closing a joint.
Args:
robot: the robot object
target_obj: the object to open/close a joint of
should_open: a boolean indicating whether we are opening or closing
relevant_joint: the joint to open/close if we want to do a particular one in advance
num_waypoints: the number of waypoints to interpolate between the start and end poses (default is "default")
Returns:
None (if no grasp was found), or Tuple, containing:
relevant_joint: the joint that is being targeted for open/close by the returned grasp
offset_grasp_pose_in_world_frame: the grasp pose in the world frame
waypoints: the interpolated waypoints between the start and end poses
approach_direction_in_world_frame: the approach direction in the world frame
grasp_required: a boolean indicating whether a grasp is required for the opening/closing based on which side of the joint we are
required_pos_change: the required change in position of the joint to open/close
"""
# Pick a moving link of the object.
relevant_joints = [relevant_joint] if relevant_joint is not None else _get_relevant_joints(target_obj)[1]
if len(relevant_joints) == 0:
raise ValueError("Cannot open/close object without relevant joints.")
# Make sure what we got is an appropriately open/close joint.
np.random.shuffle(relevant_joints)
selected_joint = None
for joint in relevant_joints:
current_position = joint.get_state()[0][0]
joint_range = joint.upper_limit - joint.lower_limit
openness_fraction = (current_position - joint.lower_limit) / joint_range
        if (should_open and openness_fraction < m.OPENNESS_THRESHOLD_TO_OPEN) or (not should_open and openness_fraction > m.OPENNESS_THRESHOLD_TO_CLOSE):
selected_joint = joint
break
if selected_joint is None:
return None
if selected_joint.joint_type == JointType.JOINT_REVOLUTE:
        return (selected_joint,) + grasp_position_for_open_on_revolute_joint(robot, target_obj, selected_joint, should_open)
elif selected_joint.joint_type == JointType.JOINT_PRISMATIC:
return (selected_joint,) + grasp_position_for_open_on_prismatic_joint(robot, target_obj, selected_joint, should_open, num_waypoints=num_waypoints)
else:
raise ValueError("Unknown joint type encountered while generating joint position.")
def grasp_position_for_open_on_prismatic_joint(robot, target_obj, relevant_joint, should_open, num_waypoints="default"):
"""
Computes the grasp position for opening or closing a prismatic joint.
Args:
robot: the robot object
target_obj: the object to open
relevant_joint: the prismatic joint to open
should_open: a boolean indicating whether we are opening or closing
num_waypoints: the number of waypoints to interpolate between the start and end poses (default is "default")
Returns:
Tuple, containing:
offset_grasp_pose_in_world_frame: the grasp pose in the world frame
waypoints: the interpolated waypoints between the start and end poses
            approach_direction_in_world_frame: the approach direction in the world frame
            relevant_joint: the prismatic joint being targeted for opening/closing
            grasp_required: a boolean indicating whether a grasp is required for the opening/closing based on which side of the joint we are
            required_pos_change: the required change in position of the joint to open/close
"""
link_name = relevant_joint.body1.split("/")[-1]
# Get the bounding box of the child link.
(
bbox_center_in_world,
bbox_quat_in_world,
bbox_extent_in_link_frame,
_,
) = target_obj.get_base_aligned_bbox(link_name=link_name, visual=False)
# Match the push axis to one of the bb axes.
joint_orientation = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(relevant_joint.get_attribute("physics:localRot0"))[[1, 2, 3, 0]]
push_axis = R.from_quat(joint_orientation).apply([1, 0, 0])
assert np.isclose(np.max(np.abs(push_axis)), 1.0) # Make sure we're aligned with a bb axis.
push_axis_idx = np.argmax(np.abs(push_axis))
canonical_push_axis = np.eye(3)[push_axis_idx]
# TODO: Need to figure out how to get the correct push direction.
push_direction = np.sign(push_axis[push_axis_idx]) if should_open else -1 * np.sign(push_axis[push_axis_idx])
canonical_push_direction = canonical_push_axis * push_direction
# Pick the closer of the two faces along the push axis as our favorite.
points_along_push_axis = (
np.array([canonical_push_axis, -canonical_push_axis]) * bbox_extent_in_link_frame[push_axis_idx] / 2
)
(
push_axis_closer_side_idx,
center_of_selected_surface_along_push_axis,
_,
) = _get_closest_point_to_point_in_world_frame(
points_along_push_axis, (bbox_center_in_world, bbox_quat_in_world), robot.get_position()
)
push_axis_closer_side_sign = 1 if push_axis_closer_side_idx == 0 else -1
# Pick the other axes.
all_axes = list(set(range(3)) - {push_axis_idx})
x_axis_idx, y_axis_idx = tuple(sorted(all_axes))
canonical_x_axis = np.eye(3)[x_axis_idx]
canonical_y_axis = np.eye(3)[y_axis_idx]
# Find the correct side of the lateral axis & go some distance along that direction.
min_lateral_pos_wrt_surface_center = (canonical_x_axis + canonical_y_axis) * -bbox_extent_in_link_frame / 2
max_lateral_pos_wrt_surface_center = (canonical_x_axis + canonical_y_axis) * bbox_extent_in_link_frame / 2
diff_lateral_pos_wrt_surface_center = max_lateral_pos_wrt_surface_center - min_lateral_pos_wrt_surface_center
sampled_lateral_pos_wrt_min = np.random.uniform(
m.PRISMATIC_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS[0] * diff_lateral_pos_wrt_surface_center,
m.PRISMATIC_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS[1] * diff_lateral_pos_wrt_surface_center,
)
lateral_pos_wrt_surface_center = min_lateral_pos_wrt_surface_center + sampled_lateral_pos_wrt_min
grasp_position_in_bbox_frame = center_of_selected_surface_along_push_axis + lateral_pos_wrt_surface_center
grasp_quat_in_bbox_frame = T.quat_inverse(joint_orientation)
grasp_pose_in_world_frame = T.pose_transform(
bbox_center_in_world, bbox_quat_in_world, grasp_position_in_bbox_frame, grasp_quat_in_bbox_frame
)
# Now apply the grasp offset.
dist_from_grasp_pos = robot.finger_lengths[robot.default_arm] + 0.05
offset_grasp_pose_in_bbox_frame = (grasp_position_in_bbox_frame + canonical_push_axis * push_axis_closer_side_sign * dist_from_grasp_pos, grasp_quat_in_bbox_frame)
offset_grasp_pose_in_world_frame = T.pose_transform(
bbox_center_in_world, bbox_quat_in_world, *offset_grasp_pose_in_bbox_frame
)
# To compute the rotation position, we want to decide how far along the rotation axis we'll go.
target_joint_pos = relevant_joint.upper_limit if should_open else relevant_joint.lower_limit
current_joint_pos = relevant_joint.get_state()[0][0]
required_pos_change = target_joint_pos - current_joint_pos
push_vector_in_bbox_frame = canonical_push_direction * abs(required_pos_change)
target_hand_pos_in_bbox_frame = grasp_position_in_bbox_frame + push_vector_in_bbox_frame
target_hand_pose_in_world_frame = T.pose_transform(
bbox_center_in_world, bbox_quat_in_world, target_hand_pos_in_bbox_frame, grasp_quat_in_bbox_frame
)
# Compute the approach direction.
approach_direction_in_world_frame = R.from_quat(bbox_quat_in_world).apply(canonical_push_axis * -push_axis_closer_side_sign)
# Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp.
grasp_required = np.dot(push_vector_in_bbox_frame, canonical_push_axis * -push_axis_closer_side_sign) < 0
# TODO: Need to find a better of getting the predicted position of eef for start point of interpolating waypoints. Maybe
# break this into another function that called after the grasp is executed, so we know the eef position?
waypoint_start_offset = -0.05 * approach_direction_in_world_frame if should_open else 0.05 * approach_direction_in_world_frame
waypoint_start_pose = (grasp_pose_in_world_frame[0] + -1 * approach_direction_in_world_frame * (robot.finger_lengths[robot.default_arm] + waypoint_start_offset), grasp_pose_in_world_frame[1])
waypoint_end_pose = (target_hand_pose_in_world_frame[0] + -1 * approach_direction_in_world_frame * (robot.finger_lengths[robot.default_arm]), target_hand_pose_in_world_frame[1])
waypoints = interpolate_waypoints(waypoint_start_pose, waypoint_end_pose, num_waypoints=num_waypoints)
return (
offset_grasp_pose_in_world_frame,
waypoints,
approach_direction_in_world_frame,
relevant_joint,
grasp_required,
required_pos_change
)
def interpolate_waypoints(start_pose, end_pose, num_waypoints="default"):
"""
Interpolates a series of waypoints between a start and end pose.
Args:
start_pose (tuple): A tuple containing the starting position and orientation as a quaternion.
end_pose (tuple): A tuple containing the ending position and orientation as a quaternion.
num_waypoints (int, optional): The number of waypoints to interpolate. If "default", the number of waypoints is calculated based on the distance between the start and end pose.
Returns:
list: A list of tuples representing the interpolated waypoints, where each tuple contains a position and orientation as a quaternion.
"""
start_pos, start_orn = start_pose
travel_distance = np.linalg.norm(end_pose[0] - start_pos)
if num_waypoints == "default":
num_waypoints = np.max([2, int(travel_distance / 0.01) + 1])
pos_waypoints = np.linspace(start_pos, end_pose[0], num_waypoints)
# Also interpolate the rotations
combined_rotation = R.from_quat(np.array([start_orn, end_pose[1]]))
slerp = Slerp([0, 1], combined_rotation)
orn_waypoints = slerp(np.linspace(0, 1, num_waypoints))
quat_waypoints = [x.as_quat() for x in orn_waypoints]
return [waypoint for waypoint in zip(pos_waypoints, quat_waypoints)]
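# Illustrative sketch (not part of the original module): the waypoint list spans the
# two endpoint poses, with positions linearly interpolated and orientations slerped.
def _example_interpolate_waypoints():
    start = (np.zeros(3), np.array([0.0, 0.0, 0.0, 1.0]))
    end = (np.array([0.1, 0.0, 0.0]), np.array([0.0, 0.0, 0.0, 1.0]))
    waypoints = interpolate_waypoints(start, end)
    assert np.allclose(waypoints[0][0], start[0])
    assert np.allclose(waypoints[-1][0], end[0])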
def grasp_position_for_open_on_revolute_joint(robot, target_obj, relevant_joint, should_open):
"""
Computes the grasp position for opening or closing a revolute joint.
Args:
robot: the robot object
target_obj: the object to open
relevant_joint: the revolute joint to open
should_open: a boolean indicating whether we are opening or closing
Returns:
Tuple, containing:
offset_grasp_pose_in_world_frame: the grasp pose in the world frame
waypoints: the interpolated waypoints between the start and end poses
approach_direction_in_world_frame: the approach direction in the world frame
grasp_required: a boolean indicating whether a grasp is required for the opening/closing based on which side of the joint we are
            required_yaw_change: the required change in yaw (radians) of the joint to open/close
"""
link_name = relevant_joint.body1.split("/")[-1]
link = target_obj.links[link_name]
# Get the bounding box of the child link.
(
bbox_center_in_world,
bbox_quat_in_world,
_,
bbox_center_in_obj_frame
) = target_obj.get_base_aligned_bbox(link_name=link_name, visual=False)
bbox_quat_in_world = link.get_orientation()
bbox_extent_in_link_frame = np.array(target_obj.native_link_bboxes[link_name]['collision']['axis_aligned']['extent'])
bbox_wrt_origin = T.relative_pose_transform(bbox_center_in_world, bbox_quat_in_world, *link.get_position_orientation())
origin_wrt_bbox = T.invert_pose_transform(*bbox_wrt_origin)
joint_orientation = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(relevant_joint.get_attribute("physics:localRot0"))[[1, 2, 3, 0]]
joint_axis = R.from_quat(joint_orientation).apply([1, 0, 0])
joint_axis /= np.linalg.norm(joint_axis)
origin_towards_bbox = np.array(bbox_wrt_origin[0])
open_direction = np.cross(joint_axis, origin_towards_bbox)
open_direction /= np.linalg.norm(open_direction)
lateral_axis = np.cross(open_direction, joint_axis)
# Match the axes to the canonical axes of the link bb.
lateral_axis_idx = np.argmax(np.abs(lateral_axis))
open_axis_idx = np.argmax(np.abs(open_direction))
joint_axis_idx = np.argmax(np.abs(joint_axis))
assert lateral_axis_idx != open_axis_idx
assert lateral_axis_idx != joint_axis_idx
assert open_axis_idx != joint_axis_idx
canonical_open_direction = np.eye(3)[open_axis_idx]
points_along_open_axis = (
np.array([canonical_open_direction, -canonical_open_direction]) * bbox_extent_in_link_frame[open_axis_idx] / 2
)
current_yaw = relevant_joint.get_state()[0][0]
closed_yaw = relevant_joint.lower_limit
points_along_open_axis_after_rotation = [
_rotate_point_around_axis((point, [0, 0, 0, 1]), bbox_wrt_origin, joint_axis, closed_yaw - current_yaw)[0]
for point in points_along_open_axis
]
open_axis_closer_side_idx, _, _ = _get_closest_point_to_point_in_world_frame(
points_along_open_axis_after_rotation, (bbox_center_in_world, bbox_quat_in_world), robot.get_position()
)
open_axis_closer_side_sign = 1 if open_axis_closer_side_idx == 0 else -1
center_of_selected_surface_along_push_axis = points_along_open_axis[open_axis_closer_side_idx]
# Find the correct side of the lateral axis & go some distance along that direction.
canonical_joint_axis = np.eye(3)[joint_axis_idx]
lateral_away_from_origin = np.eye(3)[lateral_axis_idx] * np.sign(origin_towards_bbox[lateral_axis_idx])
min_lateral_pos_wrt_surface_center = (
lateral_away_from_origin * -np.array(origin_wrt_bbox[0])
- canonical_joint_axis * bbox_extent_in_link_frame[lateral_axis_idx] / 2
)
max_lateral_pos_wrt_surface_center = (
lateral_away_from_origin * bbox_extent_in_link_frame[lateral_axis_idx] / 2
+ canonical_joint_axis * bbox_extent_in_link_frame[lateral_axis_idx] / 2
)
diff_lateral_pos_wrt_surface_center = max_lateral_pos_wrt_surface_center - min_lateral_pos_wrt_surface_center
sampled_lateral_pos_wrt_min = np.random.uniform(
m.REVOLUTE_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS[0] * diff_lateral_pos_wrt_surface_center,
m.REVOLUTE_JOINT_FRACTION_ACROSS_SURFACE_AXIS_BOUNDS[1] * diff_lateral_pos_wrt_surface_center,
)
lateral_pos_wrt_surface_center = min_lateral_pos_wrt_surface_center + sampled_lateral_pos_wrt_min
grasp_position = center_of_selected_surface_along_push_axis + lateral_pos_wrt_surface_center
# Get the appropriate rotation
# grasp_quat_in_bbox_frame = get_quaternion_between_vectors([1, 0, 0], canonical_open_direction * open_axis_closer_side_sign * -1)
grasp_quat_in_bbox_frame = _get_orientation_facing_vector_with_random_yaw(canonical_open_direction * open_axis_closer_side_sign * -1)
# Now apply the grasp offset.
dist_from_grasp_pos = robot.finger_lengths[robot.default_arm] + 0.05
offset_in_bbox_frame = canonical_open_direction * open_axis_closer_side_sign * dist_from_grasp_pos
offset_grasp_pose_in_bbox_frame = (grasp_position + offset_in_bbox_frame, grasp_quat_in_bbox_frame)
offset_grasp_pose_in_world_frame = T.pose_transform(
bbox_center_in_world, bbox_quat_in_world, *offset_grasp_pose_in_bbox_frame
)
# To compute the rotation position, we want to decide how far along the rotation axis we'll go.
desired_yaw = relevant_joint.upper_limit if should_open else relevant_joint.lower_limit
required_yaw_change = desired_yaw - current_yaw
# Now we'll rotate the grasp position around the origin by the desired rotation.
# Note that we use the non-offset position here since the joint can't be pulled all the way to the offset.
grasp_pose_in_bbox_frame = grasp_position, grasp_quat_in_bbox_frame
grasp_pose_in_origin_frame = T.pose_transform(*bbox_wrt_origin, *grasp_pose_in_bbox_frame)
    # Get the arc length and divide it into segments of length m.ROTATION_ARC_SEGMENT_LENGTHS (5 cm)
arc_length = abs(required_yaw_change) * np.linalg.norm(grasp_pose_in_origin_frame[0])
turn_steps = int(ceil(arc_length / m.ROTATION_ARC_SEGMENT_LENGTHS))
targets = []
for i in range(turn_steps):
partial_yaw_change = (i + 1) / turn_steps * required_yaw_change
rotated_grasp_pose_in_bbox_frame = _rotate_point_around_axis(
(offset_grasp_pose_in_bbox_frame[0], offset_grasp_pose_in_bbox_frame[1]), bbox_wrt_origin, joint_axis, partial_yaw_change
)
rotated_grasp_pose_in_world_frame = T.pose_transform(
bbox_center_in_world, bbox_quat_in_world, *rotated_grasp_pose_in_bbox_frame
)
targets.append(rotated_grasp_pose_in_world_frame)
# Compute the approach direction.
approach_direction_in_world_frame = R.from_quat(bbox_quat_in_world).apply(canonical_open_direction * -open_axis_closer_side_sign)
# Decide whether a grasp is required. If approach direction and displacement are similar, no need to grasp.
movement_in_world_frame = np.array(targets[-1][0]) - np.array(offset_grasp_pose_in_world_frame[0])
grasp_required = np.dot(movement_in_world_frame, approach_direction_in_world_frame) < 0
return (
offset_grasp_pose_in_world_frame,
targets,
approach_direction_in_world_frame,
grasp_required,
required_yaw_change,
)
def _get_orientation_facing_vector_with_random_yaw(vector):
"""
Get a quaternion that orients the x-axis of the object to face the given vector and the y and z
axes to be random.
Args:
vector (np.ndarray): The vector to face.
Returns:
np.ndarray: A quaternion representing the orientation.
"""
forward = vector / np.linalg.norm(vector)
rand_vec = np.random.rand(3)
    rand_vec /= np.linalg.norm(rand_vec)
side = np.cross(rand_vec, forward)
    side /= np.linalg.norm(side)
up = np.cross(forward, side)
# assert np.isclose(np.linalg.norm(up), 1, atol=1e-3)
rotmat = np.array([forward, side, up]).T
return R.from_matrix(rotmat).as_quat()
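# Illustrative check (sketch, not part of the original module): the x-axis of the
# returned orientation should point along the requested vector; the yaw about that
# axis is random, so only the forward direction is asserted.
def _example_facing_orientation():
    v = np.array([0.0, 0.0, 1.0])
    quat = _get_orientation_facing_vector_with_random_yaw(v)
    x_axis = R.from_quat(quat).apply([1.0, 0.0, 0.0])
    assert np.allclose(x_axis, v, atol=1e-6)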
def _rotate_point_around_axis(point_wrt_arbitrary_frame, arbitrary_frame_wrt_origin, joint_axis, yaw_change):
"""
Rotate a point around an axis, given the point in an arbitrary frame, the arbitrary frame's pose in the origin frame,
the axis to rotate around, and the amount to rotate by. This is a utility for rotating the grasp position around the
joint axis.
Args:
point_wrt_arbitrary_frame (tuple): The point in the arbitrary frame.
arbitrary_frame_wrt_origin (tuple): The pose of the arbitrary frame in the origin frame.
joint_axis (np.ndarray): The axis to rotate around.
yaw_change (float): The amount to rotate by.
Returns:
tuple: The rotated point in the arbitrary frame.
"""
rotation = R.from_rotvec(joint_axis * yaw_change).as_quat()
origin_wrt_arbitrary_frame = T.invert_pose_transform(*arbitrary_frame_wrt_origin)
pose_in_origin_frame = T.pose_transform(*arbitrary_frame_wrt_origin, *point_wrt_arbitrary_frame)
rotated_pose_in_origin_frame = T.pose_transform([0, 0, 0], rotation, *pose_in_origin_frame)
rotated_pose_in_arbitrary_frame = T.pose_transform(*origin_wrt_arbitrary_frame, *rotated_pose_in_origin_frame)
return rotated_pose_in_arbitrary_frame
def _get_closest_point_to_point_in_world_frame(
vectors_in_arbitrary_frame, arbitrary_frame_to_world_frame, point_in_world
):
"""
Given a set of vectors in an arbitrary frame, find the closest vector to a point in world frame.
Useful for picking between two sides of a joint for grasping.
Args:
vectors_in_arbitrary_frame (list): A list of vectors in the arbitrary frame.
arbitrary_frame_to_world_frame (tuple): The pose of the arbitrary frame in the world frame.
point_in_world (tuple): The point in the world frame.
Returns:
tuple: The index of the closest vector, the closest vector in the arbitrary frame, and the closest vector in the world frame.
"""
vectors_in_world = np.array(
[
T.pose_transform(*arbitrary_frame_to_world_frame, vector, [0, 0, 0, 1])[0]
for vector in vectors_in_arbitrary_frame
]
)
vector_distances_to_point = np.linalg.norm(vectors_in_world - np.array(point_in_world)[None, :], axis=1)
closer_option_idx = np.argmin(vector_distances_to_point)
vector_in_arbitrary_frame = vectors_in_arbitrary_frame[closer_option_idx]
vector_in_world_frame = vectors_in_world[closer_option_idx]
return closer_option_idx, vector_in_arbitrary_frame, vector_in_world_frame
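# Illustrative sketch (not part of the original module): with an identity frame, the
# closest candidate to a world-frame query point is picked by plain Euclidean distance.
def _example_closest_point():
    candidates = [np.array([1.0, 0.0, 0.0]), np.array([-1.0, 0.0, 0.0])]
    identity_frame = (np.zeros(3), np.array([0.0, 0.0, 0.0, 1.0]))
    idx, v_local, v_world = _get_closest_point_to_point_in_world_frame(
        candidates, identity_frame, np.array([2.0, 0.0, 0.0])
    )
    assert idx == 0 and np.allclose(v_world, [1.0, 0.0, 0.0])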
| 24,988 | Python | 49.687627 | 195 | 0.699416 |
StanfordVL/OmniGibson/omnigibson/utils/bddl_utils.py | import json
import bddl
import os
import random
import numpy as np
import networkx as nx
from collections import defaultdict
from bddl.activity import (
get_goal_conditions,
get_ground_goal_state_options,
get_initial_conditions,
)
from bddl.backend_abc import BDDLBackend
from bddl.condition_evaluation import Negation
from bddl.logic_base import BinaryAtomicFormula, UnaryAtomicFormula, AtomicFormula
from bddl.object_taxonomy import ObjectTaxonomy
import omnigibson as og
from omnigibson.macros import gm, create_module_macros
from omnigibson.utils.constants import PrimType
from omnigibson.utils.asset_utils import get_attachment_metalinks, get_all_object_categories, get_all_object_category_models_with_abilities
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.python_utils import Wrapper
from omnigibson.objects.dataset_object import DatasetObject
from omnigibson.robots import BaseRobot
from omnigibson import object_states
from omnigibson.object_states.object_state_base import AbsoluteObjectState, RelativeObjectState
from omnigibson.object_states.factory import _KINEMATIC_STATE_SET, get_system_states
from omnigibson.systems.system_base import is_system_active, get_system
from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.MIN_DYNAMIC_SCALE = 0.5
m.DYNAMIC_SCALE_INCREMENT = 0.1
GOOD_MODELS = {
"jar": {"kijnrj"},
"carton": {"causya", "msfzpz", "sxlklf"},
"hamper": {"drgdfh", "hlgjme", "iofciz", "pdzaca", "ssfvij"},
"hanging_plant": set(),
"hardback": {"esxakn"},
"notebook": {"hwhisw"},
"paperback": {"okcflv"},
"plant_pot": {"ihnfbi", "vhglly", "ygrtaz"},
"pot_plant": {"cvthyv", "dbjcic", "cecdwu"},
"recycling_bin": {"nuoypc"},
"tray": {"gsxbym", "huwhjg", "txcjux", "uekqey", "yqtlhy"},
}
GOOD_BBOXES = {
"basil": {
"dkuhvb": [0.07286304, 0.0545199 , 0.03108144],
},
"basil_jar": {
"swytaw": [0.22969539, 0.19492961, 0.30791675],
},
"bicycle_chain": {
"czrssf": [0.242, 0.012, 0.021],
},
"clam": {
"ihhbfj": [0.078, 0.081, 0.034],
},
"envelope": {
"urcigc": [0.004, 0.06535058, 0.10321216],
},
"mail": {
"azunex": [0.19989018, 0.005, 0.12992871],
"gvivdi": [0.28932137, 0.005, 0.17610794],
"mbbwhn": [0.27069291, 0.005, 0.13114884],
"ojkepk": [0.19092424, 0.005, 0.13252979],
"qpwlor": [0.22472473, 0.005, 0.18983322],
},
"pill_bottle": {
"csvdbe": [0.078, 0.078, 0.109],
"wsasmm": [0.078, 0.078, 0.109],
},
"plant_pot": {
"ihnfbi": [0.24578613, 0.2457865 , 0.18862737],
},
"razor": {
"jocsgp": [0.046, 0.063, 0.204],
},
"recycling_bin": {
"nuoypc": [0.69529409, 0.80712041, 1.07168694],
},
"tupperware": {
"mkstwr": [0.33, 0.33, 0.21],
},
}
BAD_CLOTH_MODELS = {
"bandana": {"wbhliu"},
"curtain": {"ohvomi"},
"cardigan": {"itrkhr"},
"sweatshirt": {"nowqqh"},
"jeans": {"nmvvil", "pvzxyp"},
"pajamas": {"rcgdde"},
"polo_shirt": {"vqbvph"},
"vest": {"girtqm"}, # bddl NOT FIXED
"onesie": {"pbytey"},
"dishtowel": {"ltydgg"},
"dress": {"gtghon"},
"hammock": {'aiftuk', 'fglfga', 'klhkgd', 'lqweda', 'qewdqa'},
'jacket': {'kiiium', 'nogevo', 'remcyk'},
"quilt": {"mksdlu", "prhems"},
"pennant": {"tfnwti"},
"pillowcase": {"dtoahb", "yakvci"},
"rubber_glove": {"leuiso"},
"scarf": {"kclcrj"},
"sock": {"vpafgj"},
"tank_top": {"fzldgi"},
"curtain": {"shbakk"}
}
class UnsampleablePredicate:
def _sample(self, *args, **kwargs):
raise NotImplementedError()
class ObjectStateInsourcePredicate(UnsampleablePredicate, BinaryAtomicFormula):
def _evaluate(self, entity, **kwargs):
# Always returns True
return True
class ObjectStateFuturePredicate(UnsampleablePredicate, UnaryAtomicFormula):
STATE_NAME = "future"
def _evaluate(self, entity, **kwargs):
return not entity.exists
class ObjectStateRealPredicate(UnsampleablePredicate, UnaryAtomicFormula):
STATE_NAME = "real"
def _evaluate(self, entity, **kwargs):
return entity.exists
class ObjectStateUnaryPredicate(UnaryAtomicFormula):
STATE_CLASS = None
STATE_NAME = None
def _evaluate(self, entity, **kwargs):
return entity.get_state(self.STATE_CLASS, **kwargs)
def _sample(self, entity, binary_state, **kwargs):
return entity.set_state(self.STATE_CLASS, binary_state, **kwargs)
class ObjectStateBinaryPredicate(BinaryAtomicFormula):
STATE_CLASS = None
STATE_NAME = None
def _evaluate(self, entity1, entity2, **kwargs):
return entity1.get_state(self.STATE_CLASS, entity2.wrapped_obj, **kwargs) if entity2.exists else False
def _sample(self, entity1, entity2, binary_state, **kwargs):
return entity1.set_state(self.STATE_CLASS, entity2.wrapped_obj, binary_state, **kwargs) if entity2.exists else None
def get_unary_predicate_for_state(state_class, state_name):
return type(
state_class.__name__ + "StateUnaryPredicate",
(ObjectStateUnaryPredicate,),
{"STATE_CLASS": state_class, "STATE_NAME": state_name},
)
def get_binary_predicate_for_state(state_class, state_name):
return type(
state_class.__name__ + "StateBinaryPredicate",
(ObjectStateBinaryPredicate,),
{"STATE_CLASS": state_class, "STATE_NAME": state_name},
)
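# Illustrative sketch (not executed at import): the factories above synthesize
# one predicate class per object state via type(). For example, a "cooked"
# predicate bound to the Cooked state:
#
#     CookedPredicate = get_unary_predicate_for_state(object_states.Cooked, "cooked")
#     # CookedPredicate.STATE_CLASS is object_states.Cooked and
#     # CookedPredicate.STATE_NAME is "cooked"; instances evaluate / sample the
#     # wrapped entity's Cooked state through its get_state / set_state helpers.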
def is_substance_synset(synset):
return "substance" in OBJECT_TAXONOMY.get_abilities(synset)
def get_system_name_by_synset(synset):
system_names = OBJECT_TAXONOMY.get_subtree_substances(synset)
assert len(system_names) == 1, f"Got zero or multiple systems for {synset}: {system_names}"
return system_names[0]
def process_single_condition(condition):
"""
Processes a single BDDL condition
Args:
condition (Condition): Condition to process
Returns:
2-tuple:
- Expression: Condition's expression
- bool: Whether this evaluated condition is positive or negative
"""
if not isinstance(condition.children[0], Negation) and not isinstance(condition.children[0], AtomicFormula):
log.debug(("Skipping over sampling of predicate that is not a negation or an atomic formula"))
return None, None
if isinstance(condition.children[0], Negation):
condition = condition.children[0].children[0]
positive = False
else:
condition = condition.children[0]
positive = True
return condition, positive
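# Illustrative sketch: a parsed condition wrapping Negation(ontop(a, b)) comes
# back as (ontop_condition, False), a bare ontop(a, b) as (ontop_condition, True),
# and anything that is neither a negation nor an atomic formula as (None, None):
#
#     condition, positive = process_single_condition(parsed_condition)
#     if condition is not None:
#         ...  # sample or evaluate the atomic formula with the given polarity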
# TODO: Add remaining predicates.
SUPPORTED_PREDICATES = {
"inside": get_binary_predicate_for_state(object_states.Inside, "inside"),
"nextto": get_binary_predicate_for_state(object_states.NextTo, "nextto"),
"ontop": get_binary_predicate_for_state(object_states.OnTop, "ontop"),
"under": get_binary_predicate_for_state(object_states.Under, "under"),
"touching": get_binary_predicate_for_state(object_states.Touching, "touching"),
"covered": get_binary_predicate_for_state(object_states.Covered, "covered"),
"contains": get_binary_predicate_for_state(object_states.Contains, "contains"),
"saturated": get_binary_predicate_for_state(object_states.Saturated, "saturated"),
"filled": get_binary_predicate_for_state(object_states.Filled, "filled"),
"cooked": get_unary_predicate_for_state(object_states.Cooked, "cooked"),
"burnt": get_unary_predicate_for_state(object_states.Burnt, "burnt"),
"frozen": get_unary_predicate_for_state(object_states.Frozen, "frozen"),
"hot": get_unary_predicate_for_state(object_states.Heated, "hot"),
"open": get_unary_predicate_for_state(object_states.Open, "open"),
"toggled_on": get_unary_predicate_for_state(object_states.ToggledOn, "toggled_on"),
"on_fire": get_unary_predicate_for_state(object_states.OnFire, "on_fire"),
"attached": get_binary_predicate_for_state(object_states.AttachedTo, "attached"),
"overlaid": get_binary_predicate_for_state(object_states.Overlaid, "overlaid"),
"folded": get_unary_predicate_for_state(object_states.Folded, "folded"),
"unfolded": get_unary_predicate_for_state(object_states.Unfolded, "unfolded"),
"draped": get_binary_predicate_for_state(object_states.Draped, "draped"),
"future": ObjectStateFuturePredicate,
"real": ObjectStateRealPredicate,
"insource": ObjectStateInsourcePredicate,
}
KINEMATIC_STATES_BDDL = frozenset([state.__name__.lower() for state in _KINEMATIC_STATE_SET] + ["attached"])
# BEHAVIOR-related
OBJECT_TAXONOMY = ObjectTaxonomy()
BEHAVIOR_ACTIVITIES = sorted(os.listdir(os.path.join(os.path.dirname(bddl.__file__), "activity_definitions")))
def _populate_input_output_objects_systems(og_recipe, input_synsets, output_synsets):
# Map input/output synsets into input/output objects and systems.
for synsets, obj_key, system_key in zip((input_synsets, output_synsets), ("input_objects", "output_objects"), ("input_systems", "output_systems")):
for synset, count in synsets.items():
assert OBJECT_TAXONOMY.is_leaf(synset), f"Synset {synset} must be a leaf node in the taxonomy!"
if is_substance_synset(synset):
og_recipe[system_key].append(get_system_name_by_synset(synset))
else:
obj_categories = OBJECT_TAXONOMY.get_categories(synset)
assert len(obj_categories) == 1, f"Object synset {synset} must map to exactly one object category! Now: {obj_categories}."
og_recipe[obj_key][obj_categories[0]] = count
# Assert only one of output_objects or output_systems is not None
assert len(og_recipe["output_objects"]) == 0 or len(og_recipe["output_systems"]) == 0, \
"Recipe can only generate output objects or output systems, but not both!"
def _populate_input_output_states(og_recipe, input_states, output_states):
# Apply post-processing for input/output states if specified
for synsets_to_states, states_key in zip((input_states, output_states), ("input_states", "output_states")):
if synsets_to_states is None:
continue
for synsets, states in synsets_to_states.items():
# For unary/binary states, synsets is a single synset or a comma-separated pair of synsets, respectively
synset_split = synsets.split(",")
if len(synset_split) == 1:
first_synset = synset_split[0]
second_synset = None
else:
first_synset, second_synset = synset_split
# Assert the first synset is an object because the systems don't have any states.
assert OBJECT_TAXONOMY.is_leaf(first_synset), f"Input/output state synset {first_synset} must be a leaf node in the taxonomy!"
assert not is_substance_synset(first_synset), f"Input/output state synset {first_synset} must be applied to an object, not a substance!"
obj_categories = OBJECT_TAXONOMY.get_categories(first_synset)
assert len(obj_categories) == 1, f"Input/output state synset {first_synset} must map to exactly one object category! Now: {obj_categories}."
first_obj_category = obj_categories[0]
if second_synset is None:
# Unary states for the first synset
for state_type, state_value in states:
state_class = SUPPORTED_PREDICATES[state_type].STATE_CLASS
assert issubclass(state_class, AbsoluteObjectState), f"Input/output state type {state_type} must be a unary state!"
# Example: (Cooked, True)
og_recipe[states_key][first_obj_category]["unary"].append((state_class, state_value))
else:
assert OBJECT_TAXONOMY.is_leaf(second_synset), f"Input/output state synset {second_synset} must be a leaf node in the taxonomy!"
obj_categories = OBJECT_TAXONOMY.get_categories(second_synset)
if is_substance_synset(second_synset):
second_obj_category = get_system_name_by_synset(second_synset)
is_substance = True
else:
obj_categories = OBJECT_TAXONOMY.get_categories(second_synset)
assert len(obj_categories) == 1, f"Input/output state synset {second_synset} must map to exactly one object category! Now: {obj_categories}."
second_obj_category = obj_categories[0]
is_substance = False
for state_type, state_value in states:
state_class = SUPPORTED_PREDICATES[state_type].STATE_CLASS
assert issubclass(state_class, RelativeObjectState), f"Input/output state type {state_type} must be a binary state!"
assert is_substance == (state_class in get_system_states()), f"Input/output state type {state_type} system state inconsistency found!"
if is_substance:
# Non-kinematic binary states, e.g. Covered, Saturated, Filled, Contains.
# Example: (Covered, "sesame_seed", True)
og_recipe[states_key][first_obj_category]["binary_system"].append(
(state_class, second_obj_category, state_value))
else:
# Kinematic binary states w.r.t. the second object.
# Example: (OnTop, "raw_egg", True)
assert states_key != "output_states", f"Output state type {state_type} can only be used in input states!"
og_recipe[states_key][first_obj_category]["binary_object"].append(
(state_class, second_obj_category, state_value))
def _populate_filter_categories(og_recipe, filter_name, synsets):
# Map synsets to categories.
if synsets is not None:
og_recipe[f"{filter_name}_categories"] = set()
for synset in synsets:
assert OBJECT_TAXONOMY.is_leaf(synset), f"Synset {synset} must be a leaf node in the taxonomy!"
assert not is_substance_synset(synset), f"Synset {synset} must be applied to an object, not a substance!"
for category in OBJECT_TAXONOMY.get_categories(synset):
og_recipe[f"{filter_name}_categories"].add(category)
def translate_bddl_recipe_to_og_recipe(
name,
input_synsets,
output_synsets,
input_states=None,
output_states=None,
fillable_synsets=None,
heatsource_synsets=None,
timesteps=None,
):
"""
Translate a BDDL recipe to an OG recipe.
Args:
name (str): Name of the recipe
input_synsets (dict): Maps synsets to number of instances required for the recipe
output_synsets (dict): Maps synsets to number of instances to be spawned in the container when the recipe executes
input_states (dict or None): Maps input synsets to states that must be satisfied for the recipe to execute,
or None if no states are required
        output_states (dict or None): Maps output synsets to states that should be set on the spawned outputs when
            the recipe executes, or None if no states are required
fillable_synsets (None or set of str): If specified, set of fillable synsets which are allowed for this recipe.
If None, any fillable is allowed
heatsource_synsets (None or set of str): If specified, set of heatsource synsets which are allowed for this recipe.
If None, any heatsource is allowed
timesteps (None or int): Number of subsequent heating steps required for the recipe to execute. If None,
it will be set to be 1, i.e.: instantaneous execution
"""
og_recipe = {
"name": name,
# Maps object categories to number of instances required for the recipe
"input_objects": dict(),
# List of system names required for the recipe
"input_systems": list(),
# Maps object categories to number of instances to be spawned in the container when the recipe executes
"output_objects": dict(),
# List of system names to be spawned in the container when the recipe executes. Currently the length is 1.
"output_systems": list(),
# Maps object categories to ["unary", "bianry_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute
"input_states": defaultdict(lambda: defaultdict(list)),
# Maps object categories to ["unary", "bianry_system"] to a list of states that should be set after the output objects are spawned
"output_states": defaultdict(lambda: defaultdict(list)),
# Set of fillable categories which are allowed for this recipe
"fillable_categories": None,
# Set of heatsource categories which are allowed for this recipe
"heatsource_categories": None,
# Number of subsequent heating steps required for the recipe to execute
"timesteps": timesteps if timesteps is not None else 1,
}
_populate_input_output_objects_systems(og_recipe=og_recipe, input_synsets=input_synsets, output_synsets=output_synsets)
_populate_input_output_states(og_recipe=og_recipe, input_states=input_states, output_states=output_states)
_populate_filter_categories(og_recipe=og_recipe, filter_name="fillable", synsets=fillable_synsets)
_populate_filter_categories(og_recipe=og_recipe, filter_name="heatsource", synsets=heatsource_synsets)
return og_recipe
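# Usage sketch (hypothetical synsets and counts, not a real recipe definition):
#
#     og_recipe = translate_bddl_recipe_to_og_recipe(
#         name="beef_stew",
#         input_synsets={"beef.n.01": 1, "water.n.06": 1},
#         output_synsets={"stew.n.01": 1},
#         fillable_synsets={"stockpot.n.01"},
#         heatsource_synsets={"stove.n.01"},
#         timesteps=5,
#     )
#     # og_recipe["input_objects"] maps object categories to counts, while
#     # og_recipe["input_systems"] lists substance system names.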
def translate_bddl_washer_rule_to_og_washer_rule(conditions):
"""
Translate BDDL washer rule to OG washer rule.
Args:
conditions (dict): Dictionary mapping the synset of ParticleSystem (str) to None or list of synsets of
ParticleSystem (str). None represents "never", empty list represents "always", or non-empty list represents
at least one of the systems in the list needs to be present in the washer for the key system to be removed.
E.g. "rust.n.01" -> None: "never remove rust.n.01 from the washer"
E.g. "dust.n.01" -> []: "always remove dust.n.01 from the washer"
E.g. "cooking_oil.n.01" -> ["sodium_carbonate.n.01", "vinegar.n.01"]: "remove cooking_oil.n.01 from the
washer if either sodium_carbonate.n.01 or vinegar.n.01 is present"
For keys not present in the dictionary, the default is []: "always remove"
Returns:
dict: Dictionary mapping the system name (str) to None or list of system names (str). None represents "never",
empty list represents "always", or non-empty list represents at least one of the systems in the list needs
to be present in the washer for the key system to be removed.
"""
og_washer_rule = dict()
for solute, solvents in conditions.items():
assert OBJECT_TAXONOMY.is_leaf(solute), f"Synset {solute} must be a leaf node in the taxonomy!"
assert is_substance_synset(solute), f"Synset {solute} must be a substance synset!"
solute_name = get_system_name_by_synset(solute)
if solvents is None:
og_washer_rule[solute_name] = None
else:
solvent_names = []
for solvent in solvents:
assert OBJECT_TAXONOMY.is_leaf(solvent), f"Synset {solvent} must be a leaf node in the taxonomy!"
assert is_substance_synset(solvent), f"Synset {solvent} must be a substance synset!"
solvent_name = get_system_name_by_synset(solvent)
solvent_names.append(solvent_name)
og_washer_rule[solute_name] = solvent_names
return og_washer_rule
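# Usage sketch mirroring the docstring examples above (synsets hypothetical;
# the resulting system names depend on the taxonomy lookup):
#
#     og_rule = translate_bddl_washer_rule_to_og_washer_rule({
#         "rust.n.01": None,                                              # never removed
#         "dust.n.01": [],                                                # always removed
#         "cooking_oil.n.01": ["sodium_carbonate.n.01", "vinegar.n.01"],  # conditionally removed
#     })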
class OmniGibsonBDDLBackend(BDDLBackend):
def get_predicate_class(self, predicate_name):
return SUPPORTED_PREDICATES[predicate_name]
class BDDLEntity(Wrapper):
"""
    Thin wrapper class that wraps an object or system if it exists, or nothing if it does not exist. Will
    dynamically reference an object / system as it becomes real in the sim
"""
def __init__(
self,
bddl_inst,
entity=None,
):
"""
Args:
bddl_inst (str): BDDL synset instance of the entity, e.g.: "almond.n.01_1"
entity (None or DatasetObject or BaseSystem): If specified, the BDDL entity to wrap. If not
specified, will initially wrap nothing, but may dynamically reference an actual object or system
if it exists in the future
"""
# Store synset and other info, and pass entity internally
self.bddl_inst = bddl_inst
self.synset = "_".join(self.bddl_inst.split("_")[:-1])
self.is_system = is_substance_synset(self.synset)
# Infer the correct category to assign
self.og_categories = OBJECT_TAXONOMY.get_subtree_substances(self.synset) \
if self.is_system else OBJECT_TAXONOMY.get_subtree_categories(self.synset)
super().__init__(obj=entity)
@property
def name(self):
"""
Returns:
None or str: Name of this entity, if it exists, else None
"""
if self.exists:
return self.og_categories[0] if self.is_system else self.wrapped_obj.name
else:
return None
@property
def exists(self):
"""
Checks whether the entity referenced by @synset exists
Returns:
bool: Whether the entity referenced by @synset exists
"""
return self.wrapped_obj is not None
def set_entity(self, entity):
"""
Sets the internal entity, overriding any if it already exists
Args:
entity (BaseSystem or BaseObject): Entity to set internally
"""
self.wrapped_obj = entity
def clear_entity(self):
"""
Clears the internal entity, if any
"""
self.wrapped_obj = None
def get_state(self, state, *args, **kwargs):
"""
Helper function to grab wrapped entity's state @state
Args:
state (BaseObjectState): State whose get_value() should be called
*args (tuple): Any arguments to pass to getter, in order
**kwargs (dict): Any keyword arguments to pass to getter, in order
Returns:
any: Returned value(s) from @state if self.wrapped_obj exists (i.e.: not None), else False
"""
return self.wrapped_obj.states[state].get_value(*args, **kwargs) if self.exists else False
def set_state(self, state, *args, **kwargs):
"""
Helper function to set wrapped entity's state @state. Note: Should only be called if the entity exists!
Args:
state (BaseObjectState): State whose set_value() should be called
            *args (tuple): Any arguments to pass to setter, in order
            **kwargs (dict): Any keyword arguments to pass to setter, in order
Returns:
any: Returned value(s) from @state if self.wrapped_obj exists (i.e.: not None)
"""
assert self.exists, \
f"Cannot call set_state() for BDDLEntity {self.synset} when the entity does not exist!"
return self.wrapped_obj.states[state].set_value(*args, **kwargs)
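# Usage sketch (hypothetical object, not executed at import): a BDDLEntity can
# start empty (e.g. for "future" objects) and be populated once the entity
# becomes real in the sim:
#
#     apple = BDDLEntity(bddl_inst="apple.n.01_1")
#     assert not apple.exists                 # get_state(...) returns False here
#     apple.set_entity(apple_obj)             # apple_obj: a DatasetObject already in the scene
#     cooked = apple.get_state(object_states.Cooked)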
class BDDLSampler:
def __init__(
self,
env,
activity_conditions,
object_scope,
backend,
debug=False,
):
# Store internal variables from inputs
self._env = env
self._scene_model = self._env.scene.scene_model if isinstance(self._env.scene, InteractiveTraversableScene) else None
self._agent = self._env.robots[0]
if debug:
gm.DEBUG = True
self._backend = backend
self._activity_conditions = activity_conditions
self._object_scope = object_scope
self._object_instance_to_synset = {
obj_inst: obj_cat
for obj_cat in self._activity_conditions.parsed_objects
for obj_inst in self._activity_conditions.parsed_objects[obj_cat]
}
self._substance_instances = {obj_inst for obj_inst in self._object_scope.keys() if
is_substance_synset(self._object_instance_to_synset[obj_inst])}
# Initialize other variables that will be filled in later
self._room_type_to_object_instance = None # dict
self._inroom_object_instances = None # set of str
self._object_sampling_orders = None # dict mapping str to list of str
self._sampled_objects = None # set of BaseObject
self._future_obj_instances = None # set of str
self._inroom_object_conditions = None # list of (condition, positive) tuple
self._inroom_object_scope_filtered_initial = None # dict mapping str to BDDLEntity
self._attached_objects = defaultdict(set) # dict mapping str to set of str
def sample(self, validate_goal=False):
"""
Run sampling for this BEHAVIOR task
Args:
validate_goal (bool): Whether the goal should be validated or not
Returns:
2-tuple:
- bool: Whether sampling was successful or not
- None or str: None if successful, otherwise the associated error message
"""
log.info("Sampling task...")
# Reject scenes with missing non-sampleable objects
# Populate object_scope with sampleable objects and the robot
accept_scene, feedback = self._prepare_scene_for_sampling()
if not accept_scene:
return accept_scene, feedback
# Sample objects to satisfy initial conditions
accept_scene, feedback = self._sample_all_conditions(validate_goal=validate_goal)
if not accept_scene:
return accept_scene, feedback
log.info("Sampling succeeded!")
return True, None
def _sample_all_conditions(self, validate_goal=False):
"""
        Samples all initial (and optionally goal) conditions for this BEHAVIOR task
Args:
validate_goal (bool): Whether the goal should be validated or not
Returns:
2-tuple:
- bool: Whether sampling was successful or not
- None or str: None if successful, otherwise the associated error message
"""
# Auto-initialize all sampleable objects
with og.sim.playing():
self._env.scene.reset()
error_msg = self._sample_initial_conditions()
if error_msg:
log.error(error_msg)
return False, error_msg
if validate_goal:
error_msg = self._sample_goal_conditions()
if error_msg:
log.error(error_msg)
return False, error_msg
error_msg = self._sample_initial_conditions_final()
if error_msg:
log.error(error_msg)
return False, error_msg
self._env.scene.update_initial_state()
return True, None
def _prepare_scene_for_sampling(self):
"""
Runs sanity checks for the current scene for the given BEHAVIOR task
Returns:
2-tuple:
                - bool: Whether the generated scene activity should be accepted or not
                - None or str: None if successful, otherwise the associated error message
"""
error_msg = self._parse_inroom_object_room_assignment()
if error_msg:
log.error(error_msg)
return False, error_msg
error_msg = self._parse_attached_states()
if error_msg:
log.error(error_msg)
return False, error_msg
error_msg = self._build_sampling_order()
if error_msg:
log.error(error_msg)
return False, error_msg
error_msg = self._build_inroom_object_scope()
if error_msg:
log.error(error_msg)
return False, error_msg
error_msg = self._import_sampleable_objects()
if error_msg:
log.error(error_msg)
return False, error_msg
self._object_scope["agent.n.01_1"] = BDDLEntity(bddl_inst="agent.n.01_1", entity=self._agent)
return True, None
def _parse_inroom_object_room_assignment(self):
"""
Infers which rooms each object is assigned to
"""
self._room_type_to_object_instance = dict()
self._inroom_object_instances = set()
for cond in self._activity_conditions.parsed_initial_conditions:
if cond[0] == "inroom":
obj_inst, room_type = cond[1], cond[2]
obj_synset = self._object_instance_to_synset[obj_inst]
abilities = OBJECT_TAXONOMY.get_abilities(obj_synset)
if "sceneObject" not in abilities:
# Invalid room assignment
return f"You have assigned room type for [{obj_synset}], but [{obj_synset}] is sampleable. " \
f"Only non-sampleable (scene) objects can have room assignment."
if self._scene_model is not None and room_type not in og.sim.scene.seg_map.room_sem_name_to_ins_name:
# Missing room type
return f"Room type [{room_type}] missing in scene [{self._scene_model}]."
if room_type not in self._room_type_to_object_instance:
self._room_type_to_object_instance[room_type] = []
self._room_type_to_object_instance[room_type].append(obj_inst)
if obj_inst in self._inroom_object_instances:
# Duplicate room assignment
return f"Object [{obj_inst}] has more than one room assignment"
self._inroom_object_instances.add(obj_inst)
def _parse_attached_states(self):
"""
Infers which objects are attached to which other objects.
If a category-level attachment is specified, it will be expanded to all instances of that category.
E.g. if the goal condition requires corks to be attached to bottles, every cork needs to be able to
attach to every bottle.
"""
for cond in self._activity_conditions.parsed_initial_conditions:
if cond[0] == "attached":
obj_inst, parent_inst = cond[1], cond[2]
if obj_inst not in self._object_scope or parent_inst not in self._object_scope:
return f"Object [{obj_inst}] or parent [{parent_inst}] in attached initial condition not found in object scope"
self._attached_objects[obj_inst].add(parent_inst)
ground_attached_conditions = []
conditions_to_check = self._activity_conditions.parsed_goal_conditions.copy()
while conditions_to_check:
new_conditions_to_check = []
for cond in conditions_to_check:
if cond[0] == "attached":
ground_attached_conditions.append(cond)
else:
new_conditions_to_check.extend([ele for ele in cond if isinstance(ele, list)])
conditions_to_check = new_conditions_to_check
for cond in ground_attached_conditions:
obj_inst, parent_inst = cond[1].lstrip("?"), cond[2].lstrip("?")
if obj_inst in self._object_scope:
obj_insts = [obj_inst]
elif obj_inst in self._activity_conditions.parsed_objects:
obj_insts = self._activity_conditions.parsed_objects[obj_inst]
else:
return f"Object [{obj_inst}] in attached goal condition not found in object scope or parsed objects"
if parent_inst in self._object_scope:
parent_insts = [parent_inst]
elif parent_inst in self._activity_conditions.parsed_objects:
parent_insts = self._activity_conditions.parsed_objects[parent_inst]
else:
return f"Parent [{parent_inst}] in attached goal condition not found in object scope or parsed objects"
for obj_inst in obj_insts:
for parent_inst in parent_insts:
self._attached_objects[obj_inst].add(parent_inst)
def _build_sampling_order(self):
"""
Sampling orders is a list of lists: [[batch_1_inst_1, ... batch_1_inst_N], [batch_2_inst_1, batch_2_inst_M], ...]
Sampling should happen for batch 1 first, then batch 2, so on and so forth
Example: OnTop(plate, table) should belong to batch 1, and OnTop(apple, plate) should belong to batch 2
"""
unsampleable_conditions = []
sampling_groups = {group: [] for group in ("kinematic", "particle", "unary")}
self._object_sampling_conditions = {group: [] for group in ("kinematic", "particle", "unary")}
self._object_sampling_orders = {group: [] for group in ("kinematic", "particle", "unary")}
self._inroom_object_conditions = []
# First, sort initial conditions into kinematic, particle and unary groups
# bddl.condition_evaluation.HEAD, each with one child.
# This child is either a ObjectStateUnaryPredicate/ObjectStateBinaryPredicate or
# a Negation of a ObjectStateUnaryPredicate/ObjectStateBinaryPredicate
for condition in get_initial_conditions(self._activity_conditions, self._backend, self._object_scope):
condition, positive = process_single_condition(condition)
if condition is None:
continue
# Sampled conditions must always be positive
# Non-positive (e.g.: NOT onTop) is not restrictive enough for sampling
if condition.STATE_NAME in KINEMATIC_STATES_BDDL and not positive:
return "Initial condition has negative kinematic conditions: {}".format(condition.body)
# Store any unsampleable conditions separately
if isinstance(condition, UnsampleablePredicate):
unsampleable_conditions.append(condition)
continue
# Infer the group the condition and its object instances belong to
# (a) Kinematic (binary) conditions, where (ent0, ent1) are both objects
# (b) Particle (binary) conditions, where (ent0, ent1) are (object, substance)
            # (c) Unary conditions, where (ent0,) is an object
# Binary conditions have length 2: (ent0, ent1)
if len(condition.body) == 2:
group = "particle" if condition.body[1] in self._substance_instances else "kinematic"
else:
assert len(condition.body) == 1, \
f"Got invalid parsed initial condition; body length should either be 2 or 1. " \
f"Got body: {condition.body} for condition: {condition}"
group = "unary"
sampling_groups[group].append(condition.body)
self._object_sampling_conditions[group].append((condition, positive))
# If the condition involves any non-sampleable object (e.g.: furniture), it's a non-sampleable condition
# This means that there's no ordering constraint in terms of sampling, because we know the, e.g., furniture
# object already exists in the scene and is placed, so these specific conditions can be sampled without
# any dependencies
if len(self._inroom_object_instances.intersection(set(condition.body))) > 0:
self._inroom_object_conditions.append((condition, positive))
# Now, sort each group, ignoring the futures (since they don't get sampled)
# First handle kinematics, then particles, then unary
# Start with the non-sampleable objects as the first sampled set, then infer recursively
cur_batch = self._inroom_object_instances
while len(cur_batch) > 0:
next_batch = set()
for cur_batch_inst in cur_batch:
inst_batch = set()
for condition, _ in self._object_sampling_conditions["kinematic"]:
if condition.body[1] == cur_batch_inst:
inst_batch.add(condition.body[0])
next_batch.add(condition.body[0])
if len(inst_batch) > 0:
self._object_sampling_orders["kinematic"].append(inst_batch)
cur_batch = next_batch
# Now parse particles -- simply unordered, since particle systems shouldn't impact each other
self._object_sampling_orders["particle"].append({cond[0] for cond in sampling_groups["particle"]})
sampled_particle_entities = {cond[1] for cond in sampling_groups["particle"]}
# Finally, parse unaries -- this is simply unordered, since it is assumed that unary predicates do not
# affect each other
self._object_sampling_orders["unary"].append({cond[0] for cond in sampling_groups["unary"]})
# Aggregate future objects and any unsampleable obj instances
# Unsampleable obj instances are strictly a superset of future obj instances
unsampleable_obj_instances = {cond.body[-1] for cond in unsampleable_conditions}
self._future_obj_instances = {cond.body[0] for cond in unsampleable_conditions if isinstance(cond, ObjectStateFuturePredicate)}
nonparticle_entities = set(self._object_scope.keys()) - self._substance_instances
# Sanity check kinematic objects -- any non-system must be kinematically sampled
remaining_kinematic_entities = nonparticle_entities - unsampleable_obj_instances - \
self._inroom_object_instances - set.union(*(self._object_sampling_orders["kinematic"] + [set()]))
# Possibly remove the agent entity if we're in an empty scene -- i.e.: no kinematic sampling needed for the
# agent
if self._scene_model is None:
remaining_kinematic_entities -= {"agent.n.01_1"}
if len(remaining_kinematic_entities) != 0:
return f"Some objects do not have any kinematic condition defined for them in the initial conditions: " \
f"{', '.join(remaining_kinematic_entities)}"
# Sanity check particle systems -- any non-future system must be sampled as part of particle groups
remaining_particle_entities = self._substance_instances - unsampleable_obj_instances - sampled_particle_entities
if len(remaining_particle_entities) != 0:
return f"Some systems do not have any particle condition defined for them in the initial conditions: " \
f"{', '.join(remaining_particle_entities)}"
def _build_inroom_object_scope(self):
"""
Store simulator object options for non-sampleable objects in self.inroom_object_scope
{
"living_room": {
"table1": {
"living_room_0": [URDFObject, URDFObject, URDFObject],
"living_room_1": [URDFObject]
},
"table2": {
"living_room_0": [URDFObject, URDFObject],
"living_room_1": [URDFObject, URDFObject]
},
"chair1": {
"living_room_0": [URDFObject],
"living_room_1": [URDFObject]
},
}
}
"""
room_type_to_scene_objs = {}
for room_type in self._room_type_to_object_instance:
room_type_to_scene_objs[room_type] = {}
for obj_inst in self._room_type_to_object_instance[room_type]:
room_type_to_scene_objs[room_type][obj_inst] = {}
obj_synset = self._object_instance_to_synset[obj_inst]
# We allow burners to be used as if they are stoves
# No need to safeguard check for subtree_substances because inroom objects will never be substances
categories = OBJECT_TAXONOMY.get_subtree_categories(obj_synset)
# Grab all models that fully support all abilities for the corresponding category
valid_models = {cat: set(get_all_object_category_models_with_abilities(
cat, OBJECT_TAXONOMY.get_abilities(OBJECT_TAXONOMY.get_synset_from_category(cat))))
for cat in categories}
valid_models = {cat: (models if cat not in GOOD_MODELS else models.intersection(GOOD_MODELS[cat])) - BAD_CLOTH_MODELS.get(cat, set()) for cat, models in valid_models.items()}
valid_models = {cat: self._filter_model_choices_by_attached_states(models, cat, obj_inst) for cat, models in valid_models.items()}
room_insts = [None] if self._scene_model is None else og.sim.scene.seg_map.room_sem_name_to_ins_name[room_type]
for room_inst in room_insts:
# A list of scene objects that satisfy the requested categories
room_objs = og.sim.scene.object_registry("in_rooms", room_inst, default_val=[])
scene_objs = [obj for obj in room_objs if obj.category in categories and obj.model in valid_models[obj.category]]
if len(scene_objs) != 0:
room_type_to_scene_objs[room_type][obj_inst][room_inst] = scene_objs
error_msg = self._consolidate_room_instance(room_type_to_scene_objs, "initial_pre-sampling")
if error_msg:
return error_msg
self._inroom_object_scope = room_type_to_scene_objs
def _filter_object_scope(self, input_object_scope, conditions, condition_type):
"""
Filters the object scope based on given @input_object_scope, @conditions, and @condition_type
Args:
input_object_scope (dict):
conditions (list): List of conditions to filter scope with, where each list entry is
a tuple of (condition, positive), where @positive is True if the condition has a positive
evaluation.
condition_type (str): What type of condition to sample, e.g., "initial"
Returns:
2-tuple:
- dict: Filtered object scope
- list of str: The name of children object(s) that have the highest proportion of kinematic sampling
failures
"""
filtered_object_scope = {}
# Maps child obj name (SCOPE name) to parent obj name (OBJECT name) to T / F,
# ie: if the kinematic relationship was sampled successfully
problematic_objs = defaultdict(dict)
for room_type in input_object_scope:
filtered_object_scope[room_type] = {}
for scene_obj in input_object_scope[room_type]:
filtered_object_scope[room_type][scene_obj] = {}
for room_inst in input_object_scope[room_type][scene_obj]:
# These are a list of candidate simulator objects that need sampling test
for obj in input_object_scope[room_type][scene_obj][room_inst]:
# Temporarily set object_scope to point to this candidate object
self._object_scope[scene_obj] = BDDLEntity(bddl_inst=scene_obj, entity=obj)
success = True
# If this candidate object is not involved in any conditions,
# success will be True by default and this object will qualify
parent_obj_name = obj.name
conditions_to_sample = []
for condition, positive in conditions:
# Sample positive kinematic conditions that involve this candidate object
if condition.STATE_NAME in KINEMATIC_STATES_BDDL and positive and scene_obj in condition.body:
child_scope_name = condition.body[0]
entity = self._object_scope[child_scope_name]
conditions_to_sample.append((condition, positive, entity, child_scope_name))
# If we're sampling kinematics, sort children based on (a) whether they are cloth or not, and
# then (b) their AABB, so that first all rigid objects are sampled before all cloth objects,
# and within each group the larger objects are sampled first. This is needed because rigid
# objects currently don't detect collisions with cloth objects (rigid_obj.states[ContactBodies]
# is empty even when a cloth object is in contact with it).
rigid_conditions = [c for c in conditions_to_sample if c[2].prim_type != PrimType.CLOTH]
cloth_conditions = [c for c in conditions_to_sample if c[2].prim_type == PrimType.CLOTH]
conditions_to_sample = (
                            list(reversed(sorted(rigid_conditions, key=lambda x: np.prod(x[2].aabb_extent)))) +
                            list(reversed(sorted(cloth_conditions, key=lambda x: np.prod(x[2].aabb_extent))))
)
# Sample!
for condition, positive, entity, child_scope_name in conditions_to_sample:
kwargs = dict()
# Reset if we're sampling a kinematic state
if condition.STATE_NAME in {"inside", "ontop", "under"}:
kwargs["reset_before_sampling"] = True
elif condition.STATE_NAME in {"attached"}:
kwargs["bypass_alignment_checking"] = True
kwargs["check_physics_stability"] = True
kwargs["can_joint_break"] = False
success = condition.sample(binary_state=positive, **kwargs)
log_msg = " ".join(
[
f"{condition_type} kinematic condition sampling",
room_type,
scene_obj,
room_inst,
parent_obj_name,
condition.STATE_NAME,
str(condition.body),
str(success),
]
)
log.info(log_msg)
# Record the result for the child object
assert parent_obj_name not in problematic_objs[child_scope_name], \
f"Multiple kinematic relationships attempted for pair {condition.body}"
problematic_objs[child_scope_name][parent_obj_name] = success
# If any condition fails for this candidate object, skip
if not success:
break
# If this candidate object fails, move on to the next candidate object
if not success:
continue
if room_inst not in filtered_object_scope[room_type][scene_obj]:
filtered_object_scope[room_type][scene_obj][room_inst] = []
filtered_object_scope[room_type][scene_obj][room_inst].append(obj)
# Compute most problematic objects
if len(problematic_objs) == 0:
max_problematic_objs = []
else:
problematic_objs_by_proportion = defaultdict(list)
for child_scope_name, parent_obj_names in problematic_objs.items():
problematic_objs_by_proportion[np.mean(list(parent_obj_names.values()))].append(child_scope_name)
max_problematic_objs = problematic_objs_by_proportion[min(problematic_objs_by_proportion.keys())]
return filtered_object_scope, max_problematic_objs
def _consolidate_room_instance(self, filtered_object_scope, condition_type):
"""
Consolidates room instances
Args:
filtered_object_scope (dict): Filtered object scope
condition_type (str): What type of condition to sample, e.g., "initial"
Returns:
None or str: Error message, if any
"""
for room_type in filtered_object_scope:
# For each room_type, filter in room_inst that has successful
# sampling options for all obj_inst in this room_type
room_inst_satisfied = set.intersection(
*[
set(filtered_object_scope[room_type][obj_inst].keys())
for obj_inst in filtered_object_scope[room_type]
]
)
if len(room_inst_satisfied) == 0:
error_msg = "{}: Room type [{}] of scene [{}] do not contain or cannot sample all the objects needed.\nThe following are the possible room instances for each object, the intersection of which is an empty set.\n".format(
condition_type, room_type, self._scene_model
)
for obj_inst in filtered_object_scope[room_type]:
error_msg += (
"{}: ".format(obj_inst) + ", ".join(filtered_object_scope[room_type][obj_inst].keys()) + "\n"
)
return error_msg
for obj_inst in filtered_object_scope[room_type]:
filtered_object_scope[room_type][obj_inst] = {
key: val
for key, val in filtered_object_scope[room_type][obj_inst].items()
if key in room_inst_satisfied
}
def _filter_model_choices_by_attached_states(self, model_choices, category, obj_inst):
# If obj_inst is a child object that depends on a parent object that has been imported or exists in the scene,
# we filter in only models that match the parent object's attachment metalinks.
if obj_inst in self._attached_objects:
parent_insts = self._attached_objects[obj_inst]
parent_objects = []
for parent_inst in parent_insts:
# If parent_inst is not an inroom object, it must be a non-sampleable object that has already been imported.
# Grab it from the object_scope
if parent_inst not in self._inroom_object_instances:
assert self._object_scope[parent_inst] is not None
parent_objects.append([self._object_scope[parent_inst].wrapped_obj])
# If parent_inst is an inroom object, it can refer to multiple objects in the scene in different rooms.
# We gather all of them and require that the model choice supports attachment to at least one of them.
else:
for _, parent_inst_to_parent_objs in self._inroom_object_scope.items():
if parent_inst in parent_inst_to_parent_objs:
parent_objects.append(sum(parent_inst_to_parent_objs[parent_inst].values(), []))
# Help function to check if a child object can attach to a parent object
def can_attach(child_attachment_links, parent_attachment_links):
for child_link_name in child_attachment_links:
child_category = child_link_name.split("_")[1]
if child_category.endswith("F"):
continue
assert child_category.endswith("M")
parent_category = child_category[:-1] + "F"
for parent_link_name in parent_attachment_links:
if parent_category in parent_link_name:
return True
return False
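            # Naming-convention sketch (hypothetical link names): attachment
            # metalinks encode a gendered pairing in their second "_" token, so a
            # child link whose category token ends in "M" (male) matches a parent
            # link containing the same token with an "F" (female) suffix:
            #
            #     can_attach(["attachment_hookM_0_link"], ["attachment_hookF_0_link"])  # True
            #     can_attach(["attachment_hookM_0_link"], ["attachment_nailF_0_link"])  # False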
# Filter out models that don't support the attached states
new_model_choices = set()
for model_choice in model_choices:
child_attachment_links = get_attachment_metalinks(category, model_choice)
# The child model choice needs to be able to attach to all parent instances.
# For in-room parent instances, there might be multiple parent objects (e.g. different wall nails),
# and the child object needs to be able to attach to at least one of them.
if all(
any(
can_attach(child_attachment_links, get_attachment_metalinks(parent_obj.category, parent_obj.model))
for parent_obj in parent_objs_per_inst
)
for parent_objs_per_inst in parent_objects):
new_model_choices.add(model_choice)
return new_model_choices
        # If obj_inst is a parent object that other objects depend on, we filter in only models that have at least
        # one attachment metalink.
elif any(obj_inst in parents for parents in self._attached_objects.values()):
# Filter out models that don't support the attached states
new_model_choices = set()
for model_choice in model_choices:
if len(get_attachment_metalinks(category, model_choice)) > 0:
new_model_choices.add(model_choice)
return new_model_choices
# If neither of the above cases apply, we don't need to filter the model choices
else:
return model_choices
def _import_sampleable_objects(self):
"""
Import all objects that can be sampled
        """
assert og.sim.is_stopped(), "Simulator should be stopped when importing sampleable objects"
# Move the robot object frame to a far away location, similar to other newly imported objects below
self._agent.set_position_orientation([300, 300, 300], [0, 0, 0, 1])
self._sampled_objects = set()
num_new_obj = 0
# Only populate self.object_scope for sampleable objects
available_categories = set(get_all_object_categories())
# Attached states introduce dependencies among objects during import time.
# For example, when importing a child object instance, we need to make sure the imported model can be attached
# to the parent object instance. We sort the object instances such that parent object instances are imported
# before child object instances.
dependencies = {key: self._attached_objects.get(key, {}) for key in self._object_instance_to_synset.keys()}
for obj_inst in list(reversed(list(nx.algorithms.topological_sort(nx.DiGraph(dependencies))))):
obj_synset = self._object_instance_to_synset[obj_inst]
# Don't populate agent
if obj_synset == "agent.n.01":
continue
# Populate based on whether it's a substance or not
if is_substance_synset(obj_synset):
assert len(self._activity_conditions.parsed_objects[obj_synset]) == 1, "Systems are singletons"
obj_inst = self._activity_conditions.parsed_objects[obj_synset][0]
system_name = OBJECT_TAXONOMY.get_subtree_substances(obj_synset)[0]
self._object_scope[obj_inst] = BDDLEntity(
bddl_inst=obj_inst,
entity=None if obj_inst in self._future_obj_instances else get_system(system_name),
)
else:
valid_categories = set(OBJECT_TAXONOMY.get_subtree_categories(obj_synset))
categories = list(valid_categories.intersection(available_categories))
if len(categories) == 0:
return f"None of the following categories could be found in the dataset for synset {obj_synset}: " \
f"{valid_categories}"
# Don't explicitly sample if future
if obj_inst in self._future_obj_instances:
self._object_scope[obj_inst] = BDDLEntity(bddl_inst=obj_inst)
continue
# Don't sample if already in room
if obj_inst in self._inroom_object_instances:
continue
# Shuffle categories and sample to find a valid model
np.random.shuffle(categories)
model_choices = set()
for category in categories:
# Get all available models that support all of its synset abilities
model_choices = set(get_all_object_category_models_with_abilities(
category=category,
abilities=OBJECT_TAXONOMY.get_abilities(OBJECT_TAXONOMY.get_synset_from_category(category)),
))
model_choices = model_choices if category not in GOOD_MODELS else model_choices.intersection(GOOD_MODELS[category])
model_choices -= BAD_CLOTH_MODELS.get(category, set())
model_choices = self._filter_model_choices_by_attached_states(model_choices, category, obj_inst)
if len(model_choices) > 0:
break
if len(model_choices) == 0:
# We failed to find ANY valid model across ALL valid categories
return f"Missing valid object models for all categories: {categories}"
# Randomly select an object model
model = np.random.choice(list(model_choices))
# Potentially add additional kwargs
obj_kwargs = dict()
obj_kwargs["bounding_box"] = GOOD_BBOXES.get(category, dict()).get(model, None)
# create the object
simulator_obj = DatasetObject(
name=f"{category}_{len(og.sim.scene.objects)}",
category=category,
model=model,
prim_type=PrimType.CLOTH if "cloth" in OBJECT_TAXONOMY.get_abilities(obj_synset) else PrimType.RIGID,
**obj_kwargs,
)
num_new_obj += 1
# Load the object into the simulator
assert og.sim.scene.loaded, "Scene is not loaded"
og.sim.import_object(simulator_obj)
# Set these objects to be far-away locations
simulator_obj.set_position(np.array([100.0, 100.0, -100.0]) + np.ones(3) * num_new_obj * 5.0)
self._sampled_objects.add(simulator_obj)
self._object_scope[obj_inst] = BDDLEntity(bddl_inst=obj_inst, entity=simulator_obj)
og.sim.play()
og.sim.stop()
def _sample_initial_conditions(self):
"""
Sample initial conditions
Returns:
None or str: If successful, returns None. Otherwise, returns an error message
"""
error_msg, self._inroom_object_scope_filtered_initial = self._sample_conditions(
self._inroom_object_scope, self._inroom_object_conditions, "initial"
)
return error_msg
def _sample_goal_conditions(self):
"""
Sample goal conditions
Returns:
None or str: If successful, returns None. Otherwise, returns an error message
"""
activity_goal_conditions = get_goal_conditions(self._activity_conditions, self._backend, self._object_scope)
ground_goal_state_options = get_ground_goal_state_options(self._activity_conditions, self._backend, self._object_scope, activity_goal_conditions)
np.random.shuffle(ground_goal_state_options)
log.debug(("number of ground_goal_state_options", len(ground_goal_state_options)))
num_goal_condition_set_to_test = 10
goal_condition_success = False
# Try to fulfill different set of ground goal conditions (maximum num_goal_condition_set_to_test)
for goal_condition_set in ground_goal_state_options[:num_goal_condition_set_to_test]:
goal_condition_processed = []
for condition in goal_condition_set:
condition, positive = process_single_condition(condition)
if condition is None:
continue
goal_condition_processed.append((condition, positive))
error_msg, _ = self._sample_conditions(
self._inroom_object_scope_filtered_initial, goal_condition_processed, "goal"
)
if not error_msg:
# if one set of goal conditions (and initial conditions) are satisfied, sampling is successful
goal_condition_success = True
break
if not goal_condition_success:
return error_msg
def _sample_initial_conditions_final(self):
"""
Sample final initial conditions
Returns:
None or str: If successful, returns None. Otherwise, returns an error message
"""
# Sample kinematics first, then particle states, then unary states
state = og.sim.dump_state(serialized=False)
for group in ("kinematic", "particle", "unary"):
log.info(f"Sampling {group} states...")
if len(self._object_sampling_orders[group]) > 0:
for cur_batch in self._object_sampling_orders[group]:
conditions_to_sample = []
for condition, positive in self._object_sampling_conditions[group]:
# Sample conditions that involve the current batch of objects
child_scope_name = condition.body[0]
if child_scope_name in cur_batch:
entity = self._object_scope[child_scope_name]
conditions_to_sample.append((condition, positive, entity, child_scope_name))
# If we're sampling kinematics, sort children based on (a) whether they are cloth or not, and then
# (b) their AABB, so that first all rigid objects are sampled before cloth objects, and within each
# group the larger objects are sampled first
if group == "kinematic":
rigid_conditions = [c for c in conditions_to_sample if c[2].prim_type != PrimType.CLOTH]
cloth_conditions = [c for c in conditions_to_sample if c[2].prim_type == PrimType.CLOTH]
conditions_to_sample = (
                            list(reversed(sorted(rigid_conditions, key=lambda x: np.prod(x[2].aabb_extent)))) +
                            list(reversed(sorted(cloth_conditions, key=lambda x: np.prod(x[2].aabb_extent))))
)
# Sample!
for condition, positive, entity, child_scope_name in conditions_to_sample:
success = False
kwargs = dict()
# Reset if we're sampling a kinematic state
if condition.STATE_NAME in {"inside", "ontop", "under"}:
kwargs["reset_before_sampling"] = True
elif condition.STATE_NAME in {"attached"}:
kwargs["bypass_alignment_checking"] = True
kwargs["check_physics_stability"] = True
kwargs["can_joint_break"] = False
while True:
num_trials = 1
for _ in range(num_trials):
success = condition.sample(binary_state=positive, **kwargs)
if success:
# Update state
state = og.sim.dump_state(serialized=False)
break
if success:
# After the final round of kinematic sampling, we assign in_rooms to newly imported objects
if group == "kinematic":
parent = self._object_scope[condition.body[1]]
entity.in_rooms = parent.in_rooms.copy()
# Can terminate immediately
break
# Can't re-sample non-kinematics or rescale cloth or agent, so in
# those cases terminate immediately
if group != "kinematic" or condition.STATE_NAME == "attached" or "agent" in child_scope_name or entity.prim_type == PrimType.CLOTH:
break
# If any scales are equal or less than the lower threshold, terminate immediately
new_scale = entity.scale - m.DYNAMIC_SCALE_INCREMENT
if np.any(new_scale < m.MIN_DYNAMIC_SCALE):
break
# Re-scale and re-attempt
# Re-scaling is not respected unless sim cycle occurs
og.sim.stop()
entity.scale = new_scale
log.info(f"Kinematic sampling {condition.STATE_NAME} {condition.body} failed, rescaling obj: {child_scope_name} to {entity.scale}")
og.sim.play()
og.sim.load_state(state, serialized=False)
og.sim.step_physics()
if not success:
# Update object registry because we just assigned in_rooms to newly imported objects
og.sim.scene.object_registry.update(keys=["in_rooms"])
return f"Sampleable object conditions failed: {condition.STATE_NAME} {condition.body}"
# Update object registry because we just assigned in_rooms to newly imported objects
og.sim.scene.object_registry.update(keys=["in_rooms"])
# One more sim step to make sure the object states are propagated correctly
# E.g. after sampling Filled.set_value(True), Filled.get_value() will become True only after one step
og.sim.step()
def _sample_conditions(self, input_object_scope, conditions, condition_type):
"""
Sample conditions
Args:
input_object_scope (dict):
conditions (list): List of conditions to filter scope with, where each list entry is
a tuple of (condition, positive), where @positive is True if the condition has a positive
evaluation.
condition_type (str): What type of condition to sample, e.g., "initial"
Returns:
None or str: If successful, returns None. Otherwise, returns an error message
"""
error_msg, problematic_objs = "", []
while not np.any([np.any(self._object_scope[obj_inst].scale < m.MIN_DYNAMIC_SCALE) for obj_inst in problematic_objs]):
filtered_object_scope, problematic_objs = self._filter_object_scope(input_object_scope, conditions, condition_type)
error_msg = self._consolidate_room_instance(filtered_object_scope, condition_type)
if error_msg is None:
break
# Re-scaling is not respected unless sim cycle occurs
og.sim.stop()
for obj_inst in problematic_objs:
obj = self._object_scope[obj_inst]
# If the object's initial condition is attachment, or it's agent or cloth, we can't / shouldn't scale
# down, so play again and then terminate immediately
if obj_inst in self._attached_objects or "agent" in obj_inst or obj.prim_type == PrimType.CLOTH:
og.sim.play()
return error_msg, None
assert np.all(obj.scale > m.DYNAMIC_SCALE_INCREMENT)
obj.scale -= m.DYNAMIC_SCALE_INCREMENT
og.sim.play()
if error_msg:
return error_msg, None
return self._maximum_bipartite_matching(filtered_object_scope, condition_type), filtered_object_scope
def _maximum_bipartite_matching(self, filtered_object_scope, condition_type):
"""
Matches objects from @filtered_object_scope to specific room instances it can be
sampled from
Args:
filtered_object_scope (dict): Filtered object scope
condition_type (str): What type of condition to sample, e.g., "initial"
Returns:
None or str: If successful, returns None. Otherwise, returns an error message
"""
        # For each room instance, perform maximum bipartite matching between object instances in scope and simulator objects
        # Left nodes: a list of object instances in scope
        # Right nodes: a list of simulator objects
        # Edges: whether the simulator object can support the sampling requirement of this object instance
for room_type in filtered_object_scope:
# The same room instances will be shared across all scene obj in a given room type
some_obj = list(filtered_object_scope[room_type].keys())[0]
room_insts = list(filtered_object_scope[room_type][some_obj].keys())
success = False
# Loop through each room instance
for room_inst in room_insts:
graph = nx.Graph()
                # For this given room instance, gather mapping from obj instance to a list of simulator obj
obj_inst_to_obj_per_room_inst = {}
for obj_inst in filtered_object_scope[room_type]:
obj_inst_to_obj_per_room_inst[obj_inst] = filtered_object_scope[room_type][obj_inst][room_inst]
top_nodes = []
log_msg = "MBM for room instance [{}]".format(room_inst)
log.debug((log_msg))
for obj_inst in obj_inst_to_obj_per_room_inst:
for obj in obj_inst_to_obj_per_room_inst[obj_inst]:
# Create an edge between obj instance and each of the simulator obj that supports sampling
graph.add_edge(obj_inst, obj)
log_msg = "Adding edge: {} <-> {}".format(obj_inst, obj.name)
log.debug((log_msg))
top_nodes.append(obj_inst)
# Need to provide top_nodes that contain all nodes in one bipartite node set
# The matches will have two items for each match (e.g. A -> B, B -> A)
matches = nx.bipartite.maximum_matching(graph, top_nodes=top_nodes)
if len(matches) == 2 * len(obj_inst_to_obj_per_room_inst):
log.debug(("Object scope finalized:"))
for obj_inst, obj in matches.items():
if obj_inst in obj_inst_to_obj_per_room_inst:
self._object_scope[obj_inst] = BDDLEntity(bddl_inst=obj_inst, entity=obj)
log.debug((obj_inst, obj.name))
success = True
break
if not success:
return "{}: Room type [{}] of scene [{}] do not have enough simulator objects that can successfully sample all the objects needed. This is usually caused by specifying too many object instances in the object scope or the conditions are so stringent that too few simulator objects can satisfy them via sampling.\n".format(
condition_type, room_type, self._scene_model
)
| 72,214 | Python | 49.429469 | 337 | 0.597945 |
StanfordVL/OmniGibson/omnigibson/renderer_settings/post_processing_settings.py | import omnigibson.lazy as lazy
from omnigibson.renderer_settings.settings_base import SettingItem, SettingsBase, SubSettingsBase
class PostProcessingSettings(SettingsBase):
"""
Post-Processing setting group that handles a variety of sub-settings, including:
- Tone Mapping
- Auto Exposure
- Color Correction
- Color Grading
- XR Compositing
- Chromatic Aberration
- Depth Of Field Camera Overrides
- Motion Blur
- FTT Bloom
- TV Noise & Film Grain
- Reshade
"""
def __init__(self):
self.tone_mapping_settings = ToneMappingSettings()
self.auto_exposure_settings = AutoExposureSettings()
self.color_correction_settings = ColorCorrectionSettings()
self.color_grading_settings = ColorGradingSettings()
self.xr_compositing_settings = XRCompositingSettings()
self.chromatic_aberration_settings = ChromaticAberrationSettings()
self.depth_of_field_settings = DepthOfFieldSettings()
self.motion_blur_settings = MotionBlurSettings()
self.ftt_bloom_settings = FTTBloomSettings()
self.tv_noise_grain_settings = TVNoiseGrainSettings()
self.reshade_settings = ReshadeSettings()
@property
def settings(self):
settings = {}
settings.update(self.tone_mapping_settings.settings)
settings.update(self.auto_exposure_settings.settings)
settings.update(self.color_correction_settings.settings)
settings.update(self.color_grading_settings.settings)
settings.update(self.xr_compositing_settings.settings)
settings.update(self.chromatic_aberration_settings.settings)
settings.update(self.depth_of_field_settings.settings)
settings.update(self.motion_blur_settings.settings)
settings.update(self.ftt_bloom_settings.settings)
settings.update(self.tv_noise_grain_settings.settings)
settings.update(self.reshade_settings.settings)
return settings
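# Illustrative usage sketch (assumes a running Omniverse app): the merged dict maps
# carb setting paths to their SettingItem objects, so current values can be read
# back through carb using each key.
#
#   import omnigibson.lazy as lazy
#
#   pp_settings = PostProcessingSettings()
#   carb_settings = lazy.carb.settings.get_settings()
#   for path in pp_settings.settings:
#       print(path, carb_settings.get(path))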
class ToneMappingSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
# The main tonemapping layout contains only the combo box. All the other options
# are saved in a different layout which can be swapped out in case the tonemapper changes.
tonemapper_ops = [
"Clamp",
"Linear",
"Reinhard",
"Reinhard (modified)",
"HejiHableAlu",
"HableUc2",
"Aces",
"Iray",
]
        self.tonemap_op = SettingItem(
            self, lazy.omni.kit.widget.settings.SettingType.STRING, "Tone Mapping Operator", "/rtx/post/tonemap/op", range_list=tonemapper_ops
        )
# tonemap_op_idx = self._carb_settings.get("/rtx/post/tonemap/op")
# Modified Reinhard
# tonemap_op_idx == 3
self.max_white_luminance = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Max White Luminance",
"/rtx/post/tonemap/maxWhiteLuminance",
range_from=0,
range_to=100,
)
# HableUc2
# tonemap_op_idx == 5
self.white_scale = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "White Scale Value", "/rtx/post/tonemap/whiteScale", range_from=0, range_to=100
)
self.cm2_factor = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "cm^2 Factor", "/rtx/post/tonemap/cm2Factor", range_from=0, range_to=2
)
self.white_point = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "White Point", "/rtx/post/tonemap/whitepoint")
self.film_iso = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Film ISO", "/rtx/post/tonemap/filmIso", range_from=50, range_to=1600
)
self.camera_shutter = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Camera Shutter", "/rtx/post/tonemap/cameraShutter", range_from=1, range_to=5000
)
self.f_number = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "f-Number / f-Stop", "/rtx/post/tonemap/fNumber", range_from=1, range_to=20
)
# Iray
# tonemap_op_idx == 7
self.crush_blacks = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Crush Blacks",
"/rtx/post/tonemap/irayReinhard/crushBlacks",
range_from=0,
range_to=1,
)
self.burn_highlights = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Burn Highlights",
"/rtx/post/tonemap/irayReinhard/burnHighlights",
range_from=0,
range_to=1,
)
self.burn_highlights_per_component = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.BOOL,
"Burn Highlights per Component",
"/rtx/post/tonemap/irayReinhard/burnHighlightsPerComponent",
)
self.burn_highlights_max_component = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.BOOL,
"Burn Highlights max Component",
"/rtx/post/tonemap/irayReinhard/burnHighlightsMaxComponent",
)
self.saturation = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Saturation", "/rtx/post/tonemap/irayReinhard/saturation", range_from=0, range_to=1
)
        # Clamp never uses sRGB conversion
        # tonemap_op_idx != 0
self.enable_srgb_to_gamma = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable SRGB To Gamma Conversion", "/rtx/post/tonemap/enableSrgbToGamma"
)
tonemapColorMode = ["sRGBLinear", "ACEScg"]
self.color_mode = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.STRING,
"Tonemapping Color Space",
"/rtx/post/tonemap/colorMode",
range_list=tonemapColorMode,
)
self.wrapvalue = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Wrap Value", "/rtx/post/tonemap/wrapValue", range_from=0, range_to=100000
)
@property
def settings(self):
        settings = {
            "/rtx/post/tonemap/op": self.tonemap_op,
"/rtx/post/tonemap/cm2Factor": self.cm2_factor,
"/rtx/post/tonemap/whitepoint": self.white_point,
"/rtx/post/tonemap/filmIso": self.film_iso,
"/rtx/post/tonemap/cameraShutter": self.camera_shutter,
"/rtx/post/tonemap/fNumber": self.f_number,
"/rtx/post/tonemap/colorMode": self.color_mode,
"/rtx/post/tonemap/wrapValue": self.wrapvalue,
}
tonemap_op_idx = self._carb_settings.get("/rtx/post/tonemap/op")
if tonemap_op_idx == 3: # Modified Reinhard
settings.update(
{"/rtx/post/tonemap/maxWhiteLuminance": self.max_white_luminance,}
)
if tonemap_op_idx == 5: # HableUc2
settings.update(
{"/rtx/post/tonemap/whiteScale": self.white_scale,}
)
if tonemap_op_idx == 7: # Iray
settings.update(
{
"/rtx/post/tonemap/irayReinhard/crushBlacks": self.crush_blacks,
"/rtx/post/tonemap/irayReinhard/burnHighlights": self.burn_highlights,
"/rtx/post/tonemap/irayReinhard/burnHighlightsPerComponent": self.burn_highlights_per_component,
"/rtx/post/tonemap/irayReinhard/burnHighlightsMaxComponent": self.burn_highlights_max_component,
"/rtx/post/tonemap/irayReinhard/saturation": self.saturation,
}
)
        if tonemap_op_idx != 0:  # Clamp never uses sRGB conversion
settings.update(
{"/rtx/post/tonemap/enableSrgbToGamma": self.enable_srgb_to_gamma,}
)
return settings
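# Illustrative sketch (assumes a running Omniverse app): because the settings
# property re-reads "/rtx/post/tonemap/op", switching the operator index changes
# which keys are exposed.
#
#   carb_settings = lazy.carb.settings.get_settings()
#   carb_settings.set("/rtx/post/tonemap/op", 5)  # HableUc2
#   assert "/rtx/post/tonemap/whiteScale" in ToneMappingSettings().settings
#   carb_settings.set("/rtx/post/tonemap/op", 0)  # Clamp: no sRGB conversion key
#   assert "/rtx/post/tonemap/enableSrgbToGamma" not in ToneMappingSettings().settings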
class AutoExposureSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
histFilter_types = ["Median", "Average"]
self.filter_type = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.STRING, "Histogram Filter", "/rtx/post/histogram/filterType", range_list=histFilter_types
)
self.tau = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Adaptation Speed", "/rtx/post/histogram/tau", range_from=0.5, range_to=10.0
)
self.white_scale = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"White Point Scale",
"/rtx/post/histogram/whiteScale",
range_from=0.01,
range_to=80.0,
)
self.use_exposure_clamping = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Use Exposure Clamping", "/rtx/post/histogram/useExposureClamping"
)
self.min_ev = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Min EV", "/rtx/post/histogram/minEV", range_from=0.0, range_to=1000000.0
)
self.max_ev = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Max EV", "/rtx/post/histogram/maxEV", range_from=0.0, range_to=1000000.0
)
@property
def settings(self):
return {
"/rtx/post/histogram/filterType": self.filter_type,
"/rtx/post/histogram/tau": self.tau,
"/rtx/post/histogram/whiteScale": self.white_scale,
"/rtx/post/histogram/useExposureClamping": self.use_exposure_clamping,
"/rtx/post/histogram/minEV": self.min_ev,
"/rtx/post/histogram/maxEV": self.max_ev,
}
@property
def enabled_setting_path(self):
return "/rtx/post/histogram/enabled"
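# Illustrative sketch (assumes a running Omniverse app): sub-setting groups that
# define enabled_setting_path can be toggled through carb with that same path.
#
#   auto_exposure = AutoExposureSettings()
#   carb_settings = lazy.carb.settings.get_settings()
#   carb_settings.set(auto_exposure.enabled_setting_path, True)
#   print(carb_settings.get("/rtx/post/histogram/enabled"))  # -> True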
class ColorCorrectionSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
mode = ["ACES (Pre-Tonemap)", "Standard (Post-Tonemap)"]
self.mode = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.STRING, "Mode", "/rtx/post/colorcorr/mode", range_list=mode)
# ccMode = self._carb_settings.get("/rtx/post/colorcorr/mode")
# ccMode == 0
color_correction_mode = ["sRGBLinear", "ACEScg"]
        self.output_mode = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.STRING,
"Output Color Space",
"/rtx/post/colorcorr/outputMode",
range_list=color_correction_mode,
)
self.saturation = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Saturation", "/rtx/post/colorcorr/saturation")
self.contrast = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Contrast", "/rtx/post/colorcorr/contrast")
self.gamma = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Gamma", "/rtx/post/colorcorr/gamma")
self.gain = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Gain", "/rtx/post/colorcorr/gain")
self.offset = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Offset", "/rtx/post/colorcorr/offset")
@property
def settings(self):
settings = {
"/rtx/post/colorcorr/mode": self.mode,
"/rtx/post/colorcorr/saturation": self.saturation,
"/rtx/post/colorcorr/contrast": self.contrast,
"/rtx/post/colorcorr/gamma": self.gamma,
"/rtx/post/colorcorr/gain": self.gain,
"/rtx/post/colorcorr/offset": self.offset,
}
cc_mode = self._carb_settings.get("/rtx/post/colorcorr/mode")
if cc_mode == 0:
            settings.update(
                {"/rtx/post/colorcorr/outputMode": self.output_mode,}
            )
return settings
@property
def enabled_setting_path(self):
return "/rtx/post/colorcorr/enabled"
class ColorGradingSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
mode = ["ACES (Pre-Tonemap)", "Standard (Post-Tonemap)"]
self.mode = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.STRING, "Mode", "/rtx/post/colorgrad/mode", range_list=mode)
        colorGradingMode = ["sRGBLinear", "ACEScg"]
        self.output_mode = SettingItem(
            self,
            lazy.omni.kit.widget.settings.SettingType.STRING,
            "Output Color Space",
            "/rtx/post/colorgrad/outputMode",
            range_list=colorGradingMode,
        )
self.blackpoint = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Black Point", "/rtx/post/colorgrad/blackpoint")
self.whitepoint = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "White Point", "/rtx/post/colorgrad/whitepoint")
self.contrast = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Contrast", "/rtx/post/colorgrad/contrast")
self.lift = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Lift", "/rtx/post/colorgrad/lift")
self.gain = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Gain", "/rtx/post/colorgrad/gain")
self.multiply = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Multiply", "/rtx/post/colorgrad/multiply")
self.offset = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Offset", "/rtx/post/colorgrad/offset")
self.gamma = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Gamma", "/rtx/post/colorgrad/gamma")
@property
def settings(self):
settings = {
"/rtx/post/colorgrad/mode": self.mode,
"/rtx/post/colorgrad/blackpoint": self.blackpoint,
"/rtx/post/colorgrad/whitepoint": self.whitepoint,
"/rtx/post/colorgrad/contrast": self.contrast,
"/rtx/post/colorgrad/lift": self.lift,
"/rtx/post/colorgrad/gain": self.gain,
"/rtx/post/colorgrad/multiply": self.multiply,
"/rtx/post/colorgrad/offset": self.offset,
"/rtx/post/colorgrad/gamma": self.gamma,
}
cg_mode = self._carb_settings.get("/rtx/post/colorgrad/mode")
if cg_mode == 0:
settings.update(
{"/rtx/post/colorgrad/outputMode": self.output_mode,}
)
return settings
@property
def enabled_setting_path(self):
return "/rtx/post/colorgrad/enabled"
class XRCompositingSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.apply_alpha_zero_pass_first = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Composite in Linear Space", "/rtx/post/backgroundZeroAlpha/ApplyAlphaZeroPassFirst"
)
self.backgroundComposite = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Composite in Editor", "/rtx/post/backgroundZeroAlpha/backgroundComposite"
)
# self.backplate_texture = SettingItem(self, "ASSET", "Default Backplate Texture", "/rtx/post/backgroundZeroAlpha/backplateTexture")
self.background_default_color = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Default Backplate Color", "/rtx/post/backgroundZeroAlpha/backgroundDefaultColor"
)
self.enable_lens_distortion_correction = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.BOOL,
"Enable Lens Distortion",
"/rtx/post/backgroundZeroAlpha/enableLensDistortionCorrection",
)
# self.distortion_map = SettingItem(self, "ASSET", "Lens Distortion Map", "/rtx/post/lensDistortion/distortionMap")
# self.undistortion_map = SettingItem(self, "ASSET", "Lens Undistortion Map", "/rtx/post/lensDistortion/undistortionMap")
@property
def settings(self):
return {
"/rtx/post/backgroundZeroAlpha/ApplyAlphaZeroPassFirst": self.apply_alpha_zero_pass_first,
"/rtx/post/backgroundZeroAlpha/backgroundComposite": self.backgroundComposite,
"/rtx/post/backgroundZeroAlpha/backgroundDefaultColor": self.background_default_color,
"/rtx/post/backgroundZeroAlpha/enableLensDistortionCorrection": self.enable_lens_distortion_correction,
}
@property
def enabled_setting_path(self):
return "/rtx/post/backgroundZeroAlpha/enabled"
class ChromaticAberrationSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.strength_r = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Strength Red", "/rtx/post/chromaticAberration/strengthR", -1.0, 1.0, 0.01
)
self.strength_g = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Strength Green", "/rtx/post/chromaticAberration/strengthG", -1.0, 1.0, 0.01
)
self.strength_b = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Strength Blue", "/rtx/post/chromaticAberration/strengthB", -1.0, 1.0, 0.01
)
chromatic_aberration_ops = ["Radial", "Barrel"]
self.mode_r = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.STRING, "Algorithm Red", "/rtx/post/chromaticAberration/modeR", chromatic_aberration_ops
)
self.mode_g = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.STRING, "Algorithm Green", "/rtx/post/chromaticAberration/modeG", chromatic_aberration_ops
)
self.mode_b = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.STRING, "Algorithm Blue", "/rtx/post/chromaticAberration/modeB", chromatic_aberration_ops
)
self.enable_lanczos = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Use Lanczos Sampler", "/rtx/post/chromaticAberration/enableLanczos"
)
@property
def settings(self):
return {
"/rtx/post/chromaticAberration/strengthR": self.strength_r,
"/rtx/post/chromaticAberration/strengthG": self.strength_g,
"/rtx/post/chromaticAberration/strengthB": self.strength_b,
"/rtx/post/chromaticAberration/modeR": self.mode_r,
"/rtx/post/chromaticAberration/modeG": self.mode_g,
"/rtx/post/chromaticAberration/modeB": self.mode_b,
"/rtx/post/chromaticAberration/enableLanczos": self.enable_lanczos,
}
@property
def enabled_setting_path(self):
return "/rtx/post/chromaticAberration/enabled"
class DepthOfFieldSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.dof_enabled = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable DOF", "/rtx/post/dof/enabled")
self.subject_distance = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Subject Distance",
"/rtx/post/dof/subjectDistance",
range_from=-10000,
range_to=10000.0,
)
self.focal_length = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Focal Length (mm)", "/rtx/post/dof/focalLength", range_from=0, range_to=1000
)
self.f_number = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "f-Number / f-Stop", "/rtx/post/dof/fNumber", range_from=0, range_to=1000
)
self.anisotropy = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Anisotropy", "/rtx/post/dof/anisotropy", range_from=-1, range_to=1
)
@property
def settings(self):
return {
"/rtx/post/dof/enabled": self.dof_enabled,
"/rtx/post/dof/subjectDistance": self.subject_distance,
"/rtx/post/dof/focalLength": self.focal_length,
"/rtx/post/dof/fNumber": self.f_number,
"/rtx/post/dof/anisotropy": self.anisotropy,
}
@property
def enabled_setting_path(self):
return "/rtx/post/dof/overrideEnabled"
class MotionBlurSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.max_blur_diameter_fraction = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Blur Diameter Fraction",
"/rtx/post/motionblur/maxBlurDiameterFraction",
range_from=0,
range_to=0.5,
)
self.num_samples = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.INT, "Number of Samples", "/rtx/post/motionblur/numSamples", range_from=4, range_to=32
)
self.exposure_fraction = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Exposure Fraction",
"/rtx/post/motionblur/exposureFraction",
range_from=0,
range_to=5.0,
)
@property
def settings(self):
return {
"/rtx/post/motionblur/maxBlurDiameterFraction": self.max_blur_diameter_fraction,
"/rtx/post/motionblur/numSamples": self.num_samples,
"/rtx/post/motionblur/exposureFraction": self.exposure_fraction,
}
@property
def enabled_setting_path(self):
return "/rtx/post/motionblur/enabled"
class FTTBloomSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.flare_scale = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Scale", "/rtx/post/lensFlares/flareScale", range_from=-1000, range_to=1000
)
self.cutoff_point = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.DOUBLE3, "Cutoff Point", "/rtx/post/lensFlares/cutoffPoint")
self.cutoff_fuzziness = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Cutoff Fuzziness",
"/rtx/post/lensFlares/cutoffFuzziness",
range_from=0.0,
range_to=1.0,
)
self.energy_constraining_blend = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Energy Constrained", "/rtx/post/lensFlares/energyConstrainingBlend"
)
self.physical_settings = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Physical Settings", "/rtx/post/lensFlares/physicalSettings"
)
# fftbloom_use_physical_settings = self._carb_settings.get("/rtx/post/lensFlares/physicalSettings")
# Physical settings
self.blades = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.INT, "Blades", "/rtx/post/lensFlares/blades", range_from=0, range_to=10
)
self.aperture_rotation = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Aperture Rotation",
"/rtx/post/lensFlares/apertureRotation",
range_from=-1000,
range_to=1000,
)
self.sensor_diagonal = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Sensor Diagonal",
"/rtx/post/lensFlares/sensorDiagonal",
range_from=-1000,
range_to=1000,
)
self.sensor_aspect_ratio = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Sensor Aspect Ratio",
"/rtx/post/lensFlares/sensorAspectRatio",
range_from=-1000,
range_to=1000,
)
self.f_number = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"f-Number / f-Stop",
"/rtx/post/lensFlares/fNumber",
range_from=-1000,
range_to=1000,
)
self.focal_length = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Focal Length (mm)",
"/rtx/post/lensFlares/focalLength",
range_from=-1000,
range_to=1000,
)
# Non-physical settings
self.halo_flare_radius = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.DOUBLE3, "Halo Radius", "/rtx/post/lensFlares/haloFlareRadius"
)
self.halo_flare_falloff = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.DOUBLE3, "Halo Flare Falloff", "/rtx/post/lensFlares/haloFlareFalloff"
)
self.halo_flare_weight = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Halo Flare Weight",
"/rtx/post/lensFlares/haloFlareWeight",
range_from=-1000,
range_to=1000,
)
self.aniso_flare_falloff_y = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.DOUBLE3, "Aniso Falloff Y", "/rtx/post/lensFlares/anisoFlareFalloffY"
)
self.aniso_flare_falloff_x = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.DOUBLE3, "Aniso Falloff X", "/rtx/post/lensFlares/anisoFlareFalloffX"
)
self.aniso_flare_weight = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Aniso Flare Weight",
"/rtx/post/lensFlares/anisoFlareWeight",
range_from=-1000,
range_to=1000,
)
self.isotropic_flare_falloff = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.DOUBLE3, "Isotropic Flare Falloff", "/rtx/post/lensFlares/isotropicFlareFalloff"
)
self.isotropic_flare_weight = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Isotropic Flare Weight",
"/rtx/post/lensFlares/isotropicFlareWeight",
range_from=-1000,
range_to=1000,
)
@property
def settings(self):
settings = {
"/rtx/post/lensFlares/flareScale": self.flare_scale,
"/rtx/post/lensFlares/cutoffPoint": self.cutoff_point,
"/rtx/post/lensFlares/cutoffFuzziness": self.cutoff_fuzziness,
"/rtx/post/lensFlares/energyConstrainingBlend": self.energy_constraining_blend,
"/rtx/post/lensFlares/physicalSettings": self.physical_settings,
}
fftbloom_use_physical_settings = self._carb_settings.get("/rtx/post/lensFlares/physicalSettings")
if fftbloom_use_physical_settings:
settings.update(
{
"/rtx/post/lensFlares/blades": self.blades,
"/rtx/post/lensFlares/apertureRotation": self.aperture_rotation,
"/rtx/post/lensFlares/sensorDiagonal": self.sensor_diagonal,
"/rtx/post/lensFlares/sensorAspectRatio": self.sensor_aspect_ratio,
"/rtx/post/lensFlares/fNumber": self.f_number,
"/rtx/post/lensFlares/focalLength": self.focal_length,
}
)
else:
settings.update(
{
"/rtx/post/lensFlares/haloFlareRadius": self.halo_flare_radius,
"/rtx/post/lensFlares/haloFlareFalloff": self.halo_flare_falloff,
"/rtx/post/lensFlares/haloFlareWeight": self.halo_flare_weight,
"/rtx/post/lensFlares/anisoFlareFalloffY": self.aniso_flare_falloff_y,
"/rtx/post/lensFlares/anisoFlareFalloffX": self.aniso_flare_falloff_x,
"/rtx/post/lensFlares/anisoFlareWeight": self.aniso_flare_weight,
"/rtx/post/lensFlares/isotropicFlareFalloff": self.isotropic_flare_falloff,
"/rtx/post/lensFlares/isotropicFlareWeight": self.isotropic_flare_weight,
}
)
return settings
@property
def enabled_setting_path(self):
return "/rtx/post/lensFlares/enabled"
class TVNoiseGrainSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.enable_scanlines = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Scanlines", "/rtx/post/tvNoise/enableScanlines"
)
self.scanline_spread = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Scanline Spreading",
"/rtx/post/tvNoise/scanlineSpread",
range_from=0.0,
range_to=2.0,
)
self.enable_scroll_bug = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Scroll Bug", "/rtx/post/tvNoise/enableScrollBug"
)
self.enable_vignetting = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Vignetting", "/rtx/post/tvNoise/enableVignetting"
)
self.vignetting_size = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Vignetting Size", "/rtx/post/tvNoise/vignettingSize", range_from=0.0, range_to=255
)
self.vignetting_strength = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Vignetting Strength",
"/rtx/post/tvNoise/vignettingStrength",
range_from=0.0,
range_to=2.0,
)
self.enable_vignetting_flickering = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Vignetting Flickering", "/rtx/post/tvNoise/enableVignettingFlickering"
)
self.enable_ghost_flickering = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Ghost Flickering", "/rtx/post/tvNoise/enableGhostFlickering"
)
self.enable_wave_distortion = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Wavy Distortion", "/rtx/post/tvNoise/enableWaveDistortion"
)
self.enable_vertical_lines = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Vertical Lines", "/rtx/post/tvNoise/enableVerticalLines"
)
self.enable_random_splotches = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Random Splotches", "/rtx/post/tvNoise/enableRandomSplotches"
)
self.enable_film_grain = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Film Grain", "/rtx/post/tvNoise/enableFilmGrain"
)
# Filmgrain is a subframe in TV Noise
# self._carb_settings.get("/rtx/post/tvNoise/enableFilmGrain"):
self.grain_amount = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Grain Amount", "/rtx/post/tvNoise/grainAmount", range_from=0, range_to=0.2
)
self.color_amount = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Color Amount", "/rtx/post/tvNoise/colorAmount", range_from=0, range_to=1.0
)
self.lum_amount = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Luminance Amount", "/rtx/post/tvNoise/lumAmount", range_from=0, range_to=1.0
)
self.grain_size = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Grain Size", "/rtx/post/tvNoise/grainSize", range_from=1.5, range_to=2.5
)
@property
def settings(self):
settings = {
"/rtx/post/tvNoise/enableScanlines": self.enable_scanlines,
"/rtx/post/tvNoise/scanlineSpread": self.scanline_spread,
"/rtx/post/tvNoise/enableScrollBug": self.enable_scroll_bug,
"/rtx/post/tvNoise/enableVignetting": self.enable_vignetting,
"/rtx/post/tvNoise/vignettingSize": self.vignetting_size,
"/rtx/post/tvNoise/vignettingStrength": self.vignetting_strength,
"/rtx/post/tvNoise/enableVignettingFlickering": self.enable_vignetting_flickering,
"/rtx/post/tvNoise/enableGhostFlickering": self.enable_ghost_flickering,
"/rtx/post/tvNoise/enableWaveDistortion": self.enable_wave_distortion,
"/rtx/post/tvNoise/enableVerticalLines": self.enable_vertical_lines,
"/rtx/post/tvNoise/enableRandomSplotches": self.enable_random_splotches,
"/rtx/post/tvNoise/enableFilmGrain": self.enable_film_grain,
}
if self._carb_settings.get("/rtx/post/tvNoise/enableFilmGrain"):
settings.update(
{
"/rtx/post/tvNoise/grainAmount": self.grain_amount,
"/rtx/post/tvNoise/colorAmount": self.color_amount,
"/rtx/post/tvNoise/lumAmount": self.lum_amount,
"/rtx/post/tvNoise/grainSize": self.grain_size,
}
)
return settings
@property
def enabled_setting_path(self):
return "/rtx/post/tvNoise/enabled"
class ReshadeSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
# self._add_setting("ASSET", "Preset path", "/rtx/reshade/presetFilePath")
# widget = self._add_setting("ASSET", "Effect search dir path", "/rtx/reshade/effectSearchDirPath")
# widget.is_folder=True
# widget = self._add_setting("ASSET", "Texture search dir path", "/rtx/reshade/textureSearchDirPath")
# widget.is_folder=True
@property
def settings(self):
return {}
@property
def enabled_setting_path(self):
return "/rtx/reshade/enable"
| 34,472 | Python | 44.240157 | 150 | 0.625348 |
StanfordVL/OmniGibson/omnigibson/renderer_settings/common_settings.py | import omnigibson.lazy as lazy
from omnigibson.renderer_settings.settings_base import SettingItem, SettingsBase, SubSettingsBase
class CommonSettings(SettingsBase):
"""
Common setting group that handles a variety of sub-settings, including:
- Rendering
- Geometry
- Materials
- Lighting
- Simple Fog
- Flow
- Debug View
"""
def __init__(self):
self.render_settings = RenderSettings()
self.geometry_settings = GeometrySettings()
self.materials_settings = MaterialsSettings()
self.lighting_settings = LightingSettings()
self.simple_fog_setting = SimpleFogSettings()
self.flow_settings = FlowSettings()
self.debug_view_settings = DebugViewSettings()
@property
def settings(self):
settings = {}
settings.update(self.render_settings.settings)
settings.update(self.geometry_settings.settings)
settings.update(self.materials_settings.settings)
settings.update(self.lighting_settings.settings)
settings.update(self.simple_fog_setting.settings)
settings.update(self.flow_settings.settings)
settings.update(self.debug_view_settings.settings)
return settings
class RenderSettings(SubSettingsBase):
def __init__(self):
self.multi_threading_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Multi-Threading", "/rtx/multiThreading/enabled"
)
@property
def settings(self):
return {
"/rtx/multiThreading/enabled": self.multi_threading_enabled,
}
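# Illustrative sketch (assumes a running Omniverse app): each SettingItem pairs a
# widget type and display label with a carb setting path, and the settings dict is
# keyed by that path.
#
#   rs = RenderSettings()
#   item = rs.settings["/rtx/multiThreading/enabled"]
#   assert item is rs.multi_threading_enabled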
class GeometrySettings(SubSettingsBase):
def __init__(self):
# Basic geometry settings.
tbnMode = ["AUTO", "CPU", "GPU", "Force GPU"]
self.tbn_frame_mode = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.STRING,
"Normal & Tangent Space Generation Mode",
"/rtx/hydra/TBNFrameMode",
range_list=tbnMode,
)
self.face_culling_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Back Face Culling", "/rtx/hydra/faceCulling/enabled"
)
# Wireframe settings.
self.wireframe_thickness = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Wireframe Thickness",
"/rtx/wireframe/wireframeThickness",
range_from=0.1,
range_to=100,
)
self.wireframe_thickness_world_space = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Wireframe World Space Thickness", "/rtx/wireframe/wireframeThicknessWorldSpace"
)
self.wireframe_shading_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Shaded Wireframe", "/rtx/wireframe/shading/enabled"
)
# Subdivision settings.
self.subdivision_refinement_level = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Subdivision Global Refinement Level",
"/rtx/hydra/subdivision/refinementLevel",
range_from=0,
range_to=2,
)
self.subdivision_adaptive_refinement = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.BOOL,
"Subdivision Feature-adaptive Refinement",
"/rtx/hydra/subdivision/adaptiveRefinement",
)
        # If set to zero, this is overridden by the scene unit, which means the scale factor would be 1
self.renderMeterPerUnit = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Renderer-internal meters per unit ", "/rtx/scene/renderMeterPerUnit"
)
self.only_opaque_ray_flags = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Hide geometry that uses opacity (debug)", "/rtx/debug/onlyOpaqueRayFlags"
)
@property
def settings(self):
return {
"/rtx/hydra/TBNFrameMode": self.tbn_frame_mode,
"/rtx/hydra/faceCulling/enabled": self.face_culling_enabled,
"/rtx/wireframe/wireframeThickness": self.wireframe_thickness,
"/rtx/wireframe/wireframeThicknessWorldSpace": self.wireframe_thickness_world_space,
"/rtx/wireframe/shading/enabled": self.wireframe_shading_enabled,
"/rtx/hydra/subdivision/refinementLevel": self.subdivision_refinement_level,
"/rtx/hydra/subdivision/adaptiveRefinement": self.subdivision_adaptive_refinement,
"/rtx/scene/renderMeterPerUnit": self.renderMeterPerUnit,
"/rtx/debug/onlyOpaqueRayFlags": self.only_opaque_ray_flags,
}
class MaterialsSettings(SubSettingsBase):
def __init__(self):
self.skip_material_loading = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Disable Material Loading", "/app/renderer/skipMaterialLoading"
)
self.max_mip_count = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Textures: Mipmap Levels to Load",
"/rtx-transient/resourcemanager/maxMipCount",
range_from=2,
range_to=15,
)
self.compression_mip_size_threshold = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Textures: Compression Mipmap Size Threshold (0 to disable) ",
"/rtx-transient/resourcemanager/compressionMipSizeThreshold",
0,
8192,
)
self.enable_texture_streaming = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.BOOL,
"Textures: on-demand streaming (toggling requires scene reload)",
"/rtx-transient/resourcemanager/enableTextureStreaming",
)
self.memory_budget = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Texture streaming memory budget (fraction of GPU memory)",
"/rtx-transient/resourcemanager/texturestreaming/memoryBudget",
0.01,
1,
)
self.animation_time = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "MDL Animation Time Override", "/rtx/animationTime")
self.animation_time_use_wallclock = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "MDL Animation Time Use Wallclock", "/rtx/animationTimeUseWallclock"
)
@property
def settings(self):
return {
"/app/renderer/skipMaterialLoading": self.skip_material_loading,
"/rtx-transient/resourcemanager/maxMipCount": self.max_mip_count,
"/rtx-transient/resourcemanager/compressionMipSizeThreshold": self.compression_mip_size_threshold,
"/rtx-transient/resourcemanager/enableTextureStreaming": self.enable_texture_streaming,
"/rtx-transient/resourcemanager/texturestreaming/memoryBudget": self.memory_budget,
"/rtx/animationTime": self.animation_time,
"/rtx/animationTimeUseWallclock": self.animation_time_use_wallclock,
}
class LightingSettings(SubSettingsBase):
def __init__(self):
# Basic light settings.
show_lights_settings = {"Per-Light Enable": 0, "Force Enable": 1, "Force Disable": 2}
self.show_lights = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Show Area Lights In Primary Rays",
"/rtx/raytracing/showLights",
range_dict=show_lights_settings,
)
self.shadow_bias = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Shadow Bias", "/rtx/raytracing/shadowBias", range_from=0.0, range_to=5.0
)
self.skip_most_lights = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Use First Distant Light & First Dome Light Only", "/rtx/scenedb/skipMostLights"
)
# Demo light.
dome_lighting_sampling_type = {
"Upper & Lower Hemisphere": 0,
# "Upper Visible & Sampled, Lower Is Black": 1,
"Upper Hemisphere Visible & Sampled, Lower Is Only Visible": 2,
"Use As Env Map": 3,
}
self.upper_lower_strategy = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Hemisphere Sampling",
"/rtx/domeLight/upperLowerStrategy",
range_dict=dome_lighting_sampling_type,
)
dome_texture_resolution_items = {
"16": 16,
"32": 32,
"64": 64,
"128": 128,
"256": 256,
"512": 512,
"1024": 1024,
"2048": 2048,
"4096": 4096,
"8192": 8192,
}
self.baking_resolution = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Baking Resolution",
"/rtx/domeLight/baking/resolution",
range_dict=dome_texture_resolution_items,
)
self.resolution_factor = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Dome Light Texture Resolution Factor",
"/rtx/domeLight/resolutionFactor",
range_from=0.01,
range_to=4.0,
)
self.baking_spp = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Dome Light Material Baking SPP",
"/rtx/domeLight/baking/spp",
range_from=1,
range_to=32,
)
@property
def settings(self):
return {
"/rtx/raytracing/showLights": self.show_lights,
"/rtx/raytracing/shadowBias": self.shadow_bias,
"/rtx/scenedb/skipMostLights": self.skip_most_lights,
"/rtx/domeLight/upperLowerStrategy": self.upper_lower_strategy,
"/rtx/domeLight/baking/resolution": self.baking_resolution,
"/rtx/domeLight/resolutionFactor": self.resolution_factor,
"/rtx/domeLight/baking/spp": self.baking_spp,
}
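# Illustrative sketch (assumes a running Omniverse app): range_dict entries map
# display labels to raw carb values, so a 2048x2048 dome light baking resolution
# is selected by writing the integer value directly:
#
#   lazy.carb.settings.get_settings().set("/rtx/domeLight/baking/resolution", 2048)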
class SimpleFogSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.fog_color = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Color", "/rtx/fog/fogColor")
self.fog_color_intensity = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Intensity", "/rtx/fog/fogColorIntensity", range_from=1, range_to=1000000
)
self.fog_z_up_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Height-based Fog - Use +Z Axis", "/rtx/fog/fogZup/enabled"
)
self.fog_start_height = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Height-based Fog - Plane Height",
"/rtx/fog/fogStartHeight",
range_from=-1000000,
range_to=1000000,
)
self.fog_height_density = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Height Density", "/rtx/fog/fogHeightDensity", range_from=0, range_to=1
)
self.fog_height_falloff = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Height Falloff", "/rtx/fog/fogHeightFalloff", range_from=0, range_to=1000
)
self.fog_distance_density = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Distance Density", "/rtx/fog/fogDistanceDensity", range_from=0, range_to=1
)
self.fog_start_dist = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Start Distance to Camera", "/rtx/fog/fogStartDist", range_from=0, range_to=1000000
)
self.fog_end_dist = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "End Distance to Camera", "/rtx/fog/fogEndDist", range_from=0, range_to=1000000
)
@property
def settings(self):
        return {
            "/rtx/fog/fogColor": self.fog_color,
            "/rtx/fog/fogColorIntensity": self.fog_color_intensity,
            "/rtx/fog/fogZup/enabled": self.fog_z_up_enabled,
            "/rtx/fog/fogStartHeight": self.fog_start_height,
"/rtx/fog/fogHeightDensity": self.fog_height_density,
"/rtx/fog/fogHeightFalloff": self.fog_height_falloff,
"/rtx/fog/fogDistanceDensity": self.fog_distance_density,
"/rtx/fog/fogStartDist": self.fog_start_dist,
"/rtx/fog/fogEndDist": self.fog_end_dist,
}
@property
def enabled_setting_path(self):
return "/rtx/fog/enabled"
class FlowSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.ray_traced_shadows_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Flow in Real-Time Ray Traced Shadows", "/rtx/flow/rayTracedShadowsEnabled"
)
self.ray_traced_reflections_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Flow in Real-Time Ray Traced Reflections", "/rtx/flow/rayTracedReflectionsEnabled"
)
self.path_tracing_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Flow in Path-Traced Mode", "/rtx/flow/pathTracingEnabled"
)
self.path_tracing_shadows_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Flow in Path-Traced Mode Shadows", "/rtx/flow/pathTracingShadowsEnabled"
)
self.composite_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Composite with Flow Library Renderer", "/rtx/flow/compositeEnabled"
)
self.use_flow_library_self_shadow = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Use Flow Library Self Shadow", "/rtx/flow/useFlowLibrarySelfShadow"
)
self.max_blocks = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.INT, "Max Blocks", "/rtx/flow/maxBlocks")
@property
def settings(self):
return {
"/rtx/flow/rayTracedShadowsEnabled": self.ray_traced_shadows_enabled,
"/rtx/flow/rayTracedReflectionsEnabled": self.ray_traced_reflections_enabled,
"/rtx/flow/pathTracingEnabled": self.path_tracing_enabled,
"/rtx/flow/pathTracingShadowsEnabled": self.path_tracing_shadows_enabled,
"/rtx/flow/compositeEnabled": self.composite_enabled,
"/rtx/flow/useFlowLibrarySelfShadow": self.use_flow_library_self_shadow,
"/rtx/flow/maxBlocks": self.max_blocks,
}
@property
def enabled_setting_path(self):
return "/rtx/flow/enabled"
class DebugViewSettings(SubSettingsBase):
def __init__(self):
debug_view_items = {
"Off": "",
"Beauty Before Tonemap": "beautyPreTonemap",
"Beauty After Tonemap": "beautyPostTonemap",
"Timing Heat Map": "TimingHeatMap",
"Depth": "depth",
"World Position": "worldPosition",
"Wireframe": "wire",
"Barycentrics": "barycentrics",
"Texture Coordinates 0": "texcoord0",
"Tangent U": "tangentu",
"Tangent V": "tangentv",
"Interpolated Normal": "normal",
"Triangle Normal": "triangleNormal",
"Material Normal": "materialGeometryNormal",
"Instance ID": "instanceId",
"3D Motion Vectors": "targetMotion",
"Shadow (last light)": "shadow",
"Diffuse Reflectance": "diffuseReflectance",
"Specular Reflectance": "reflectance",
"Roughness": "roughness",
"Ambient Occlusion": "ao",
"Reflections": "reflections",
"Reflections 3D Motion Vectors": "reflectionsMotion",
"Translucency": "translucency",
"Radiance": "radiance",
"Diffuse GI": "indirectDiffuse",
"Caustics": "caustics",
"PT Noisy Result": "pathTracerNoisy",
"PT Denoised Result": "pathTracerDenoised",
"RT Noisy Sampled Lighting": "rtNoisySampledLighting",
"RT Denoised Sampled Lighting": "rtDenoisedSampledLighting",
"Developer Debug Texture": "developerDebug",
"Noisy Dome Light": "noisyDomeLightingTex",
"Denoised Dome Light": "denoisedDomeLightingTex",
"RT Noisy Sampled Lighting Diffuse": "rtNoisySampledLightingDiffuse", # ReLAX Only
"RT Noisy Sampled Lighting Specular": "rtNoisySampledLightingSpecular", # ReLAX Only
"RT Denoised Sampled Lighting Diffuse": "rtDenoiseSampledLightingDiffuse", # ReLAX Only
"RT Denoised Sampled Lighting Specular": "rtDenoiseSampledLightingSpecular", # ReLAX Only
"Triangle Normal (OctEnc)": "triangleNormalOctEnc", # ReLAX Only
"Material Normal (OctEnc)": "materialGeometryNormalOctEnc", # ReLAX Only
# Targets not using RT accumulation
"RT Noisy Sampled Lighting (Not Accumulated)": "rtNoisySampledLightingNonAccum",
"RT Noisy Sampled Lighting Diffuse (Not Accumulated)": "sampledLightingDiffuseNonAccum",
"RT Noisy Sampled Lighting Specular (Not Accumulated)": "sampledLightingSpecularNonAccum",
"Reflections (Not Accumulated)": "reflectionsNonAccum",
"Diffuse GI (Not Accumulated)": "indirectDiffuseNonAccum",
}
self.target = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.STRING, "Render Target", "/rtx/debugView/target", range_dict=debug_view_items
)
self.scaling = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Output Value Scaling",
"/rtx/debugView/scaling",
range_from=-1000000,
range_to=1000000,
)
@property
def settings(self):
return {
"/rtx/debugView/target": self.target,
"/rtx/debugView/scaling": self.scaling,
}
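# Illustrative sketch (assumes a running Omniverse app): debug views are selected
# by writing one of the raw string values from debug_view_items, e.g. to visualize
# depth and then return to the normal render:
#
#   carb_settings = lazy.carb.settings.get_settings()
#   carb_settings.set("/rtx/debugView/target", "depth")
#   carb_settings.set("/rtx/debugView/target", "")  # "Off"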
| 18,494 | Python | 43.352518 | 150 | 0.621229 |
StanfordVL/OmniGibson/omnigibson/renderer_settings/real_time_settings.py | import omnigibson.lazy as lazy
from omnigibson.renderer_settings.settings_base import SettingItem, SettingsBase, SubSettingsBase
class RealTimeSettings(SettingsBase):
"""
Real-Time setting group that handles a variety of sub-settings, including:
- Eco Mode
- Anti Aliasing
- Direct Lighting
- Reflections
- Translucency
- Global Volumetric Effects
- Caustics
- Indirect Diffuse Lighting
    - RT Multi-GPU (if multiple GPUs are available)
"""
def __init__(self):
self.eco_mode_settings = EcoModeSettings()
self.anti_aliasing_settings = AntiAliasingSettings()
self.direct_lighting_settings = DirectLightingSettings()
self.reflections_settings = ReflectionsSettings()
self.translucency_settings = TranslucencySettings()
self.global_volumetric_effects_settings = GlobalVolumetricEffectsSettings()
self.caustics_settings = CausticsSettings()
self.indirect_diffuse_lighting_settings = IndirectDiffuseLightingSettings()
gpu_count = lazy.carb.settings.get_settings().get("/renderer/multiGpu/currentGpuCount")
if gpu_count and gpu_count > 1:
self.rt_multi_gpu_settings = RTMultiGPUSettings()
@property
def settings(self):
settings = {}
settings.update(self.eco_mode_settings.settings)
settings.update(self.anti_aliasing_settings.settings)
settings.update(self.direct_lighting_settings.settings)
settings.update(self.reflections_settings.settings)
settings.update(self.translucency_settings.settings)
settings.update(self.global_volumetric_effects_settings.settings)
settings.update(self.caustics_settings.settings)
settings.update(self.indirect_diffuse_lighting_settings.settings)
gpu_count = lazy.carb.settings.get_settings().get("/renderer/multiGpu/currentGpuCount")
if gpu_count and gpu_count > 1:
settings.update(self.rt_multi_gpu_settings.settings)
return settings
class EcoModeSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.max_frames_without_change = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Stop Rendering After This Many Frames Without Changes",
"/rtx/ecoMode/maxFramesWithoutChange",
range_from=0,
range_to=100,
)
@property
def settings(self):
return {
"/rtx/ecoMode/maxFramesWithoutChange": self.max_frames_without_change,
}
@property
def enabled_setting_path(self):
return "/rtx/ecoMode/enabled"
class AntiAliasingSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
antialiasing_ops = ["Off", "TAA", "FXAA"]
if self._carb_settings.get("/ngx/enabled") is True:
antialiasing_ops.append("DLSS")
antialiasing_ops.append("RTXAA")
self.algorithm = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.STRING, "Algorithm", "/rtx/post/aa/op", antialiasing_ops)
# antialiasing_op_idx == 1
# TAA
self.static_ratio = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Static scaling", "/rtx/post/scaling/staticRatio", range_from=0.33, range_to=1
)
self.samples = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.INT, "TAA Samples", "/rtx/post/taa/samples", range_from=1, range_to=16
)
self.alpha = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "TAA history scale", "/rtx/post/taa/alpha", range_from=0, range_to=1
)
# antialiasing_op_idx == 2
# FXAA
self.quality_sub_pix = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Subpixel Quality", "/rtx/post/fxaa/qualitySubPix", range_from=0.0, range_to=1.0
)
self.quality_edge_threshold = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Edge Threshold",
"/rtx/post/fxaa/qualityEdgeThreshold",
range_from=0.0,
range_to=1.0,
)
self.quality_edge_threshold_min = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Edge Threshold Min",
"/rtx/post/fxaa/qualityEdgeThresholdMin",
range_from=0.0,
range_to=1.0,
)
# antialiasing_op_idx == 3 or antialiasing_op_idx == 4
# DLSS and RTXAA
# if antialiasing_op_idx == 3
dlss_opts = ["Performance", "Balanced", "Quality"]
self.exec_mode = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.STRING, "Execution mode", "/rtx/post/dlss/execMode", dlss_opts)
self.sharpness = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Sharpness", "/rtx/post/aa/sharpness", range_from=0.0, range_to=1.0
)
exposure_ops = ["Force self evaluated", "PostProcess Autoexposure", "Fixed"]
self.auto_exposure_mode = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.STRING, "Exposure mode", "/rtx/post/aa/autoExposureMode", exposure_ops
)
# auto_exposure_idx = self._carb_settings.get("/rtx/post/aa/autoExposureMode")
# if auto_exposure_idx == 1
self.exposure_multiplier = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Auto Exposure Multiplier",
"/rtx/post/aa/exposureMultiplier",
range_from=0.00001,
range_to=10.0,
)
# if auto_exposure_idx == 2
self.exposure = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Fixed Exposure Value", "/rtx/post/aa/exposure", range_from=0.00001, range_to=1.0
)
@property
def settings(self):
settings = {
"/rtx/post/aa/op": self.algorithm,
}
antialiasing_op_idx = self._carb_settings.get("/rtx/post/aa/op")
if antialiasing_op_idx == 1:
# TAA
settings.update(
{
"/rtx/post/scaling/staticRatio": self.static_ratio,
"/rtx/post/taa/samples": self.samples,
"/rtx/post/taa/alpha": self.alpha,
}
)
elif antialiasing_op_idx == 2:
# FXAA
settings.update(
{
"/rtx/post/fxaa/qualitySubPix": self.quality_sub_pix,
"/rtx/post/fxaa/qualityEdgeThreshold": self.quality_edge_threshold,
"/rtx/post/fxaa/qualityEdgeThresholdMin": self.quality_edge_threshold_min,
}
)
elif antialiasing_op_idx == 3 or antialiasing_op_idx == 4:
# DLSS and RTXAA
if antialiasing_op_idx == 3:
settings.update(
{"/rtx/post/dlss/execMode": self.exec_mode,}
)
settings.update(
{"/rtx/post/aa/sharpness": self.sharpness, "/rtx/post/aa/autoExposureMode": self.auto_exposure_mode,}
)
auto_exposure_idx = self._carb_settings.get("/rtx/post/aa/autoExposureMode")
if auto_exposure_idx == 1:
settings.update(
{"/rtx/post/aa/exposureMultiplier": self.exposure_multiplier,}
)
elif auto_exposure_idx == 2:
settings.update(
{"/rtx/post/aa/exposure": self.exposure,}
)
return settings
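# Illustrative sketch (assumes a running Omniverse app): as with tone mapping, the
# exposed keys track the current algorithm index under "/rtx/post/aa/op".
#
#   carb_settings = lazy.carb.settings.get_settings()
#   carb_settings.set("/rtx/post/aa/op", 2)  # FXAA
#   assert "/rtx/post/fxaa/qualitySubPix" in AntiAliasingSettings().settings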
class DirectLightingSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.shadows_enabled = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Shadows", "/rtx/shadows/enabled")
self.sampled_lighting_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Sampled Direct Lighting", "/rtx/directLighting/sampledLighting/enabled"
)
self.sampled_lighting_auto_enable = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.BOOL,
"Auto-enable Sampled Lighting Above Light Count Threshold",
"/rtx/directLighting/sampledLighting/autoEnable",
)
self.sampled_lighting_auto_enable_light_count_threshold = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Auto-enable Sampled Lighting: Light Count Threshold",
"/rtx/directLighting/sampledLighting/autoEnableLightCountThreshold",
)
        # if not self._carb_settings.get("/rtx/directLighting/sampledLighting/enabled")
self.shadows_sample_count = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.INT, "Shadow Samples per Pixel", "/rtx/shadows/sampleCount", range_from=1, range_to=16
)
self.shadows_denoiser_quarter_res = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Lower Resolution Shadows Denoiser", "/rtx/shadows/denoiser/quarterRes"
)
self.dome_light_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Dome Lighting", "/rtx/directLighting/domeLight/enabled"
)
self.dome_light_sample_count = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Dome Light Samples per Pixel",
"/rtx/directLighting/domeLight/sampleCount",
range_from=0,
range_to=32,
)
self.dome_light_enabled_in_reflections = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Dome Lighting in Reflections", "/rtx/directLighting/domeLight/enabledInReflections"
)
        # if self._carb_settings.get("/rtx/directLighting/sampledLighting/enabled")
sampled_lighting_spp_items = {"1": 1, "2": 2, "4": 4, "8": 8}
self.samples_per_pixel = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.STRING,
"Samples per Pixel",
"/rtx/directLighting/sampledLighting/samplesPerPixel",
range_dict=sampled_lighting_spp_items,
)
self.clamp_samples_per_pixel_to_number_of_lights = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.BOOL,
"Clamp Sample Count to Light Count",
"/rtx/directLighting/sampledLighting/clampSamplesPerPixelToNumberOfLights",
)
self.reflections_samples_per_pixel = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.STRING,
"Reflections: Light Samples per Pixel",
"/rtx/reflections/sampledLighting/samplesPerPixel",
range_dict=sampled_lighting_spp_items,
)
self.reflections_clamp_samples_per_pixel_to_number_of_lights = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.BOOL,
"Reflections: Clamp Sample Count to Light Count",
"/rtx/reflections/sampledLighting/clampSamplesPerPixelToNumberOfLights",
)
self.max_ray_intensity = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Max Ray Intensity",
"/rtx/directLighting/sampledLighting/maxRayIntensity",
range_from=0.0,
range_to=1000000,
)
self.reflections_max_ray_intensity = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Reflections: Max Ray Intensity",
"/rtx/reflections/sampledLighting/maxRayIntensity",
range_from=0.0,
range_to=1000000,
)
self.enabled_in_reflections = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Dome Lighting in Reflections", "/rtx/directLighting/domeLight/enabledInReflections"
)
firefly_filter_types = {"None": "None", "Median": "Cross-Bilateral Median", "RCRS": "Cross-Bilateral RCRS"}
self.firefly_suppression_type = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.STRING,
"Firefly Filter",
"/rtx/lightspeed/ReLAX/fireflySuppressionType",
range_dict=firefly_filter_types,
)
self.history_clamping_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "History Clamping", "/rtx/lightspeed/ReLAX/historyClampingEnabled"
)
self.denoiser_iterations = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Denoiser Iterations",
"/rtx/lightspeed/ReLAX/aTrousIterations",
range_from=1,
range_to=10,
)
self.diffuse_backscattering_enabled = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.BOOL,
"Enable Extended Diffuse Backscattering",
"/rtx/directLighting/diffuseBackscattering/enabled",
)
self.shadow_offset = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Shadow Ray Offset",
"/rtx/directLighting/diffuseBackscattering/shadowOffset",
range_from=0.1,
range_to=1000,
)
self.extinction = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Extinction",
"/rtx/directLighting/diffuseBackscattering/extinction",
range_from=0.001,
range_to=100,
)
@property
def settings(self):
settings = {
"/rtx/shadows/enabled": self.shadows_enabled,
"/rtx/directLighting/sampledLighting/enabled": self.sampled_lighting_enabled,
"/rtx/directLighting/sampledLighting/autoEnable": self.sampled_lighting_auto_enable,
"/rtx/directLighting/sampledLighting/autoEnableLightCountThreshold": self.sampled_lighting_auto_enable_light_count_threshold,
}
if not self._carb_settings.get("/rtx/directLighting/sampledLighting/enabled"):
settings.update(
{
"/rtx/shadows/sampleCount": self.shadows_sample_count,
"/rtx/shadows/denoiser/quarterRes": self.shadows_denoiser_quarter_res,
"/rtx/directLighting/domeLight/enabled": self.dome_light_enabled,
"/rtx/directLighting/domeLight/sampleCount": self.dome_light_sample_count,
"/rtx/directLighting/domeLight/enabledInReflections": self.dome_light_enabled_in_reflections,
}
)
else:
settings.update(
{
"/rtx/directLighting/sampledLighting/samplesPerPixel": self.samples_per_pixel,
"/rtx/directLighting/sampledLighting/clampSamplesPerPixelToNumberOfLights": self.clamp_samples_per_pixel_to_number_of_lights,
"/rtx/reflections/sampledLighting/samplesPerPixel": self.reflections_samples_per_pixel,
"/rtx/reflections/sampledLighting/clampSamplesPerPixelToNumberOfLights": self.reflections_clamp_samples_per_pixel_to_number_of_lights,
"/rtx/directLighting/sampledLighting/maxRayIntensity": self.max_ray_intensity,
"/rtx/reflections/sampledLighting/maxRayIntensity": self.reflections_max_ray_intensity,
"/rtx/directLighting/domeLight/enabledInReflections": self.enabled_in_reflections,
"/rtx/lightspeed/ReLAX/fireflySuppressionType": self.firefly_suppression_type,
"/rtx/lightspeed/ReLAX/historyClampingEnabled": self.history_clamping_enabled,
"/rtx/lightspeed/ReLAX/aTrousIterations": self.denoiser_iterations,
"/rtx/directLighting/diffuseBackscattering/enabled": self.diffuse_backscattering_enabled,
"/rtx/directLighting/diffuseBackscattering/shadowOffset": self.shadow_offset,
"/rtx/directLighting/diffuseBackscattering/extinction": self.extinction,
}
)
return settings
@property
def enabled_setting_path(self):
return "/rtx/directLighting/enabled"
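# Illustrative sketch (assumes a running Omniverse app): the exposed keys branch on
# whether sampled direct lighting is enabled, so the classic shadow settings and the
# sampled-lighting settings are mutually exclusive in the returned dict.
#
#   carb_settings = lazy.carb.settings.get_settings()
#   carb_settings.set("/rtx/directLighting/sampledLighting/enabled", False)
#   assert "/rtx/shadows/sampleCount" in DirectLightingSettings().settings
#   assert "/rtx/directLighting/sampledLighting/maxRayIntensity" not in DirectLightingSettings().settings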
class ReflectionsSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.max_roughness = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Max Roughness", "/rtx/reflections/maxRoughness", range_from=0.0, range_to=1.0
)
self.max_reflection_bounces = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Max Reflection Bounces",
"/rtx/reflections/maxReflectionBounces",
range_from=0,
range_to=100,
)
@property
def settings(self):
return {
"/rtx/reflections/maxRoughness": self.max_roughness,
"/rtx/reflections/maxReflectionBounces": self.max_reflection_bounces,
}
@property
def enabled_setting_path(self):
return "/rtx/reflections/enabled"
class TranslucencySettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.max_refraction_bounces = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Max Refraction Bounces",
"/rtx/translucency/maxRefractionBounces",
range_from=0,
range_to=100,
)
self.reflection_cutoff = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Secondary Bounce Roughness Cutoff",
"/rtx/translucency/reflectionCutoff",
range_from=0.0,
range_to=1.0,
)
        self.fractional_cutout_opacity = SettingItem(
            self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Fractional Cutout Opacity", "/rtx/raytracing/fractionalCutoutOpacity"
        )
self.virtual_depth = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Depth Correction for DoF", "/rtx/translucency/virtualDepth"
)
@property
def settings(self):
        return {
            "/rtx/translucency/maxRefractionBounces": self.max_refraction_bounces,
            "/rtx/translucency/reflectionCutoff": self.reflection_cutoff,
            "/rtx/raytracing/fractionalCutoutOpacity": self.fractional_cutout_opacity,
            "/rtx/translucency/virtualDepth": self.virtual_depth,
        }
@property
def enabled_setting_path(self):
return "/rtx/translucency/enabled"
class GlobalVolumetricEffectsSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.max_accumulation_frames = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Accumulation Frames",
"/rtx/raytracing/inscattering/maxAccumulationFrames",
range_from=1,
range_to=255,
)
self.depth_slices = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"# Depth Slices",
"/rtx/raytracing/inscattering/depthSlices",
range_from=16,
range_to=1024,
)
self.pixel_ratio = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.INT, "Pixel Density", "/rtx/raytracing/inscattering/pixelRatio", range_from=4, range_to=64
)
self.max_distance = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Max inscattering Distance",
"/rtx/raytracing/inscattering/maxDistance",
range_from=10,
range_to=100000,
)
self.atmosphere_height = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Atmosphere Height",
"/rtx/raytracing/inscattering/atmosphereHeight",
range_from=-100000,
range_to=100000,
)
self.transmittance_color = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Transmittance Color", "/rtx/raytracing/inscattering/transmittanceColor"
)
self.transmittance_measurement_distance = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Transmittance Measurment Distance",
"/rtx/raytracing/inscattering/transmittanceMeasurementDistance",
range_from=0.0001,
range_to=1000000,
)
self.single_scattering_albedo = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Single Scattering Albedo", "/rtx/raytracing/inscattering/singleScatteringAlbedo",
)
self.anisotropy_factor = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Anisotropy Factor",
"/rtx/raytracing/inscattering/anisotropyFactor",
range_from=-0.999,
range_to=0.999,
)
self.slice_distribution_exponent = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Slice Distribution Exponent",
"/rtx/raytracing/inscattering/sliceDistributionExponent",
range_from=1,
range_to=16,
)
self.blur_sigma = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Inscatter Blur Sigma",
"/rtx/raytracing/inscattering/blurSigma",
            range_from=0.0,
            range_to=10.0,
)
self.dithering_scale = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Inscatter Dithering Scale",
"/rtx/raytracing/inscattering/ditheringScale",
range_from=0,
range_to=100,
)
self.spatial_jitter_scale = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Spatial Sample Jittering Scale",
"/rtx/raytracing/inscattering/spatialJitterScale",
range_from=0.0,
range_to=1,
)
self.temporal_jitter_scale = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Temporal Reprojection Jittering Scale",
"/rtx/raytracing/inscattering/temporalJitterScale",
range_from=0.0,
range_to=1,
)
self.use_detail_noise = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Apply Density Noise", "/rtx/raytracing/inscattering/useDetailNoise"
)
self.detail_noise_scale = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Density Noise World Scale",
"/rtx/raytracing/inscattering/detailNoiseScale",
range_from=0.0,
range_to=1,
)
self.noise_animation_speed_x = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Density Noise Animation Speed X",
"/rtx/raytracing/inscattering/noiseAnimationSpeedX",
range_from=-1.0,
range_to=1.0,
)
self.noise_animation_speed_y = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Density Noise Animation Speed Y",
"/rtx/raytracing/inscattering/noiseAnimationSpeedY",
range_from=-1.0,
range_to=1.0,
)
self.noise_animation_speed_z = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Density Noise Animation Speed Z",
"/rtx/raytracing/inscattering/noiseAnimationSpeedZ",
range_from=-1.0,
range_to=1.0,
)
self.noise_scale_range_min = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Density Noise Scale Min",
"/rtx/raytracing/inscattering/noiseScaleRangeMin",
range_from=-1.0,
range_to=5.0,
)
self.noise_scale_range_max = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Density Noise Scale Max",
"/rtx/raytracing/inscattering/noiseScaleRangeMax",
range_from=-1.0,
range_to=5.0,
)
self.noise_num_octaves = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Density Noise Octave Count",
"/rtx/raytracing/inscattering/noiseNumOctaves",
range_from=1,
range_to=8,
)
self.use_32bit_precision = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Use 32-bit Precision", "/rtx/raytracing/inscattering/use32bitPrecision"
)
@property
def settings(self):
return {
"/rtx/raytracing/inscattering/maxAccumulationFrames": self.max_accumulation_frames,
"/rtx/raytracing/inscattering/depthSlices": self.depth_slices,
"/rtx/raytracing/inscattering/pixelRatio": self.pixel_ratio,
"/rtx/raytracing/inscattering/maxDistance": self.max_distance,
"/rtx/raytracing/inscattering/atmosphereHeight": self.atmosphere_height,
"/rtx/raytracing/inscattering/transmittanceColor": self.transmittance_color,
"/rtx/raytracing/inscattering/transmittanceMeasurementDistance": self.transmittance_measurement_distance,
"/rtx/raytracing/inscattering/singleScatteringAlbedo": self.single_scattering_albedo,
"/rtx/raytracing/inscattering/anisotropyFactor": self.anisotropy_factor,
"/rtx/raytracing/inscattering/sliceDistributionExponent": self.slice_distribution_exponent,
"/rtx/raytracing/inscattering/blurSigma": self.blur_sigma,
"/rtx/raytracing/inscattering/ditheringScale": self.dithering_scale,
"/rtx/raytracing/inscattering/spatialJitterScale": self.spatial_jitter_scale,
"/rtx/raytracing/inscattering/temporalJitterScale": self.temporal_jitter_scale,
"/rtx/raytracing/inscattering/useDetailNoise": self.use_detail_noise,
"/rtx/raytracing/inscattering/detailNoiseScale": self.detail_noise_scale,
"/rtx/raytracing/inscattering/noiseAnimationSpeedX": self.noise_animation_speed_x,
"/rtx/raytracing/inscattering/noiseAnimationSpeedY": self.noise_animation_speed_y,
"/rtx/raytracing/inscattering/noiseAnimationSpeedZ": self.noise_animation_speed_z,
"/rtx/raytracing/inscattering/noiseScaleRangeMin": self.noise_scale_range_min,
"/rtx/raytracing/inscattering/noiseScaleRangeMax": self.noise_scale_range_max,
"/rtx/raytracing/inscattering/noiseNumOctaves": self.noise_num_octaves,
"/rtx/raytracing/inscattering/use32bitPrecision": self.use_32bit_precision,
}
@property
def enabled_setting_path(self):
return "/rtx/raytracing/globalVolumetricEffects/enabled"
class CausticsSettings(SettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
        self.photon_count_multiplier = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Photon Count Multiplier",
"/rtx/raytracing/caustics/photonCountMultiplier",
range_from=1,
range_to=5000,
)
self.photon_max_bounces = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Photon Max Bounces",
"/rtx/raytracing/caustics/photonMaxBounces",
range_from=1,
range_to=20,
)
        self.position_phi = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Position Phi", "/rtx/raytracing/caustics/positionPhi", range_from=0.1, range_to=50
)
self.normal_phi = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Normal Phi", "/rtx/raytracing/caustics/normalPhi", range_from=0.3, range_to=1
)
self.filtering_iterations = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Filter Iterations",
"/rtx/raytracing/caustics/eawFilteringSteps",
range_from=0,
range_to=10,
)
@property
def settings(self):
return {
"/rtx/raytracing/caustics/photonCountMultiplier": self.photon_count_nultiplier,
"/rtx/raytracing/caustics/photonMaxBounces": self.photon_max_bounces,
"/rtx/raytracing/caustics/positionPhi": self.positio_phi,
"/rtx/raytracing/caustics/normalPhi": self.normal_phi,
"/rtx/raytracing/caustics/eawFilteringSteps": self.filtering_iterations,
}
@property
def enabled_setting_path(self):
return "/rtx/caustics/enabled"
class IndirectDiffuseLightingSettings(SettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.ambient_light_color = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Ambient Light Color", "/rtx/sceneDb/ambientLightColor"
)
self.ambient_light_intensity = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Ambient Light Intensity",
"/rtx/sceneDb/ambientLightIntensity",
range_from=0.0,
range_to=10.0,
)
self.ambient_occlusion_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Ambient Occlusion (AO)", "/rtx/ambientOcclusion/enabled"
)
# if self._carb_settings.get("/rtx/ambientOcclusion/enabled")
self.ray_length_in_cm = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"AO: Ray Length (cm)",
"/rtx/ambientOcclusion/rayLengthInCm",
range_from=0.0,
range_to=2000.0,
)
self.min_samples = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"AO: Minimum Samples per Pixel",
"/rtx/ambientOcclusion/minSamples",
range_from=1,
range_to=16,
)
self.max_samples = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"AO: Maximum Samples per Pixel",
"/rtx/ambientOcclusion/maxSamples",
range_from=1,
range_to=16,
)
self.aggressive_denoising = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "AO: Aggressive denoising", "/rtx/ambientOcclusion/aggressiveDenoising"
)
self.indirect_diffuse_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Indirect Diffuse GI", "/rtx/indirectDiffuse/enabled"
)
# if self._carb_settings.get("/rtx/indirectDiffuse/enabled")
gi_denoising_techniques_ops = ["NVRTD", "NRD:Reblur"]
self.fetch_sample_count = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Samples per Pixel",
"/rtx/indirectDiffuse/fetchSampleCount",
range_from=0,
range_to=4,
)
self.max_bounces = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.INT, "Max Bounces", "/rtx/indirectDiffuse/maxBounces", range_from=0, range_to=16
)
self.scaling_factor = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Intensity", "/rtx/indirectDiffuse/scalingFactor", range_from=0.0, range_to=20.0
)
self.denoiser_method = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.STRING,
"Denoising technique",
"/rtx/indirectDiffuse/denoiser/method",
range_list=gi_denoising_techniques_ops,
)
# if enabled and self._carb_settings.get("/rtx/indirectDiffuse/denoiser/method") == 0:
self.kernel_radius = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Kernel radius",
"/rtx/indirectDiffuse/denoiser/kernelRadius",
range_from=1,
range_to=64,
)
self.iterations = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Iteration count",
"/rtx/indirectDiffuse/denoiser/iterations",
range_from=1,
range_to=10,
)
self.max_history = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Max History Length",
"/rtx/indirectDiffuse/denoiser/temporal/maxHistory",
range_from=1,
range_to=100,
)
# if enabled and self._carb_settings.get("/rtx/indirectDiffuse/denoiser/method") == 1:
self.max_accumulated_frame_num = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Frames in History",
"/rtx/lightspeed/NRD_ReblurDiffuse/maxAccumulatedFrameNum",
range_from=0,
range_to=63,
)
self.max_fast_accumulated_frame_num = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Frames in Fast History",
"/rtx/lightspeed/NRD_ReblurDiffuse/maxFastAccumulatedFrameNum",
range_from=0,
range_to=63,
)
self.plane_distance_sensitivity = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Plane Distance Sensitivity",
"/rtx/lightspeed/NRD_ReblurDiffuse/planeDistanceSensitivity",
range_from=0,
range_to=1,
)
self.blur_radius = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Blur Radius",
"/rtx/lightspeed/NRD_ReblurDiffuse/blurRadius",
range_from=0,
range_to=100,
)
@property
def settings(self):
settings = {
"/rtx/sceneDb/ambientLightColor": self.ambient_light_color,
"/rtx/sceneDb/ambientLightIntensity": self.ambient_light_intensity,
"/rtx/ambientOcclusion/enabled": self.ambient_occlusion_enabled,
"/rtx/indirectDiffuse/enabled": self.indirect_diffuse_enabled,
}
if self._carb_settings.get("/rtx/ambientOcclusion/enabled"):
settings.update(
{
"/rtx/ambientOcclusion/rayLengthInCm": self.ray_length_in_cm,
"/rtx/ambientOcclusion/minSamples": self.min_samples,
"/rtx/ambientOcclusion/maxSamples": self.max_samples,
"/rtx/ambientOcclusion/aggressiveDenoising": self.aggressive_denoising,
}
)
if self._carb_settings.get("/rtx/indirectDiffuse/enabled"):
settings.update(
{
"/rtx/indirectDiffuse/fetchSampleCount": self.max_bounces,
"/rtx/indirectDiffuse/maxBounces": self.ambient_light_color,
"/rtx/indirectDiffuse/scalingFactor": self.scaling_factor,
"/rtx/indirectDiffuse/denoiser/method": self.denoiser_method,
}
)
if self._carb_settings.get("/rtx/indirectDiffuse/denoiser/method") == 0:
settings.update(
{
"/rtx/indirectDiffuse/denoiser/kernelRadius": self.kernel_radius,
"/rtx/indirectDiffuse/denoiser/iterations": self.iterations,
"/rtx/indirectDiffuse/denoiser/temporal/maxHistory": self.max_history,
}
)
elif self._carb_settings.get("/rtx/indirectDiffuse/denoiser/method") == 1:
settings.update(
{
"/rtx/lightspeed/NRD_ReblurDiffuse/maxAccumulatedFrameNum": self.max_accumulated_frame_num,
"/rtx/lightspeed/NRD_ReblurDiffuse/maxFastAccumulatedFrameNum": self.max_fast_accumulated_frame_num,
"/rtx/lightspeed/NRD_ReblurDiffuse/planeDistanceSensitivity": self.plane_distance_sensitivity,
"/rtx/lightspeed/NRD_ReblurDiffuse/blurRadius": self.blur_radius,
}
)
return settings
class RTMultiGPUSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
currentGpuCount = self._carb_settings.get("/renderer/multiGpu/currentGpuCount")
self.tile_count = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.INT, "Tile Count", "/rtx/realtime/mgpu/tileCount", range_from=2, range_to=currentGpuCount
)
        self.master_post_process_only = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "GPU 0 Post Process Only", "/rtx/realtime/mgpu/masterPostProcessOnly"
)
self.tile_overlap = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.INT, "Tile Overlap (Pixels)", "/rtx/realtime/mgpu/tileOverlap", range_from=0, range_to=256
)
self.tile_overlap_blend_fraction = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Fraction of Overlap Pixels to Blend",
"/rtx/realtime/mgpu/tileOverlapBlendFraction",
range_from=0.0,
range_to=1.0,
)
@property
def settings(self):
return {
"/rtx/realtime/mgpu/tileCount": self.tile_count,
"/rtx/realtime/mgpu/masterPostProcessOnly": self.master_post_processOnly,
"/rtx/realtime/mgpu/tileOverlap": self.tile_overlap,
"/rtx/realtime/mgpu/tileOverlapBlendFraction": self.tile_overlap_blend_fraction,
}
@property
def enabled_setting_path(self):
return "/rtx/realtime/mgpu/enabled"
| 39,074 | Python | 42.320399 | 154 | 0.609203 |
StanfordVL/OmniGibson/omnigibson/renderer_settings/__init__.py | from omnigibson.renderer_settings.renderer_settings import RendererSettings
| 76 | Python | 37.499981 | 75 | 0.894737 |
StanfordVL/OmniGibson/omnigibson/renderer_settings/path_tracing_settings.py | import omnigibson.lazy as lazy
from omnigibson.renderer_settings.settings_base import SettingItem, SettingsBase, SubSettingsBase
class PathTracingSettings(SettingsBase):
"""
Path-Tracing setting group that handles a variety of sub-settings, including:
- Anti-Aliasing
- Firefly Filter
- Path-Tracing
- Sampling & Caching
- Denoising
- Path-Traced Fog
- Heterogeneous Volumes (Path Traced Volume)
- Multi GPU (if multiple GPUs available)
"""
def __init__(self):
self.anti_aliasing_settings = AntiAliasingSettings()
self.firefly_filter_settings = FireflyFilterSettings()
        self.path_tracing_settings = PathTracingCoreSettings()
self.sampling_and_caching_settings = SamplingAndCachingSettings()
self.denoising_settings = DenoisingSettings()
self.path_traced_fog_settings = PathTracedFogSettings()
self.path_traced_volume_settings = PathTracedVolumeSettings()
if lazy.carb.settings.get_settings().get("/renderer/multiGpu/currentGpuCount") > 1:
self.multi_gpu_settings = MultiGPUSettings()
@property
def settings(self):
settings = {}
settings.update(self.anti_aliasing_settings.settings)
settings.update(self.firefly_filter_settings.settings)
settings.update(self.path_tracing_settings.settings)
settings.update(self.sampling_and_caching_settings.settings)
settings.update(self.denoising_settings.settings)
settings.update(self.path_traced_fog_settings.settings)
settings.update(self.path_traced_volume_settings.settings)
if lazy.carb.settings.get_settings().get("/renderer/multiGpu/currentGpuCount") > 1:
settings.update(self.multi_gpu_settings.settings)
return settings
class AntiAliasingSettings(SubSettingsBase):
def __init__(self):
pt_aa_ops = ["Box", "Triangle", "Gaussian", "Uniform"]
self.sample_pattern = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.STRING, "Anti-Aliasing Sample Pattern", "/rtx/pathtracing/aa/op", pt_aa_ops
)
self.filter_radius = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Anti-Aliasing Radius",
"/rtx/pathtracing/aa/filterRadius",
range_from=0.0001,
range_to=5.0,
)
@property
def settings(self):
return {
"/rtx/pathtracing/aa/op": self.sample_pattern,
"/rtx/pathtracing/aa/filterRadius": self.filter_radius,
}
class FireflyFilterSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.max_intensity_per_sample = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Max Ray Intensity Glossy",
"/rtx/pathtracing/fireflyFilter/maxIntensityPerSample",
range_from=0,
range_to=100000,
)
        self.max_intensity_per_sample_diffuse = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Max Ray Intensity Diffuse",
"/rtx/pathtracing/fireflyFilter/maxIntensityPerSampleDiffuse",
range_from=0,
range_to=100000,
)
@property
def settings(self):
return {
"/rtx/pathtracing/fireflyFilter/maxIntensityPerSample": self.max_intensity_per_sample,
"/rtx/pathtracing/fireflyFilter/maxIntensityPerSampleDiffuse": self.max_intensityper_sample_diffuse,
}
@property
def enabled_setting_path(self):
return "/rtx/pathtracing/fireflyFilter/enabled"
class PathTracingCoreSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.pathtracing_max_bounces = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.INT, "Max Bounces", "/rtx/pathtracing/maxBounces", range_from=0, range_to=64
)
self.max_specular_and_transmission_bounces = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Max Specular and Transmission Bounces",
"/rtx/pathtracing/maxSpecularAndTransmissionBounces",
range_from=1,
range_to=128,
)
        self.max_volume_bounces = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Max SSS Volume Scattering Bounces",
"/rtx/pathtracing/maxVolumeBounces",
range_from=0,
range_to=1024,
)
self.ptfog_max_bounces = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Max Fog Scattering Bounces",
"/rtx/pathtracing/ptfog/maxBounces",
range_from=1,
range_to=10,
)
self.ptvol_max_bounces = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Max Heterogeneous Volume Scattering Bounces",
"/rtx/pathtracing/ptvol/maxBounces",
range_from=0,
range_to=1024,
)
clamp_spp = self._carb_settings.get("/rtx/pathtracing/clampSpp")
        if clamp_spp > 1:  # ideally the threshold would be 0, but a (1, 1) range completely disables the UI control
self.spp = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Samples per Pixel per Frame (1 to {})".format(clamp_spp),
"/rtx/pathtracing/spp",
range_from=1,
range_to=clamp_spp,
)
else:
self.spp = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Samples per Pixel per Frame",
"/rtx/pathtracing/spp",
range_from=1,
range_to=1048576,
)
self.total_spp = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Total Samples per Pixel (0 = inf)",
"/rtx/pathtracing/totalSpp",
range_from=0,
range_to=1048576,
)
self.fractional_cutout_opacity = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Fractional Cutout Opacity", "/rtx/pathtracing/fractionalCutoutOpacity"
)
self.reset_pt_accum_on_anim_time_change = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Reset Accumulation on Time Change", "/rtx/resetPtAccumOnAnimTimeChange"
)
@property
def settings(self):
return {
"/rtx/pathtracing/maxBounces": self.pathtracing_max_bounces,
"/rtx/pathtracing/maxSpecularAndTransmissionBounces": self.max_specular_and_transmission_bounces,
"/rtx/pathtracing/maxVolumeBounces": self.maxvolume_bounces,
"/rtx/pathtracing/ptfog/maxBounces": self.ptfog_max_bounces,
"/rtx/pathtracing/ptvol/maxBounces": self.ptvol_max_bounces,
"/rtx/pathtracing/spp": self.spp,
"/rtx/pathtracing/totalSpp": self.total_spp,
"/rtx/pathtracing/fractionalCutoutOpacity": self.fractional_cutout_opacity,
"/rtx/resetPtAccumOnAnimTimeChange": self.reset_pt_accum_on_anim_time_change,
}
class SamplingAndCachingSettings(SubSettingsBase):
def __init__(self):
self.cached_enabled = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Caching", "/rtx/pathtracing/cached/enabled")
self.lightcache_cached_enabled = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Enable Many-Light Sampling", "/rtx/pathtracing/lightcache/cached/enabled"
)
@property
def settings(self):
return {
"/rtx/pathtracing/cached/enabled": self.cached_enabled,
"/rtx/pathtracing/lightcache/cached/enabled": self.lightcache_cached_enabled,
}
class DenoisingSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.blend_factor = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"OptiX Denoiser Blend Factor",
"/rtx/pathtracing/optixDenoiser/blendFactor",
range_from=0,
range_to=1,
)
@property
def settings(self):
return {
"/rtx/pathtracing/optixDenoiser/blendFactor": self.blend_factor,
}
@property
def enabled_setting_path(self):
return "/rtx/pathtracing/optixDenoiser/enabled"
class PathTracedFogSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.density = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Density", "/rtx/pathtracing/ptfog/density", range_from=0, range_to=1
)
self.height = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Height", "/rtx/pathtracing/ptfog/height", range_from=-10, range_to=1000
)
self.falloff = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Falloff", "/rtx/pathtracing/ptfog/falloff", range_from=0, range_to=100
)
self.color = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.COLOR3, "Color", "/rtx/pathtracing/ptfog/color", range_from=0, range_to=1
)
self.asymmetry = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.FLOAT,
"Asymmetry (g)",
"/rtx/pathtracing/ptfog/asymmetry",
range_from=-0.99,
range_to=0.99,
)
self.z_up = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Use +Z Axis for Height", "/rtx/pathtracing/ptfog/ZUp")
@property
def settings(self):
return {
"/rtx/pathtracing/ptfog/density": self.density,
"/rtx/pathtracing/ptfog/height": self.height,
"/rtx/pathtracing/ptfog/falloff": self.falloff,
"/rtx/pathtracing/ptfog/color": self.color,
"/rtx/pathtracing/ptfog/asymmetry": self.asymmetry,
"/rtx/pathtracing/ptfog/ZUp": self.z_up,
}
@property
def enabled_setting_path(self):
return "/rtx/pathtracing/ptfog/enabled"
class PathTracedVolumeSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
pt_vol_tr_ops = ["Biased Ray Marching", "Ratio Tracking", "Brute-force Ray Marching"]
self.transmittance_method = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.STRING,
"Transmittance Method",
"/rtx/pathtracing/ptvol/transmittanceMethod",
range_list=pt_vol_tr_ops,
)
self.max_collision_count = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Max Collision Count",
"/rtx/pathtracing/ptvol/maxCollisionCount",
range_from=0,
range_to=1024,
)
self.max_light_collision_count = SettingItem(
self,
lazy.omni.kit.widget.settings.SettingType.INT,
"Max Light Collision Count",
"/rtx/pathtracing/ptvol/maxLightCollisionCount",
range_from=0,
range_to=1024,
)
self.max_density = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "Max Density", "/rtx/pathtracing/ptvol/maxDensity", range_from=0, range_to=1000
)
self.fast_vdb = SettingItem(self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Fast VDB", "/rtx/pathtracing/ptvol/fastVdb")
# if self._carb_settings.get("/rtx/pathtracing/ptvol/fastVdb")
        self.auto_majorant_vdb = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Fast VDB Auto majorant", "/rtx/pathtracing/ptvol/autoMajorantVdb"
)
@property
def settings(self):
settings = {
"/rtx/pathtracing/ptvol/transmittanceMethod": self.transmittance_method,
"/rtx/pathtracing/ptvol/maxCollisionCount": self.max_collision_count,
"/rtx/pathtracing/ptvol/maxLightCollisionCount": self.max_light_collision_count,
"/rtx/pathtracing/ptvol/maxDensity": self.max_density,
"/rtx/pathtracing/ptvol/fastVdb": self.fast_vdb,
}
if self._carb_settings.get("/rtx/pathtracing/ptvol/fastVdb"):
settings.update(
{"/rtx/pathtracing/ptvol/autoMajorantVdb": self.autoMajorant_vdb,}
)
return settings
@property
def enabled_setting_path(self):
return "/rtx/pathtracing/ptvol/enabled"
class MultiGPUSettings(SubSettingsBase):
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.weight_gpu0 = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.FLOAT, "GPU 0 Weight", "/rtx/pathtracing/mgpu/weightGpu0", range_from=0, range_to=1
)
self.compress_radiance = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Compress Radiance", "/rtx/pathtracing/mgpu/compressRadiance"
)
self.compress_albedo = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Compress Albedo", "/rtx/pathtracing/mgpu/compressAlbedo"
)
self.compress_normals = SettingItem(
self, lazy.omni.kit.widget.settings.SettingType.BOOL, "Compress Normals", "/rtx/pathtracing/mgpu/compressNormals"
)
@property
def settings(self):
return {
"/rtx/pathtracing/mgpu/weightGpu0": self.weight_gpu0,
"/rtx/pathtracing/mgpu/compressRadiance": self.compress_radiance,
"/rtx/pathtracing/mgpu/compressAlbedo": self.compress_albedo,
"/rtx/pathtracing/mgpu/compressNormals": self.compress_normals,
}
@property
def enabled_setting_path(self):
return "/rtx/pathtracing/mgpu/enabled"
| 14,389 | Python | 39.083565 | 148 | 0.623601 |
StanfordVL/OmniGibson/omnigibson/renderer_settings/renderer_settings.py | import omnigibson.lazy as lazy
from omnigibson.renderer_settings.common_settings import CommonSettings
from omnigibson.renderer_settings.path_tracing_settings import PathTracingSettings
from omnigibson.renderer_settings.post_processing_settings import PostProcessingSettings
from omnigibson.renderer_settings.real_time_settings import RealTimeSettings
def singleton(cls):
instances = {}
def getinstance():
if cls not in instances:
instances[cls] = cls()
return instances[cls]
return getinstance
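# Note: `singleton` memoizes one instance per decorated class, so every
# `RendererSettings()` call below returns the same shared object.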
@singleton
class RendererSettings:
"""
Controller for all renderer settings.
"""
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
self.common_settings = CommonSettings()
self.path_tracing_settings = PathTracingSettings()
self.post_processing_settings = PostProcessingSettings()
self.real_time_settings = RealTimeSettings()
def set_setting(self, path, value):
"""
Sets setting @path with value @value.
Args:
path (str): Path of the setting to set.
            value (any): Value to set for setting @path.
"""
if path not in self.settings:
raise NotImplementedError(f"Setting {path} is not supported.")
self.settings[path].set(value)
def reset_setting(self, path):
"""
Resets setting @path to default value.
Args:
path (str): Path of the setting to reset.
"""
if path not in self.settings:
raise NotImplementedError(f"Setting {path} is not supported.")
self.settings[path].reset()
def get_setting_from_path(self, path):
"""
Get the value of setting @path.
Args:
path (str): Path of the setting to get.
Returns:
any: Value of the requested setting @path.
"""
return self._carb_settings.get(path)
def get_current_renderer(self):
"""
Get the current renderer.
        Returns:
            str: The current renderer.
"""
return lazy.omni.rtx.window.settings.RendererSettingsFactory.get_current_renderer()
def set_current_renderer(self, renderer):
"""
Set the current renderer to @renderer.
Args:
renderer (str): The renderer to set as current (e.g. Real-Time, Path-Traced).
"""
assert (
renderer in lazy.omni.rtx.window.settings.RendererSettingsFactory.get_registered_renderers()
), f"renderer must be one of {lazy.omni.rtx.window.settings.RendererSettingsFactory.get_registered_renderers()}"
print(f"Set current renderer to {renderer}.")
lazy.omni.rtx.window.settings.RendererSettingsFactory.set_current_renderer(renderer)
@property
def settings(self):
"""
Get all available settings.
Returns:
dict: A dictionary of all available settings.
Keys are setting paths and values are setting item objects.
"""
settings = {}
settings.update(self.common_settings.settings)
settings.update(self.path_tracing_settings.settings)
settings.update(self.post_processing_settings.settings)
settings.update(self.real_time_settings.settings)
return settings
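# Illustrative usage sketch (hedged: assumes a running Omniverse app; renderer names
# follow the docstring above, e.g. "Real-Time" or "Path-Traced"):
if __name__ == "__main__":
    renderer_settings = RendererSettings()
    renderer_settings.set_current_renderer("Path-Traced")
    renderer_settings.set_setting("/rtx/pathtracing/spp", 16)
    print(renderer_settings.get_setting_from_path("/rtx/pathtracing/spp"))
    renderer_settings.reset_setting("/rtx/pathtracing/spp")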
| 3,431 | Python | 31.685714 | 120 | 0.632469 |
StanfordVL/OmniGibson/omnigibson/renderer_settings/settings_base.py | from abc import ABCMeta
import numpy as np
import omnigibson.lazy as lazy
class SettingsBase(metaclass=ABCMeta):
"""
Base class for all renderer settings classes.
Settings classes include Common, Real-Time (Ray-Tracing), Path-Tracing and Post Processing.
"""
class SubSettingsBase(metaclass=ABCMeta):
"""
Base class for all renderer sub-settings classes.
"""
def __init__(self):
self._carb_settings = lazy.carb.settings.get_settings()
@property
def enabled_setting_path(self):
"""
The path of "enabled" setting for this sub-settings class.
Subclass with "enabled" mode needs to overwrite this method.
Returns:
str or None: The path of "enabled" mode for this sub-setting class.
Defaults to None, which means this sub-setting group cannot be enabled/disabled.
"""
return None
def is_enabled(self):
"""
Get the enabled status for this sub-setting class.
Returns:
bool: Whether this sub-setting group is enabled.
Returns true if this sub-setting group has no "enabled" mode.
"""
if not self.enabled_setting_path:
return True
return self._carb_settings.get(self.enabled_setting_path)
def enable(self):
"""
Enable this sub-setting class.
"""
if not self.enabled_setting_path:
print(f"{self.__class__.__name__} has no enabled mode.")
return
self._carb_settings.set_bool(self.enabled_setting_path, True)
def disable(self):
"""
Disable this sub-setting class.
"""
if not self.enabled_setting_path:
print(f"{self.__class__.__name__} has no enabled mode.")
return
self._carb_settings.set_bool(self.enabled_setting_path, False)
class SettingItem:
"""
A wrapper of an individual setting item.
Args:
owner (:class:`SubSettingsBase`): The SubSettingsBase object owning this setting.
setting_type (:class:`SettingType`): Setting type (e.g. float, int).
name (str): Description of this setting.
path (str): Path of this setting.
range_from (float): The lower bound of the values for this setting. Defaults to -inf.
        range_to (float): The upper bound of the values for this setting. Defaults to inf.
range_list (list): Possible values for this setting. Defaults to None.
range_dict (dict): Possible values for this setting. Defaults to None.
"""
def __init__(
self,
owner,
setting_type,
name,
path,
range_from=-float("inf"),
range_to=float("inf"),
range_list=None,
range_dict=None,
):
self._carb_settings = lazy.carb.settings.get_settings()
self.owner = owner
self.setting_type = setting_type
self.name = name
self.path = path
self.range_from = range_from
self.range_to = range_to
self.range_list = range_list
self.range_dict = range_dict
self.initial_value = self.value
@property
def value(self):
"""
Get the current setting value.
Returns:
any: The current setting value.
"""
return self._carb_settings.get(self.path)
def get(self):
"""
Get the current setting value.
Returns:
any: The current setting value.
"""
return self.value
def reset(self):
"""
Reset the current setting value to default.
"""
self.set(self.initial_value)
def set(self, value):
"""
Set the current setting to @value.
Args:
value (any): Value to set for the current setting value.
"""
print(f"Set setting {self.path} ({self.name}) to {value}.") # carb.log_info
if not self.owner.is_enabled():
print(f"Note: {self.owner.enabled_setting_path} is not enabled.")
# Validate range list and range dict.
if self.range_list:
assert value in self.range_list, f"Setting {self.path} must be chosen from {self.range_list}."
if self.range_dict:
assert isinstance(self.range_dict, dict)
assert (
value in self.range_dict.values()
), f"Setting {self.path} must be chosen from a value (not key) in {self.range_dict}."
if self.setting_type == lazy.omni.kit.widget.settings.SettingType.FLOAT:
assert isinstance(value, (int, float)), f"Setting {self.path} must be of type float."
assert (
value >= self.range_from and value <= self.range_to
), f"Setting {self.path} must be within range ({self.range_from}, {self.range_to})."
self._carb_settings.set_float(self.path, value)
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.INT:
assert isinstance(value, int), f"Setting {self.path} must be of type int."
assert (
value >= self.range_from and value <= self.range_to
), f"Setting {self.path} must be within range ({self.range_from}, {self.range_to})."
self._carb_settings.set_int(self.path, value)
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.COLOR3:
assert (
isinstance(value, (list, tuple, np.ndarray)) and len(value) == 3
), f"Setting {self.path} must be a list of 3 numbers within range [0,1]."
for v in value:
assert (
isinstance(v, (int, float)) and v >= 0 and v <= 1
), f"Setting {self.path} must be a list of 3 numbers within range [0,1]."
self._carb_settings.set_float_array(self.path, value)
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.BOOL:
assert isinstance(value, bool), f"Setting {self.path} must be of type bool."
self._carb_settings.set_bool(self.path, value)
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.STRING:
assert isinstance(value, str), f"Setting {self.path} must be of type str."
self._carb_settings.set_string(self.path, value)
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.DOUBLE3:
assert (
isinstance(value, (list, tuple, np.ndarray)) and len(value) == 3
), f"Setting {self.path} must be a list of 3 floats."
for v in value:
assert isinstance(v, (int, float)), f"Setting {self.path} must be a list of 3 floats."
self._carb_settings.set_float_array(self.path, value)
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.INT2:
assert (
isinstance(value, (list, tuple, np.ndarray)) and len(value) == 2
), f"Setting {self.path} must be a list of 2 ints."
for v in value:
assert isinstance(v, int), f"Setting {self.path} must be a list of 2 ints."
self._carb_settings.set_int_array(self.path, value)
elif self.setting_type == lazy.omni.kit.widget.settings.SettingType.DOUBLE2:
assert (
isinstance(value, (list, tuple, np.ndarray)) and len(value) == 2
), f"Setting {self.path} must be a list of 2 floats."
for v in value:
assert isinstance(v, (int, float)), f"Setting {self.path} must be a list of 2 floats."
self._carb_settings.set_float_array(self.path, value)
else:
raise TypeError(f"Setting type {self.setting_type} is not supported.")
| 7,821 | Python | 36.787439 | 106 | 0.586626 |
StanfordVL/OmniGibson/omnigibson/scene_graphs/graph_builder.py | import itertools
import os
import networkx as nx
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from omnigibson import object_states
from omnigibson.macros import create_module_macros
from omnigibson.sensors import VisionSensor
from omnigibson.object_states.factory import get_state_name
from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin, RelativeObjectState
from omnigibson.utils import transform_utils as T
def _formatted_aabb(obj):
return T.pose2mat((obj.aabb_center, [0, 0, 0, 1])), obj.aabb_extent
class SceneGraphBuilder(object):
def __init__(
self,
robot_name=None,
egocentric=False,
full_obs=False,
only_true=False,
merge_parallel_edges=False,
exclude_states=(object_states.Touching,)
):
"""
A utility that builds a scene graph with objects as nodes and relative states as edges,
alongside additional metadata.
Args:
robot_name (str): Name of the robot whose POV the scene graph will be from. If None, we assert that there
is exactly one robot in the scene and use that robot.
egocentric (bool): Whether the objects should have poses in the world frame or robot frame.
full_obs (bool): Whether all objects should be updated or only those in FOV of the robot.
only_true (bool): Whether edges should be created only for relative states that have a value True, or for all
relative states (with the appropriate value attached as an attribute).
merge_parallel_edges (bool): Whether parallel edges (e.g. different states of the same pair of objects) should
exist (making the graph a MultiDiGraph) or should be merged into a single edge instead.
exclude_states (Iterable): Object state classes that should be ignored when building the graph.
"""
self._G = None
self._robot = None
self._robot_name = robot_name
self._egocentric = egocentric
self._full_obs = full_obs
self._only_true = only_true
self._merge_parallel_edges = merge_parallel_edges
self._last_desired_frame_to_world = None
self._exclude_states = set(exclude_states)
def get_scene_graph(self):
return self._G.copy()
def _get_desired_frame(self):
desired_frame_to_world = np.eye(4)
world_to_desired_frame = np.eye(4)
if self._egocentric:
desired_frame_to_world = self._get_robot_to_world_transform()
world_to_desired_frame = T.pose_inv(desired_frame_to_world)
return desired_frame_to_world, world_to_desired_frame
def _get_robot_to_world_transform(self):
robot_to_world = self._robot.get_position_orientation()
# Get rid of any rotation outside xy plane
robot_to_world = T.pose2mat((robot_to_world[0], T.z_rotation_from_quat(robot_to_world[1])))
return robot_to_world
def _get_boolean_unary_states(self, obj):
states = {}
for state_type, state_inst in obj.states.items():
if not issubclass(state_type, BooleanStateMixin) or not issubclass(state_type, AbsoluteObjectState):
continue
if state_type in self._exclude_states:
continue
value = state_inst.get_value()
if self._only_true and not value:
continue
states[get_state_name(state_type)] = value
return states
def _get_boolean_binary_states(self, objs):
states = []
for obj1 in objs:
for obj2 in objs:
if obj1 == obj2:
continue
for state_type, state_inst in obj1.states.items():
if not issubclass(state_type, BooleanStateMixin) or not issubclass(state_type, RelativeObjectState):
continue
if state_type in self._exclude_states:
continue
try:
value = state_inst.get_value(obj2)
if self._only_true and not value:
continue
states.append((obj1, obj2, get_state_name(state_type), {"value": value}))
                    except Exception:
                        # Some relative states cannot be evaluated for this pair; skip them.
                        pass
return states
def start(self, scene):
assert self._G is None, "Cannot start graph builder multiple times."
if self._robot_name is None:
assert len(scene.robots) == 1, "Cannot build scene graph without specifying robot name if there are multiple robots."
self._robot = scene.robots[0]
else:
self._robot = scene.object_registry("name", self._robot_name)
assert self._robot, f"Robot with name {self._robot_name} not found in scene."
self._G = nx.DiGraph() if self._merge_parallel_edges else nx.MultiDiGraph()
desired_frame_to_world, world_to_desired_frame = self._get_desired_frame()
robot_pose = world_to_desired_frame @ self._get_robot_to_world_transform()
robot_bbox_pose, robot_bbox_extent = _formatted_aabb(self._robot)
robot_bbox_pose = world_to_desired_frame @ robot_bbox_pose
self._G.add_node(
self._robot, pose=robot_pose, bbox_pose=robot_bbox_pose, bbox_extent=robot_bbox_extent, states={}
)
self._last_desired_frame_to_world = desired_frame_to_world
# Let's also take the first step.
self.step(scene)
def step(self, scene):
assert self._G is not None, "Cannot step graph builder before starting it."
# Prepare the necessary transformations.
desired_frame_to_world, world_to_desired_frame = self._get_desired_frame()
# Update the position of everything that's already in the scene by using our relative position to last frame.
old_desired_to_new_desired = world_to_desired_frame @ self._last_desired_frame_to_world
nodes = list(self._G.nodes)
poses = np.array([self._G.nodes[obj]["pose"] for obj in nodes])
bbox_poses = np.array([self._G.nodes[obj]["bbox_pose"] for obj in nodes])
updated_poses = old_desired_to_new_desired @ poses
updated_bbox_poses = old_desired_to_new_desired @ bbox_poses
for i, obj in enumerate(nodes):
self._G.nodes[obj]["pose"] = updated_poses[i]
self._G.nodes[obj]["bbox_pose"] = updated_bbox_poses[i]
# Update the robot's pose. We don't want to accumulate errors because of the repeated transforms.
self._G.nodes[self._robot]["pose"] = world_to_desired_frame @ self._get_robot_to_world_transform()
robot_bbox_pose, robot_bbox_extent = _formatted_aabb(self._robot)
robot_bbox_pose = world_to_desired_frame @ robot_bbox_pose
self._G.nodes[self._robot]["bbox_pose"] = robot_bbox_pose
self._G.nodes[self._robot]["bbox_extent"] = robot_bbox_extent
# Go through the objects in FOV of the robot.
objs_to_add = set(scene.objects)
if not self._full_obs:
# TODO: Reenable this once InFOV state is fixed.
# If we're not in full observability mode, only pick the objects in FOV of robot.
# bids_in_fov = self._robot.states[object_states.ObjectsInFOVOfRobot].get_value()
# objs_in_fov = set(
# scene.objects_by_id[bid]
# for bid in bids_in_fov
# if bid in scene.objects_by_id
# )
# objs_to_add &= objs_in_fov
raise NotImplementedError("Partial observability not supported in scene graph builder yet.")
for obj in objs_to_add:
# Add the object if not already in the graph
if obj not in self._G.nodes:
self._G.add_node(obj)
# Get the relative position of the object & update it (reducing accumulated errors)
self._G.nodes[obj]["pose"] = world_to_desired_frame @ T.pose2mat(obj.get_position_orientation())
# Get the bounding box.
if hasattr(obj, "get_base_aligned_bbox"):
bbox_center, bbox_orn, bbox_extent, _ = obj.get_base_aligned_bbox(visual=True)
bbox_pose = T.pose2mat((bbox_center, bbox_orn))
else:
bbox_pose, bbox_extent = _formatted_aabb(obj)
self._G.nodes[obj]["bbox_pose"] = world_to_desired_frame @ bbox_pose
self._G.nodes[obj]["bbox_extent"] = bbox_extent
# Update the states of the object
self._G.nodes[obj]["states"] = self._get_boolean_unary_states(obj)
# Update the binary states for seen objects.
self._G.remove_edges_from(list(itertools.product(objs_to_add, objs_to_add)))
edges = self._get_boolean_binary_states(objs_to_add)
if self._merge_parallel_edges:
new_edges = {}
for edge in edges:
edge_pair = (edge[0], edge[1])
if edge_pair not in new_edges:
new_edges[edge_pair] = []
new_edges[edge_pair].append((edge[2], edge[3]["value"]))
edges = [(k[0], k[1], {"states": v}) for k, v in new_edges.items()]
self._G.add_edges_from(edges)
# Save the robot's transform in this frame.
self._last_desired_frame_to_world = desired_frame_to_world
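# Illustrative driver loop (hedged sketch: `env` stands for any omnigibson Environment
# whose scene holds exactly one robot; note that only full_obs=True is currently
# supported by start()/step() above):
#
#     builder = SceneGraphBuilder(egocentric=True, full_obs=True, merge_parallel_edges=True)
#     builder.start(env.scene)
#     for _ in range(100):
#         env.step(env.action_space.sample())
#         builder.step(env.scene)
#     G = builder.get_scene_graph()  # networkx (Multi)DiGraph with pose/bbox/state annotations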
def visualize_scene_graph(scene, G, show_window=True, realistic_positioning=False):
"""
Converts the graph into an image and shows it in a cv2 window if preferred.
Args:
show_window (bool): Whether a cv2 GUI window containing the visualization should be shown.
realistic_positioning (bool): Whether nodes should be positioned based on their position in the scene (if True)
or placed using a graphviz layout (neato) that makes it easier to read edges & find clusters.
"""
def _draw_graph():
nodes = list(G.nodes)
node_labels = {obj: obj.category for obj in nodes}
colors = [
"yellow"
if obj.category == "agent"
else ("green" if obj.states[object_states.InFOVOfRobot].get_value() else "red")
for obj in nodes
]
positions = (
{obj: (-pose[0][1], pose[0][0]) for obj, pose in G.nodes.data("pose")}
if realistic_positioning
else nx.nx_pydot.pydot_layout(G, prog="neato")
)
nx.drawing.draw_networkx(
G,
pos=positions,
labels=node_labels,
nodelist=nodes,
node_color=colors,
font_size=4,
arrowsize=5,
node_size=150,
)
edge_labels = {
edge: ", ".join(
state + "=" + str(value)
for state, value in G.edges[edge]["states"]
)
for edge in G.edges
}
nx.drawing.draw_networkx_edge_labels(G, pos=positions, edge_labels=edge_labels, font_size=4)
# Prepare pyplot figure that's sized to match the robot video.
robot = scene.robots[0]
robot_camera_sensor, = [s for s in robot.sensors.values() if isinstance(s, VisionSensor) and "rgb" in s.modalities]
robot_view = (robot_camera_sensor.get_obs()[0]["rgb"][..., :3]).astype(np.uint8)
imgheight, imgwidth, _ = robot_view.shape
figheight = 4.8
figdpi = imgheight / figheight
figwidth = imgwidth / figdpi
# Draw the graph onto the figure.
fig = plt.figure(figsize=(figwidth, figheight), dpi=figdpi)
_draw_graph()
fig.canvas.draw()
# Convert the canvas to image
graph_view = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="")
graph_view = graph_view.reshape(fig.canvas.get_width_height()[::-1] + (3,))
assert graph_view.shape == robot_view.shape
plt.close(fig)
# Combine the two images side-by-side
img = np.hstack((robot_view, graph_view))
    # Convert to BGR for cv2-based viewing.
if show_window:
import cv2
cv_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imshow("SceneGraph", cv_img)
cv2.waitKey(1)
    return Image.fromarray(img)
| 12,289 | Python | 40.52027 | 129 | 0.604768 |
StanfordVL/OmniGibson/omnigibson/robots/franka_allegro.py | import os
import numpy as np
import omnigibson.utils.transform_utils as T
from omnigibson.macros import gm
from omnigibson.robots.manipulation_robot import ManipulationRobot, GraspingPoint
class FrankaAllegro(ManipulationRobot):
"""
Franka Robot with Allegro hand
"""
def __init__(
self,
# Shared kwargs in hierarchy
name,
prim_path=None,
uuid=None,
scale=None,
visible=True,
visual_only=False,
self_collisions=True,
load_config=None,
fixed_base=True,
# Unique to USDObject hierarchy
abilities=None,
# Unique to ControllableObject hierarchy
control_freq=None,
controller_config=None,
action_type="continuous",
action_normalize=True,
reset_joint_pos=None,
# Unique to BaseRobot
obs_modalities="all",
proprio_obs="default",
sensor_config=None,
# Unique to ManipulationRobot
grasping_mode="physical",
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
self_collisions (bool): Whether to enable self collisions for this object
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
                simulator.import_object will automatically set the control frequency to be at the render frequency by default.
controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
configurations for this object. This will override any default values specified by this class.
action_type (str): one of {discrete, continuous} - what type of action space to use
action_normalize (bool): whether to normalize inputted actions. This will override any default values
specified by this class.
reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
be set to during a reset. If None (default), self._default_joint_pos will be used instead.
Note that _default_joint_pos are hardcoded & precomputed, and thus should not be modified by the user.
                Set this value instead if you want to initialize the robot with a different reset joint position.
obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
corresponds to all modalities being used.
Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will
override any values specified from @obs_modalities!
proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
observations. If str, should be exactly "default" -- this results in the default proprioception
observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
for valid key choices
sensor_config (None or dict): nested dictionary mapping sensor class name(s) to specific sensor
configurations for this object. This will override any default values specified by this class.
grasping_mode (str): One of {"physical", "assisted", "sticky"}.
If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
If "assisted", will magnetize any object touching and within the gripper's fingers.
If "sticky", will magnetize any object touching the gripper's fingers.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
self_collisions=self_collisions,
load_config=load_config,
abilities=abilities,
control_freq=control_freq,
controller_config=controller_config,
action_type=action_type,
action_normalize=action_normalize,
reset_joint_pos=reset_joint_pos,
obs_modalities=obs_modalities,
proprio_obs=proprio_obs,
sensor_config=sensor_config,
grasping_mode=grasping_mode,
grasping_direction="upper",
**kwargs,
)
@property
def model_name(self):
return "FrankaAllegro"
@property
def discrete_action_list(self):
# Not supported for this robot
raise NotImplementedError()
def _create_discrete_action_space(self):
# Fetch does not support discrete actions
raise ValueError("Franka does not support discrete actions!")
@property
def controller_order(self):
return ["arm_{}".format(self.default_arm), "gripper_{}".format(self.default_arm)]
@property
def _default_controllers(self):
controllers = super()._default_controllers
controllers["arm_{}".format(self.default_arm)] = "InverseKinematicsController"
controllers["gripper_{}".format(self.default_arm)] = "MultiFingerGripperController"
return controllers
@property
def _default_gripper_multi_finger_controller_configs(self):
conf = super()._default_gripper_multi_finger_controller_configs
conf[self.default_arm]["mode"] = "independent"
conf[self.default_arm]["command_input_limits"] = None
return conf
@property
def _default_joint_pos(self):
# position where the hand is parallel to the ground
return np.r_[[0.86, -0.27, -0.68, -1.52, -0.18, 1.29, 1.72], np.zeros(16)]
@property
def finger_lengths(self):
return {self.default_arm: 0.1}
@property
def arm_control_idx(self):
return {self.default_arm: np.arange(7)}
@property
def gripper_control_idx(self):
# thumb.proximal, ..., thumb.tip, ..., ring.tip
return {self.default_arm: np.array([8, 12, 16, 20, 10, 14, 18, 22, 9, 13, 17, 21, 7, 11, 15, 19])}
@property
def arm_link_names(self):
return {self.default_arm: [f"panda_link{i}" for i in range(8)]}
@property
def arm_joint_names(self):
return {self.default_arm: [f"panda_joint_{i+1}" for i in range(7)]}
@property
def eef_link_names(self):
return {self.default_arm: "base_link"}
@property
def finger_link_names(self):
return {self.default_arm: [f"link_{i}_0" for i in range(16)]}
@property
def finger_joint_names(self):
# thumb.proximal, ..., thumb.tip, ..., ring.tip
return {self.default_arm: [f"joint_{i}_0" for i in [12, 13, 14, 15, 8, 9, 10, 11, 4, 5, 6, 7, 0, 1, 2, 3]]}
@property
def usd_path(self):
return os.path.join(gm.ASSET_PATH, "models/franka/franka_allegro.usd")
@property
def robot_arm_descriptor_yamls(self):
return {self.default_arm: os.path.join(gm.ASSET_PATH, "models/franka/franka_allegro_description.yaml")}
@property
def urdf_path(self):
return os.path.join(gm.ASSET_PATH, "models/franka/franka_allegro.urdf")
@property
def disabled_collision_pairs(self):
return [
["link_12_0", "part_studio_link"],
]
@property
def assisted_grasp_start_points(self):
return {self.default_arm: [
GraspingPoint(link_name=f"base_link", position=[0.015, 0, -0.03]),
GraspingPoint(link_name=f"base_link", position=[0.015, 0, -0.08]),
GraspingPoint(link_name=f"link_15_0_tip", position=[0, 0.015, 0.007]),
]}
@property
def assisted_grasp_end_points(self):
return {self.default_arm: [
GraspingPoint(link_name=f"link_3_0_tip", position=[0.012, 0, 0.007]),
GraspingPoint(link_name=f"link_7_0_tip", position=[0.012, 0, 0.007]),
GraspingPoint(link_name=f"link_11_0_tip", position=[0.012, 0, 0.007]),
]}
@property
def teleop_rotation_offset(self):
return {self.default_arm: T.euler2quat(np.array([0, np.pi / 2, 0]))}
| 9,906 | Python | 42.643172 | 126 | 0.628104 |
StanfordVL/OmniGibson/omnigibson/robots/two_wheel_robot.py | from abc import abstractmethod
import gym
import numpy as np
from omnigibson.controllers import DifferentialDriveController
from omnigibson.robots.locomotion_robot import LocomotionRobot
from omnigibson.utils.python_utils import classproperty
class TwoWheelRobot(LocomotionRobot):
"""
    Robot that is equipped with locomotive (navigational) capabilities, as defined by two wheels that can be used
for differential drive (e.g.: Turtlebot).
Provides common interface for a wide variety of robots.
NOTE: controller_config should, at the minimum, contain:
base: controller specifications for the controller to control this robot's base (locomotion).
Should include:
- name: Controller to create
- <other kwargs> relevant to the controller being created. Note that all values will have default
values specified, but setting these individual kwargs will override them
"""
def _validate_configuration(self):
# Make sure base only has two indices (i.e.: two wheels for differential drive)
assert len(self.base_control_idx) == 2, "Differential drive can only be used with robot with two base joints!"
# run super
super()._validate_configuration()
def _create_discrete_action_space(self):
# Set action list based on controller (joint or DD) used
# We set straight velocity to be 50% of max velocity for the wheels
max_wheel_joint_vels = self.control_limits["velocity"][1][self.base_control_idx]
assert len(max_wheel_joint_vels) == 2, "TwoWheelRobot must only have two base (wheel) joints!"
assert max_wheel_joint_vels[0] == max_wheel_joint_vels[1], "Both wheels must have the same max speed!"
wheel_straight_vel = 0.5 * max_wheel_joint_vels[0]
wheel_rotate_vel = 0.5
if self._controller_config["base"]["name"] == "JointController":
action_list = [
[wheel_straight_vel, wheel_straight_vel],
[-wheel_straight_vel, -wheel_straight_vel],
[wheel_rotate_vel, -wheel_rotate_vel],
[-wheel_rotate_vel, wheel_rotate_vel],
[0, 0],
]
else:
# DifferentialDriveController
lin_vel = wheel_straight_vel * self.wheel_radius
ang_vel = wheel_rotate_vel * self.wheel_radius * 2.0 / self.wheel_axle_length
action_list = [
[lin_vel, 0],
[-lin_vel, 0],
[0, ang_vel],
[0, -ang_vel],
[0, 0],
]
self.action_list = action_list
# Return this action space
return gym.spaces.Discrete(n=len(self.action_list))
def _get_proprioception_dict(self):
dic = super()._get_proprioception_dict()
# Grab wheel joint velocity info
joints = list(self._joints.values())
wheel_joints = [joints[idx] for idx in self.base_control_idx]
l_vel, r_vel = [jnt.get_state()[1] for jnt in wheel_joints]
# Compute linear and angular velocities
        lin_vel = (l_vel + r_vel) / 2.0 * self.wheel_radius
        ang_vel = (r_vel - l_vel) * self.wheel_radius / self.wheel_axle_length
        # Add info
        dic["dd_base_lin_vel"] = lin_vel  # lin_vel is already a 1D np array of length 1
        dic["dd_base_ang_vel"] = ang_vel  # ang_vel is already a 1D np array of length 1
return dic
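    # Differential-drive kinematics used in _get_proprioception_dict (sketch):
    # with w_l, w_r the wheel joint angular velocities (rad/s), R = wheel_radius
    # and L = wheel_axle_length,
    #   linear:  v     = R * (w_l + w_r) / 2
    #   angular: omega = R * (w_r - w_l) / L
    # e.g. (hypothetical values) R = 0.1 m, L = 0.4 m, w_l = w_r = 10 rad/s
    # gives v = 1.0 m/s and omega = 0.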
@property
def default_proprio_obs(self):
obs_keys = super().default_proprio_obs
return obs_keys + ["dd_base_lin_vel", "dd_base_ang_vel"]
@property
def _default_controllers(self):
# Always call super first
controllers = super()._default_controllers
# Use DifferentialDrive as default
controllers["base"] = "DifferentialDriveController"
return controllers
@property
def _default_base_differential_drive_controller_config(self):
"""
Returns:
dict: Default differential drive controller config to
control this robot's base.
"""
return {
"name": "DifferentialDriveController",
"control_freq": self._control_freq,
"wheel_radius": self.wheel_radius,
"wheel_axle_length": self.wheel_axle_length,
"control_limits": self.control_limits,
"dof_idx": self.base_control_idx,
}
@property
def _default_controller_config(self):
# Always run super method first
cfg = super()._default_controller_config
# Add differential drive option to base
cfg["base"][
self._default_base_differential_drive_controller_config["name"]
] = self._default_base_differential_drive_controller_config
return cfg
@property
@abstractmethod
def wheel_radius(self):
"""
Returns:
float: radius of each wheel at the base, in metric units
"""
raise NotImplementedError
@property
@abstractmethod
def wheel_axle_length(self):
"""
Returns:
float: perpendicular distance between the robot's two wheels, in metric units
"""
raise NotImplementedError
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("TwoWheelRobot")
return classes
def teleop_data_to_action(self, teleop_action) -> np.ndarray:
"""
Generate action data from teleoperation action data
NOTE: This implementation only supports DifferentialDriveController.
Overwrite this function if the robot is using a different base controller.
Args:
teleop_action (TeleopAction): teleoperation action data
Returns:
np.ndarray: array of action data
"""
action = super().teleop_data_to_action(teleop_action)
assert isinstance(self._controllers["base"], DifferentialDriveController), "Only DifferentialDriveController is supported!"
action[self.base_action_idx] = np.array([teleop_action.base[0], teleop_action.base[2]]) * 0.3
return action
| 6,279 | Python | 36.60479 | 131 | 0.621118 |
StanfordVL/OmniGibson/omnigibson/robots/tiago.py | import os
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm
import omnigibson.utils.transform_utils as T
from omnigibson.macros import create_module_macros
from omnigibson.robots.active_camera_robot import ActiveCameraRobot
from omnigibson.robots.manipulation_robot import GraspingPoint, ManipulationRobot
from omnigibson.robots.locomotion_robot import LocomotionRobot
from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.utils.usd_utils import JointType
# Create settings for this module
m = create_module_macros(module_path=__file__)
DEFAULT_ARM_POSES = {
"vertical",
"diagonal15",
"diagonal30",
"diagonal45",
"horizontal",
}
RESET_JOINT_OPTIONS = {
"tuck",
"untuck",
}
m.MAX_LINEAR_VELOCITY = 1.5 # linear velocity in meters/second
m.MAX_ANGULAR_VELOCITY = np.pi # angular velocity in radians/second
class Tiago(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
"""
Tiago Robot
Reference: https://pal-robotics.com/robots/tiago/
NOTE: If using IK Control for both the right and left arms, note that the left arm dictates control of the trunk,
    and the right arm must passively follow. That is, desired delta position commands sent to the right end effector
    will be computed independently from any trunk motion occurring during that timestep.
"""
def __init__(
self,
# Shared kwargs in hierarchy
name,
prim_path=None,
uuid=None,
scale=None,
visible=True,
visual_only=False,
self_collisions=False,
load_config=None,
# Unique to USDObject hierarchy
abilities=None,
# Unique to ControllableObject hierarchy
control_freq=None,
controller_config=None,
action_type="continuous",
action_normalize=True,
reset_joint_pos=None,
# Unique to BaseRobot
obs_modalities="all",
proprio_obs="default",
sensor_config=None,
# Unique to ManipulationRobot
grasping_mode="physical",
disable_grasp_handling=False,
# Unique to Tiago
variant="default",
rigid_trunk=False,
default_trunk_offset=0.365,
default_reset_mode="untuck",
default_arm_pose="vertical",
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
            uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8 digits).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
self_collisions (bool): Whether to enable self collisions for this object
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
                simulator.import_object will automatically set the control frequency to be at the render frequency by default.
controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
configurations for this object. This will override any default values specified by this class.
action_type (str): one of {discrete, continuous} - what type of action space to use
action_normalize (bool): whether to normalize inputted actions. This will override any default values
specified by this class.
reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
be set to during a reset. If None (default), self._default_joint_pos will be used instead.
Note that _default_joint_pos are hardcoded & precomputed, and thus should not be modified by the user.
                Set this value instead if you want to initialize the robot with a different reset joint position.
obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
corresponds to all modalities being used.
Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will
override any values specified from @obs_modalities!
proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
observations. If str, should be exactly "default" -- this results in the default proprioception
observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
for valid key choices
sensor_config (None or dict): nested dictionary mapping sensor class name(s) to specific sensor
configurations for this object. This will override any default values specified by this class.
grasping_mode (str): One of {"physical", "assisted", "sticky"}.
If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
If "assisted", will magnetize any object touching and within the gripper's fingers.
If "sticky", will magnetize any object touching the gripper's fingers.
disable_grasp_handling (bool): If True, will disable all grasp handling for this object. This means that
                sticky and assisted grasp modes will not work unless the connection/release methods are manually called.
variant (str): Which variant of the robot should be loaded. One of "default", "wrist_cam"
            rigid_trunk (bool): if True, will prevent the trunk from moving during execution.
default_trunk_offset (float): sets the default height of the robot's trunk
default_reset_mode (str): Default reset mode for the robot. Should be one of: {"tuck", "untuck"}
If reset_joint_pos is not None, this will be ignored (since _default_joint_pos won't be used during initialization).
default_arm_pose (str): Default pose for the robot arm. Should be one of:
{"vertical", "diagonal15", "diagonal30", "diagonal45", "horizontal"}
If either reset_joint_pos is not None or default_reset_mode is "tuck", this will be ignored.
Otherwise the reset_joint_pos will be initialized to the precomputed joint positions that represents default_arm_pose.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Store args
assert variant in ("default", "wrist_cam"), f"Invalid Tiago variant specified {variant}!"
self._variant = variant
self.rigid_trunk = rigid_trunk
self.default_trunk_offset = default_trunk_offset
assert_valid_key(key=default_reset_mode, valid_keys=RESET_JOINT_OPTIONS, name="default_reset_mode")
self.default_reset_mode = default_reset_mode
assert_valid_key(key=default_arm_pose, valid_keys=DEFAULT_ARM_POSES, name="default_arm_pose")
self.default_arm_pose = default_arm_pose
# Other args that will be created at runtime
self._world_base_fixed_joint_prim = None
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=True,
visual_only=visual_only,
self_collisions=self_collisions,
load_config=load_config,
abilities=abilities,
control_freq=control_freq,
controller_config=controller_config,
action_type=action_type,
action_normalize=action_normalize,
reset_joint_pos=reset_joint_pos,
obs_modalities=obs_modalities,
proprio_obs=proprio_obs,
sensor_config=sensor_config,
grasping_mode=grasping_mode,
disable_grasp_handling=disable_grasp_handling,
**kwargs,
)
@property
def arm_joint_names(self):
names = dict()
for arm in self.arm_names:
names[arm] = ["torso_lift_joint"] + [
f"arm_{arm}_{i}_joint" for i in range(1, 8)
]
return names
@property
def model_name(self):
return "Tiago"
@property
def n_arms(self):
return 2
@property
def arm_names(self):
return ["left", "right"]
@property
def tucked_default_joint_pos(self):
pos = np.zeros(self.n_dof)
# Keep the current joint positions for the base joints
pos[self.base_idx] = self.get_joint_positions()[self.base_idx]
pos[self.trunk_control_idx] = 0
pos[self.camera_control_idx] = np.array([0.0, 0.0])
for arm in self.arm_names:
pos[self.gripper_control_idx[arm]] = np.array([0.045, 0.045]) # open gripper
pos[self.arm_control_idx[arm]] = np.array(
[-1.10, 1.47, 2.71, 1.71, -1.57, 1.39, 0]
)
return pos
@property
def untucked_default_joint_pos(self):
pos = np.zeros(self.n_dof)
# Keep the current joint positions for the base joints
pos[self.base_idx] = self.get_joint_positions()[self.base_idx]
pos[self.trunk_control_idx] = 0.02 + self.default_trunk_offset
pos[self.camera_control_idx] = np.array([0.0, -0.45])
# Choose arm joint pos based on setting
for arm in self.arm_names:
pos[self.gripper_control_idx[arm]] = np.array([0.045, 0.045]) # open gripper
if self.default_arm_pose == "vertical":
pos[self.arm_control_idx[arm]] = np.array(
[0.85846, -0.14852, 1.81008, 1.63368, 0.13764, -1.32488, -0.68415]
)
elif self.default_arm_pose == "diagonal15":
pos[self.arm_control_idx[arm]] = np.array(
[0.90522, -0.42811, 2.23505, 1.64627, 0.76867, -0.79464, 2.05251]
)
elif self.default_arm_pose == "diagonal30":
pos[self.arm_control_idx[arm]] = np.array(
[0.71883, -0.02787, 1.86002, 1.52897, 0.52204, -0.99741, 2.03113]
)
elif self.default_arm_pose == "diagonal45" :
pos[self.arm_control_idx[arm]] = np.array(
[0.66058, -0.14251, 1.77547, 1.43345, 0.65988, -1.02741, 1.81302]
)
elif self.default_arm_pose == "horizontal":
pos[self.arm_control_idx[arm]] = np.array(
[0.61511, 0.49229, 1.46306, 1.24919, 1.08282, -1.28865, 1.50910]
)
else:
raise ValueError("Unknown default arm pose: {}".format(self.default_arm_pose))
return pos
def _create_discrete_action_space(self):
# Tiago does not support discrete actions
raise ValueError("Fetch does not support discrete actions!")
@property
def discrete_action_list(self):
# Not supported for this robot
raise NotImplementedError()
def tuck(self):
"""
Immediately set this robot's configuration to be in tucked mode
"""
self.set_joint_positions(self.tucked_default_joint_pos)
def untuck(self):
"""
Immediately set this robot's configuration to be in untucked mode
"""
self.set_joint_positions(self.untucked_default_joint_pos)
def reset(self):
"""
Reset should not change the robot base pose.
We need to cache and restore the base joints to the world.
"""
base_joint_positions = self.get_joint_positions()[self.base_idx]
super().reset()
self.set_joint_positions(base_joint_positions, indices=self.base_idx)
def _post_load(self):
super()._post_load()
# The eef gripper links should be visual-only. They only contain a "ghost" box volume for detecting objects
# inside the gripper, in order to activate attachments (AG for Cloths).
for arm in self.arm_names:
self.eef_links[arm].visual_only = True
self.eef_links[arm].visible = False
self._world_base_fixed_joint_prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(f"{self._prim_path}/rootJoint")
position, orientation = self.get_position_orientation()
# Set the world-to-base fixed joint to be at the robot's current pose
self._world_base_fixed_joint_prim.GetAttribute("physics:localPos0").Set(tuple(position))
self._world_base_fixed_joint_prim.GetAttribute("physics:localRot0").Set(lazy.pxr.Gf.Quatf(*orientation[[3, 0, 1, 2]].tolist()))
def _initialize(self):
# Run super method first
super()._initialize()
# Set the joint friction for EEF to be higher
for arm in self.arm_names:
for joint in self.finger_joints[arm]:
if joint.joint_type != JointType.JOINT_FIXED:
joint.friction = 500
# Name of the actual root link that we are interested in. Note that this is different from self.root_link_name,
# which is "base_footprint_x", corresponding to the first of the 6 1DoF joints to control the base.
@property
def base_footprint_link_name(self):
return "base_footprint"
@property
def base_footprint_link(self):
"""
Returns:
RigidPrim: base footprint link of this object prim
"""
return self._links[self.base_footprint_link_name]
def _postprocess_control(self, control, control_type):
# Run super method first
u_vec, u_type_vec = super()._postprocess_control(control=control, control_type=control_type)
# Change the control from base_footprint_link ("base_footprint") frame to root_link ("base_footprint_x") frame
base_orn = self.base_footprint_link.get_orientation()
root_link_orn = self.root_link.get_orientation()
cur_orn = T.mat2quat(T.quat2mat(root_link_orn).T @ T.quat2mat(base_orn))
# Rotate the linear and angular velocity to the desired frame
lin_vel_global, _ = T.pose_transform([0, 0, 0], cur_orn, u_vec[self.base_idx[:3]], [0, 0, 0, 1])
ang_vel_global, _ = T.pose_transform([0, 0, 0], cur_orn, u_vec[self.base_idx[3:]], [0, 0, 0, 1])
u_vec[self.base_control_idx] = np.array([lin_vel_global[0], lin_vel_global[1], ang_vel_global[2]])
return u_vec, u_type_vec
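    # Frame-change math used in _postprocess_control (sketch): with q_rel the
    # orientation of base_footprint expressed in the root_link frame, commanded
    # base velocities are rotated as u_root = R(q_rel) @ u_base for the linear
    # (x, y) and angular (rz) components.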
def _get_proprioception_dict(self):
dic = super()._get_proprioception_dict()
# Add trunk info
joint_positions = self.get_joint_positions(normalized=False)
joint_velocities = self.get_joint_velocities(normalized=False)
dic["trunk_qpos"] = joint_positions[self.trunk_control_idx]
dic["trunk_qvel"] = joint_velocities[self.trunk_control_idx]
return dic
@property
def control_limits(self):
# Overwrite the control limits with the maximum linear and angular velocities for the purpose of clip_control
# Note that when clip_control happens, the control is still in the base_footprint_link ("base_footprint") frame
# Omniverse still thinks these joints have no limits because when the control is transformed to the root_link
# ("base_footprint_x") frame, it can go above this limit.
limits = super().control_limits
limits["velocity"][0][self.base_idx[:3]] = -m.MAX_LINEAR_VELOCITY
limits["velocity"][1][self.base_idx[:3]] = m.MAX_LINEAR_VELOCITY
limits["velocity"][0][self.base_idx[3:]] = -m.MAX_ANGULAR_VELOCITY
limits["velocity"][1][self.base_idx[3:]] = m.MAX_ANGULAR_VELOCITY
return limits
def get_control_dict(self):
# Modify the right hand's pos_relative in the z-direction based on the trunk's value
# We do this so we decouple the trunk's dynamic value from influencing the IK controller solution for the right
# hand, which does not control the trunk
fcns = super().get_control_dict()
native_fcn = fcns.get_fcn("eef_right_pos_relative")
fcns["eef_right_pos_relative"] = lambda: (native_fcn() + np.array([0, 0, -self.get_joint_positions()[self.trunk_control_idx[0]]]))
return fcns
@property
def default_proprio_obs(self):
obs_keys = super().default_proprio_obs
return obs_keys + ["trunk_qpos"]
@property
def controller_order(self):
controllers = ["base", "camera"]
for arm in self.arm_names:
controllers += ["arm_{}".format(arm), "gripper_{}".format(arm)]
return controllers
@property
def _default_controllers(self):
# Always call super first
controllers = super()._default_controllers
# We use joint controllers for base and camera as default
controllers["base"] = "JointController"
controllers["camera"] = "JointController"
# We use multi finger gripper, and IK controllers for eefs as default
for arm in self.arm_names:
controllers["arm_{}".format(arm)] = "InverseKinematicsController"
controllers["gripper_{}".format(arm)] = "MultiFingerGripperController"
return controllers
@property
def _default_base_controller_configs(self):
dic = {
"name": "JointController",
"control_freq": self._control_freq,
"control_limits": self.control_limits,
"use_delta_commands": False,
"use_impedances": False,
"motor_type": "velocity",
"dof_idx": self.base_control_idx,
}
return dic
@property
def _default_controller_config(self):
# Grab defaults from super method first
cfg = super()._default_controller_config
# Get default base controller for omnidirectional Tiago
cfg["base"] = {"JointController": self._default_base_controller_configs}
for arm in self.arm_names:
for arm_cfg in cfg["arm_{}".format(arm)].values():
if arm == "left":
# Need to override joint idx being controlled to include trunk in default arm controller configs
arm_control_idx = np.concatenate([self.trunk_control_idx, self.arm_control_idx[arm]])
arm_cfg["dof_idx"] = arm_control_idx
# Need to modify the default joint positions also if this is a null joint controller
if arm_cfg["name"] == "NullJointController":
arm_cfg["default_command"] = self.reset_joint_pos[arm_control_idx]
# If using rigid trunk, we also clamp its limits
# TODO: How to handle for right arm which has a fixed trunk internally even though the trunk is moving
# via the left arm??
if self.rigid_trunk:
arm_cfg["control_limits"]["position"][0][self.trunk_control_idx] = \
self.untucked_default_joint_pos[self.trunk_control_idx]
arm_cfg["control_limits"]["position"][1][self.trunk_control_idx] = \
self.untucked_default_joint_pos[self.trunk_control_idx]
return cfg
@property
def _default_joint_pos(self):
return self.tucked_default_joint_pos if self.default_reset_mode == "tuck" else self.untucked_default_joint_pos
@property
def assisted_grasp_start_points(self):
return {
arm: [
GraspingPoint(link_name="gripper_{}_right_finger_link".format(arm), position=[0.002, 0.0, -0.2]),
GraspingPoint(link_name="gripper_{}_right_finger_link".format(arm), position=[0.002, 0.0, -0.13]),
]
for arm in self.arm_names
}
@property
def assisted_grasp_end_points(self):
return {
arm: [
GraspingPoint(link_name="gripper_{}_left_finger_link".format(arm), position=[-0.002, 0.0, -0.2]),
GraspingPoint(link_name="gripper_{}_left_finger_link".format(arm), position=[-0.002, 0.0, -0.13]),
]
for arm in self.arm_names
}
@property
def base_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to the three controllable 1DoF base joints
"""
joints = list(self.joints.keys())
return np.array(
[
joints.index(f"base_footprint_{component}_joint")
for component in ["x", "y", "rz"]
]
)
@property
def base_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to the six 1DoF base joints
"""
joints = list(self.joints.keys())
return np.array(
[
joints.index(f"base_footprint_{component}_joint")
for component in ["x", "y", "z", "rx", "ry", "rz"]
]
)
@property
def trunk_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to trunk joint.
"""
return np.array([6])
@property
def camera_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to [tilt, pan] camera joints.
"""
return np.array([9, 12])
@property
def arm_control_idx(self):
return {"left": np.array([7, 10, 13, 15, 17, 19, 21]),
"right": np.array([8, 11, 14, 16, 18, 20, 22]),
"combined": np.array([7, 8, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22])}
@property
def gripper_control_idx(self):
return {"left": np.array([23, 24]), "right": np.array([25, 26])}
@property
def finger_lengths(self):
return {arm: 0.12 for arm in self.arm_names}
@property
def disabled_collision_link_names(self):
# These should NEVER have collisions in the first place (i.e.: these are poorly modeled geoms from the source
# asset) -- they are strictly engulfed within ANOTHER collision mesh from a DIFFERENT link
return [name for arm in self.arm_names for name in [f"arm_{arm}_tool_link", f"wrist_{arm}_ft_link", f"wrist_{arm}_ft_tool_link"]]
@property
def disabled_collision_pairs(self):
return [
["arm_left_1_link", "arm_left_2_link"],
["arm_left_2_link", "arm_left_3_link"],
["arm_left_3_link", "arm_left_4_link"],
["arm_left_4_link", "arm_left_5_link"],
["arm_left_5_link", "arm_left_6_link"],
["arm_left_6_link", "arm_left_7_link"],
["arm_right_1_link", "arm_right_2_link"],
["arm_right_2_link", "arm_right_3_link"],
["arm_right_3_link", "arm_right_4_link"],
["arm_right_4_link", "arm_right_5_link"],
["arm_right_5_link", "arm_right_6_link"],
["arm_right_6_link", "arm_right_7_link"],
["gripper_right_right_finger_link", "gripper_right_left_finger_link"],
["gripper_right_link", "wrist_right_ft_link"],
["arm_right_6_link", "gripper_right_link"],
["arm_right_6_link", "wrist_right_ft_tool_link"],
["arm_right_6_link", "wrist_right_ft_link"],
["arm_right_6_link", "arm_right_tool_link"],
["arm_right_5_link", "wrist_right_ft_link"],
["arm_right_5_link", "arm_right_tool_link"],
["gripper_left_right_finger_link", "gripper_left_left_finger_link"],
["gripper_left_link", "wrist_left_ft_link"],
["arm_left_6_link", "gripper_left_link"],
["arm_left_6_link", "wrist_left_ft_tool_link"],
["arm_left_6_link", "wrist_left_ft_link"],
["arm_left_6_link", "arm_left_tool_link"],
["arm_left_5_link", "wrist_left_ft_link"],
["arm_left_5_link", "arm_left_tool_link"],
["torso_lift_link", "torso_fixed_column_link"],
["torso_fixed_link", "torso_fixed_column_link"],
["base_antenna_left_link", "torso_fixed_link"],
["base_antenna_right_link", "torso_fixed_link"],
["base_link", "wheel_rear_left_link"],
["base_link", "wheel_rear_right_link"],
["base_link", "wheel_front_left_link"],
["base_link", "wheel_front_right_link"],
["base_link", "base_dock_link"],
["base_link", "base_antenna_right_link"],
["base_link", "base_antenna_left_link"],
["base_link", "torso_fixed_column_link"],
["base_link", "suspension_front_left_link"],
["base_link", "suspension_front_right_link"],
["base_link", "torso_fixed_link"],
["suspension_front_left_link", "wheel_front_left_link"],
["torso_lift_link", "arm_right_1_link"],
["torso_lift_link", "arm_right_2_link"],
["torso_lift_link", "arm_left_1_link"],
["torso_lift_link", "arm_left_2_link"],
["arm_left_tool_link", "wrist_left_ft_link"],
["wrist_left_ft_link", "wrist_left_ft_tool_link"],
["wrist_left_ft_tool_link", "gripper_left_link"],
['gripper_left_grasping_frame', 'gripper_left_left_finger_link'],
['gripper_left_grasping_frame', 'gripper_left_right_finger_link'],
['wrist_right_ft_link', 'arm_right_tool_link'],
['wrist_right_ft_tool_link', 'wrist_right_ft_link'],
['gripper_right_link', 'wrist_right_ft_tool_link'],
['head_1_link', 'head_2_link'],
['torso_fixed_column_link', 'arm_right_1_link'],
['torso_fixed_column_link', 'arm_left_1_link'],
['arm_left_1_link', 'arm_left_3_link'],
['arm_right_1_link', 'arm_right_3_link'],
['base_link', 'arm_right_4_link'],
['base_link', 'arm_right_5_link'],
['base_link', 'arm_left_4_link'],
['base_link', 'arm_left_5_link'],
['wrist_left_ft_tool_link', 'arm_left_5_link'],
['wrist_right_ft_tool_link', 'arm_right_5_link'],
['arm_left_tool_link', 'wrist_left_ft_tool_link'],
['arm_right_tool_link', 'wrist_right_ft_tool_link']
]
@property
def manipulation_link_names(self):
return [
"torso_fixed_link",
"torso_lift_link",
"arm_left_1_link",
"arm_left_2_link",
"arm_left_3_link",
"arm_left_4_link",
"arm_left_5_link",
"arm_left_6_link",
"arm_left_7_link",
"arm_left_tool_link",
"wrist_left_ft_link",
"wrist_left_ft_tool_link",
"gripper_left_link",
# "gripper_left_grasping_frame",
"gripper_left_left_finger_link",
"gripper_left_right_finger_link",
"gripper_left_tool_link",
"arm_right_1_link",
"arm_right_2_link",
"arm_right_3_link",
"arm_right_4_link",
"arm_right_5_link",
"arm_right_6_link",
"arm_right_7_link",
"arm_right_tool_link",
"wrist_right_ft_link",
"wrist_right_ft_tool_link",
"gripper_right_link",
# "gripper_right_grasping_frame",
"gripper_right_left_finger_link",
"gripper_right_right_finger_link",
"gripper_right_tool_link",
"head_1_link",
"head_2_link",
"xtion_link",
]
@property
def arm_link_names(self):
return {arm: [f"arm_{arm}_{i}_link" for i in range(1, 8)] for arm in self.arm_names}
@property
def eef_link_names(self):
return {arm: "gripper_{}_grasping_frame".format(arm) for arm in self.arm_names}
@property
def finger_link_names(self):
return {arm: ["gripper_{}_right_finger_link".format(arm), "gripper_{}_left_finger_link".format(arm)] for arm in
self.arm_names}
@property
def finger_joint_names(self):
return {arm: ["gripper_{}_right_finger_joint".format(arm), "gripper_{}_left_finger_joint".format(arm)] for arm
in self.arm_names}
@property
def usd_path(self):
if self._variant == "wrist_cam":
return os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_dual_omnidirectional_stanford_33_with_wrist_cam.usd")
# Default variant
return os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_dual_omnidirectional_stanford_33.usd")
@property
def simplified_mesh_usd_path(self):
# TODO: How can we make this more general - maybe some automatic way to generate these?
return os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_dual_omnidirectional_stanford_33_simplified_collision_mesh.usd")
@property
def robot_arm_descriptor_yamls(self):
# TODO: Remove the need to do this by making the arm descriptor yaml files generated automatically
return {"left": os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford_left_arm_descriptor.yaml"),
"left_fixed": os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford_left_arm_fixed_trunk_descriptor.yaml"),
"right": os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford_right_arm_fixed_trunk_descriptor.yaml"),
"combined": os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford.yaml")}
@property
def urdf_path(self):
return os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford.urdf")
@property
def arm_workspace_range(self):
return {
"left": [np.deg2rad(15), np.deg2rad(75)],
"right": [np.deg2rad(-75), np.deg2rad(-15)],
}
def get_position_orientation(self):
# TODO: Investigate the need for this custom behavior.
return self.base_footprint_link.get_position_orientation()
def set_position_orientation(self, position=None, orientation=None):
current_position, current_orientation = self.get_position_orientation()
if position is None:
position = current_position
if orientation is None:
orientation = current_orientation
position, orientation = np.array(position), np.array(orientation)
assert np.isclose(np.linalg.norm(orientation), 1, atol=1e-3), \
f"{self.name} desired orientation {orientation} is not a unit quaternion."
# TODO: Reconsider the need for this. Why can't these behaviors be unified? Does the joint really need to move?
# If the simulator is playing, set the 6 base joints to achieve the desired pose of base_footprint link frame
if og.sim.is_playing() and self.initialized:
# Find the relative transformation from base_footprint_link ("base_footprint") frame to root_link
# ("base_footprint_x") frame. Assign it to the 6 1DoF joints that control the base.
# Note that the 6 1DoF joints are originated from the root_link ("base_footprint_x") frame.
joint_pos, joint_orn = self.root_link.get_position_orientation()
inv_joint_pos, inv_joint_orn = T.mat2pose(T.pose_inv(T.pose2mat((joint_pos, joint_orn))))
relative_pos, relative_orn = T.pose_transform(inv_joint_pos, inv_joint_orn, position, orientation)
relative_rpy = T.quat2euler(relative_orn)
self.joints["base_footprint_x_joint"].set_pos(relative_pos[0], drive=False)
self.joints["base_footprint_y_joint"].set_pos(relative_pos[1], drive=False)
self.joints["base_footprint_z_joint"].set_pos(relative_pos[2], drive=False)
self.joints["base_footprint_rx_joint"].set_pos(relative_rpy[0], drive=False)
self.joints["base_footprint_ry_joint"].set_pos(relative_rpy[1], drive=False)
self.joints["base_footprint_rz_joint"].set_pos(relative_rpy[2], drive=False)
# Else, set the pose of the robot frame, and then move the joint frame of the world_base_joint to match it
else:
# Call the super() method to move the robot frame first
super().set_position_orientation(position, orientation)
# Move the joint frame for the world_base_joint
if self._world_base_fixed_joint_prim is not None:
self._world_base_fixed_joint_prim.GetAttribute("physics:localPos0").Set(tuple(position))
self._world_base_fixed_joint_prim.GetAttribute("physics:localRot0").Set(lazy.pxr.Gf.Quatf(*orientation[[3, 0, 1, 2]].tolist()))
def set_linear_velocity(self, velocity: np.ndarray):
# Transform the desired linear velocity from the world frame to the root_link ("base_footprint_x") frame
# Note that this will also set the target to be the desired linear velocity (i.e. the robot will try to maintain
# such velocity), which is different from the default behavior of set_linear_velocity for all other objects.
orn = self.root_link.get_orientation()
velocity_in_root_link = T.quat2mat(orn).T @ velocity
self.joints["base_footprint_x_joint"].set_vel(velocity_in_root_link[0], drive=False)
self.joints["base_footprint_y_joint"].set_vel(velocity_in_root_link[1], drive=False)
self.joints["base_footprint_z_joint"].set_vel(velocity_in_root_link[2], drive=False)
def get_linear_velocity(self) -> np.ndarray:
# Note that the link we are interested in is self.base_footprint_link, not self.root_link
return self.base_footprint_link.get_linear_velocity()
def set_angular_velocity(self, velocity: np.ndarray) -> None:
# See comments of self.set_linear_velocity
orn = self.root_link.get_orientation()
velocity_in_root_link = T.quat2mat(orn).T @ velocity
self.joints["base_footprint_rx_joint"].set_vel(velocity_in_root_link[0], drive=False)
self.joints["base_footprint_ry_joint"].set_vel(velocity_in_root_link[1], drive=False)
self.joints["base_footprint_rz_joint"].set_vel(velocity_in_root_link[2], drive=False)
def get_angular_velocity(self) -> np.ndarray:
# Note that the link we are interested in is self.base_footprint_link, not self.root_link
return self.base_footprint_link.get_angular_velocity()
@property
def eef_usd_path(self):
return {arm: os.path.join(gm.ASSET_PATH, "models/tiago/tiago_dual_omnidirectional_stanford/tiago_eef.usd") for arm in self.arm_names}
def teleop_data_to_action(self, teleop_action) -> np.ndarray:
action = ManipulationRobot.teleop_data_to_action(self, teleop_action)
action[self.base_action_idx] = teleop_action.base * 0.1
return action
| 36,634 | Python | 46.701823 | 163 | 0.611345 |
StanfordVL/OmniGibson/omnigibson/robots/active_camera_robot.py | from abc import abstractmethod
import numpy as np
from omnigibson.robots.robot_base import BaseRobot
from omnigibson.utils.python_utils import classproperty
class ActiveCameraRobot(BaseRobot):
"""
    Robot that is equipped with an onboard camera that can be moved independently from the robot's other kinematic
joints (e.g.: independent of base and arm for a mobile manipulator).
NOTE: controller_config should, at the minimum, contain:
camera: controller specifications for the controller to control this robot's camera.
Should include:
- name: Controller to create
- <other kwargs> relevant to the controller being created. Note that all values will have default
values specified, but setting these individual kwargs will override them
"""
def _validate_configuration(self):
# Make sure a camera controller is specified
assert (
"camera" in self._controllers
), "Controller 'camera' must exist in controllers! Current controllers: {}".format(
list(self._controllers.keys())
)
# run super
super()._validate_configuration()
def _get_proprioception_dict(self):
dic = super()._get_proprioception_dict()
# Add camera pos info
joint_positions = self.get_joint_positions(normalized=False)
joint_velocities = self.get_joint_velocities(normalized=False)
dic["camera_qpos"] = joint_positions[self.camera_control_idx]
dic["camera_qpos_sin"] = np.sin(joint_positions[self.camera_control_idx])
dic["camera_qpos_cos"] = np.cos(joint_positions[self.camera_control_idx])
dic["camera_qvel"] = joint_velocities[self.camera_control_idx]
return dic
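    # Note on the sin/cos terms above (sketch of the rationale): encoding a
    # revolute joint angle q as (sin q, cos q) keeps the proprioceptive
    # observation continuous across the +/- pi wraparound, which raw angles
    # are not.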
@property
def default_proprio_obs(self):
obs_keys = super().default_proprio_obs
return obs_keys + ["camera_qpos_sin", "camera_qpos_cos"]
@property
def controller_order(self):
# By default, only camera is supported
return ["camera"]
@property
def _default_controllers(self):
# Always call super first
controllers = super()._default_controllers
# For best generalizability use, joint controller as default
controllers["camera"] = "JointController"
return controllers
@property
def _default_camera_joint_controller_config(self):
"""
Returns:
dict: Default camera joint controller config to control this robot's camera
"""
return {
"name": "JointController",
"control_freq": self._control_freq,
"control_limits": self.control_limits,
"dof_idx": self.camera_control_idx,
"command_output_limits": None,
"motor_type": "position",
"use_delta_commands": True,
"use_impedances": False,
}
@property
def _default_camera_null_joint_controller_config(self):
"""
Returns:
dict: Default null joint controller config to control this robot's camera i.e. dummy controller
"""
return {
"name": "NullJointController",
"control_freq": self._control_freq,
"motor_type": "position",
"control_limits": self.control_limits,
"dof_idx": self.camera_control_idx,
"default_command": self.reset_joint_pos[self.camera_control_idx],
"use_impedances": False,
}
@property
def _default_controller_config(self):
# Always run super method first
cfg = super()._default_controller_config
# We additionally add in camera default
cfg["camera"] = {
self._default_camera_joint_controller_config["name"]: self._default_camera_joint_controller_config,
self._default_camera_null_joint_controller_config["name"]: self._default_camera_null_joint_controller_config,
}
return cfg
@property
@abstractmethod
def camera_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to camera joints.
"""
raise NotImplementedError
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("ActiveCameraRobot")
return classes
| 4,464 | Python | 33.882812 | 121 | 0.62948 |
StanfordVL/OmniGibson/omnigibson/robots/vx300s.py | import os
import numpy as np
from omnigibson.macros import gm
from omnigibson.robots.manipulation_robot import ManipulationRobot, GraspingPoint
from omnigibson.utils.transform_utils import euler2quat
class VX300S(ManipulationRobot):
"""
The VX300-6DOF arm from Trossen Robotics
(https://www.trossenrobotics.com/docs/interbotix_xsarms/specifications/vx300s.html)
"""
def __init__(
self,
# Shared kwargs in hierarchy
name,
prim_path=None,
uuid=None,
scale=None,
visible=True,
visual_only=False,
self_collisions=True,
load_config=None,
fixed_base=True,
# Unique to USDObject hierarchy
abilities=None,
# Unique to ControllableObject hierarchy
control_freq=None,
controller_config=None,
action_type="continuous",
action_normalize=True,
reset_joint_pos=None,
# Unique to BaseRobot
obs_modalities="all",
proprio_obs="default",
sensor_config=None,
# Unique to ManipulationRobot
grasping_mode="physical",
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
            uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8 digits).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
self_collisions (bool): Whether to enable self collisions for this object
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
simulator.import_object will automatically set the control frequency to be at the render frequency by default.
controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
configurations for this object. This will override any default values specified by this class.
action_type (str): one of {discrete, continuous} - what type of action space to use
action_normalize (bool): whether to normalize inputted actions. This will override any default values
specified by this class.
reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
be set to during a reset. If None (default), self._default_joint_pos will be used instead.
Note that _default_joint_pos are hardcoded & precomputed, and thus should not be modified by the user.
                Set this value instead if you want to initialize the robot with a different reset joint position.
obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
corresponds to all modalities being used.
Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will
override any values specified from @obs_modalities!
proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
observations. If str, should be exactly "default" -- this results in the default proprioception
observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
for valid key choices
sensor_config (None or dict): nested dictionary mapping sensor class name(s) to specific sensor
configurations for this object. This will override any default values specified by this class.
grasping_mode (str): One of {"physical", "assisted", "sticky"}.
If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
If "assisted", will magnetize any object touching and within the gripper's fingers.
If "sticky", will magnetize any object touching the gripper's fingers.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
self_collisions=self_collisions,
load_config=load_config,
abilities=abilities,
control_freq=control_freq,
controller_config=controller_config,
action_type=action_type,
action_normalize=action_normalize,
reset_joint_pos=reset_joint_pos,
obs_modalities=obs_modalities,
proprio_obs=proprio_obs,
sensor_config=sensor_config,
grasping_mode=grasping_mode,
**kwargs,
)
@property
def model_name(self):
return "VX300S"
@property
def discrete_action_list(self):
# Not supported for this robot
raise NotImplementedError()
def _create_discrete_action_space(self):
        # VX300S does not support discrete actions
raise ValueError("VX300S does not support discrete actions!")
@property
def controller_order(self):
return [f"arm_{self.default_arm}", f"gripper_{self.default_arm}"]
@property
def _default_controllers(self):
controllers = super()._default_controllers
controllers[f"arm_{self.default_arm}"] = "InverseKinematicsController"
controllers[f"gripper_{self.default_arm}"] = "MultiFingerGripperController"
return controllers
@property
def _default_joint_pos(self):
return np.array([0.0, -0.849879, 0.258767, 0.0, 1.2831712, 0.0, 0.057, 0.057])
@property
def finger_lengths(self):
return {self.default_arm: 0.1}
@property
def arm_control_idx(self):
        # The first 6 joints
return {self.default_arm: np.arange(6)}
@property
def gripper_control_idx(self):
# The last two joints
return {self.default_arm: np.arange(6, 8)}
@property
def disabled_collision_pairs(self):
return [
["gripper_bar_link", "left_finger_link"],
["gripper_bar_link", "right_finger_link"],
["gripper_bar_link", "gripper_link"],
]
@property
def arm_link_names(self):
return {self.default_arm: [
"base_link",
"shoulder_link",
"upper_arm_link",
"upper_forearm_link",
"lower_forearm_link",
"wrist_link",
"gripper_link",
"gripper_bar_link",
]}
@property
def arm_joint_names(self):
return {self.default_arm: [
"waist",
"shoulder",
"elbow",
"forearm_roll",
"wrist_angle",
"wrist_rotate",
]}
@property
def eef_link_names(self):
return {self.default_arm: "ee_gripper_link"}
@property
def finger_link_names(self):
return {self.default_arm: ["left_finger_link", "right_finger_link"]}
@property
def finger_joint_names(self):
return {self.default_arm: ["left_finger", "right_finger"]}
@property
def usd_path(self):
return os.path.join(gm.ASSET_PATH, "models/vx300s/vx300s/vx300s.usd")
@property
def robot_arm_descriptor_yamls(self):
return {self.default_arm: os.path.join(gm.ASSET_PATH, "models/vx300s/vx300s_description.yaml")}
@property
def urdf_path(self):
return os.path.join(gm.ASSET_PATH, "models/vx300s/vx300s.urdf")
@property
def eef_usd_path(self):
# return {self.default_arm: os.path.join(gm.ASSET_PATH, "models/vx300s/vx300s_eef.usd")}
raise NotImplementedError
@property
def teleop_rotation_offset(self):
return {self.default_arm: euler2quat([-np.pi, 0, 0])}
@property
def assisted_grasp_start_points(self):
return {self.default_arm: [
GraspingPoint(link_name="right_finger_link", position=[0.0, 0.001, 0.057]),
]}
@property
def assisted_grasp_end_points(self):
return {self.default_arm: [
GraspingPoint(link_name="left_finger_link", position=[0.0, 0.001, 0.057]),
]}
| 9,721 | Python | 40.021097 | 126 | 0.622673 |
StanfordVL/OmniGibson/omnigibson/robots/fetch.py | import os
import numpy as np
from omnigibson.macros import gm
from omnigibson.controllers import ControlType
from omnigibson.robots.active_camera_robot import ActiveCameraRobot
from omnigibson.robots.manipulation_robot import GraspingPoint, ManipulationRobot
from omnigibson.robots.two_wheel_robot import TwoWheelRobot
from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.transform_utils import euler2quat
from omnigibson.utils.usd_utils import JointType
log = create_module_logger(module_name=__name__)
DEFAULT_ARM_POSES = {
"vertical",
"diagonal15",
"diagonal30",
"diagonal45",
"horizontal",
}
RESET_JOINT_OPTIONS = {
"tuck",
"untuck",
}
class Fetch(ManipulationRobot, TwoWheelRobot, ActiveCameraRobot):
"""
Fetch Robot
Reference: https://fetchrobotics.com/robotics-platforms/fetch-mobile-manipulator/
"""
def __init__(
self,
# Shared kwargs in hierarchy
name,
prim_path=None,
uuid=None,
scale=None,
visible=True,
visual_only=False,
self_collisions=False,
load_config=None,
fixed_base=False,
# Unique to USDObject hierarchy
abilities=None,
# Unique to ControllableObject hierarchy
control_freq=None,
controller_config=None,
action_type="continuous",
action_normalize=True,
reset_joint_pos=None,
# Unique to BaseRobot
obs_modalities="all",
proprio_obs="default",
sensor_config=None,
# Unique to ManipulationRobot
grasping_mode="physical",
disable_grasp_handling=False,
# Unique to Fetch
rigid_trunk=False,
default_trunk_offset=0.365,
default_reset_mode="untuck",
default_arm_pose="vertical",
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
            uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8 digits).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
self_collisions (bool): Whether to enable self collisions for this object
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
simulator.import_object will automatically set the control frequency to be at the render frequency by default.
controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
configurations for this object. This will override any default values specified by this class.
action_type (str): one of {discrete, continuous} - what type of action space to use
action_normalize (bool): whether to normalize inputted actions. This will override any default values
specified by this class.
reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
be set to during a reset. If None (default), self._default_joint_pos will be used instead.
Note that _default_joint_pos are hardcoded & precomputed, and thus should not be modified by the user.
                Set this value instead if you want to initialize the robot with a different reset joint position.
obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
corresponds to all modalities being used.
Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will
override any values specified from @obs_modalities!
proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
observations. If str, should be exactly "default" -- this results in the default proprioception
observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
for valid key choices
sensor_config (None or dict): nested dictionary mapping sensor class name(s) to specific sensor
configurations for this object. This will override any default values specified by this class.
grasping_mode (str): One of {"physical", "assisted", "sticky"}.
If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
If "assisted", will magnetize any object touching and within the gripper's fingers.
If "sticky", will magnetize any object touching the gripper's fingers.
disable_grasp_handling (bool): If True, will disable all grasp handling for this object. This means that
                sticky and assisted grasp modes will not work unless the connection/release methods are manually called.
            rigid_trunk (bool): if True, will prevent the trunk from moving during execution.
default_trunk_offset (float): sets the default height of the robot's trunk
default_reset_mode (str): Default reset mode for the robot. Should be one of: {"tuck", "untuck"}
If reset_joint_pos is not None, this will be ignored (since _default_joint_pos won't be used during initialization).
default_arm_pose (str): Default pose for the robot arm. Should be one of:
{"vertical", "diagonal15", "diagonal30", "diagonal45", "horizontal"}
If either reset_joint_pos is not None or default_reset_mode is "tuck", this will be ignored.
Otherwise the reset_joint_pos will be initialized to the precomputed joint positions that represents default_arm_pose.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Store args
self.rigid_trunk = rigid_trunk
self.default_trunk_offset = default_trunk_offset
assert_valid_key(key=default_reset_mode, valid_keys=RESET_JOINT_OPTIONS, name="default_reset_mode")
self.default_reset_mode = default_reset_mode
assert_valid_key(key=default_arm_pose, valid_keys=DEFAULT_ARM_POSES, name="default_arm_pose")
self.default_arm_pose = default_arm_pose
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
self_collisions=self_collisions,
load_config=load_config,
abilities=abilities,
control_freq=control_freq,
controller_config=controller_config,
action_type=action_type,
action_normalize=action_normalize,
reset_joint_pos=reset_joint_pos,
obs_modalities=obs_modalities,
proprio_obs=proprio_obs,
sensor_config=sensor_config,
grasping_mode=grasping_mode,
disable_grasp_handling=disable_grasp_handling,
**kwargs,
)
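    # Illustrative usage sketch (assumed kwargs; everything not shown keeps its
    # default value):
    #
    #   robot = Fetch(name="fetch", default_reset_mode="tuck", rigid_trunk=True)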
@property
def model_name(self):
return "Fetch"
@property
def tucked_default_joint_pos(self):
return np.array(
[
0.0,
0.0, # wheels
0.02, # trunk
0.0,
1.1707963267948966,
0.0, # head
1.4707963267948965,
-0.4,
1.6707963267948966,
0.0,
1.5707963267948966,
0.0, # arm
0.05,
0.05, # gripper
]
)
@property
def untucked_default_joint_pos(self):
pos = np.zeros(self.n_joints)
pos[self.base_control_idx] = 0.0
pos[self.trunk_control_idx] = 0.02 + self.default_trunk_offset
pos[self.camera_control_idx] = np.array([0.0, 0.45])
pos[self.gripper_control_idx[self.default_arm]] = np.array([0.05, 0.05]) # open gripper
# Choose arm based on setting
if self.default_arm_pose == "vertical":
pos[self.arm_control_idx[self.default_arm]] = np.array(
[-0.94121, -0.64134, 1.55186, 1.65672, -0.93218, 1.53416, 2.14474]
)
elif self.default_arm_pose == "diagonal15":
pos[self.arm_control_idx[self.default_arm]] = np.array(
[-0.95587, -0.34778, 1.46388, 1.47821, -0.93813, 1.4587, 1.9939]
)
elif self.default_arm_pose == "diagonal30":
pos[self.arm_control_idx[self.default_arm]] = np.array(
[-1.06595, -0.22184, 1.53448, 1.46076, -0.84995, 1.36904, 1.90996]
)
elif self.default_arm_pose == "diagonal45":
pos[self.arm_control_idx[self.default_arm]] = np.array(
[-1.11479, -0.0685, 1.5696, 1.37304, -0.74273, 1.3983, 1.79618]
)
elif self.default_arm_pose == "horizontal":
pos[self.arm_control_idx[self.default_arm]] = np.array(
[-1.43016, 0.20965, 1.86816, 1.77576, -0.27289, 1.31715, 2.01226]
)
else:
raise ValueError("Unknown default arm pose: {}".format(self.default_arm_pose))
return pos
def _post_load(self):
super()._post_load()
# Set the wheels back to using sphere approximations
for wheel_name in ["l_wheel_link", "r_wheel_link"]:
            log.warning(
                "Fetch wheel links are post-processed to use sphere approximation collision meshes. "
                "Please ignore any previous errors about these collision meshes.")
wheel_link = self.links[wheel_name]
assert set(wheel_link.collision_meshes) == {"collisions"}, "Wheel link should only have 1 collision!"
wheel_link.collision_meshes["collisions"].set_collision_approximation("boundingSphere")
# Also apply a convex decomposition to the torso lift link
torso_lift_link = self.links["torso_lift_link"]
        assert set(torso_lift_link.collision_meshes) == {"collisions"}, "Torso lift link should only have 1 collision!"
torso_lift_link.collision_meshes["collisions"].set_collision_approximation("convexDecomposition")
@property
def discrete_action_list(self):
# Not supported for this robot
raise NotImplementedError()
def _create_discrete_action_space(self):
# Fetch does not support discrete actions
raise ValueError("Fetch does not support discrete actions!")
def tuck(self):
"""
Immediately set this robot's configuration to be in tucked mode
"""
self.set_joint_positions(self.tucked_default_joint_pos)
def untuck(self):
"""
Immediately set this robot's configuration to be in untucked mode
"""
self.set_joint_positions(self.untucked_default_joint_pos)
def _initialize(self):
# Run super method first
super()._initialize()
# Set the joint friction for EEF to be higher
for arm in self.arm_names:
for joint in self.finger_joints[arm]:
if joint.joint_type != JointType.JOINT_FIXED:
joint.friction = 500
def _postprocess_control(self, control, control_type):
# Run super method first
u_vec, u_type_vec = super()._postprocess_control(control=control, control_type=control_type)
# Override trunk value if we're keeping the trunk rigid
if self.rigid_trunk:
u_vec[self.trunk_control_idx] = self.untucked_default_joint_pos[self.trunk_control_idx]
u_type_vec[self.trunk_control_idx] = ControlType.POSITION
# Return control
return u_vec, u_type_vec
def _get_proprioception_dict(self):
dic = super()._get_proprioception_dict()
# Add trunk info
joint_positions = self.get_joint_positions(normalized=False)
joint_velocities = self.get_joint_velocities(normalized=False)
dic["trunk_qpos"] = joint_positions[self.trunk_control_idx]
dic["trunk_qvel"] = joint_velocities[self.trunk_control_idx]
return dic
@property
def default_proprio_obs(self):
obs_keys = super().default_proprio_obs
return obs_keys + ["trunk_qpos"]
@property
def controller_order(self):
# Ordered by general robot kinematics chain
return ["base", "camera", "arm_{}".format(self.default_arm), "gripper_{}".format(self.default_arm)]
@property
def _default_controllers(self):
# Always call super first
controllers = super()._default_controllers
# We use multi finger gripper, differential drive, and IK controllers as default
controllers["base"] = "DifferentialDriveController"
controllers["camera"] = "JointController"
controllers["arm_{}".format(self.default_arm)] = "InverseKinematicsController"
controllers["gripper_{}".format(self.default_arm)] = "MultiFingerGripperController"
return controllers
@property
def _default_controller_config(self):
# Grab defaults from super method first
cfg = super()._default_controller_config
# Need to override joint idx being controlled to include trunk in default arm controller configs
for arm_cfg in cfg[f"arm_{self.default_arm}"].values():
arm_control_idx = np.concatenate([self.trunk_control_idx, self.arm_control_idx[self.default_arm]])
arm_cfg["dof_idx"] = arm_control_idx
# Need to modify the default joint positions also if this is a null joint controller
if arm_cfg["name"] == "NullJointController":
arm_cfg["default_command"] = self.reset_joint_pos[arm_control_idx]
# If using rigid trunk, we also clamp its limits
if self.rigid_trunk:
arm_cfg["control_limits"]["position"][0][self.trunk_control_idx] = \
self.untucked_default_joint_pos[self.trunk_control_idx]
arm_cfg["control_limits"]["position"][1][self.trunk_control_idx] = \
self.untucked_default_joint_pos[self.trunk_control_idx]
return cfg
@property
def _default_joint_pos(self):
return self.tucked_default_joint_pos if self.default_reset_mode == "tuck" else self.untucked_default_joint_pos
@property
def wheel_radius(self):
return 0.0613
@property
def wheel_axle_length(self):
return 0.372
@property
def finger_lengths(self):
return {self.default_arm: 0.1}
@property
def assisted_grasp_start_points(self):
return {
self.default_arm: [
GraspingPoint(link_name="r_gripper_finger_link", position=[0.025, -0.012, 0.0]),
GraspingPoint(link_name="r_gripper_finger_link", position=[-0.025, -0.012, 0.0]),
]
}
@property
def assisted_grasp_end_points(self):
return {
self.default_arm: [
GraspingPoint(link_name="l_gripper_finger_link", position=[0.025, 0.012, 0.0]),
GraspingPoint(link_name="l_gripper_finger_link", position=[-0.025, 0.012, 0.0]),
]
}
@property
def base_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return np.array([0, 1])
@property
def trunk_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to trunk joint.
"""
return np.array([2])
@property
def camera_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to [tilt, pan] camera joints.
"""
return np.array([3, 5])
@property
def arm_control_idx(self):
return {self.default_arm: np.array([4, 6, 7, 8, 9, 10, 11])}
@property
def gripper_control_idx(self):
return {self.default_arm: np.array([12, 13])}
@property
def disabled_collision_pairs(self):
return [
["torso_lift_link", "shoulder_lift_link"],
["torso_lift_link", "torso_fixed_link"],
["torso_lift_link", "estop_link"],
["base_link", "laser_link"],
["base_link", "torso_fixed_link"],
["base_link", "l_wheel_link"],
["base_link", "r_wheel_link"],
["base_link", "estop_link"],
["torso_lift_link", "shoulder_pan_link"],
["torso_lift_link", "head_pan_link"],
["head_pan_link", "head_tilt_link"],
["shoulder_pan_link", "shoulder_lift_link"],
["shoulder_lift_link", "upperarm_roll_link"],
["upperarm_roll_link", "elbow_flex_link"],
["elbow_flex_link", "forearm_roll_link"],
["forearm_roll_link", "wrist_flex_link"],
["wrist_flex_link", "wrist_roll_link"],
["wrist_roll_link", "gripper_link"],
]
@property
def manipulation_link_names(self):
return [
"torso_lift_link",
"head_pan_link",
"head_tilt_link",
"shoulder_pan_link",
"shoulder_lift_link",
"upperarm_roll_link",
"elbow_flex_link",
"forearm_roll_link",
"wrist_flex_link",
"wrist_roll_link",
"gripper_link",
"l_gripper_finger_link",
"r_gripper_finger_link",
]
@property
def arm_link_names(self):
return {self.default_arm: [
"shoulder_pan_link",
"shoulder_lift_link",
"upperarm_roll_link",
"elbow_flex_link",
"forearm_roll_link",
"wrist_flex_link",
"wrist_roll_link",
]}
@property
def arm_joint_names(self):
return {self.default_arm: [
"torso_lift_joint",
"shoulder_pan_joint",
"shoulder_lift_joint",
"upperarm_roll_joint",
"elbow_flex_joint",
"forearm_roll_joint",
"wrist_flex_joint",
"wrist_roll_joint",
]}
@property
def eef_link_names(self):
return {self.default_arm: "gripper_link"}
@property
def finger_link_names(self):
return {self.default_arm: ["r_gripper_finger_link", "l_gripper_finger_link"]}
@property
def finger_joint_names(self):
return {self.default_arm: ["r_gripper_finger_joint", "l_gripper_finger_joint"]}
@property
def usd_path(self):
return os.path.join(gm.ASSET_PATH, "models/fetch/fetch/fetch.usd")
@property
def robot_arm_descriptor_yamls(self):
return {self.default_arm: os.path.join(gm.ASSET_PATH, "models/fetch/fetch_descriptor.yaml")}
@property
def urdf_path(self):
return os.path.join(gm.ASSET_PATH, "models/fetch/fetch.urdf")
@property
def arm_workspace_range(self):
return {
self.default_arm : [np.deg2rad(-45), np.deg2rad(45)]
}
@property
def eef_usd_path(self):
return {self.default_arm: os.path.join(gm.ASSET_PATH, "models/fetch/fetch/fetch_eef.usd")}
@property
def teleop_rotation_offset(self):
return {self.default_arm: euler2quat([0, np.pi / 2, np.pi])}
| 20,962 | Python | 40.184676 | 134 | 0.60686 |
StanfordVL/OmniGibson/omnigibson/robots/__init__.py | from omnigibson.robots.active_camera_robot import ActiveCameraRobot
from omnigibson.robots.freight import Freight
from omnigibson.robots.husky import Husky
from omnigibson.robots.locobot import Locobot
from omnigibson.robots.locomotion_robot import LocomotionRobot
from omnigibson.robots.manipulation_robot import ManipulationRobot
from omnigibson.robots.robot_base import REGISTERED_ROBOTS, BaseRobot
from omnigibson.robots.turtlebot import Turtlebot
from omnigibson.robots.fetch import Fetch
from omnigibson.robots.tiago import Tiago
from omnigibson.robots.two_wheel_robot import TwoWheelRobot
from omnigibson.robots.franka import FrankaPanda
from omnigibson.robots.franka_allegro import FrankaAllegro
from omnigibson.robots.franka_leap import FrankaLeap
from omnigibson.robots.vx300s import VX300S
from omnigibson.robots.behavior_robot import BehaviorRobot
| 860 | Python | 49.647056 | 69 | 0.873256 |
StanfordVL/OmniGibson/omnigibson/robots/turtlebot.py | import os
import numpy as np
from omnigibson.macros import gm
from omnigibson.robots.two_wheel_robot import TwoWheelRobot
class Turtlebot(TwoWheelRobot):
"""
Turtlebot robot
Reference: http://wiki.ros.org/Robots/TurtleBot
Uses joint velocity control
"""
@property
def wheel_radius(self):
return 0.038
@property
def wheel_axle_length(self):
return 0.23
@property
def base_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return np.array([0, 1])
@property
def _default_joint_pos(self):
return np.zeros(self.n_joints)
@property
def usd_path(self):
return os.path.join(gm.ASSET_PATH, "models/turtlebot/turtlebot/turtlebot.usd")
@property
def urdf_path(self):
return os.path.join(gm.ASSET_PATH, "models/turtlebot/turtlebot.urdf")
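# A minimal sketch of the differential-drive kinematics implied by the two wheel properties above
# (standard two-wheel model; `lin_vel` and `ang_vel` are hypothetical desired base velocities):
#     r, L = 0.038, 0.23                                   # wheel_radius, wheel_axle_length
#     left_wheel_vel = (lin_vel - ang_vel * L / 2) / r     # rad/s
#     right_wheel_vel = (lin_vel + ang_vel * L / 2) / r    # rad/s
# e.g. driving straight at 0.2 m/s spins both wheels at 0.2 / 0.038 ≈ 5.26 rad/s.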
| 963 | Python | 21.95238 | 101 | 0.643821 |
StanfordVL/OmniGibson/omnigibson/robots/manipulation_robot.py | from abc import abstractmethod
from collections import namedtuple
import numpy as np
import networkx as nx
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.controllers import InverseKinematicsController, MultiFingerGripperController, OperationalSpaceController
from omnigibson.macros import gm, create_module_macros
from omnigibson.object_states import ContactBodies
import omnigibson.utils.transform_utils as T
from omnigibson.controllers import (
IsGraspingState,
ControlType,
ManipulationController,
GripperController,
)
from omnigibson.robots.robot_base import BaseRobot
from omnigibson.utils.python_utils import classproperty, assert_valid_key
from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function
from omnigibson.utils.constants import JointType, PrimType
from omnigibson.utils.usd_utils import create_joint
from omnigibson.utils.sampling_utils import raytest_batch
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Assisted grasping parameters
m.ASSIST_FRACTION = 1.0
m.ASSIST_GRASP_MASS_THRESHOLD = 10.0
m.ARTICULATED_ASSIST_FRACTION = 0.7
m.MIN_ASSIST_FORCE = 0
m.MAX_ASSIST_FORCE = 100
m.ASSIST_FORCE = m.MIN_ASSIST_FORCE + (m.MAX_ASSIST_FORCE - m.MIN_ASSIST_FORCE) * m.ASSIST_FRACTION
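# With the defaults above, this works out to ASSIST_FORCE = 0 + (100 - 0) * 1.0 = 100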
m.CONSTRAINT_VIOLATION_THRESHOLD = 0.1
m.RELEASE_WINDOW = 1 / 30.0 # release window in seconds
AG_MODES = {
"physical",
"assisted",
"sticky",
}
GraspingPoint = namedtuple("GraspingPoint", ["link_name", "position"]) # link_name (str), position (x,y,z tuple)
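# Example (illustrative; the link name is the one used by Fetch): a grasp point 2 cm along +x
# in a finger link's local frame:
#     GraspingPoint(link_name="r_gripper_finger_link", position=(0.02, 0.0, 0.0))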
class ManipulationRobot(BaseRobot):
"""
    Robot that is equipped with grasping (manipulation) capabilities.
Provides common interface for a wide variety of robots.
NOTE: controller_config should, at the minimum, contain:
arm: controller specifications for the controller to control this robot's arm (manipulation).
Should include:
- name: Controller to create
- <other kwargs> relevant to the controller being created. Note that all values will have default
values specified, but setting these individual kwargs will override them
"""
def __init__(
self,
# Shared kwargs in hierarchy
name,
prim_path=None,
uuid=None,
scale=None,
visible=True,
fixed_base=False,
visual_only=False,
self_collisions=False,
load_config=None,
# Unique to USDObject hierarchy
abilities=None,
# Unique to ControllableObject hierarchy
control_freq=None,
controller_config=None,
action_type="continuous",
action_normalize=True,
reset_joint_pos=None,
# Unique to BaseRobot
obs_modalities="all",
proprio_obs="default",
sensor_config=None,
# Unique to ManipulationRobot
grasping_mode="physical",
grasping_direction="lower",
disable_grasp_handling=False,
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
fixed_base (bool): whether to fix the base of this object or not
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
self_collisions (bool): Whether to enable self collisions for this object
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
simulator.import_object will automatically set the control frequency to be at the render frequency by default.
controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
configurations for this object. This will override any default values specified by this class.
action_type (str): one of {discrete, continuous} - what type of action space to use
action_normalize (bool): whether to normalize inputted actions. This will override any default values
specified by this class.
reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
be set to during a reset. If None (default), self._default_joint_pos will be used instead.
obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
corresponds to all modalities being used.
Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will
override any values specified from @obs_modalities!
proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
observations. If str, should be exactly "default" -- this results in the default proprioception
observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
for valid key choices
sensor_config (None or dict): nested dictionary mapping sensor class name(s) to specific sensor
configurations for this object. This will override any default values specified by this class.
grasping_mode (str): One of {"physical", "assisted", "sticky"}.
If "physical", no assistive grasping will be applied (relies on contact friction + finger force).
If "assisted", will magnetize any object touching and within the gripper's fingers. In this mode,
at least two "fingers" need to touch the object.
If "sticky", will magnetize any object touching the gripper's fingers. In this mode, only one finger
needs to touch the object.
grasping_direction (str): One of {"lower", "upper"}. If "lower", lower limit represents a closed grasp,
otherwise upper limit represents a closed grasp.
disable_grasp_handling (bool): If True, the robot will not automatically handle assisted or sticky grasps.
Instead, you will need to call the grasp handling methods yourself.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Store relevant internal vars
assert_valid_key(key=grasping_mode, valid_keys=AG_MODES, name="grasping_mode")
assert_valid_key(key=grasping_direction, valid_keys=["lower", "upper"], name="grasping direction")
self._grasping_mode = grasping_mode
self._grasping_direction = grasping_direction
self._disable_grasp_handling = disable_grasp_handling
# Initialize other variables used for assistive grasping
self._ag_freeze_joint_pos = {
arm: {} for arm in self.arm_names
} # Frozen positions for keeping fingers held still
self._ag_obj_in_hand = {arm: None for arm in self.arm_names}
self._ag_obj_constraints = {arm: None for arm in self.arm_names}
self._ag_obj_constraint_params = {arm: {} for arm in self.arm_names}
self._ag_freeze_gripper = {arm: None for arm in self.arm_names}
self._ag_release_counter = {arm: None for arm in self.arm_names}
self._ag_check_in_volume = {arm: None for arm in self.arm_names}
self._ag_calculate_volume = {arm: None for arm in self.arm_names}
# Call super() method
super().__init__(
prim_path=prim_path,
name=name,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
self_collisions=self_collisions,
load_config=load_config,
abilities=abilities,
control_freq=control_freq,
controller_config=controller_config,
action_type=action_type,
action_normalize=action_normalize,
reset_joint_pos=reset_joint_pos,
obs_modalities=obs_modalities,
proprio_obs=proprio_obs,
sensor_config=sensor_config,
**kwargs,
)
def _validate_configuration(self):
# Iterate over all arms
for arm in self.arm_names:
# We make sure that our arm controller exists and is a manipulation controller
assert (
"arm_{}".format(arm) in self._controllers
), "Controller 'arm_{}' must exist in controllers! Current controllers: {}".format(
arm, list(self._controllers.keys())
)
assert isinstance(
self._controllers["arm_{}".format(arm)], ManipulationController
), "Arm {} controller must be a ManipulationController!".format(arm)
# We make sure that our gripper controller exists and is a gripper controller
assert (
"gripper_{}".format(arm) in self._controllers
), "Controller 'gripper_{}' must exist in controllers! Current controllers: {}".format(
arm, list(self._controllers.keys())
)
assert isinstance(
self._controllers["gripper_{}".format(arm)], GripperController
), "Gripper {} controller must be a GripperController!".format(arm)
# run super
super()._validate_configuration()
def _initialize(self):
super()._initialize()
if gm.AG_CLOTH:
for arm in self.arm_names:
self._ag_check_in_volume[arm], self._ag_calculate_volume[arm] = \
generate_points_in_volume_checker_function(obj=self, volume_link=self.eef_links[arm], mesh_name_prefixes="container")
def is_grasping(self, arm="default", candidate_obj=None):
"""
Returns True if the robot is grasping the target option @candidate_obj or any object if @candidate_obj is None.
Args:
arm (str): specific arm to check for grasping. Default is "default" which corresponds to the first entry
in self.arm_names
candidate_obj (StatefulObject or None): object to check if this robot is currently grasping. If None, then
will be a general (object-agnostic) check for grasping.
Note: if self.grasping_mode is "physical", then @candidate_obj will be ignored completely
Returns:
IsGraspingState: For the specific manipulator appendage, returns IsGraspingState.TRUE if it is grasping
(potentially @candidate_obj if specified), IsGraspingState.FALSE if it is not grasping,
and IsGraspingState.UNKNOWN if unknown.
"""
arm = self.default_arm if arm == "default" else arm
if self.grasping_mode != "physical":
is_grasping_obj = (
self._ag_obj_in_hand[arm] is not None
if candidate_obj is None
else self._ag_obj_in_hand[arm] == candidate_obj
)
is_grasping = (
IsGraspingState.TRUE
if is_grasping_obj and self._ag_release_counter[arm] is None
else IsGraspingState.FALSE
)
else:
# Infer from the gripper controller the state
is_grasping = self._controllers["gripper_{}".format(arm)].is_grasping()
# If candidate obj is not None, we also check to see if our fingers are in contact with the object
if is_grasping == IsGraspingState.TRUE and candidate_obj is not None:
                finger_links = set(self.finger_links[arm])
is_grasping = len(candidate_obj.states[ContactBodies].get_value().intersection(finger_links)) > 0
return is_grasping
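    # Illustrative usage (a sketch; assumes `robot` is a ManipulationRobot and `mug` is a
    # StatefulObject in the scene):
    #     if robot.is_grasping(arm="default", candidate_obj=mug) == IsGraspingState.TRUE:
    #         ...  # the default arm is currently grasping the mug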
def _find_gripper_contacts(self, arm="default", return_contact_positions=False):
"""
For arm @arm, calculate any body IDs and corresponding link IDs that are not part of the robot
itself that are in contact with any of this arm's gripper's fingers
Args:
arm (str): specific arm whose gripper will be checked for contact. Default is "default" which
corresponds to the first entry in self.arm_names
return_contact_positions (bool): if True, will additionally return the contact (x,y,z) position
Returns:
2-tuple:
- set: set of unique contact prim_paths that are not the robot self-collisions.
If @return_contact_positions is True, then returns (prim_path, pos), where pos is the contact
(x,y,z) position
Note: if no objects that are not the robot itself are intersecting, the set will be empty.
- dict: dictionary mapping unique contact objects defined by the contact prim_path to
set of unique robot link prim_paths that it is in contact with
"""
arm = self.default_arm if arm == "default" else arm
robot_contact_links = dict()
contact_data = set()
# Find all objects in contact with all finger joints for this arm
con_results = [con for link in self.finger_links[arm] for con in link.contact_list()]
# Get robot contact links
link_paths = set(self.link_prim_paths)
for con_res in con_results:
# Only add this contact if it's not a robot self-collision
other_contact_set = {con_res.body0, con_res.body1} - link_paths
if len(other_contact_set) == 1:
link_contact, other_contact = (con_res.body0, con_res.body1) if \
list(other_contact_set)[0] == con_res.body1 else (con_res.body1, con_res.body0)
# Add to contact data
contact_data.add((other_contact, tuple(con_res.position)) if return_contact_positions else other_contact)
# Also add robot contact link info
if other_contact not in robot_contact_links:
robot_contact_links[other_contact] = set()
robot_contact_links[other_contact].add(link_contact)
return contact_data, robot_contact_links
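    # Illustrative return values (a sketch; the prim path is hypothetical):
    #     contact_data == {"/World/mug/base_link"}                      # prim paths only
    #     contact_data == {("/World/mug/base_link", (x, y, z))}         # with return_contact_positions=True
    # and robot_contact_links maps each contacted prim path to the set of robot link prim paths touching it.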
def set_position_orientation(self, position=None, orientation=None):
# Store the original EEF poses.
original_poses = {}
for arm in self.arm_names:
original_poses[arm] = (self.get_eef_position(arm), self.get_eef_orientation(arm))
# Run the super method
super().set_position_orientation(position=position, orientation=orientation)
# Now for each hand, if it was holding an AG object, teleport it.
for arm in self.arm_names:
if self._ag_obj_in_hand[arm] is not None:
original_eef_pose = T.pose2mat(original_poses[arm])
inv_original_eef_pose = T.pose_inv(pose_mat=original_eef_pose)
original_obj_pose = T.pose2mat(self._ag_obj_in_hand[arm].get_position_orientation())
new_eef_pose = T.pose2mat((self.get_eef_position(arm), self.get_eef_orientation(arm)))
# New object pose is transform:
# original --> "De"transform the original EEF pose --> "Re"transform the new EEF pose
new_obj_pose = new_eef_pose @ inv_original_eef_pose @ original_obj_pose
self._ag_obj_in_hand[arm].set_position_orientation(*T.mat2pose(hmat=new_obj_pose))
def deploy_control(self, control, control_type, indices=None, normalized=False):
# We intercept the gripper control and replace it with the current joint position if we're freezing our gripper
for arm in self.arm_names:
if self._ag_freeze_gripper[arm]:
control[self.gripper_control_idx[arm]] = self._ag_obj_constraint_params[arm]["gripper_pos"] if \
self.controllers[f"gripper_{arm}"].control_type == ControlType.POSITION else 0.0
super().deploy_control(control=control, control_type=control_type, indices=indices, normalized=normalized)
# Then run assisted grasping
if self.grasping_mode != "physical" and not self._disable_grasp_handling:
self._handle_assisted_grasping()
# Potentially freeze gripper joints
for arm in self.arm_names:
if self._ag_freeze_gripper[arm]:
self._freeze_gripper(arm)
def _release_grasp(self, arm="default"):
"""
Magic action to release this robot's grasp on an object
Args:
arm (str): specific arm whose grasp will be released.
Default is "default" which corresponds to the first entry in self.arm_names
"""
arm = self.default_arm if arm == "default" else arm
# Remove joint and filtered collision restraints
og.sim.stage.RemovePrim(self._ag_obj_constraint_params[arm]["ag_joint_prim_path"])
self._ag_obj_constraints[arm] = None
self._ag_obj_constraint_params[arm] = {}
self._ag_freeze_gripper[arm] = False
self._ag_release_counter[arm] = 0
def release_grasp_immediately(self):
"""
Magic action to release this robot's grasp for all arms at once.
        As opposed to @_release_grasp, this method bypasses the release window mechanism and releases immediately.
"""
for arm in self.arm_names:
if self._ag_obj_in_hand[arm] is not None:
self._release_grasp(arm=arm)
self._ag_release_counter[arm] = int(np.ceil(m.RELEASE_WINDOW / og.sim.get_rendering_dt()))
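                # e.g. with RELEASE_WINDOW = 1/30 s and a hypothetical rendering dt of 1/60 s,
                # this sets the counter to ceil(2.0) = 2, so the release window elapses right away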
self._handle_release_window(arm=arm)
assert not self._ag_obj_in_hand[arm], "Object still in ag list after release!"
# TODO: Verify not needed!
# for finger_link in self.finger_links[arm]:
# finger_link.remove_filtered_collision_pair(prim=self._ag_obj_in_hand[arm])
def get_control_dict(self):
# In addition to super method, add in EEF states
fcns = super().get_control_dict()
for arm in self.arm_names:
self._add_arm_control_dict(fcns=fcns, arm=arm)
return fcns
def _add_arm_control_dict(self, fcns, arm):
"""
        Internal helper function to generate per-arm control dictionary entries. Needed because otherwise the generated
functions inadvertently point to the same arm, if directly iterated in a for loop!
Args:
fcns (CachedFunctions): Keyword-mapped control values for this object, mapping names to n-arrays.
arm (str): specific arm to generate necessary control dict entries for
"""
fcns[f"_eef_{arm}_pos_quat_relative"] = lambda: self.get_relative_eef_pose(arm)
fcns[f"eef_{arm}_pos_relative"] = lambda: fcns[f"_eef_{arm}_pos_quat_relative"][0]
fcns[f"eef_{arm}_quat_relative"] = lambda: fcns[f"_eef_{arm}_pos_quat_relative"][1]
fcns[f"eef_{arm}_lin_vel_relative"] = lambda: self.get_relative_eef_lin_vel(arm)
fcns[f"eef_{arm}_ang_vel_relative"] = lambda: self.get_relative_eef_ang_vel(arm)
# -n_joints because there may be an additional 6 entries at the beginning of the array, if this robot does
# not have a fixed base (i.e.: the 6DOF --> "floating" joint)
# see self.get_relative_jacobian() for more info
eef_link_idx = self._articulation_view.get_body_index(self.eef_links[arm].body_name)
fcns[f"eef_{arm}_jacobian_relative"] = lambda: self.get_relative_jacobian(clone=False)[eef_link_idx, :, -self.n_joints:]
def _get_proprioception_dict(self):
dic = super()._get_proprioception_dict()
# Loop over all arms to grab proprio info
joint_positions = self.get_joint_positions(normalized=False)
joint_velocities = self.get_joint_velocities(normalized=False)
for arm in self.arm_names:
# Add arm info
dic["arm_{}_qpos".format(arm)] = joint_positions[self.arm_control_idx[arm]]
dic["arm_{}_qpos_sin".format(arm)] = np.sin(joint_positions[self.arm_control_idx[arm]])
dic["arm_{}_qpos_cos".format(arm)] = np.cos(joint_positions[self.arm_control_idx[arm]])
dic["arm_{}_qvel".format(arm)] = joint_velocities[self.arm_control_idx[arm]]
# Add eef and grasping info
dic["eef_{}_pos_global".format(arm)] = self.get_eef_position(arm)
dic["eef_{}_quat_global".format(arm)] = self.get_eef_orientation(arm)
dic["eef_{}_pos".format(arm)] = self.get_relative_eef_position(arm)
dic["eef_{}_quat".format(arm)] = self.get_relative_eef_orientation(arm)
dic["grasp_{}".format(arm)] = np.array([self.is_grasping(arm)])
dic["gripper_{}_qpos".format(arm)] = joint_positions[self.gripper_control_idx[arm]]
dic["gripper_{}_qvel".format(arm)] = joint_velocities[self.gripper_control_idx[arm]]
return dic
@property
def default_proprio_obs(self):
obs_keys = super().default_proprio_obs
for arm in self.arm_names:
obs_keys += [
"arm_{}_qpos_sin".format(arm),
"arm_{}_qpos_cos".format(arm),
"eef_{}_pos".format(arm),
"eef_{}_quat".format(arm),
"gripper_{}_qpos".format(arm),
"grasp_{}".format(arm),
]
return obs_keys
@property
def grasping_mode(self):
"""
Grasping mode of this robot. Is one of AG_MODES
Returns:
str: Grasping mode for this robot
"""
return self._grasping_mode
@property
def controller_order(self):
# Assumes we have arm(s) and corresponding gripper(s)
controllers = []
for arm in self.arm_names:
controllers += ["arm_{}".format(arm), "gripper_{}".format(arm)]
return controllers
@property
def _default_controllers(self):
# Always call super first
controllers = super()._default_controllers
# For best generalizability use, joint controller as default
for arm in self.arm_names:
controllers["arm_{}".format(arm)] = "JointController"
controllers["gripper_{}".format(arm)] = "JointController"
return controllers
@property
def n_arms(self):
"""
Returns:
int: Number of arms this robot has. Returns 1 by default
"""
return 1
@property
def arm_names(self):
"""
Returns:
list of str: List of arm names for this robot. Should correspond to the keys used to index into
arm- and gripper-related dictionaries, e.g.: eef_link_names, finger_link_names, etc.
Default is string enumeration based on @self.n_arms.
"""
return [str(i) for i in range(self.n_arms)]
@property
def default_arm(self):
"""
Returns:
str: Default arm name for this robot, corresponds to the first entry in @arm_names by default
"""
return self.arm_names[0]
@property
def arm_action_idx(self):
arm_action_idx = {}
for arm_name in self.arm_names:
controller_idx = self.controller_order.index(f"arm_{arm_name}")
action_start_idx = sum([self.controllers[self.controller_order[i]].command_dim for i in range(controller_idx)])
arm_action_idx[arm_name] = np.arange(action_start_idx, action_start_idx + self.controllers[f"arm_{arm_name}"].command_dim)
return arm_action_idx
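    # Illustrative usage (a sketch; `action` is a hypothetical full action vector):
    #     arm_cmd = action[robot.arm_action_idx[robot.default_arm]]
    # i.e. the slice of the action vector consumed by that arm's controller.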
@property
def gripper_action_idx(self):
gripper_action_idx = {}
for arm_name in self.arm_names:
controller_idx = self.controller_order.index(f"gripper_{arm_name}")
action_start_idx = sum([self.controllers[self.controller_order[i]].command_dim for i in range(controller_idx)])
gripper_action_idx[arm_name] = np.arange(action_start_idx, action_start_idx + self.controllers[f"gripper_{arm_name}"].command_dim)
return gripper_action_idx
@property
@abstractmethod
def arm_link_names(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to corresponding arm link names,
should correspond to specific link names in this robot's underlying model file
"""
raise NotImplementedError
@property
@abstractmethod
def arm_joint_names(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to corresponding arm joint names,
should correspond to specific joint names in this robot's underlying model file
"""
raise NotImplementedError
@property
@abstractmethod
def eef_link_names(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to corresponding name of the EEF link,
should correspond to specific link name in this robot's underlying model file
"""
raise NotImplementedError
@property
@abstractmethod
def finger_link_names(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to array of link names corresponding to
this robot's fingers
"""
raise NotImplementedError
@property
@abstractmethod
def finger_joint_names(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to array of joint names corresponding to
this robot's fingers
"""
raise NotImplementedError
@property
@abstractmethod
def arm_control_idx(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to indices in low-level control
vector corresponding to arm joints.
"""
raise NotImplementedError
@property
@abstractmethod
def gripper_control_idx(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to indices in low-level control
vector corresponding to gripper joints.
"""
raise NotImplementedError
@property
def arm_links(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to robot links corresponding to
that arm's links
"""
return {arm: [self._links[link] for link in self.arm_link_names[arm]] for arm in self.arm_names}
@property
def eef_links(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to robot link corresponding to that arm's
eef link
"""
return {arm: self._links[self.eef_link_names[arm]] for arm in self.arm_names}
@property
def finger_links(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to robot links corresponding to
that arm's finger links
"""
return {arm: [self._links[link] for link in self.finger_link_names[arm]] for arm in self.arm_names}
@property
def finger_joints(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to robot joints corresponding to
that arm's finger joints
"""
return {arm: [self._joints[joint] for joint in self.finger_joint_names[arm]] for arm in self.arm_names}
@property
def assisted_grasp_start_points(self):
"""
Returns:
dict: Dictionary mapping individual arm appendage names to array of GraspingPoint tuples,
composed of (link_name, position) values specifying valid grasping start points located at
cartesian (x,y,z) coordinates specified in link_name's local coordinate frame.
These values will be used in conjunction with
@self.assisted_grasp_end_points to trigger assisted grasps, where objects that intersect
with any ray starting at any point in @self.assisted_grasp_start_points and terminating at any point in
@self.assisted_grasp_end_points will trigger an assisted grasp (calculated individually for each gripper
appendage). By default, each entry returns None, and must be implemented by any robot subclass that
wishes to use assisted grasping.
"""
return {arm: None for arm in self.arm_names}
@property
def assisted_grasp_end_points(self):
"""
Returns:
dict: Dictionary mapping individual arm appendage names to array of GraspingPoint tuples,
composed of (link_name, position) values specifying valid grasping end points located at
cartesian (x,y,z) coordinates specified in link_name's local coordinate frame.
These values will be used in conjunction with
@self.assisted_grasp_start_points to trigger assisted grasps, where objects that intersect
with any ray starting at any point in @self.assisted_grasp_start_points and terminating at any point in
@self.assisted_grasp_end_points will trigger an assisted grasp (calculated individually for each gripper
appendage). By default, each entry returns None, and must be implemented by any robot subclass that
wishes to use assisted grasping.
"""
return {arm: None for arm in self.arm_names}
@property
def finger_lengths(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to corresponding length of the fingers in that
hand defined from the palm (assuming all fingers in one hand are equally long)
"""
raise NotImplementedError
@property
def arm_workspace_range(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to a tuple indicating the start and end of the
angular range of the arm workspace around the Z axis of the robot, where 0 is facing
forward.
"""
raise NotImplementedError
def get_eef_position(self, arm="default"):
"""
Args:
arm (str): specific arm to grab eef position. Default is "default" which corresponds to the first entry
in self.arm_names
Returns:
3-array: (x,y,z) global end-effector Cartesian position for this robot's end-effector corresponding
to arm @arm
"""
arm = self.default_arm if arm == "default" else arm
return self._links[self.eef_link_names[arm]].get_position()
def get_eef_orientation(self, arm="default"):
"""
Args:
arm (str): specific arm to grab eef orientation. Default is "default" which corresponds to the first entry
in self.arm_names
Returns:
3-array: (x,y,z,w) global quaternion orientation for this robot's end-effector corresponding
to arm @arm
"""
arm = self.default_arm if arm == "default" else arm
return self._links[self.eef_link_names[arm]].get_orientation()
def get_relative_eef_pose(self, arm="default", mat=False):
"""
Args:
arm (str): specific arm to grab eef pose. Default is "default" which corresponds to the first entry
in self.arm_names
mat (bool): whether to return pose in matrix form (mat=True) or (pos, quat) tuple (mat=False)
Returns:
2-tuple or (4, 4)-array: End-effector pose, either in 4x4 homogeneous
matrix form (if @mat=True) or (pos, quat) tuple (if @mat=False), corresponding to arm @arm
"""
arm = self.default_arm if arm == "default" else arm
eef_link_pose = self.eef_links[arm].get_position_orientation()
base_link_pose = self.get_position_orientation()
pose = T.relative_pose_transform(*eef_link_pose, *base_link_pose)
return T.pose2mat(pose) if mat else pose
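    # Illustrative usage (a sketch): both forms describe the same EEF pose in the base frame:
    #     pos, quat = robot.get_relative_eef_pose()         # (3,) pos, (4,) x,y,z,w quat
    #     pose_mat = robot.get_relative_eef_pose(mat=True)  # (4, 4) homogeneous matrix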
def get_relative_eef_position(self, arm="default"):
"""
Args:
arm (str): specific arm to grab relative eef pos.
Default is "default" which corresponds to the first entry in self.arm_names
Returns:
3-array: (x,y,z) Cartesian position of end-effector relative to robot base frame
"""
arm = self.default_arm if arm == "default" else arm
return self.get_relative_eef_pose(arm=arm)[0]
def get_relative_eef_orientation(self, arm="default"):
"""
Args:
arm (str): specific arm to grab relative eef orientation.
Default is "default" which corresponds to the first entry in self.arm_names
Returns:
4-array: (x,y,z,w) quaternion orientation of end-effector relative to robot base frame
"""
arm = self.default_arm if arm == "default" else arm
return self.get_relative_eef_pose(arm=arm)[1]
def get_relative_eef_lin_vel(self, arm="default"):
"""
Args:
arm (str): specific arm to grab relative eef linear velocity.
Default is "default" which corresponds to the first entry in self.arm_names
Returns:
3-array: (x,y,z) Linear velocity of end-effector relative to robot base frame
"""
arm = self.default_arm if arm == "default" else arm
base_link_quat = self.get_orientation()
return T.quat2mat(base_link_quat).T @ self.eef_links[arm].get_linear_velocity()
def get_relative_eef_ang_vel(self, arm="default"):
"""
Args:
arm (str): specific arm to grab relative eef angular velocity.
Default is "default" which corresponds to the first entry in self.arm_names
Returns:
3-array: (ax,ay,az) angular velocity of end-effector relative to robot base frame
"""
arm = self.default_arm if arm == "default" else arm
base_link_quat = self.get_orientation()
return T.quat2mat(base_link_quat).T @ self.eef_links[arm].get_angular_velocity()
def _calculate_in_hand_object_rigid(self, arm="default"):
"""
        Calculates which object to assisted-grasp for arm @arm. Returns an (object, object_link) tuple or None
if no valid AG-enabled object can be found.
Args:
arm (str): specific arm to calculate in-hand object for.
Default is "default" which corresponds to the first entry in self.arm_names
Returns:
None or 2-tuple: If a valid assisted-grasp object is found, returns the corresponding
(object, object_link) (i.e.: (BaseObject, RigidPrim)) pair to the contacted in-hand object.
Otherwise, returns None
"""
arm = self.default_arm if arm == "default" else arm
# If we're not using physical grasping, we check for gripper contact
if self.grasping_mode != "physical":
candidates_set, robot_contact_links = self._find_gripper_contacts(arm=arm)
# If we're using assisted grasping, we further filter candidates via ray-casting
if self.grasping_mode == "assisted":
candidates_set_raycast = self._find_gripper_raycast_collisions(arm=arm)
candidates_set = candidates_set.intersection(candidates_set_raycast)
else:
raise ValueError("Invalid grasping mode for calculating in hand object: {}".format(self.grasping_mode))
# Immediately return if there are no valid candidates
if len(candidates_set) == 0:
return None
# Find the closest object to the gripper center
gripper_center_pos = self.eef_links[arm].get_position()
candidate_data = []
for prim_path in candidates_set:
            # Calculate position of the object link. Only sim-registered objects are currently supported.
obj_prim_path, link_name = prim_path.rsplit("/", 1)
candidate_obj = og.sim.scene.object_registry("prim_path", obj_prim_path, None)
if candidate_obj is None or link_name not in candidate_obj.links:
continue
candidate_link = candidate_obj.links[link_name]
dist = np.linalg.norm(np.array(candidate_link.get_position()) - np.array(gripper_center_pos))
candidate_data.append((prim_path, dist))
if not candidate_data:
return None
candidate_data = sorted(candidate_data, key=lambda x: x[-1])
ag_prim_path, _ = candidate_data[0]
# Make sure the ag_prim_path is not a self collision
assert ag_prim_path not in self.link_prim_paths, "assisted grasp object cannot be the robot itself!"
# Make sure at least two fingers are in contact with this object
robot_contacts = robot_contact_links[ag_prim_path]
touching_at_least_two_fingers = True if self.grasping_mode == "sticky" else len({link.prim_path for link in self.finger_links[arm]}.intersection(robot_contacts)) >= 2
# TODO: Better heuristic, hacky, we assume the parent object prim path is the prim_path minus the last "/" item
ag_obj_prim_path = "/".join(ag_prim_path.split("/")[:-1])
ag_obj_link_name = ag_prim_path.split("/")[-1]
ag_obj = og.sim.scene.object_registry("prim_path", ag_obj_prim_path)
# Return None if object cannot be assisted grasped or not touching at least two fingers
if ag_obj is None or not touching_at_least_two_fingers:
return None
# Get object and its contacted link
return ag_obj, ag_obj.links[ag_obj_link_name]
def _find_gripper_raycast_collisions(self, arm="default"):
"""
For arm @arm, calculate any prims that are not part of the robot
itself that intersect with rays cast between any of the gripper's start and end points
Args:
arm (str): specific arm whose gripper will be checked for raycast collisions. Default is "default"
which corresponds to the first entry in self.arm_names
Returns:
set[str]: set of prim path of detected raycast intersections that
are not the robot itself. Note: if no objects that are not the robot itself are intersecting,
the set will be empty.
"""
arm = self.default_arm if arm == "default" else arm
# First, make sure start and end grasp points exist (i.e.: aren't None)
assert (
self.assisted_grasp_start_points[arm] is not None
), "In order to use assisted grasping, assisted_grasp_start_points must not be None!"
assert (
self.assisted_grasp_end_points[arm] is not None
), "In order to use assisted grasping, assisted_grasp_end_points must not be None!"
# Iterate over all start and end grasp points and calculate their x,y,z positions in the world frame
# (per arm appendage)
        # Since we'll be calculating the Cartesian product between start and end points, we tile the start points
# by the number of end points and repeat the individual elements of the end points by the number of start points
startpoints = []
endpoints = []
for grasp_start_point in self.assisted_grasp_start_points[arm]:
# Get world coordinates of link base frame
link_pos, link_orn = self.links[grasp_start_point.link_name].get_position_orientation()
# Calculate grasp start point in world frame and add to startpoints
start_point, _ = T.pose_transform(link_pos, link_orn, grasp_start_point.position, [0, 0, 0, 1])
startpoints.append(start_point)
# Repeat for end points
for grasp_end_point in self.assisted_grasp_end_points[arm]:
# Get world coordinates of link base frame
link_pos, link_orn = self.links[grasp_end_point.link_name].get_position_orientation()
            # Calculate grasp end point in world frame and add to endpoints
end_point, _ = T.pose_transform(link_pos, link_orn, grasp_end_point.position, [0, 0, 0, 1])
endpoints.append(end_point)
# Stack the start points and repeat the end points, and add these values to the raycast dicts
n_startpoints, n_endpoints = len(startpoints), len(endpoints)
raycast_startpoints = startpoints * n_endpoints
raycast_endpoints = []
for endpoint in endpoints:
raycast_endpoints += [endpoint] * n_startpoints
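        # e.g. with 2 start points [s0, s1] and 3 end points [e0, e1, e2], this pairs up as the
        # 6 rays (s0,e0), (s1,e0), (s0,e1), (s1,e1), (s0,e2), (s1,e2)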
ray_data = set()
# Calculate raycasts from each start point to end point -- this is n_startpoints * n_endpoints total rays
for result in raytest_batch(raycast_startpoints, raycast_endpoints, only_closest=True):
if result["hit"]:
# filter out self body parts (we currently assume that the robot cannot grasp itself)
if self.prim_path not in result["rigidBody"]:
ray_data.add(result["rigidBody"])
return ray_data
def _handle_release_window(self, arm="default"):
"""
Handles releasing an object from arm @arm
Args:
arm (str): specific arm to handle release window.
Default is "default" which corresponds to the first entry in self.arm_names
"""
arm = self.default_arm if arm == "default" else arm
self._ag_release_counter[arm] += 1
time_since_release = self._ag_release_counter[arm] * og.sim.get_rendering_dt()
if time_since_release >= m.RELEASE_WINDOW:
self._ag_obj_in_hand[arm] = None
self._ag_release_counter[arm] = None
def _freeze_gripper(self, arm="default"):
"""
Freezes gripper finger joints - used in assisted grasping.
Args:
arm (str): specific arm to freeze gripper.
Default is "default" which corresponds to the first entry in self.arm_names
"""
arm = self.default_arm if arm == "default" else arm
for joint_name, j_val in self._ag_freeze_joint_pos[arm].items():
joint = self._joints[joint_name]
joint.set_pos(pos=j_val)
joint.set_vel(vel=0.0)
@property
def robot_arm_descriptor_yamls(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to files path to the descriptor
of the robot for IK Controller.
"""
raise NotImplementedError
@property
def _default_arm_joint_controller_configs(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to default controller config to control that
                robot's arm. Uses delta-position control with impedances by default.
"""
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "JointController",
"control_freq": self._control_freq,
"control_limits": self.control_limits,
"dof_idx": self.arm_control_idx[arm],
"command_output_limits": None,
"motor_type": "position",
"use_delta_commands": True,
"use_impedances": True,
}
return dic
@property
def _default_arm_ik_controller_configs(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to default controller config for an
Inverse kinematics controller to control this robot's arm
"""
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "InverseKinematicsController",
"task_name": f"eef_{arm}",
"robot_description_path": self.robot_arm_descriptor_yamls[arm],
"robot_urdf_path": self.urdf_path,
"eef_name": self.eef_link_names[arm],
"control_freq": self._control_freq,
"reset_joint_pos": self.reset_joint_pos,
"control_limits": self.control_limits,
"dof_idx": self.arm_control_idx[arm],
"command_output_limits": (
np.array([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
np.array([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
),
"mode": "pose_delta_ori",
"smoothing_filter_size": 2,
"workspace_pose_limiter": None,
}
return dic
@property
def _default_arm_osc_controller_configs(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to default controller config for an
operational space controller to control this robot's arm
"""
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "OperationalSpaceController",
"task_name": f"eef_{arm}",
"control_freq": self._control_freq,
"reset_joint_pos": self.reset_joint_pos,
"control_limits": self.control_limits,
"dof_idx": self.arm_control_idx[arm],
"command_output_limits": (
np.array([-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]),
np.array([0.2, 0.2, 0.2, 0.5, 0.5, 0.5]),
),
"mode": "pose_delta_ori",
"workspace_pose_limiter": None,
}
return dic
@property
def _default_arm_null_joint_controller_configs(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to default arm null controller config
to control this robot's arm i.e. dummy controller
"""
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "NullJointController",
"control_freq": self._control_freq,
"motor_type": "position",
"control_limits": self.control_limits,
"dof_idx": self.arm_control_idx[arm],
"default_command": self.reset_joint_pos[self.arm_control_idx[arm]],
"use_impedances": False,
}
return dic
@property
def _default_gripper_multi_finger_controller_configs(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to default controller config to control
this robot's multi finger gripper. Assumes robot gripper idx has exactly two elements
"""
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "MultiFingerGripperController",
"control_freq": self._control_freq,
"motor_type": "position",
"control_limits": self.control_limits,
"dof_idx": self.gripper_control_idx[arm],
"command_output_limits": "default",
"mode": "binary",
"limit_tolerance": 0.001,
}
return dic
@property
def _default_gripper_joint_controller_configs(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to default gripper joint controller config
to control this robot's gripper
"""
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "JointController",
"control_freq": self._control_freq,
"motor_type": "velocity",
"control_limits": self.control_limits,
"dof_idx": self.gripper_control_idx[arm],
"command_output_limits": "default",
"use_delta_commands": False,
"use_impedances": False,
}
return dic
@property
def _default_gripper_null_controller_configs(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to default gripper null controller config
to control this robot's (non-prehensile) gripper i.e. dummy controller
"""
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "NullJointController",
"control_freq": self._control_freq,
"motor_type": "velocity",
"control_limits": self.control_limits,
"dof_idx": self.gripper_control_idx[arm],
"default_command": np.zeros(len(self.gripper_control_idx[arm])),
"use_impedances": False,
}
return dic
@property
def _default_controller_config(self):
# Always run super method first
cfg = super()._default_controller_config
arm_ik_configs = self._default_arm_ik_controller_configs
arm_osc_configs = self._default_arm_osc_controller_configs
arm_joint_configs = self._default_arm_joint_controller_configs
arm_null_joint_configs = self._default_arm_null_joint_controller_configs
gripper_pj_configs = self._default_gripper_multi_finger_controller_configs
gripper_joint_configs = self._default_gripper_joint_controller_configs
gripper_null_configs = self._default_gripper_null_controller_configs
# Add arm and gripper defaults, per arm
for arm in self.arm_names:
cfg["arm_{}".format(arm)] = {
arm_ik_configs[arm]["name"]: arm_ik_configs[arm],
arm_osc_configs[arm]["name"]: arm_osc_configs[arm],
arm_joint_configs[arm]["name"]: arm_joint_configs[arm],
arm_null_joint_configs[arm]["name"]: arm_null_joint_configs[arm],
}
cfg["gripper_{}".format(arm)] = {
gripper_pj_configs[arm]["name"]: gripper_pj_configs[arm],
gripper_joint_configs[arm]["name"]: gripper_joint_configs[arm],
gripper_null_configs[arm]["name"]: gripper_null_configs[arm],
}
return cfg
def _get_assisted_grasp_joint_type(self, ag_obj, ag_link):
"""
        Check whether an object @ag_obj can be grasped. If so, return the joint type to use for assisted grasping.
Otherwise, return None.
Args:
ag_obj (BaseObject): Object targeted for an assisted grasp
ag_link (RigidPrim): Link of the object to be grasped
Returns:
(None or str): If obj can be grasped, returns the joint type to use for assisted grasping.
"""
        # Deny objects that are too heavy, unless the target link is a non-base link of a fixed-base object
mass = ag_link.mass
if mass > m.ASSIST_GRASP_MASS_THRESHOLD and not (ag_obj.fixed_base and ag_link != ag_obj.root_link):
return None
# Otherwise, compute the joint type. We use a fixed joint unless the link is a non-fixed link.
# A link is non-fixed if it has any non-fixed parent joints.
joint_type = "FixedJoint"
for edge in nx.edge_dfs(ag_obj.articulation_tree, ag_link.body_name, orientation="reverse"):
joint = ag_obj.articulation_tree.edges[edge]["joint"]
if joint.joint_type != JointType.JOINT_FIXED:
joint_type = "SphericalJoint"
break
return joint_type
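    # Illustrative outcomes (a sketch): grasping the root link of a light, free-floating mug yields
    # "FixedJoint", while grasping a cabinet door link that hangs off a revolute hinge yields
    # "SphericalJoint", since a non-fixed parent joint is found along the kinematic chain.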
def _establish_grasp_rigid(self, arm="default", ag_data=None, contact_pos=None):
"""
Establishes an ag-assisted grasp, if enabled.
Args:
arm (str): specific arm to establish grasp.
Default is "default" which corresponds to the first entry in self.arm_names
ag_data (None or 2-tuple): if specified, assisted-grasp object, link tuple (i.e. :(BaseObject, RigidPrim)).
Otherwise, does a no-op
contact_pos (None or np.array): if specified, contact position to use for grasp.
"""
arm = self.default_arm if arm == "default" else arm
# Return immediately if ag_data is None
if ag_data is None:
return
ag_obj, ag_link = ag_data
# Get the appropriate joint type
joint_type = self._get_assisted_grasp_joint_type(ag_obj, ag_link)
if joint_type is None:
return
if contact_pos is None:
force_data, _ = self._find_gripper_contacts(arm=arm, return_contact_positions=True)
for c_link_prim_path, c_contact_pos in force_data:
if c_link_prim_path == ag_link.prim_path:
contact_pos = np.array(c_contact_pos)
break
assert contact_pos is not None
# Joint frame set at the contact point
# Need to find distance between robot and contact point in robot link's local frame and
# ag link and contact point in ag link's local frame
joint_frame_pos = contact_pos
joint_frame_orn = np.array([0, 0, 0, 1.0])
eef_link_pos, eef_link_orn = self.eef_links[arm].get_position_orientation()
parent_frame_pos, parent_frame_orn = T.relative_pose_transform(joint_frame_pos, joint_frame_orn, eef_link_pos, eef_link_orn)
obj_link_pos, obj_link_orn = ag_link.get_position_orientation()
child_frame_pos, child_frame_orn = T.relative_pose_transform(joint_frame_pos, joint_frame_orn, obj_link_pos, obj_link_orn)
# Create the joint
joint_prim_path = f"{self.eef_links[arm].prim_path}/ag_constraint"
joint_prim = create_joint(
prim_path=joint_prim_path,
joint_type=joint_type,
body0=self.eef_links[arm].prim_path,
body1=ag_link.prim_path,
enabled=True,
joint_frame_in_parent_frame_pos=parent_frame_pos / self.scale,
joint_frame_in_parent_frame_quat=parent_frame_orn,
joint_frame_in_child_frame_pos=child_frame_pos / ag_obj.scale,
joint_frame_in_child_frame_quat=child_frame_orn,
)
# Save a reference to this joint prim
self._ag_obj_constraints[arm] = joint_prim
# Modify max force based on user-determined assist parameters
# TODO
max_force = m.ASSIST_FORCE if joint_type == "FixedJoint" else m.ASSIST_FORCE * m.ARTICULATED_ASSIST_FRACTION
# joint_prim.GetAttribute("physics:breakForce").Set(max_force)
self._ag_obj_constraint_params[arm] = {
"ag_obj_prim_path": ag_obj.prim_path,
"ag_link_prim_path": ag_link.prim_path,
"ag_joint_prim_path": joint_prim_path,
"joint_type": joint_type,
"gripper_pos": self.get_joint_positions()[self.gripper_control_idx[arm]],
"max_force": max_force,
"contact_pos": contact_pos,
}
self._ag_obj_in_hand[arm] = ag_obj
self._ag_freeze_gripper[arm] = True
for joint in self.finger_joints[arm]:
j_val = joint.get_state()[0][0]
self._ag_freeze_joint_pos[arm][joint.joint_name] = j_val
def _handle_assisted_grasping(self):
"""
Handles assisted grasping by creating or removing constraints.
"""
# Loop over all arms
for arm in self.arm_names:
# We apply a threshold based on the control rather than the command here so that the behavior
# stays the same across different controllers and control modes (absolute / delta). This way,
# a zero action will actually keep the AG setting where it already is.
controller = self._controllers[f"gripper_{arm}"]
controlled_joints = controller.dof_idx
threshold = np.mean([self.joint_lower_limits[controlled_joints], self.joint_upper_limits[controlled_joints]], axis=0)
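            # e.g. for a gripper joint with limits [0.0, 0.05] the threshold is 0.025; with
            # grasping_direction == "lower", any control value below 0.025 counts as applying a grasp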
if controller.control is None:
applying_grasp = False
elif self._grasping_direction == "lower":
applying_grasp = np.any(controller.control < threshold)
else:
applying_grasp = np.any(controller.control > threshold)
# Execute gradual release of object
if self._ag_obj_in_hand[arm]:
if self._ag_release_counter[arm] is not None:
self._handle_release_window(arm=arm)
else:
if gm.AG_CLOTH:
self._update_constraint_cloth(arm=arm)
if not applying_grasp:
self._release_grasp(arm=arm)
elif applying_grasp:
self._establish_grasp(arm=arm, ag_data=self._calculate_in_hand_object(arm=arm))
def _update_constraint_cloth(self, arm="default"):
"""
Update the AG constraint for cloth: for the fixed joint between the attachment point and the world, we set
the local pos to match the current eef link position plus the attachment_point_pos_local offset. As a result,
the joint will drive the attachment point to the updated position, which will then drive the cloth.
See _establish_grasp_cloth for more details.
Args:
arm (str): specific arm to establish grasp.
Default is "default" which corresponds to the first entry in self.arm_names
"""
attachment_point_pos_local = self._ag_obj_constraint_params[arm]["attachment_point_pos_local"]
eef_link_pos, eef_link_orn = self.eef_links[arm].get_position_orientation()
attachment_point_pos, _ = T.pose_transform(eef_link_pos, eef_link_orn, attachment_point_pos_local, [0, 0, 0, 1])
joint_prim = self._ag_obj_constraints[arm]
joint_prim.GetAttribute("physics:localPos1").Set(lazy.pxr.Gf.Vec3f(*attachment_point_pos.astype(float)))
def _calculate_in_hand_object(self, arm="default"):
if gm.AG_CLOTH:
return self._calculate_in_hand_object_cloth(arm)
else:
return self._calculate_in_hand_object_rigid(arm)
def _establish_grasp(self, arm="default", ag_data=None, contact_pos=None):
if gm.AG_CLOTH:
return self._establish_grasp_cloth(arm, ag_data)
else:
return self._establish_grasp_rigid(arm, ag_data, contact_pos)
def _calculate_in_hand_object_cloth(self, arm="default"):
"""
Same as _calculate_in_hand_object_rigid, except for cloth. Only one should be used at any given time.
        Calculates which object to assisted-grasp for arm @arm. Returns a (BaseObject, RigidPrim, np.ndarray) tuple or
        None if no valid AG-enabled object can be found.
        1) Check if the gripper is closed enough
        2) Go through each of the cloth objects, and check if its attachment point link position is within the "ghost"
        box volume of the gripper link.
        Only returns the first valid object and ignores the rest.
Args:
arm (str): specific arm to establish grasp.
Default is "default" which corresponds to the first entry in self.arm_names
Returns:
None or 3-tuple: If a valid assisted-grasp object is found,
returns the corresponding (object, object_link, attachment_point_position), i.e.
((BaseObject, RigidPrim, np.ndarray)) to the contacted in-hand object. Otherwise, returns None
"""
# TODO (eric): Assume joint_pos = 0 means fully closed
GRIPPER_FINGER_CLOSE_THRESHOLD = 0.03
gripper_finger_pos = self.get_joint_positions()[self.gripper_control_idx[arm]]
gripper_finger_close = np.sum(gripper_finger_pos) < GRIPPER_FINGER_CLOSE_THRESHOLD
if not gripper_finger_close:
return None
cloth_objs = og.sim.scene.object_registry("prim_type", PrimType.CLOTH)
if cloth_objs is None:
return None
# TODO (eric): Only AG one cloth at any given moment.
# Returns the first cloth that overlaps with the "ghost" box volume
for cloth_obj in cloth_objs:
attachment_point_pos = cloth_obj.links["attachment_point"].get_position()
particles_in_volume = self._ag_check_in_volume[arm]([attachment_point_pos])
if particles_in_volume.sum() > 0:
return cloth_obj, cloth_obj.links["attachment_point"], attachment_point_pos
return None
def _establish_grasp_cloth(self, arm="default", ag_data=None):
"""
        Same as _establish_grasp_rigid, except for cloth. Only one should be used at any given time.
Establishes an ag-assisted grasp, if enabled.
Create a fixed joint between the attachment point link of the cloth object and the world.
In theory, we could have created a fixed joint to the eef link, but omni doesn't support this as the robot has
an articulation root API attached to it, which is incompatible with the attachment API.
We also store attachment_point_pos_local as the attachment point position in the eef link frame when the fixed
joint is created. As the eef link frame changes its pose, we will use attachment_point_pos_local to figure out
the new attachment_point_pos in the world frame and set the fixed joint to there. See _update_constraint_cloth
for more details.
Args:
arm (str): specific arm to establish grasp.
Default is "default" which corresponds to the first entry in self.arm_names
ag_data (None or 3-tuple): If specified, should be the corresponding
(object, object_link, attachment_point_position), i.e. ((BaseObject, RigidPrim, np.ndarray)) to the
contacted in-hand object
"""
arm = self.default_arm if arm == "default" else arm
# Return immediately if ag_data is None
if ag_data is None:
return
ag_obj, ag_link, attachment_point_pos = ag_data
# Find the attachment point position in the eef frame
eef_link_pos, eef_link_orn = self.eef_links[arm].get_position_orientation()
attachment_point_pos_local, _ = \
T.relative_pose_transform(attachment_point_pos, [0, 0, 0, 1], eef_link_pos, eef_link_orn)
# Create the joint
joint_prim_path = f"{ag_link.prim_path}/ag_constraint"
joint_type = "FixedJoint"
joint_prim = create_joint(
prim_path=joint_prim_path,
joint_type=joint_type,
body0=ag_link.prim_path,
body1=None,
enabled=False,
joint_frame_in_child_frame_pos=attachment_point_pos,
)
# Save a reference to this joint prim
self._ag_obj_constraints[arm] = joint_prim
# Modify max force based on user-determined assist parameters
# TODO
max_force = m.ASSIST_FORCE
# joint_prim.GetAttribute("physics:breakForce").Set(max_force)
self._ag_obj_constraint_params[arm] = {
"ag_obj_prim_path": ag_obj.prim_path,
"ag_link_prim_path": ag_link.prim_path,
"ag_joint_prim_path": joint_prim_path,
"joint_type": joint_type,
"gripper_pos": self.get_joint_positions()[self.gripper_control_idx[arm]],
"max_force": max_force,
"attachment_point_pos_local": attachment_point_pos_local,
"contact_pos": attachment_point_pos,
}
self._ag_obj_in_hand[arm] = ag_obj
self._ag_freeze_gripper[arm] = True
for joint in self.finger_joints[arm]:
j_val = joint.get_state()[0][0]
self._ag_freeze_joint_pos[arm][joint.joint_name] = j_val
def _dump_state(self):
# Call super first
state = super()._dump_state()
# If we're using actual physical grasping, no extra state needed to save
if self.grasping_mode == "physical":
return state
# Include AG_state
state["ag_obj_constraint_params"] = self._ag_obj_constraint_params.copy()
return state
def _load_state(self, state):
# If there is an existing AG object, remove it
self.release_grasp_immediately()
super()._load_state(state=state)
# No additional loading needed if we're using physical grasping
if self.grasping_mode == "physical":
return
# Include AG_state
# TODO: currently does not take care of cloth objects
# TODO: add unit tests
for arm in state["ag_obj_constraint_params"].keys():
if len(state["ag_obj_constraint_params"][arm]) > 0:
data = state["ag_obj_constraint_params"][arm]
obj = og.sim.scene.object_registry("prim_path", data["ag_obj_prim_path"])
link = obj.links[data["ag_link_prim_path"].split("/")[-1]]
self._establish_grasp(arm=arm, ag_data=(obj, link), contact_pos=data["contact_pos"])
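# Sketch of the expected saved layout (keys mirror _ag_obj_constraint_params above;
# the arm key and prim paths are hypothetical placeholders):
#   state["ag_obj_constraint_params"]["0"] = {
#       "ag_obj_prim_path": "/World/apple",
#       "ag_link_prim_path": "/World/apple/base_link",
#       "contact_pos": np.array([0.1, 0.2, 0.3]),
#       ...
#   }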
def _serialize(self, state):
# Call super first
state_flat = super()._serialize(state=state)
# No additional serialization needed if we're using physical grasping
if self.grasping_mode == "physical":
return state_flat
# TODO AG
return state_flat
def _deserialize(self, state):
# Call super first
state_dict, idx = super()._deserialize(state=state)
# No additional deserialization needed if we're using physical grasping
if self.grasping_mode == "physical":
return state_dict, idx
# TODO AG
return state_dict, idx
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("ManipulationRobot")
return classes
@property
def eef_usd_path(self):
"""
Returns:
dict(str, str): dict mapping arm name to the path to the eef usd file
"""
raise NotImplementedError
@property
def teleop_rotation_offset(self):
"""
Rotational offset that will be applied for teleoperation
such that a [0, 0, 0, 1] action will keep the robot eef pointing along the +x axis
"""
return {arm: np.array([0, 0, 0, 1]) for arm in self.arm_names}
def teleop_data_to_action(self, teleop_action) -> np.ndarray:
"""
Generate action data from teleoperation action data
NOTE: This implementation only supports IK/OSC controllers for the arm and MultiFingerGripperController for the gripper.
Override this function if the robot uses a different controller.
Args:
teleop_action (TeleopAction): teleoperation action data
Returns:
np.ndarray: array of action data for arm and gripper
"""
action = super().teleop_data_to_action(teleop_action)
hands = ["left", "right"] if self.n_arms == 2 else ["right"]
for i, hand in enumerate(hands):
arm_name = self.arm_names[i]
arm_action = teleop_action[hand]
# arm action
assert \
isinstance(self._controllers[f"arm_{arm_name}"], InverseKinematicsController) or \
isinstance(self._controllers[f"arm_{arm_name}"], OperationalSpaceController), \
f"Only IK and OSC controllers are supported for arm {arm_name}!"
target_pos, target_orn = arm_action[:3], T.quat2axisangle(T.euler2quat(arm_action[3:6]))
action[self.arm_action_idx[arm_name]] = np.r_[target_pos, target_orn]
# gripper action
assert isinstance(self._controllers[f"gripper_{arm_name}"], MultiFingerGripperController), \
f"Only MultiFingerGripperController is supported for gripper {arm_name}!"
action[self.gripper_action_idx[arm_name]] = arm_action[6]
return action
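# Hedged layout example: for a one-armed robot with an IK/OSC arm controller and a
# MultiFingerGripperController, teleop_action["right"] is assumed to be
# [x, y, z, roll, pitch, yaw, gripper]; the first six entries become the 6-DOF arm
# command (position + axis-angle, per the conversion above) and the last entry the
# gripper command.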
| 69,213 | Python | 45.050566 | 174 | 0.612515 |
StanfordVL/OmniGibson/omnigibson/robots/locobot.py | import os
import numpy as np
from omnigibson.macros import gm
from omnigibson.robots.two_wheel_robot import TwoWheelRobot
class Locobot(TwoWheelRobot):
"""
Locobot robot
Reference: https://www.trossenrobotics.com/locobot-pyrobot-ros-rover.aspx
"""
@property
def model_name(self):
return "Locobot"
@property
def wheel_radius(self):
return 0.038
@property
def wheel_axle_length(self):
return 0.230
@property
def base_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to [Left, Right] wheel joints.
"""
return np.array([1, 0])
@property
def _default_joint_pos(self):
return np.zeros(self.n_joints)
@property
def usd_path(self):
return os.path.join(gm.ASSET_PATH, "models/locobot/locobot/locobot.usd")
@property
def urdf_path(self):
return os.path.join(gm.ASSET_PATH, "models/locobot/locobot.urdf")
| 1,010 | Python | 21.466666 | 101 | 0.636634 |
StanfordVL/OmniGibson/omnigibson/robots/robot_base.py | from abc import abstractmethod
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from omnigibson.macros import create_module_macros
from omnigibson.sensors import create_sensor, SENSOR_PRIMS_TO_SENSOR_CLS, ALL_SENSOR_MODALITIES, VisionSensor, ScanSensor
from omnigibson.objects.usd_object import USDObject
from omnigibson.objects.object_base import BaseObject
from omnigibson.objects.controllable_object import ControllableObject
from omnigibson.utils.gym_utils import GymObservable
from omnigibson.utils.usd_utils import add_asset_to_stage
from omnigibson.utils.python_utils import classproperty, merge_nested_dicts
from omnigibson.utils.vision_utils import segmentation_to_rgb
from omnigibson.utils.constants import PrimType
# Global dicts that will contain mappings
REGISTERED_ROBOTS = dict()
# Add proprio sensor modality to ALL_SENSOR_MODALITIES
ALL_SENSOR_MODALITIES.add("proprio")
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Name of the category to assign to all robots
m.ROBOT_CATEGORY = "agent"
class BaseRobot(USDObject, ControllableObject, GymObservable):
"""
Base class for USD-based robot agents.
This class handles object loading, and provides method interfaces that should be
implemented by subclassed robots.
"""
def __init__(
self,
# Shared kwargs in hierarchy
name,
prim_path=None,
uuid=None,
scale=None,
visible=True,
fixed_base=False,
visual_only=False,
self_collisions=False,
load_config=None,
# Unique to USDObject hierarchy
abilities=None,
# Unique to ControllableObject hierarchy
control_freq=None,
controller_config=None,
action_type="continuous",
action_normalize=True,
reset_joint_pos=None,
# Unique to this class
obs_modalities="all",
proprio_obs="default",
sensor_config=None,
**kwargs,
):
"""
Args:
name (str): Name for the object. Names need to be unique per scene
prim_path (None or str): global path in the stage to this object. If not specified, will automatically be
created at /World/<name>
uuid (None or int): Unique unsigned-integer identifier to assign to this object (max 8-numbers).
If None is specified, then it will be auto-generated
scale (None or float or 3-array): if specified, sets either the uniform (float) or x,y,z (3-array) scale
for this object. A single number corresponds to uniform scaling along the x,y,z axes, whereas a
3-array specifies per-axis scaling.
visible (bool): whether to render this object or not in the stage
fixed_base (bool): whether to fix the base of this object or not
visual_only (bool): Whether this object should be visual only (and not collide with any other objects)
self_collisions (bool): Whether to enable self collisions for this object
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime.
abilities (None or dict): If specified, manually adds specific object states to this object. It should be
a dict in the form of {ability: {param: value}} containing object abilities and parameters to pass to
the object state instance constructor.
control_freq (float): control frequency (in Hz) at which to control the object. If set to be None,
simulator.import_object will automatically set the control frequency to be at the render frequency by default.
controller_config (None or dict): nested dictionary mapping controller name(s) to specific controller
configurations for this object. This will override any default values specified by this class.
action_type (str): one of {discrete, continuous} - what type of action space to use
action_normalize (bool): whether to normalize inputted actions. This will override any default values
specified by this class.
reset_joint_pos (None or n-array): if specified, should be the joint positions that the object should
be set to during a reset. If None (default), self._default_joint_pos will be used instead.
Note that _default_joint_pos are hardcoded & precomputed, and thus should not be modified by the user.
Set this value instead if you want to initialize the robot with a different reset joint position.
obs_modalities (str or list of str): Observation modalities to use for this robot. Default is "all", which
corresponds to all modalities being used.
Otherwise, valid options should be part of omnigibson.sensors.ALL_SENSOR_MODALITIES.
Note: If @sensor_config explicitly specifies `modalities` for a given sensor class, it will
override any values specified from @obs_modalities!
proprio_obs (str or list of str): proprioception observation key(s) to use for generating proprioceptive
observations. If str, should be exactly "default" -- this results in the default proprioception
observations being used, as defined by self.default_proprio_obs. See self._get_proprioception_dict
for valid key choices
sensor_config (None or dict): nested dictionary mapping sensor class name(s) to specific sensor
configurations for this object. This will override any default values specified by this class.
kwargs (dict): Additional keyword arguments that are used for other super() calls from subclasses, allowing
for flexible compositions of various object subclasses (e.g.: Robot is USDObject + ControllableObject).
"""
# Store inputs
self._obs_modalities = obs_modalities if obs_modalities == "all" else \
{obs_modalities} if isinstance(obs_modalities, str) else set(obs_modalities) # this will get updated later when we fill in our sensors
self._proprio_obs = self.default_proprio_obs if proprio_obs == "default" else list(proprio_obs)
self._sensor_config = sensor_config
# Process abilities
robot_abilities = {"robot": {}}
# Note: dict.update() returns None, so merge into robot_abilities explicitly
robot_abilities.update(abilities if abilities is not None else {})
abilities = robot_abilities
# Initialize internal attributes that will be loaded later
self._sensors = None # e.g.: scan sensor, vision sensor
self._dummy = None # Dummy version of the robot w/ fixed base for computing generalized gravity forces
# If specified, make sure scale is uniform -- this is because non-uniform scale can result in non-matching
# collision representations for parts of the robot that were optimized (e.g.: bounding sphere for wheels)
assert scale is None or isinstance(scale, int) or isinstance(scale, float) or np.all(scale == scale[0]), \
f"Robot scale must be uniform! Got: {scale}"
# Run super init
super().__init__(
prim_path=prim_path,
usd_path=self.usd_path,
name=name,
category=m.ROBOT_CATEGORY,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=fixed_base,
visual_only=visual_only,
self_collisions=self_collisions,
prim_type=PrimType.RIGID,
include_default_states=True,
load_config=load_config,
abilities=abilities,
control_freq=control_freq,
controller_config=controller_config,
action_type=action_type,
action_normalize=action_normalize,
reset_joint_pos=reset_joint_pos,
**kwargs,
)
def _load(self):
# Run super first
prim = super()._load()
# Also import dummy object if this robot is not fixed base
if self._use_dummy:
dummy_path = f"{self._prim_path}_dummy"
dummy_prim = add_asset_to_stage(asset_path=self._dummy_usd_path, prim_path=dummy_path)
self._dummy = BaseObject(
name=f"{self.name}_dummy",
prim_path=dummy_path,
scale=self._load_config.get("scale", None),
visible=False,
fixed_base=True,
visual_only=True,
)
return prim
def _post_load(self):
# Run super post load first
super()._post_load()
# Load the sensors
self._load_sensors()
def _initialize(self):
# Initialize the dummy first if it exists
if self._dummy is not None:
self._dummy.initialize()
# Run super
super()._initialize()
# Initialize all sensors
for sensor in self._sensors.values():
sensor.initialize()
# Load the observation space for this robot
self.load_observation_space()
# Validate this robot configuration
self._validate_configuration()
self._reset_joint_pos_aabb_extent = self.aabb_extent
def _load_sensors(self):
"""
Loads sensor(s) to retrieve observations from this object.
Stores created sensors as dictionary mapping sensor names to specific sensor
instances used by this object.
"""
# Populate sensor config
self._sensor_config = self._generate_sensor_config(custom_config=self._sensor_config)
# Search for any sensors this robot might have attached to any of its links
self._sensors = dict()
obs_modalities = set()
for link_name, link in self._links.items():
# Search through all children prims and see if we find any sensor
sensor_counts = {p: 0 for p in SENSOR_PRIMS_TO_SENSOR_CLS.keys()}
for prim in link.prim.GetChildren():
prim_type = prim.GetPrimTypeInfo().GetTypeName()
if prim_type in SENSOR_PRIMS_TO_SENSOR_CLS:
# Infer what obs modalities to use for this sensor
sensor_cls = SENSOR_PRIMS_TO_SENSOR_CLS[prim_type]
sensor_kwargs = self._sensor_config[sensor_cls.__name__]
if "modalities" not in sensor_kwargs:
sensor_kwargs["modalities"] = sensor_cls.all_modalities if self._obs_modalities == "all" else \
sensor_cls.all_modalities.intersection(self._obs_modalities)
obs_modalities = obs_modalities.union(sensor_kwargs["modalities"])
# Create the sensor and store it internally
sensor = create_sensor(
sensor_type=prim_type,
prim_path=str(prim.GetPrimPath()),
name=f"{self.name}:{link_name}:{prim_type}:{sensor_counts[prim_type]}",
**sensor_kwargs,
)
self._sensors[sensor.name] = sensor
sensor_counts[prim_type] += 1
# Since proprioception isn't an actual sensor, we need to possibly manually add it here as well
if self._obs_modalities == "all" or "proprio" in self._obs_modalities:
obs_modalities.add("proprio")
# Update our overall obs modalities
self._obs_modalities = obs_modalities
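# Naming sketch (hypothetical values): a Camera prim found under a link called "eyes"
# on a robot named "robot0" is registered as "robot0:eyes:Camera:0", following the
# f-string pattern used above.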
def _generate_sensor_config(self, custom_config=None):
"""
Generates a fully-populated sensor config, overriding any default values with the corresponding values
specified in @custom_config
Args:
custom_config (None or Dict[str, ...]): nested dictionary mapping sensor class name(s) to specific custom
sensor configurations for this object. This will override any default values specified by this class
Returns:
dict: Fully-populated nested dictionary mapping sensor class name(s) to specific sensor configurations
for this object
"""
sensor_config = {} if custom_config is None else deepcopy(custom_config)
# Merge the sensor dictionaries
sensor_config = merge_nested_dicts(
base_dict=self._default_sensor_config,
extra_dict=sensor_config,
)
return sensor_config
def _validate_configuration(self):
"""
Run any needed sanity checks to make sure this robot was created correctly.
"""
pass
def step(self):
# Skip this step if our articulation view is not valid
if self._articulation_view_direct is None or not self._articulation_view_direct.initialized:
return
# Before calling super, update the dummy robot's kinematic state based on this robot's kinematic state
# This is done prior to any state getter calls, since setting kinematic state results in physx backend
# having to re-fetch tensorized state.
# We do this so we have more optimal runtime performance
if self._use_dummy:
self._dummy.set_joint_positions(self.get_joint_positions())
self._dummy.set_joint_velocities(self.get_joint_velocities())
self._dummy.set_position_orientation(*self.get_position_orientation())
super().step()
def get_obs(self):
"""
Grabs all observations from the robot. This is keyword-mapped based on each observation modality
(e.g.: proprio, rgb, etc.)
Returns:
2-tuple:
dict: Keyword-mapped dictionary mapping observation modality names to
observations (usually np arrays)
dict: Keyword-mapped dictionary mapping observation modality names to
additional info
"""
# Our sensors already know what observation modalities it has, so we simply iterate over all of them
# and grab their observations, processing them into a flat dict
obs_dict = dict()
info_dict = dict()
for sensor_name, sensor in self._sensors.items():
obs_dict[sensor_name], info_dict[sensor_name] = sensor.get_obs()
# Have to handle proprio separately since it's not an actual sensor
if "proprio" in self._obs_modalities:
obs_dict["proprio"], info_dict["proprio"] = self.get_proprioception()
return obs_dict, info_dict
def get_proprioception(self):
"""
Returns:
n-array: numpy array of all robot-specific proprioceptive observations.
dict: empty dictionary, a placeholder for additional info
"""
proprio_dict = self._get_proprioception_dict()
return np.concatenate([proprio_dict[obs] for obs in self._proprio_obs]), {}
def _get_proprioception_dict(self):
"""
Returns:
dict: keyword-mapped proprioception observations available for this robot.
Can be extended by subclasses
"""
joint_positions = self.get_joint_positions(normalized=False)
joint_velocities = self.get_joint_velocities(normalized=False)
joint_efforts = self.get_joint_efforts(normalized=False)
pos, ori = self.get_position(), self.get_rpy()
ori_2d = self.get_2d_orientation()
return dict(
joint_qpos=joint_positions,
joint_qpos_sin=np.sin(joint_positions),
joint_qpos_cos=np.cos(joint_positions),
joint_qvel=joint_velocities,
joint_qeffort=joint_efforts,
robot_pos=pos,
robot_ori_cos=np.cos(ori),
robot_ori_sin=np.sin(ori),
robot_2d_ori=ori_2d,
robot_2d_ori_cos=np.cos(ori_2d),
robot_2d_ori_sin=np.sin(ori_2d),
robot_lin_vel=self.get_linear_velocity(),
robot_ang_vel=self.get_angular_velocity(),
)
def _load_observation_space(self):
# We compile observation spaces from our sensors
obs_space = dict()
for sensor_name, sensor in self._sensors.items():
# Load the sensor observation space
obs_space[sensor_name] = sensor.load_observation_space()
# Have to handle proprio separately since it's not an actual sensor
if "proprio" in self._obs_modalities:
obs_space["proprio"] = self._build_obs_box_space(shape=(self.proprioception_dim,), low=-np.inf, high=np.inf, dtype=np.float64)
return obs_space
def add_obs_modality(self, modality):
"""
Adds observation modality @modality to this robot. Note: Should be one of omnigibson.sensors.ALL_SENSOR_MODALITIES
Args:
modality (str): Observation modality to add to this robot
"""
# Iterate over all sensors we own, and if the requested modality is a part of its possible valid modalities,
# then we add it
for sensor in self._sensors.values():
if modality in sensor.all_modalities:
sensor.add_modality(modality=modality)
def remove_obs_modality(self, modality):
"""
Remove observation modality @modality from this robot. Note: Should be one of
omnigibson.sensors.ALL_SENSOR_MODALITIES
Args:
modality (str): Observation modality to remove from this robot
"""
# Iterate over all sensors we own, and if the requested modality is a part of its possible valid modalities,
# then we remove it
for sensor in self._sensors.values():
if modality in sensor.all_modalities:
sensor.remove_modality(modality=modality)
def visualize_sensors(self):
"""
Renders this robot's key sensors, visualizing them via matplotlib plots
"""
frames = dict()
remaining_obs_modalities = deepcopy(self.obs_modalities)
for sensor in self.sensors.values():
obs, _ = sensor.get_obs()
sensor_frames = []
if isinstance(sensor, VisionSensor):
# We check for rgb, depth, normal, seg_instance
for modality in ["rgb", "depth", "normal", "seg_instance"]:
if modality in sensor.modalities:
ob = obs[modality]
if modality == "rgb":
# Ignore alpha channel, map to floats
ob = ob[:, :, :3] / 255.0
elif modality == "seg_instance":
# Map IDs to rgb
ob = segmentation_to_rgb(ob, N=256) / 255.0
elif modality == "normal":
# Re-map to 0 - 1 range
ob = (ob + 1.0) / 2.0
else:
# Depth, nothing to do here
pass
# Add this observation to our frames and remove the modality
sensor_frames.append((modality, ob))
remaining_obs_modalities -= {modality}
else:
# Warn user that we didn't find this modality
print(f"Modality {modality} is not active in sensor {sensor.name}, skipping...")
elif isinstance(sensor, ScanSensor):
# We check for occupancy_grid
occupancy_grid = obs.get("occupancy_grid", None)
if occupancy_grid is not None:
sensor_frames.append(("occupancy_grid", occupancy_grid))
remaining_obs_modalities -= {"occupancy_grid"}
# Map the sensor name to the frames for that sensor
frames[sensor.name] = sensor_frames
# Warn user that any remaining modalities are not able to be visualized
if len(remaining_obs_modalities) > 0:
print(f"Modalities: {remaining_obs_modalities} cannot be visualized, skipping...")
# Write all the frames to a plot
for sensor_name, sensor_frames in frames.items():
n_sensor_frames = len(sensor_frames)
if n_sensor_frames > 0:
fig, axes = plt.subplots(nrows=1, ncols=n_sensor_frames)
if n_sensor_frames == 1:
axes = [axes]
# Dump frames and set each subtitle
for i, (modality, frame) in enumerate(sensor_frames):
axes[i].imshow(frame)
axes[i].set_title(modality)
axes[i].set_axis_off()
# Set title
fig.suptitle(sensor_name)
plt.show(block=False)
# One final plot show so all the figures get rendered
plt.show()
def update_handles(self):
# Call super first
super().update_handles()
# If we have a dummy robot, also update its handles too
if self._dummy is not None:
self._dummy.update_handles()
def remove(self):
"""
Do NOT call this function directly to remove a prim - call og.sim.remove_prim(prim) for proper cleanup
"""
# Remove all sensors
for sensor in self._sensors.values():
sensor.remove()
# Run super
super().remove()
@property
def reset_joint_pos_aabb_extent(self):
"""
This is the aabb extent of the robot in the robot frame after resetting the joints.
Returns:
3-array: Axis-aligned bounding box extent of the robot base
"""
return self._reset_joint_pos_aabb_extent
def teleop_data_to_action(self, teleop_action) -> np.ndarray:
"""
Generate action data from teleoperation action data
Args:
teleop_action (TeleopAction): teleoperation action data
Returns:
np.ndarray: array of action data filled with update value
"""
return np.zeros(self.action_dim)
def get_generalized_gravity_forces(self, clone=True):
# Override method based on whether we're using a dummy or not
return self._dummy.get_generalized_gravity_forces(clone=clone) \
if self._use_dummy else super().get_generalized_gravity_forces(clone=clone)
@property
def sensors(self):
"""
Returns:
dict: Keyword-mapped dictionary mapping sensor names to BaseSensor instances owned by this robot
"""
return self._sensors
@property
def obs_modalities(self):
"""
Returns:
set of str: Observation modalities used for this robot (e.g.: proprio, rgb, etc.)
"""
assert self._loaded, "Cannot check observation modalities until we load this robot!"
return self._obs_modalities
@property
def proprioception_dim(self):
"""
Returns:
int: Size of self.get_proprioception() vector
"""
return len(self.get_proprioception()[0])
@property
def _default_sensor_config(self):
"""
Returns:
dict: default nested dictionary mapping sensor class name(s) to specific sensor
configurations for this object. See kwargs from omnigibson/sensors/__init__/create_sensor for more
details
Expected structure is as follows:
SensorClassName1:
modalities: ...
enabled: ...
noise_type: ...
noise_kwargs:
...
sensor_kwargs:
...
SensorClassName2:
modalities: ...
enabled: ...
noise_type: ...
noise_kwargs:
...
sensor_kwargs:
...
...
"""
return {
"VisionSensor": {
"enabled": True,
"noise_type": None,
"noise_kwargs": None,
"sensor_kwargs": {
"image_height": 128,
"image_width": 128,
},
},
"ScanSensor": {
"enabled": True,
"noise_type": None,
"noise_kwargs": None,
"sensor_kwargs": {
# Basic LIDAR kwargs
"min_range": 0.05,
"max_range": 10.0,
"horizontal_fov": 360.0,
"vertical_fov": 1.0,
"yaw_offset": 0.0,
"horizontal_resolution": 1.0,
"vertical_resolution": 1.0,
"rotation_rate": 0.0,
"draw_points": False,
"draw_lines": False,
# Occupancy Grid kwargs
"occupancy_grid_resolution": 128,
"occupancy_grid_range": 5.0,
"occupancy_grid_inner_radius": 0.5,
"occupancy_grid_local_link": None,
},
},
}
@property
def default_proprio_obs(self):
"""
Returns:
list of str: Default proprioception observations to use
"""
return []
@property
def model_name(self):
"""
Returns:
str: name of this robot model. usually corresponds to the class name of a given robot model
"""
return self.__class__.__name__
@property
@abstractmethod
def usd_path(self):
# For all robots, this must be specified a priori, before we actually initialize the USDObject constructor!
# So we override the parent implementation, and make this an abstract method
raise NotImplementedError
@property
def _dummy_usd_path(self):
"""
Returns:
str: Absolute path to the dummy USD to load for, e.g., computing gravity compensation
"""
# By default, this is just the normal usd path
return self.usd_path
@property
def urdf_path(self):
"""
Returns:
str: file path to the robot urdf file.
"""
raise NotImplementedError
@property
def _use_dummy(self):
"""
Returns:
bool: Whether the robot dummy should be loaded and used for some computations, e.g., gravity compensation
"""
# By default, only load if robot is not fixed base
return not self.fixed_base
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("BaseRobot")
return classes
@classproperty
def _cls_registry(cls):
# Global robot registry -- override super registry
global REGISTERED_ROBOTS
return REGISTERED_ROBOTS
| 27,335 | Python | 41.250386 | 159 | 0.588257 |
StanfordVL/OmniGibson/omnigibson/robots/behavior_robot.py | from abc import ABC
from collections import OrderedDict
import itertools
import numpy as np
import os
from scipy.spatial.transform import Rotation as R
from typing import List, Tuple, Iterable
import omnigibson as og
import omnigibson.lazy as lazy
import omnigibson.utils.transform_utils as T
from omnigibson.macros import gm, create_module_macros
from omnigibson.robots.locomotion_robot import LocomotionRobot
from omnigibson.robots.manipulation_robot import ManipulationRobot, GraspingPoint
from omnigibson.robots.active_camera_robot import ActiveCameraRobot
from omnigibson.objects.usd_object import USDObject
m = create_module_macros(module_path=__file__)
# component suffixes for the 6-DOF arm joint names
m.COMPONENT_SUFFIXES = ['x', 'y', 'z', 'rx', 'ry', 'rz']
# Offset between the body and parts
m.HEAD_TO_BODY_OFFSET = [0, 0, -0.4]
m.HAND_TO_BODY_OFFSET = {
"left": [0, -0.15, -0.4],
"right": [0, 0.15, -0.4]
}
m.BODY_HEIGHT_OFFSET = 0.45
# Hand parameters
m.HAND_GHOST_HAND_APPEAR_THRESHOLD = 0.15
m.THUMB_2_POS = [0, -0.02, -0.05]
m.THUMB_1_POS = [0, -0.015, -0.02]
m.PALM_CENTER_POS = [0, -0.04, 0.01]
m.PALM_BASE_POS = [0, 0, 0.015]
m.FINGER_TIP_POS = [0, -0.025, -0.055]
# Hand link index constants
m.PALM_LINK_NAME = "palm"
m.FINGER_MID_LINK_NAMES = ("Tproximal", "Iproximal", "Mproximal", "Rproximal", "Pproximal")
m.FINGER_TIP_LINK_NAMES = ("Tmiddle", "Imiddle", "Mmiddle", "Rmiddle", "Pmiddle")
m.THUMB_LINK_NAME = "Tmiddle"
# joint parameters
m.BASE_JOINT_STIFFNESS = 1e8
m.BASE_JOINT_MAX_EFFORT = 7500
m.ARM_JOINT_STIFFNESS = 1e6
m.ARM_JOINT_MAX_EFFORT = 300
m.FINGER_JOINT_STIFFNESS = 1e3
m.FINGER_JOINT_MAX_EFFORT = 50
m.FINGER_JOINT_MAX_VELOCITY = np.pi * 4
class BehaviorRobot(ManipulationRobot, LocomotionRobot, ActiveCameraRobot):
"""
A humanoid robot that can be used in VR as an avatar. It has two hands, a body and a head with two cameras.
"""
def __init__(
self,
# Shared kwargs in hierarchy
name,
prim_path=None,
uuid=None,
scale=None,
visible=True,
visual_only=False,
self_collisions=True,
load_config=None,
# Unique to USDObject hierarchy
abilities=None,
# Unique to ControllableObject hierarchy
control_freq=None,
controller_config=None,
action_type="continuous",
action_normalize=False,
reset_joint_pos=None,
# Unique to BaseRobot
obs_modalities="rgb",
proprio_obs="default",
# Unique to ManipulationRobot
grasping_mode="assisted",
# unique to BehaviorRobot
use_ghost_hands=True,
**kwargs
):
"""
Initializes BehaviorRobot
Args:
use_ghost_hands (bool): whether to show ghost hand when the robot hand is too far away from the controller
"""
super(BehaviorRobot, self).__init__(
prim_path=prim_path,
name=name,
uuid=uuid,
scale=scale,
visible=visible,
fixed_base=True,
visual_only=visual_only,
self_collisions=self_collisions,
load_config=load_config,
abilities=abilities,
control_freq=control_freq,
controller_config=controller_config,
action_type=action_type,
action_normalize=action_normalize,
reset_joint_pos=reset_joint_pos,
obs_modalities=obs_modalities,
proprio_obs=proprio_obs,
grasping_mode=grasping_mode,
grasping_direction="upper",
**kwargs,
)
# setup eef parts
self.parts = OrderedDict()
for arm_name in self.arm_names:
self.parts[arm_name] = BRPart(
name=arm_name, parent=self, prim_path=f"{arm_name}_palm", eef_type="hand",
offset_to_body=m.HAND_TO_BODY_OFFSET[arm_name], **kwargs
)
self.parts["head"] = BRPart(
name="head", parent=self, prim_path="eye", eef_type="head",
offset_to_body=m.HEAD_TO_BODY_OFFSET, **kwargs
)
# whether to use ghost hands (visual markers to help visualize current vr hand pose)
self._use_ghost_hands = use_ghost_hands
# prim for the world_base_fixed_joint, used to reset the robot pose
self._world_base_fixed_joint_prim = None
# whether hand or body is in contact with other objects (we need this since checking contact list is costly)
self._part_is_in_contact = {hand_name: False for hand_name in self.arm_names + ["body"]}
@property
def usd_path(self):
return os.path.join(gm.ASSET_PATH, "models/behavior_robot/usd/BehaviorRobot.usd")
@property
def model_name(self):
return "BehaviorRobot"
@property
def n_arms(self):
return 2
@property
def arm_names(self):
return ["left", "right"]
@property
def eef_link_names(self):
dic = {arm: f"{arm}_{m.PALM_LINK_NAME}" for arm in self.arm_names}
dic["head"] = "head"
return dic
@property
def arm_link_names(self):
"""The head counts as a arm since it has the same 33 joint configuration"""
return {arm: [f"{arm}_{component}" for component in m.COMPONENT_SUFFIXES] for arm in self.arm_names + ['head']}
@property
def finger_link_names(self):
return {
arm: [f"{arm}_{link_name}" for link_name in itertools.chain(m.FINGER_MID_LINK_NAMES, m.FINGER_TIP_LINK_NAMES)]
for arm in self.arm_names
}
@property
def base_joint_names(self):
return [f"base_{component}_joint" for component in m.COMPONENT_SUFFIXES]
@property
def arm_joint_names(self):
"""The head counts as a arm since it has the same 33 joint configuration"""
return {eef: [f"{eef}_{component}_joint" for component in m.COMPONENT_SUFFIXES] for eef in self.arm_names + ["head"]}
@property
def finger_joint_names(self):
return {
arm: (
# palm-to-proximal joints.
[f"{arm}_{to_link}__{arm}_{m.PALM_LINK_NAME}" for to_link in m.FINGER_MID_LINK_NAMES]
+
# proximal-to-tip joints.
[f"{arm}_{to_link}__{arm}_{from_link}" for from_link, to_link in zip(m.FINGER_MID_LINK_NAMES, m.FINGER_TIP_LINK_NAMES)]
)
for arm in self.arm_names
}
@property
def base_control_idx(self):
joints = list(self.joints.keys())
return [joints.index(joint) for joint in self.base_joint_names]
@property
def arm_control_idx(self):
joints = list(self.joints.keys())
return {
arm: [joints.index(f"{arm}_{component}_joint") for component in m.COMPONENT_SUFFIXES]
for arm in self.arm_names
}
@property
def gripper_control_idx(self):
joints = list(self.joints.values())
return {arm: [joints.index(joint) for joint in arm_joints] for arm, arm_joints in self.finger_joints.items()}
@property
def camera_control_idx(self):
joints = list(self.joints.keys())
return [joints.index(f"head_{component}_joint") for component in m.COMPONENT_SUFFIXES]
@property
def _default_joint_pos(self):
return np.zeros(self.n_joints)
@property
def controller_order(self):
controllers = ["base", "camera"]
for arm_name in self.arm_names:
controllers += [f"arm_{arm_name}", f"gripper_{arm_name}"]
return controllers
@property
def _default_controllers(self):
controllers = {
"base": "JointController",
"camera": "JointController"
}
controllers.update({f"arm_{arm_name}": "JointController" for arm_name in self.arm_names})
controllers.update({f"gripper_{arm_name}": "MultiFingerGripperController" for arm_name in self.arm_names})
return controllers
@property
def _default_base_joint_controller_config(self):
return {
"name": "JointController",
"control_freq": self._control_freq,
"control_limits": self.control_limits,
"use_delta_commands": False,
"motor_type": "position",
"dof_idx": self.base_control_idx,
"command_input_limits": None,
}
@property
def _default_arm_joint_controller_configs(self):
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "JointController",
"control_freq": self._control_freq,
"motor_type": "position",
"control_limits": self.control_limits,
"dof_idx": self.arm_control_idx[arm],
"command_input_limits": None,
"use_delta_commands": False,
}
return dic
@property
def _default_gripper_multi_finger_controller_configs(self):
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "MultiFingerGripperController",
"control_freq": self._control_freq,
"motor_type": "position",
"control_limits": self.control_limits,
"dof_idx": self.gripper_control_idx[arm],
"command_input_limits": None,
"mode": "independent",
}
return dic
@property
def _default_camera_joint_controller_config(self):
return {
"name": "JointController",
"control_freq": self._control_freq,
"motor_type": "position",
"control_limits": self.control_limits,
"dof_idx": self.camera_control_idx,
"command_input_limits": None,
"use_delta_commands": False,
}
@property
def _default_gripper_joint_controller_configs(self):
"""
Returns:
dict: Dictionary mapping arm appendage name to default gripper joint controller config
to control this robot's gripper
"""
dic = {}
for arm in self.arm_names:
dic[arm] = {
"name": "JointController",
"control_freq": self._control_freq,
"motor_type": "position",
"control_limits": self.control_limits,
"dof_idx": self.gripper_control_idx[arm],
"command_input_limits": None,
"use_delta_commands": False,
}
return dic
@property
def _default_controller_config(self):
controllers = {
"base": {"JointController": self._default_base_joint_controller_config},
"camera": {"JointController": self._default_camera_joint_controller_config},
}
controllers.update(
{
f"arm_{arm_name}": {"JointController": self._default_arm_joint_controller_configs[arm_name]}
for arm_name in self.arm_names
}
)
controllers.update(
{
f"gripper_{arm_name}": {
"MultiFingerGripperController": self._default_gripper_multi_finger_controller_configs[arm_name],
"JointController": self._default_gripper_joint_controller_configs[arm_name],
}
for arm_name in self.arm_names
}
)
return controllers
def load(self):
prim = super(BehaviorRobot, self).load()
for part in self.parts.values():
part.load()
return prim
def _post_load(self):
super()._post_load()
def _create_discrete_action_space(self):
raise ValueError("BehaviorRobot does not support discrete actions!")
def update_controller_mode(self):
super().update_controller_mode()
# set base joint properties
for joint_name in self.base_joint_names:
self.joints[joint_name].stiffness = m.BASE_JOINT_STIFFNESS
self.joints[joint_name].max_effort = m.BASE_JOINT_MAX_EFFORT
# set arm joint properties
for arm in self.arm_joint_names:
for joint_name in self.arm_joint_names[arm]:
self.joints[joint_name].stiffness = m.ARM_JOINT_STIFFNESS
self.joints[joint_name].max_effort = m.ARM_JOINT_MAX_EFFORT
# set finger joint properties
for arm in self.finger_joint_names:
for joint_name in self.finger_joint_names[arm]:
self.joints[joint_name].stiffness = m.FINGER_JOINT_STIFFNESS
self.joints[joint_name].max_effort = m.FINGER_JOINT_MAX_EFFORT
self.joints[joint_name].max_velocity = m.FINGER_JOINT_MAX_VELOCITY
@property
def base_footprint_link_name(self):
"""
Name of the actual root link that we are interested in.
"""
return "base"
@property
def base_footprint_link(self):
"""
Returns:
RigidPrim: base footprint link of this object prim
"""
return self._links[self.base_footprint_link_name]
def get_position_orientation(self):
return self.base_footprint_link.get_position_orientation()
def set_position_orientation(self, position=None, orientation=None):
super().set_position_orientation(position, orientation)
# Move the joint frame for the world_base_joint
if self._world_base_fixed_joint_prim is not None:
if position is not None:
self._world_base_fixed_joint_prim.GetAttribute("physics:localPos0").Set(tuple(position))
if orientation is not None:
self._world_base_fixed_joint_prim.GetAttribute("physics:localRot0").Set(lazy.pxr.Gf.Quatf(*np.float_(orientation)[[3, 0, 1, 2]]))
@property
def assisted_grasp_start_points(self):
side_coefficients = {"left": np.array([1, -1, 1]), "right": np.array([1, 1, 1])}
return {
arm: [
GraspingPoint(link_name=f"{arm}_{m.PALM_LINK_NAME}", position=m.PALM_BASE_POS),
GraspingPoint(link_name=f"{arm}_{m.PALM_LINK_NAME}", position=m.PALM_CENTER_POS * side_coefficients[arm]),
GraspingPoint(
link_name=f"{arm}_{m.THUMB_LINK_NAME}", position=m.THUMB_1_POS * side_coefficients[arm]
),
GraspingPoint(
link_name=f"{arm}_{m.THUMB_LINK_NAME}", position=m.THUMB_2_POS * side_coefficients[arm]
),
]
for arm in self.arm_names
}
@property
def assisted_grasp_end_points(self):
side_coefficients = {"left": np.array([1, -1, 1]), "right": np.array([1, 1, 1])}
return {
arm: [
GraspingPoint(link_name=f"{arm}_{finger}", position=m.FINGER_TIP_POS * side_coefficients[arm])
for finger in m.FINGER_TIP_LINK_NAMES
]
for arm in self.arm_names
}
def update_hand_contact_info(self):
"""
Helper function that updates the contact info for the hands and body.
Can be used in the future with device haptics to provide collision feedback.
"""
self._part_is_in_contact["body"] = len(self.links["body"].contact_list()) > 0
for hand_name in self.arm_names:
self._part_is_in_contact[hand_name] = len(self.eef_links[hand_name].contact_list()) > 0 \
or np.any([len(finger.contact_list()) > 0 for finger in self.finger_links[hand_name]])
def teleop_data_to_action(self, teleop_action) -> np.ndarray:
"""
Generates an action for the BehaviorRobot to perform based on teleop action data dict.
Action space (all non-normalized values that will be clipped if they are too large)
Body:
- 6DOF pose - relative to body frame from previous frame
Eye:
- 6DOF pose - relative to body frame (where the body will be after applying this frame's action)
Left hand, right hand (in that order):
- 6DOF pose - relative to body frame (same as above)
- 10DOF gripper joint rotation
Total size: 44
"""
# Actions are stored as 1D numpy array
action = np.zeros(self.action_dim)
# Update body action space
if teleop_action.is_valid["head"]:
head_pos, head_orn = teleop_action.head[:3], T.euler2quat(teleop_action.head[3:6])
des_body_pos = head_pos - np.array([0, 0, m.BODY_HEIGHT_OFFSET])
des_body_rpy = np.array([0, 0, R.from_quat(head_orn).as_euler("XYZ")[2]])
des_body_orn = T.euler2quat(des_body_rpy)
else:
des_body_pos, des_body_orn = self.get_position_orientation()
des_body_rpy = R.from_quat(des_body_orn).as_euler("XYZ")
action[self.controller_action_idx["base"]] = np.r_[des_body_pos, des_body_rpy]
# Update action space for other VR objects
for part_name, eef_part in self.parts.items():
# Process local transform adjustments
hand_data = 0
if teleop_action.is_valid[part_name]:
des_world_part_pos, des_world_part_orn = teleop_action[part_name][:3], T.euler2quat(teleop_action[part_name][3:6])
if part_name in self.arm_names:
# compute gripper action
if hasattr(teleop_action, "hand_data"):
# hand tracking mode, compute joint rotations for each independent hand joint
hand_data = teleop_action.hand_data[part_name]
hand_data = hand_data[:, :2].T.reshape(-1)
else:
# controller mode, map trigger fraction from [0, 1] to [-1, 1] range.
hand_data = teleop_action[part_name][6] * 2 - 1
action[self.controller_action_idx[f"gripper_{part_name}"]] = hand_data
# update ghost hand if necessary
if self._use_ghost_hands:
self.parts[part_name].update_ghost_hands(des_world_part_pos, des_world_part_orn)
else:
des_world_part_pos, des_world_part_orn = eef_part.local_position_orientation
# Get local pose with respect to the new body frame
des_local_part_pos, des_local_part_orn = T.relative_pose_transform(
des_world_part_pos, des_world_part_orn, des_body_pos, des_body_orn
)
# apply shoulder position offset to the part transform to get final destination pose
des_local_part_pos, des_local_part_orn = T.pose_transform(
eef_part.offset_to_body, [0, 0, 0, 1], des_local_part_pos, des_local_part_orn
)
des_part_rpy = R.from_quat(des_local_part_orn).as_euler("XYZ")
controller_name = "camera" if part_name == "head" else "arm_" + part_name
action[self.controller_action_idx[controller_name]] = np.r_[des_local_part_pos, des_part_rpy]
# If we reset, teleop the robot parts to the desired pose
if part_name in self.arm_names and teleop_action.reset[part_name]:
self.parts[part_name].set_position_orientation(des_local_part_pos, des_part_rpy)
return action
class BRPart(ABC):
"""This is the interface that all BehaviorRobot eef parts must implement."""
def __init__(self, name: str, parent: BehaviorRobot, prim_path: str, eef_type: str, offset_to_body: List[float]) -> None:
"""
Create a BehaviorRobot eef part instance with the given part configuration.
Args:
name (str): unique name of this BR part
parent (BehaviorRobot): the parent BR object
prim_path (str): name of the root link of the eef on the parent robot (used to index parent.links)
eef_type (str): type of eef. One of hand, head
offset_to_body (List[float]): relative POSITION offset between the rz link and the eef link.
"""
self.name = name
self.parent = parent
self.prim_path = prim_path
self.eef_type = eef_type
self.offset_to_body = offset_to_body
self.ghost_hand = None
self._root_link = None
def load(self) -> None:
self._root_link = self.parent.links[self.prim_path]
# setup ghost hand
if self.eef_type == "hand" and self.parent._use_ghost_hands:
gh_name = f"ghost_hand_{self.name}"
self.ghost_hand = USDObject(
prim_path=f"/World/{gh_name}",
usd_path=os.path.join(gm.ASSET_PATH, f"models/behavior_robot/usd/{gh_name}.usd"),
name=gh_name,
scale=0.001,
visible=False,
visual_only=True,
)
og.sim.import_object(self.ghost_hand)
@property
def local_position_orientation(self) -> Tuple[Iterable[float], Iterable[float]]:
"""
Get local position and orientation w.r.t. the body
Returns:
Tuple[Array[x, y, z], Array[x, y, z, w]]
"""
return T.relative_pose_transform(*self.get_position_orientation(), *self.parent.get_position_orientation())
def get_position_orientation(self) -> Tuple[Iterable[float], Iterable[float]]:
"""
Get position and orientation in the world space
Returns:
Tuple[Array[x, y, z], Array[x, y, z, w]]
"""
return self._root_link.get_position_orientation()
def set_position_orientation(self, pos: Iterable[float], orn: Iterable[float]) -> None:
"""
Call back function to set the base's position
"""
self.parent.joints[f"{self.name}_x_joint"].set_pos(pos[0], drive=False)
self.parent.joints[f"{self.name}_y_joint"].set_pos(pos[1], drive=False)
self.parent.joints[f"{self.name}_z_joint"].set_pos(pos[2], drive=False)
self.parent.joints[f"{self.name}_rx_joint"].set_pos(orn[0], drive=False)
self.parent.joints[f"{self.name}_ry_joint"].set_pos(orn[1], drive=False)
self.parent.joints[f"{self.name}_rz_joint"].set_pos(orn[2], drive=False)
def update_ghost_hands(self, pos: Iterable[float], orn: Iterable[float]) -> None:
"""
Updates ghost hand to track real hand and displays it if the real and virtual hands are too far apart.
Args:
pos (Iterable[float]): list of positions [x, y, z]
orn (Iterable[float]): list of rotations [x, y, z, w]
"""
assert self.eef_type == "hand", "ghost hand is only valid for BR hand!"
# Ghost hand tracks real hand whether it is hidden or not
self.ghost_hand.set_position_orientation(pos, orn)
# If distance between hand and controller is greater than threshold,
# ghost hand appears
dist_to_real_controller = np.linalg.norm(pos - self.get_position_orientation()[0])
should_visible = dist_to_real_controller > m.HAND_GHOST_HAND_APPEAR_THRESHOLD
# Only toggle visibility if we are transitioning from hidden to unhidden, or the other way around
if self.ghost_hand.visible is not should_visible:
self.ghost_hand.visible = should_visible
| 23,460 | Python | 39.035836 | 145 | 0.587127 |
StanfordVL/OmniGibson/omnigibson/robots/locomotion_robot.py | from abc import abstractmethod
import numpy as np
from transforms3d.euler import euler2quat
from transforms3d.quaternions import qmult, quat2mat
from omnigibson.controllers import LocomotionController
from omnigibson.robots.robot_base import BaseRobot
from omnigibson.utils.python_utils import classproperty
class LocomotionRobot(BaseRobot):
"""
Robot that is equipped with locomotive (navigational) capabilities.
Provides common interface for a wide variety of robots.
NOTE: controller_config should, at the minimum, contain:
base: controller specifications for the controller to control this robot's base (locomotion).
Should include:
- name: Controller to create
- <other kwargs> relevant to the controller being created. Note that all values will have default
values specified, but setting these individual kwargs will override them
"""
def _validate_configuration(self):
# We make sure that our base controller exists and is a locomotion controller
assert (
"base" in self._controllers
), "Controller 'base' must exist in controllers! Current controllers: {}".format(list(self._controllers.keys()))
assert isinstance(
self._controllers["base"], LocomotionController
), "Base controller must be a LocomotionController!"
# run super
super()._validate_configuration()
def _get_proprioception_dict(self):
dic = super()._get_proprioception_dict()
joint_positions = self.get_joint_positions(normalized=False)
joint_velocities = self.get_joint_velocities(normalized=False)
# Add base info
dic["base_qpos"] = joint_positions[self.base_control_idx]
dic["base_qpos_sin"] = np.sin(joint_positions[self.base_control_idx])
dic["base_qpos_cos"] = np.cos(joint_positions[self.base_control_idx])
dic["base_qvel"] = joint_velocities[self.base_control_idx]
return dic
@property
def default_proprio_obs(self):
obs_keys = super().default_proprio_obs
return obs_keys + ["base_qpos_sin", "base_qpos_cos", "robot_lin_vel", "robot_ang_vel"]
@property
def controller_order(self):
# By default, only base is supported
return ["base"]
@property
def _default_controllers(self):
# Always call super first
controllers = super()._default_controllers
# For best generalizability use, joint controller as default
controllers["base"] = "JointController"
return controllers
@property
def _default_base_joint_controller_config(self):
"""
Returns:
dict: Default base joint controller config to control this robot's base. Uses velocity
control by default.
"""
return {
"name": "JointController",
"control_freq": self._control_freq,
"motor_type": "velocity",
"control_limits": self.control_limits,
"dof_idx": self.base_control_idx,
"command_output_limits": "default",
"use_delta_commands": False,
}
@property
def _default_base_null_joint_controller_config(self):
"""
Returns:
dict: Default null joint controller config to control this robot's base i.e. dummy controller
"""
return {
"name": "NullJointController",
"control_freq": self._control_freq,
"motor_type": "velocity",
"control_limits": self.control_limits,
"dof_idx": self.base_control_idx,
"default_command": np.zeros(len(self.base_control_idx)),
"use_impedances": False,
}
@property
def _default_controller_config(self):
# Always run super method first
cfg = super()._default_controller_config
# Add supported base controllers
cfg["base"] = {
self._default_base_joint_controller_config["name"]: self._default_base_joint_controller_config,
self._default_base_null_joint_controller_config["name"]: self._default_base_null_joint_controller_config,
}
return cfg
def move_by(self, delta):
"""
Move robot base without physics simulation
Args:
delta (3-array): (x, y, z) Cartesian delta base position
"""
new_pos = np.array(delta) + self.get_position()
self.set_position(position=new_pos)
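# Usage sketch (hypothetical values): robot.move_by([0.1, 0.0, 0.0]) teleports the
# base 10 cm along the world x-axis, while the move_* helpers below first rotate
# the delta into the world frame via quat2mat(self.get_orientation()).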
def move_forward(self, delta=0.05):
"""
Move robot base forward without physics simulation
Args:
delta (float): delta base position forward
"""
self.move_by(quat2mat(self.get_orientation()).dot(np.array([delta, 0, 0])))
def move_backward(self, delta=0.05):
"""
Move robot base backward without physics simulation
Args:
delta (float): delta base position backward
"""
self.move_by(quat2mat(self.get_orientation()).dot(np.array([-delta, 0, 0])))
def move_left(self, delta=0.05):
"""
Move robot base left without physics simulation
Args:
delta (float): delta base position left
"""
self.move_by(quat2mat(self.get_orientation()).dot(np.array([0, -delta, 0])))
def move_right(self, delta=0.05):
"""
Move robot base right without physics simulation
Args:
delta (float): delta base position right
"""
self.move_by(quat2mat(self.get_orientation()).dot(np.array([0, delta, 0])))
def turn_left(self, delta=0.03):
"""
Rotate robot base left without physics simulation
Args:
delta (float): delta angle to rotate the base left
"""
quat = self.get_orientation()
quat = qmult((euler2quat(-delta, 0, 0)), quat)
self.set_orientation(quat)
def turn_right(self, delta=0.03):
"""
Rotate robot base right without physics simulation
Args:
delta (float): angle to rotate the base right
"""
quat = self.get_orientation()
quat = qmult((euler2quat(delta, 0, 0)), quat)
self.set_orientation(quat)
@property
def base_action_idx(self):
controller_idx = self.controller_order.index("base")
action_start_idx = sum([self.controllers[self.controller_order[i]].command_dim for i in range(controller_idx)])
return np.arange(action_start_idx, action_start_idx + self.controllers["base"].command_dim)
@property
@abstractmethod
def base_control_idx(self):
"""
Returns:
n-array: Indices in low-level control vector corresponding to base joints.
"""
raise NotImplementedError
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("LocomotionRobot")
return classes
| 7,069 | Python | 32.990384 | 120 | 0.618758 |
StanfordVL/OmniGibson/omnigibson/robots/husky.py | import os
import numpy as np
from omnigibson.macros import gm
from omnigibson.robots.locomotion_robot import LocomotionRobot
class Husky(LocomotionRobot):
"""
Husky robot
Reference: https://clearpathrobotics.com/, http://wiki.ros.org/Robots/Husky
"""
def _create_discrete_action_space(self):
raise ValueError("Husky does not support discrete actions!")
@property
def base_control_idx(self):
return np.array([0, 1, 2, 3])
@property
def _default_joint_pos(self):
return np.zeros(self.n_joints)
@property
def usd_path(self):
return os.path.join(gm.ASSET_PATH, "models/husky/husky/husky.usd")
@property
def urdf_path(self):
return os.path.join(gm.ASSET_PATH, "models/husky/husky.urdf")
| 782 | Python | 23.468749 | 79 | 0.671355 |
StanfordVL/OmniGibson/docker/README.md | # Requirements
- Modern Linux distribution (Ubuntu 20.04, Fedora 36, etc.)
- RTX-capable NVIDIA graphics card (20 series or newer)
- Up-to-date NVIDIA drivers
# Usage
**The below instructions concern the usage of OmniGibson containers with self-built images. Please see the BEHAVIOR-1K docs for instructions on how to pull and run a cloud image.**
1. Set up the NVIDIA Docker Runtime and log in to the NVIDIA Container Registry
See [here](https://www.pugetsystems.com/labs/hpc/how-to-setup-nvidia-docker-and-ngc-registry-on-your-workstation-part-4-accessing-the-ngc-registry-1115/) for details.
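   As a quick sanity check that the runtime can see your GPU (the CUDA image tag here is just an example), you can run:

   ```bash
   docker run --rm --gpus all nvidia/cuda:12.2.0-base-ubuntu22.04 nvidia-smi
   ```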
2. Build the container. **From the OmniGibson root**, run: `./docker/build_docker.sh`
3. Run the container
* To get a shell inside a container with GUI: `sudo ./docker/run_docker_gui.sh`
* To get a jupyter notebook: `sudo ./docker/run_docker_notebook.sh`
* To get access to a shell inside a headless container `sudo ./docker/run_docker.sh`
# Development
To push a Docker container, run: `sudo ./docker/push_docker.sh` | 1,019 | Markdown | 45.363634 | 180 | 0.758587 |
StanfordVL/OmniGibson/tests/test_symbolic_primitives.py | import os
import pytest
import yaml
from omnigibson.macros import gm
gm.USE_GPU_DYNAMICS = True
gm.USE_FLATCACHE = True
import omnigibson as og
from omnigibson import object_states
from omnigibson.action_primitives.symbolic_semantic_action_primitives import SymbolicSemanticActionPrimitiveSet, SymbolicSemanticActionPrimitives
from omnigibson.systems import get_system
def start_env():
og.sim.stop()
config = {
"env": {
"initial_pos_z_offset": 0.1
},
"render": {
"viewer_width": 1280,
"viewer_height": 720
},
"scene": {
"type": "InteractiveTraversableScene",
"scene_model": "Wainscott_0_int",
"load_object_categories": ["floors", "walls", "countertop", "fridge", "sink", "stove"],
"scene_source": "OG",
},
"robots": [
{
"type": "Fetch",
"obs_modalities": [
"scan",
"rgb",
"depth"
],
"scale": 1,
"self_collisions": True,
"action_normalize": False,
"action_type": "continuous",
"grasping_mode": "sticky",
"disable_grasp_handling": True,
"rigid_trunk": False,
"default_trunk_offset": 0.365,
"default_arm_pose": "diagonal30",
"default_reset_mode": "tuck",
"controller_config": {
"base": {
"name": "DifferentialDriveController"
},
"arm_0": {
"name": "JointController",
"motor_type": "position",
"command_input_limits": None,
"command_output_limits": None,
"use_delta_commands": False
},
"gripper_0": {
"name": "JointController",
"motor_type": "position",
"command_input_limits": [
-1,
1
],
"command_output_limits": None,
"use_delta_commands": True,
},
"camera": {
"name": "JointController",
"use_delta_commands": False
}
}
}
],
"objects": [
{
"type": "DatasetObject",
"name": "pan",
"category": "frying_pan",
"model": "mhndon",
"position": [5.31, 10.75, 1.],
},
{
"type": "DatasetObject",
"name": "knife",
"category": "carving_knife",
"model": "awvoox",
"position": [5.31, 10.75, 1.2],
},
{
"type": "DatasetObject",
"name": "apple",
"category": "apple",
"model": "agveuv",
"position": [4.75, 10.75, 1.],
"bounding_box": [0.098, 0.098, 0.115]
},
{
"type": "DatasetObject",
"name": "sponge",
"category": "sponge",
"model": "qewotb",
"position": [4.75, 10.75, 1.],
},
]
}
env = og.Environment(configs=config)
return env
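# Manual usage sketch (mirrors the fixtures below; not executed by pytest):
#   env = start_env()
#   prim_gen = SymbolicSemanticActionPrimitives(env)
#   sink = next(iter(env.scene.object_registry("category", "sink")))
#   for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, sink):
#       env.step(action)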
@pytest.fixture(scope="module")
def shared_env():
"""Load the environment just once using module scope."""
return start_env()
@pytest.fixture(scope="function")
def env(shared_env):
"""Reset the environment before each test function."""
og.sim.scene.reset()
return shared_env
@pytest.fixture
def robot(env):
return env.robots[0]
@pytest.fixture
def prim_gen(env):
return SymbolicSemanticActionPrimitives(env)
@pytest.fixture
def countertop(env):
return next(iter(env.scene.object_registry("category", "countertop")))
@pytest.fixture
def fridge(env):
return next(iter(env.scene.object_registry("category", "fridge")))
@pytest.fixture
def stove(env):
return next(iter(env.scene.object_registry("category", "stove")))
@pytest.fixture
def sink(env):
return next(iter(env.scene.object_registry("category", "sink")))
@pytest.fixture
def pan(env):
return next(iter(env.scene.object_registry("category", "frying_pan")))
@pytest.fixture
def apple(env):
return next(iter(env.scene.object_registry("category", "apple")))
@pytest.fixture
def sponge(env):
return next(iter(env.scene.object_registry("category", "sponge")))
@pytest.fixture
def knife(env):
return next(iter(env.scene.object_registry("category", "carving_knife")))
class TestSymbolicPrimitives:
@pytest.mark.skip(reason="primitives are broken")
def test_in_hand_state(self, env, robot, prim_gen, apple):
assert not robot.states[object_states.IsGrasping].get_value(apple)
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
env.step(action)
assert robot.states[object_states.IsGrasping].get_value(apple)
# def test_navigate():
# pass
@pytest.mark.skip(reason="primitives are broken")
def test_open(self, env, prim_gen, fridge):
assert not fridge.states[object_states.Open].get_value()
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.OPEN, fridge):
env.step(action)
assert fridge.states[object_states.Open].get_value()
@pytest.mark.skip(reason="primitives are broken")
def test_close(self, env, prim_gen, fridge):
fridge.states[object_states.Open].set_value(True)
assert fridge.states[object_states.Open].get_value()
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.CLOSE, fridge):
env.step(action)
assert not fridge.states[object_states.Open].get_value()
@pytest.mark.skip(reason="primitives are broken")
def test_place_inside(self, env, prim_gen, apple, fridge):
assert not apple.states[object_states.Inside].get_value(fridge)
assert not fridge.states[object_states.Open].get_value()
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.OPEN, fridge):
env.step(action)
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
env.step(action)
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_INSIDE, fridge):
env.step(action)
assert apple.states[object_states.Inside].get_value(fridge)
@pytest.mark.skip(reason="primitives are broken")
def test_place_ontop(self, env, prim_gen, apple, pan):
assert not apple.states[object_states.OnTop].get_value(pan)
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
env.step(action)
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP, pan):
env.step(action)
assert apple.states[object_states.OnTop].get_value(pan)
@pytest.mark.skip(reason="primitives are broken")
def test_toggle_on(self, env, prim_gen, stove):
assert not stove.states[object_states.ToggledOn].get_value()
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, stove):
env.step(action)
assert stove.states[object_states.ToggledOn].get_value()
@pytest.mark.skip(reason="primitives are broken")
def test_soak_under(self, env, prim_gen, robot, sponge, sink):
water_system = get_system("water", force_active=True)
assert not sponge.states[object_states.Saturated].get_value(water_system)
assert not sink.states[object_states.ToggledOn].get_value()
# First toggle on the sink
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, sink):
env.step(action)
assert sink.states[object_states.ToggledOn].get_value()
# Then grasp the sponge
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, sponge):
env.step(action)
assert robot.states[object_states.IsGrasping].get_value(sponge)
# Then soak the sponge under the water
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.SOAK_UNDER, sink):
env.step(action)
assert sponge.states[object_states.Saturated].get_value(water_system)
# def test_soak_inside():
# pass
@pytest.mark.skip(reason="primitives are broken")
  def test_wipe(self, env, prim_gen, robot, sponge, sink, countertop):
# Some pre-assertions
water_system = get_system("water", force_active=True)
assert not sponge.states[object_states.Saturated].get_value(water_system)
assert not sink.states[object_states.ToggledOn].get_value()
# Dirty the countertop as the setup
mud_system = get_system("mud", force_active=True)
countertop.states[object_states.Covered].set_value(mud_system, True)
assert countertop.states[object_states.Covered].get_value(mud_system)
# First toggle on the sink
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, sink):
env.step(action)
assert sink.states[object_states.ToggledOn].get_value()
# Then grasp the sponge
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, sponge):
env.step(action)
assert robot.states[object_states.IsGrasping].get_value(sponge)
# Then soak the sponge under the water
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.SOAK_UNDER, sink):
env.step(action)
assert sponge.states[object_states.Saturated].get_value(water_system)
# Wipe the countertop with the sponge
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.WIPE, countertop):
env.step(action)
assert not countertop.states[object_states.Covered].get_value(mud_system)
@pytest.mark.skip(reason="primitives are broken")
def test_cut(self, env, prim_gen, apple, knife, countertop):
# assert not apple.states[object_states.Cut].get_value(knife)
print("Grasping knife")
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, knife):
env.step(action)
        for _ in range(60):
            env.step(prim_gen._empty_action())
print("Cutting apple")
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.CUT, apple):
env.step(action)
        for _ in range(60):
            env.step(prim_gen._empty_action())
print("Putting knife back on countertop")
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP, countertop):
env.step(action)
def test_persistent_sticky_grasping(self, env, robot, prim_gen, apple):
assert not robot.states[object_states.IsGrasping].get_value(apple)
for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
env.step(action)
assert robot.states[object_states.IsGrasping].get_value(apple)
state = og.sim.dump_state()
og.sim.stop()
og.sim.play()
og.sim.load_state(state)
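        # Stopping and replaying the sim discards transient physics state, so the
        # assertion below checks that loading the dumped state restores the grasp.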
assert robot.states[object_states.IsGrasping].get_value(apple)
for _ in range(10):
env.step(prim_gen._empty_action())
assert robot.states[object_states.IsGrasping].get_value(apple)
# def test_place_near_heating_element():
# pass
# def test_wait_for_cooked():
# pass
def teardown_class(cls):
og.sim.clear()
def main():
env = start_env()
prim_gen = SymbolicSemanticActionPrimitives(env)
apple = next(iter(env.scene.object_registry("category", "apple")))
knife = next(iter(env.scene.object_registry("category", "carving_knife")))
countertop = next(iter(env.scene.object_registry("category", "countertop")))
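    # Mirrors the pytest fixtures above so this script can be run standalone,
    # outside of pytest, for interactive debugging.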
print("Will start in 3 seconds")
    for _ in range(180):
        env.step(prim_gen._empty_action())
    # The skip marker only applies under pytest collection, so the method can be
    # invoked directly on an instance here.
    TestSymbolicPrimitives().test_cut(env, prim_gen, apple, knife, countertop)
while True:
og.sim.step()
if __name__ == "__main__":
    main()
StanfordVL/OmniGibson/tests/test_transition_rules.py
from omnigibson.macros import macros as m
from omnigibson.object_states import *
from omnigibson.systems import get_system, is_physical_particle_system, is_visual_particle_system
from omnigibson.utils.constants import PrimType
from omnigibson.utils.physx_utils import apply_force_at_pos, apply_torque
import omnigibson.utils.transform_utils as T
from omnigibson.objects import DatasetObject
from omnigibson.transition_rules import REGISTERED_RULES
import omnigibson as og
from utils import og_test, get_random_pose, place_objA_on_objB_bbox, place_obj_on_floor_plane, retrieve_obj_cfg, remove_all_systems
import pytest
import numpy as np
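# Each test below assumes a prebuilt test scene whose named objects ("stove",
# "stockpot", etc.) are presumably set up by the og_test wrapper imported from
# utils above.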
@pytest.mark.skip(reason="dryer is not fillable yet.")
@og_test
def test_dryer_rule():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
clothes_dryer = og.sim.scene.object_registry("name", "clothes_dryer")
remover_dishtowel = og.sim.scene.object_registry("name", "remover_dishtowel")
bowl = og.sim.scene.object_registry("name", "bowl")
water = get_system("water")
place_obj_on_floor_plane(clothes_dryer)
og.sim.step()
# Place the two objects inside the dryer
remover_dishtowel.set_position_orientation([0.0, 0.0, 0.4], [0, 0, 0, 1])
bowl.set_position_orientation([0.0, 0.0, 0.5], [0, 0, 0, 1])
og.sim.step()
assert remover_dishtowel.states[Saturated].set_value(water, True)
assert bowl.states[Covered].set_value(water, True)
og.sim.step()
assert remover_dishtowel.states[Saturated].get_value(water)
assert clothes_dryer.states[Contains].get_value(water)
# The rule will not execute if Open is True
clothes_dryer.states[Open].set_value(True)
og.sim.step()
assert remover_dishtowel.states[Saturated].get_value(water)
assert clothes_dryer.states[Contains].get_value(water)
clothes_dryer.states[Open].set_value(False)
clothes_dryer.states[ToggledOn].set_value(True)
# The rule will execute when Open is False and ToggledOn is True
og.sim.step()
# Need to take one more step for the state setters to take effect
og.sim.step()
assert not remover_dishtowel.states[Saturated].get_value(water)
assert not clothes_dryer.states[Contains].get_value(water)
# Clean up
remove_all_systems()
@og_test
def test_washer_rule():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
baking_sheet = og.sim.scene.object_registry("name", "baking_sheet")
washer = og.sim.scene.object_registry("name", "washer")
remover_dishtowel = og.sim.scene.object_registry("name", "remover_dishtowel")
bowl = og.sim.scene.object_registry("name", "bowl")
water = get_system("water")
dust = get_system("dust") # always remove
salt = get_system("salt") # always remove (not explicitly specified)
rust = get_system("rust") # never remove
spray_paint = get_system("spray_paint") # requires acetone
acetone = get_system("acetone") # solvent for spray paint
    cooking_oil = get_system("cooking_oil") # requires vinegar, lemon_juice, etc.
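    # As the assertions below exercise: the washer removes dust and salt outright,
    # never removes rust, removes spray_paint because its solvent (acetone) is
    # present, and leaves cooking_oil because its solvent is absent.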
place_obj_on_floor_plane(washer)
og.sim.step()
# Place the two objects inside the washer
# (Hacky) use baking_sheet as a stepping stone to elevate the objects so that they are inside the container volume.
baking_sheet.set_position_orientation([0.0, 0.0, 0.04], T.euler2quat([np.pi, 0, 0]))
remover_dishtowel.set_position_orientation([0.0, 0.0, 0.05], [0, 0, 0, 1])
bowl.set_position_orientation([0.10, 0.0, 0.08], [0, 0, 0, 1])
og.sim.step()
assert bowl.states[Covered].set_value(dust, True)
assert bowl.states[Covered].set_value(salt, True)
assert bowl.states[Covered].set_value(rust, True)
assert bowl.states[Covered].set_value(spray_paint, True)
assert bowl.states[Covered].set_value(acetone, True)
assert bowl.states[Covered].set_value(cooking_oil, True)
assert not remover_dishtowel.states[Saturated].get_value(water)
assert not bowl.states[Covered].get_value(water)
# The rule will not execute if Open is True
washer.states[Open].set_value(True)
og.sim.step()
assert bowl.states[Covered].get_value(dust)
assert bowl.states[Covered].get_value(salt)
assert bowl.states[Covered].get_value(rust)
assert bowl.states[Covered].get_value(spray_paint)
assert bowl.states[Covered].get_value(acetone)
assert bowl.states[Covered].get_value(cooking_oil)
assert not remover_dishtowel.states[Saturated].get_value(water)
assert not bowl.states[Covered].get_value(water)
washer.states[Open].set_value(False)
washer.states[ToggledOn].set_value(True)
# The rule will execute when Open is False and ToggledOn is True
og.sim.step()
# Need to take one more step for the state setters to take effect
og.sim.step()
assert not bowl.states[Covered].get_value(dust)
assert not bowl.states[Covered].get_value(salt)
assert bowl.states[Covered].get_value(rust)
assert not bowl.states[Covered].get_value(spray_paint)
assert not bowl.states[Covered].get_value(acetone)
assert bowl.states[Covered].get_value(cooking_oil)
assert remover_dishtowel.states[Saturated].get_value(water)
assert bowl.states[Covered].get_value(water)
# Clean up
remove_all_systems()
@og_test
def test_slicing_rule():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
apple = og.sim.scene.object_registry("name", "apple")
table_knife = og.sim.scene.object_registry("name", "table_knife")
deleted_objs = [apple]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
assert apple.states[Cooked].set_value(True)
initial_half_apples = og.sim.scene.object_registry("category", "half_apple", set()).copy()
place_obj_on_floor_plane(apple)
og.sim.step()
table_knife.set_position_orientation([-0.05, 0.0, 0.15], T.euler2quat([-np.pi / 2, 0, 0]))
og.sim.step()
assert not table_knife.states[Touching].get_value(apple)
final_half_apples = og.sim.scene.object_registry("category", "half_apple", set()).copy()
assert len(final_half_apples) == len(initial_half_apples)
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is not None
table_knife.set_position_orientation([-0.05, 0.0, 0.10], T.euler2quat([-np.pi / 2, 0, 0]))
og.sim.step()
final_half_apples = og.sim.scene.object_registry("category", "half_apple", set()).copy()
assert len(final_half_apples) > len(initial_half_apples)
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
# One more step for the half apples to be initialized
og.sim.step()
# All new half_apple should be cooked
new_half_apples = final_half_apples - initial_half_apples
for half_apple in new_half_apples:
assert half_apple.states[Cooked].get_value()
# Clean up
og.sim.remove_object(new_half_apples)
og.sim.step()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_dicing_rule_cooked():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
half_apple = og.sim.scene.object_registry("name", "half_apple")
table_knife = og.sim.scene.object_registry("name", "table_knife")
cooked_diced_apple = get_system("cooked__diced__apple")
deleted_objs = [half_apple]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
half_apple.set_orientation(T.euler2quat([0, -np.pi / 2, 0]))
place_obj_on_floor_plane(half_apple)
og.sim.step()
assert half_apple.states[Cooked].set_value(True)
assert cooked_diced_apple.n_particles == 0
table_knife.set_position_orientation([-0.05, 0.0, 0.15], T.euler2quat([-np.pi / 2, 0, 0]))
og.sim.step()
assert not table_knife.states[Touching].get_value(half_apple)
assert cooked_diced_apple.n_particles == 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is not None
table_knife.set_position_orientation([-0.05, 0.0, 0.07], T.euler2quat([-np.pi / 2, 0, 0]))
og.sim.step()
assert cooked_diced_apple.n_particles > 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
# Move the knife away so that it doesn't immediately dice the half_apple again once it's imported back
table_knife.set_position_orientation([-0.05, 0.0, 1.15], T.euler2quat([-np.pi / 2, 0, 0]))
og.sim.step()
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_dicing_rule_uncooked():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
half_apple = og.sim.scene.object_registry("name", "half_apple")
table_knife = og.sim.scene.object_registry("name", "table_knife")
diced_apple = get_system("diced__apple")
deleted_objs = [half_apple]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
half_apple.set_orientation(T.euler2quat([0, -np.pi / 2, 0]))
place_obj_on_floor_plane(half_apple)
og.sim.step()
assert diced_apple.n_particles == 0
table_knife.set_position_orientation([-0.05, 0.0, 0.15], T.euler2quat([-np.pi / 2, 0, 0]))
og.sim.step()
assert not table_knife.states[Touching].get_value(half_apple)
assert diced_apple.n_particles == 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is not None
table_knife.set_position_orientation([-0.05, 0.0, 0.07], T.euler2quat([-np.pi / 2, 0, 0]))
og.sim.step()
assert diced_apple.n_particles > 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
# Move the knife away so that it doesn't immediately dice the half_apple again once it's imported back
table_knife.set_position_orientation([-0.05, 0.0, 1.15], T.euler2quat([-np.pi / 2, 0, 0]))
og.sim.step()
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_melting_rule():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
stove = og.sim.scene.object_registry("name", "stove")
stockpot = og.sim.scene.object_registry("name", "stockpot")
swiss_cheese = og.sim.scene.object_registry("name", "swiss_cheese")
melted_swiss_cheese = get_system("melted__swiss_cheese")
deleted_objs = [swiss_cheese]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
place_obj_on_floor_plane(stove)
og.sim.step()
stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1])
og.sim.step()
assert stockpot.states[OnTop].get_value(stove)
swiss_cheese.set_position_orientation([-0.24, 0.11, 0.92], [0, 0, 0, 1])
og.sim.step()
assert swiss_cheese.states[Inside].get_value(stockpot)
assert melted_swiss_cheese.n_particles == 0
# To save time, directly set the temperature of the swiss cheese to be below the melting point
assert swiss_cheese.states[Temperature].set_value(m.transition_rules.MELTING_TEMPERATURE - 1)
og.sim.step()
assert melted_swiss_cheese.n_particles == 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is not None
# To save time, directly set the temperature of the swiss cheese to be above the melting point
assert swiss_cheese.states[Temperature].set_value(m.transition_rules.MELTING_TEMPERATURE + 1)
og.sim.step()
# Recipe should execute successfully: new melted swiss cheese should be created, and the ingredients should be deleted
assert melted_swiss_cheese.n_particles > 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_cooking_physical_particle_rule_failure_recipe_systems():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
stove = og.sim.scene.object_registry("name", "stove")
stockpot = og.sim.scene.object_registry("name", "stockpot")
arborio_rice = get_system("arborio_rice")
water = get_system("water")
cooked_water = get_system("cooked__water")
cooked_arborio_rice = get_system("cooked__arborio_rice")
place_obj_on_floor_plane(stove)
og.sim.step()
stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1])
og.sim.step()
assert stockpot.states[OnTop].get_value(stove)
arborio_rice.generate_particles(positions=[[-0.25, 0.13, 0.95]])
# This fails the recipe because water (recipe system) is not in the stockpot
water.generate_particles(positions=[[-0.25, 0.17, 1.95]])
assert stockpot.states[Contains].get_value(arborio_rice)
assert not stockpot.states[Contains].get_value(water)
assert cooked_arborio_rice.n_particles == 0
# To save time, directly set the stockpot to be heated
assert stockpot.states[Heated].set_value(True)
og.sim.step()
# Recipe should fail: no cooked arborio rice should be created
assert water.n_particles > 0
assert cooked_water.n_particles == 0
assert arborio_rice.n_particles > 0
assert cooked_arborio_rice.n_particles == 0
# Clean up
remove_all_systems()
@og_test
def test_cooking_physical_particle_rule_success():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
stove = og.sim.scene.object_registry("name", "stove")
stockpot = og.sim.scene.object_registry("name", "stockpot")
arborio_rice = get_system("arborio_rice")
water = get_system("water")
cooked_water = get_system("cooked__water")
cooked_arborio_rice = get_system("cooked__arborio_rice")
place_obj_on_floor_plane(stove)
og.sim.step()
stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1])
og.sim.step()
assert stockpot.states[OnTop].get_value(stove)
arborio_rice.generate_particles(positions=[[-0.25, 0.13, 0.95]])
water.generate_particles(positions=[[-0.25, 0.17, 0.95]])
assert stockpot.states[Contains].get_value(arborio_rice)
assert stockpot.states[Contains].get_value(water)
assert cooked_arborio_rice.n_particles == 0
assert cooked_water.n_particles == 0
# To save time, directly set the stockpot to be heated
assert stockpot.states[Heated].set_value(True)
og.sim.step()
assert water.n_particles == 0
assert cooked_water.n_particles > 0
assert arborio_rice.n_particles > 0
assert cooked_arborio_rice.n_particles == 0
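    # The first step above converted the water into cooked water; the next step
    # consumes the cooked water to convert the rice into cooked rice.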
# Recipe should execute successfully: new cooked arborio rice should be created, and the ingredients should be deleted
og.sim.step()
assert water.n_particles == 0
assert cooked_water.n_particles == 0
assert arborio_rice.n_particles == 0
assert cooked_arborio_rice.n_particles > 0
# Clean up
remove_all_systems()
@og_test
def test_mixing_rule_failure_recipe_systems():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
bowl = og.sim.scene.object_registry("name", "bowl")
tablespoon = og.sim.scene.object_registry("name", "tablespoon")
water = get_system("water")
granulated_sugar = get_system("granulated_sugar")
lemon_juice = get_system("lemon_juice")
lemonade = get_system("lemonade")
sludge = get_system("sludge")
place_obj_on_floor_plane(bowl)
og.sim.step()
water.generate_particles(positions=[[-0.02, 0.0, 0.02]])
granulated_sugar.generate_particles(positions=[[0.0, 0.0, 0.02]])
# This fails the recipe because lemon juice (recipe system) is not in the bowl
lemon_juice.generate_particles(positions=[[0.02, 0.0, 1.02]])
assert bowl.states[Contains].get_value(water)
assert bowl.states[Contains].get_value(granulated_sugar)
assert not bowl.states[Contains].get_value(lemon_juice)
assert lemonade.n_particles == 0
assert sludge.n_particles == 0
tablespoon.set_position_orientation([0.04, 0.0, 0.11], [0, 0, 0, 1])
og.sim.step()
assert tablespoon.states[Touching].get_value(bowl)
    # Recipe should fail: no lemonade should be created, and sludge should be created.
assert lemonade.n_particles == 0
assert sludge.n_particles > 0
assert water.n_particles == 0
assert granulated_sugar.n_particles == 0
# Clean up
remove_all_systems()
@og_test
def test_mixing_rule_failure_nonrecipe_systems():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
bowl = og.sim.scene.object_registry("name", "bowl")
tablespoon = og.sim.scene.object_registry("name", "tablespoon")
water = get_system("water")
granulated_sugar = get_system("granulated_sugar")
lemon_juice = get_system("lemon_juice")
lemonade = get_system("lemonade")
salt = get_system("salt")
sludge = get_system("sludge")
place_obj_on_floor_plane(bowl)
og.sim.step()
water.generate_particles(positions=[[-0.02, 0, 0.02]])
granulated_sugar.generate_particles(positions=[[0.0, 0.0, 0.02]])
lemon_juice.generate_particles(positions=[[0.02, 0.0, 0.02]])
# This fails the recipe because salt (nonrecipe system) is in the bowl
salt.generate_particles(positions=[[0.0, 0.02, 0.02]])
assert bowl.states[Contains].get_value(water)
assert bowl.states[Contains].get_value(granulated_sugar)
assert bowl.states[Contains].get_value(lemon_juice)
assert bowl.states[Contains].get_value(salt)
assert lemonade.n_particles == 0
assert sludge.n_particles == 0
tablespoon.set_position_orientation([0.04, 0.0, 0.11], [0, 0, 0, 1])
og.sim.step()
assert tablespoon.states[Touching].get_value(bowl)
    # Recipe should fail: no lemonade should be created, and sludge should be created.
assert lemonade.n_particles == 0
assert sludge.n_particles > 0
assert water.n_particles == 0
assert granulated_sugar.n_particles == 0
assert lemon_juice.n_particles == 0
assert salt.n_particles == 0
# Clean up
remove_all_systems()
@og_test
def test_mixing_rule_success():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
bowl = og.sim.scene.object_registry("name", "bowl")
tablespoon = og.sim.scene.object_registry("name", "tablespoon")
water = get_system("water")
granulated_sugar = get_system("granulated_sugar")
lemon_juice = get_system("lemon_juice")
lemonade = get_system("lemonade")
place_obj_on_floor_plane(bowl)
og.sim.step()
water.generate_particles(positions=[[-0.02, 0.0, 0.02]])
granulated_sugar.generate_particles(positions=[[0.0, 0.0, 0.02]])
lemon_juice.generate_particles(positions=[[0.02, 0.0, 0.02]])
assert bowl.states[Contains].get_value(water)
assert bowl.states[Contains].get_value(granulated_sugar)
assert bowl.states[Contains].get_value(lemon_juice)
assert lemonade.n_particles == 0
tablespoon.set_position_orientation([0.04, 0.0, 0.11], [0, 0, 0, 1])
og.sim.step()
assert tablespoon.states[Touching].get_value(bowl)
# Recipe should execute successfully: new lemonade should be created, and the ingredients should be deleted
assert lemonade.n_particles > 0
assert water.n_particles == 0
assert granulated_sugar.n_particles == 0
assert lemon_juice.n_particles == 0
# Clean up
remove_all_systems()
@og_test
def test_cooking_system_rule_failure_recipe_systems():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
stove = og.sim.scene.object_registry("name", "stove")
stockpot = og.sim.scene.object_registry("name", "stockpot")
chicken = og.sim.scene.object_registry("name", "chicken")
chicken_broth = get_system("chicken_broth")
diced_carrot = get_system("diced__carrot")
diced_celery = get_system("diced__celery")
salt = get_system("salt")
rosemary = get_system("rosemary")
chicken_soup = get_system("cooked__chicken_soup")
place_obj_on_floor_plane(stove)
og.sim.step()
stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1])
og.sim.step()
assert stockpot.states[OnTop].get_value(stove)
chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1])
# This fails the recipe because chicken broth (recipe system) is not in the stockpot
chicken_broth.generate_particles(positions=[[-0.33, 0.05, 1.93]])
diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]])
diced_celery.generate_particles(positions=[[-0.23, 0.05, 0.93]])
salt.generate_particles(positions=[[-0.33, 0.15, 0.93]])
rosemary.generate_particles(positions=[[-0.28, 0.15, 0.93]])
og.sim.step()
assert chicken.states[Inside].get_value(stockpot)
assert not chicken.states[Cooked].get_value()
assert not stockpot.states[Contains].get_value(chicken_broth)
assert stockpot.states[Contains].get_value(diced_carrot)
assert stockpot.states[Contains].get_value(diced_celery)
assert stockpot.states[Contains].get_value(salt)
assert stockpot.states[Contains].get_value(rosemary)
assert chicken_soup.n_particles == 0
assert stove.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should fail: no chicken soup should be created
assert chicken_soup.n_particles == 0
assert chicken_broth.n_particles > 0
assert diced_carrot.n_particles > 0
assert diced_celery.n_particles > 0
assert salt.n_particles > 0
assert rosemary.n_particles > 0
assert og.sim.scene.object_registry("name", "chicken") is not None
# Clean up
remove_all_systems()
@og_test
def test_cooking_system_rule_failure_nonrecipe_systems():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
stove = og.sim.scene.object_registry("name", "stove")
stockpot = og.sim.scene.object_registry("name", "stockpot")
chicken = og.sim.scene.object_registry("name", "chicken")
water = get_system("water")
chicken_broth = get_system("chicken_broth")
diced_carrot = get_system("diced__carrot")
diced_celery = get_system("diced__celery")
salt = get_system("salt")
rosemary = get_system("rosemary")
chicken_soup = get_system("cooked__chicken_soup")
place_obj_on_floor_plane(stove)
og.sim.step()
stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1])
og.sim.step()
assert stockpot.states[OnTop].get_value(stove)
chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1])
# This fails the recipe because water (nonrecipe system) is inside the stockpot
water.generate_particles(positions=[[-0.24, 0.11, 0.93]])
chicken_broth.generate_particles(positions=[[-0.33, 0.05, 0.93]])
diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]])
diced_celery.generate_particles(positions=[[-0.23, 0.05, 0.93]])
salt.generate_particles(positions=[[-0.33, 0.15, 0.93]])
rosemary.generate_particles(positions=[[-0.28, 0.15, 0.93]])
og.sim.step()
assert chicken.states[Inside].get_value(stockpot)
assert not chicken.states[Cooked].get_value()
assert stockpot.states[Contains].get_value(water)
assert stockpot.states[Contains].get_value(chicken_broth)
assert stockpot.states[Contains].get_value(diced_carrot)
assert stockpot.states[Contains].get_value(diced_celery)
assert stockpot.states[Contains].get_value(salt)
assert stockpot.states[Contains].get_value(rosemary)
assert chicken_soup.n_particles == 0
assert stove.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should fail: no chicken soup should be created
assert chicken_soup.n_particles == 0
assert chicken_broth.n_particles > 0
assert diced_carrot.n_particles > 0
assert diced_celery.n_particles > 0
assert salt.n_particles > 0
assert rosemary.n_particles > 0
assert water.n_particles > 0
assert og.sim.scene.object_registry("name", "chicken") is not None
# Clean up
remove_all_systems()
@og_test
def test_cooking_system_rule_failure_nonrecipe_objects():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
stove = og.sim.scene.object_registry("name", "stove")
stockpot = og.sim.scene.object_registry("name", "stockpot")
chicken = og.sim.scene.object_registry("name", "chicken")
bowl = og.sim.scene.object_registry("name", "bowl")
chicken_broth = get_system("chicken_broth")
diced_carrot = get_system("diced__carrot")
diced_celery = get_system("diced__celery")
salt = get_system("salt")
rosemary = get_system("rosemary")
chicken_soup = get_system("cooked__chicken_soup")
place_obj_on_floor_plane(stove)
og.sim.step()
stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1])
og.sim.step()
assert stockpot.states[OnTop].get_value(stove)
chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1])
# This fails the recipe because the bowl (nonrecipe object) is inside the stockpot
bowl.set_position_orientation([-0.20, 0.15, 1], [0, 0, 0, 1])
chicken_broth.generate_particles(positions=[[-0.33, 0.05, 0.93]])
diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]])
diced_celery.generate_particles(positions=[[-0.23, 0.05, 0.93]])
salt.generate_particles(positions=[[-0.33, 0.15, 0.93]])
rosemary.generate_particles(positions=[[-0.28, 0.15, 0.93]])
og.sim.step()
assert chicken.states[Inside].get_value(stockpot)
assert bowl.states[Inside].get_value(stockpot)
assert not chicken.states[Cooked].get_value()
assert stockpot.states[Contains].get_value(chicken_broth)
assert stockpot.states[Contains].get_value(diced_carrot)
assert stockpot.states[Contains].get_value(diced_celery)
assert stockpot.states[Contains].get_value(salt)
assert stockpot.states[Contains].get_value(rosemary)
assert chicken_soup.n_particles == 0
assert stove.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should fail: no chicken soup should be created
assert chicken_soup.n_particles == 0
assert chicken_broth.n_particles > 0
assert diced_carrot.n_particles > 0
assert diced_celery.n_particles > 0
assert salt.n_particles > 0
assert rosemary.n_particles > 0
assert og.sim.scene.object_registry("name", "chicken") is not None
assert og.sim.scene.object_registry("name", "bowl") is not None
# Clean up
remove_all_systems()
@og_test
def test_cooking_system_rule_success():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
stove = og.sim.scene.object_registry("name", "stove")
stockpot = og.sim.scene.object_registry("name", "stockpot")
chicken = og.sim.scene.object_registry("name", "chicken")
chicken_broth = get_system("chicken_broth")
diced_carrot = get_system("diced__carrot")
diced_celery = get_system("diced__celery")
salt = get_system("salt")
rosemary = get_system("rosemary")
chicken_soup = get_system("cooked__chicken_soup")
deleted_objs = [chicken]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
place_obj_on_floor_plane(stove)
og.sim.step()
stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1])
og.sim.step()
assert stockpot.states[OnTop].get_value(stove)
chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1])
chicken_broth.generate_particles(positions=[[-0.33, 0.05, 0.93]])
diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]])
diced_celery.generate_particles(positions=[[-0.23, 0.05, 0.93]])
salt.generate_particles(positions=[[-0.33, 0.15, 0.93]])
rosemary.generate_particles(positions=[[-0.28, 0.15, 0.93]])
og.sim.step()
assert chicken.states[Inside].get_value(stockpot)
assert not chicken.states[Cooked].get_value()
assert stockpot.states[Contains].get_value(chicken_broth)
assert stockpot.states[Contains].get_value(diced_carrot)
assert stockpot.states[Contains].get_value(diced_celery)
assert stockpot.states[Contains].get_value(salt)
assert stockpot.states[Contains].get_value(rosemary)
assert chicken_soup.n_particles == 0
assert stove.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should execute successfully: new chicken soup should be created, and the ingredients should be deleted
assert chicken_soup.n_particles > 0
assert chicken_broth.n_particles == 0
assert diced_carrot.n_particles == 0
assert diced_celery.n_particles == 0
assert salt.n_particles == 0
assert rosemary.n_particles == 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_cooking_object_rule_failure_wrong_container():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
oven = og.sim.scene.object_registry("name", "oven")
stockpot = og.sim.scene.object_registry("name", "stockpot")
bagel_dough = og.sim.scene.object_registry("name", "bagel_dough")
raw_egg = og.sim.scene.object_registry("name", "raw_egg")
sesame_seed = get_system("sesame_seed")
initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
place_obj_on_floor_plane(oven)
og.sim.step()
# This fails the recipe because it requires the baking sheet to be inside the oven, not the stockpot
stockpot.set_position_orientation([0, 0, 0.47], [0, 0, 0, 1])
og.sim.step()
assert stockpot.states[Inside].get_value(oven)
bagel_dough.set_position_orientation([0, 0, 0.45], [0, 0, 0, 1])
raw_egg.set_position_orientation([0.02, 0, 0.50], [0, 0, 0, 1])
og.sim.step()
assert bagel_dough.states[Inside].get_value(stockpot)
assert raw_egg.states[OnTop].get_value(bagel_dough)
assert bagel_dough.states[Cooked].set_value(False)
assert raw_egg.states[Cooked].set_value(False)
og.sim.step()
assert bagel_dough.states[Covered].set_value(sesame_seed, True)
og.sim.step()
assert oven.states[ToggledOn].set_value(True)
og.sim.step()
final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
assert len(final_bagels) == len(initial_bagels)
# Clean up
remove_all_systems()
@og_test
def test_cooking_object_rule_failure_recipe_objects():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
oven = og.sim.scene.object_registry("name", "oven")
baking_sheet = og.sim.scene.object_registry("name", "baking_sheet")
bagel_dough = og.sim.scene.object_registry("name", "bagel_dough")
raw_egg = og.sim.scene.object_registry("name", "raw_egg")
sesame_seed = get_system("sesame_seed")
initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
place_obj_on_floor_plane(oven)
og.sim.step()
baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1])
og.sim.step()
assert baking_sheet.states[Inside].get_value(oven)
# This fails the recipe because it requires the bagel dough to be on top of the baking sheet
bagel_dough.set_position_orientation([1, 0, 0.5], [0, 0, 0, 1])
raw_egg.set_position_orientation([1.02, 0, 0.55], [0, 0, 0, 1])
og.sim.step()
assert not bagel_dough.states[OnTop].get_value(baking_sheet)
assert bagel_dough.states[Cooked].set_value(False)
assert raw_egg.states[Cooked].set_value(False)
og.sim.step()
assert bagel_dough.states[Covered].set_value(sesame_seed, True)
og.sim.step()
assert oven.states[ToggledOn].set_value(True)
og.sim.step()
final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
assert len(final_bagels) == len(initial_bagels)
# Clean up
remove_all_systems()
@og_test
def test_cooking_object_rule_failure_unary_states():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
oven = og.sim.scene.object_registry("name", "oven")
baking_sheet = og.sim.scene.object_registry("name", "baking_sheet")
bagel_dough = og.sim.scene.object_registry("name", "bagel_dough")
raw_egg = og.sim.scene.object_registry("name", "raw_egg")
sesame_seed = get_system("sesame_seed")
initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
place_obj_on_floor_plane(oven)
og.sim.step()
baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1])
og.sim.step()
assert baking_sheet.states[Inside].get_value(oven)
bagel_dough.set_position_orientation([0, 0, 0.5], [0, 0, 0, 1])
raw_egg.set_position_orientation([0.02, 0, 0.55], [0, 0, 0, 1])
og.sim.step()
assert bagel_dough.states[OnTop].get_value(baking_sheet)
assert raw_egg.states[OnTop].get_value(bagel_dough)
# This fails the recipe because it requires the bagel dough and the raw egg to be not cooked
assert bagel_dough.states[Cooked].set_value(True)
assert raw_egg.states[Cooked].set_value(True)
og.sim.step()
assert bagel_dough.states[Covered].set_value(sesame_seed, True)
og.sim.step()
assert oven.states[ToggledOn].set_value(True)
og.sim.step()
final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
assert len(final_bagels) == len(initial_bagels)
# Clean up
remove_all_systems()
@og_test
def test_cooking_object_rule_failure_binary_system_states():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
oven = og.sim.scene.object_registry("name", "oven")
baking_sheet = og.sim.scene.object_registry("name", "baking_sheet")
bagel_dough = og.sim.scene.object_registry("name", "bagel_dough")
raw_egg = og.sim.scene.object_registry("name", "raw_egg")
sesame_seed = get_system("sesame_seed")
initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
place_obj_on_floor_plane(oven)
og.sim.step()
baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1])
og.sim.step()
assert baking_sheet.states[Inside].get_value(oven)
bagel_dough.set_position_orientation([0, 0, 0.5], [0, 0, 0, 1])
raw_egg.set_position_orientation([0.02, 0, 0.55], [0, 0, 0, 1])
og.sim.step()
assert bagel_dough.states[OnTop].get_value(baking_sheet)
assert raw_egg.states[OnTop].get_value(bagel_dough)
assert bagel_dough.states[Cooked].set_value(False)
assert raw_egg.states[Cooked].set_value(False)
og.sim.step()
# This fails the recipe because it requires the bagel dough to be covered with sesame seed
assert bagel_dough.states[Covered].set_value(sesame_seed, False)
og.sim.step()
assert oven.states[ToggledOn].set_value(True)
og.sim.step()
final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
assert len(final_bagels) == len(initial_bagels)
# Clean up
remove_all_systems()
@og_test
def test_cooking_object_rule_failure_binary_object_states():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
oven = og.sim.scene.object_registry("name", "oven")
baking_sheet = og.sim.scene.object_registry("name", "baking_sheet")
bagel_dough = og.sim.scene.object_registry("name", "bagel_dough")
raw_egg = og.sim.scene.object_registry("name", "raw_egg")
sesame_seed = get_system("sesame_seed")
initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
place_obj_on_floor_plane(oven)
og.sim.step()
baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1])
og.sim.step()
assert baking_sheet.states[Inside].get_value(oven)
bagel_dough.set_position_orientation([0, 0, 0.5], [0, 0, 0, 1])
raw_egg.set_position_orientation([0.12, 0.15, 0.47], [0, 0, 0, 1])
og.sim.step()
assert bagel_dough.states[OnTop].get_value(baking_sheet)
# This fails the recipe because it requires the raw egg to be on top of the bagel dough
assert not raw_egg.states[OnTop].get_value(bagel_dough)
assert bagel_dough.states[Cooked].set_value(False)
assert raw_egg.states[Cooked].set_value(False)
og.sim.step()
assert bagel_dough.states[Covered].set_value(sesame_seed, True)
og.sim.step()
assert oven.states[ToggledOn].set_value(True)
og.sim.step()
final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
assert len(final_bagels) == len(initial_bagels)
# Clean up
remove_all_systems()
@og_test
def test_cooking_object_rule_failure_wrong_heat_source():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
stove = og.sim.scene.object_registry("name", "stove")
baking_sheet = og.sim.scene.object_registry("name", "baking_sheet")
bagel_dough = og.sim.scene.object_registry("name", "bagel_dough")
raw_egg = og.sim.scene.object_registry("name", "raw_egg")
sesame_seed = get_system("sesame_seed")
initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
# This fails the recipe because it requires the oven to be the heat source, not the stove
place_obj_on_floor_plane(stove)
og.sim.step()
heat_source_position = stove.states[HeatSourceOrSink].link.get_position()
baking_sheet.set_position_orientation([-0.20, 0, 0.80], [0, 0, 0, 1])
og.sim.step()
bagel_dough.set_position_orientation([-0.20, 0, 0.84], [0, 0, 0, 1])
raw_egg.set_position_orientation([-0.18, 0, 0.89], [0, 0, 0, 1])
og.sim.step()
assert bagel_dough.states[OnTop].get_value(baking_sheet)
assert raw_egg.states[OnTop].get_value(bagel_dough)
    # Keep the ingredients uncooked so that only the heat source differs from the
    # success case.
    assert bagel_dough.states[Cooked].set_value(False)
    assert raw_egg.states[Cooked].set_value(False)
og.sim.step()
assert bagel_dough.states[Covered].set_value(sesame_seed, True)
og.sim.step()
assert stove.states[ToggledOn].set_value(True)
og.sim.step()
# Make sure the stove affects the baking sheet
assert stove.states[HeatSourceOrSink].affects_obj(baking_sheet)
final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
assert len(final_bagels) == len(initial_bagels)
# Clean up
remove_all_systems()
@og_test
def test_cooking_object_rule_success():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
oven = og.sim.scene.object_registry("name", "oven")
baking_sheet = og.sim.scene.object_registry("name", "baking_sheet")
bagel_dough = og.sim.scene.object_registry("name", "bagel_dough")
raw_egg = og.sim.scene.object_registry("name", "raw_egg")
sesame_seed = get_system("sesame_seed")
deleted_objs = [bagel_dough, raw_egg]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
place_obj_on_floor_plane(oven)
og.sim.step()
baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1])
og.sim.step()
assert baking_sheet.states[Inside].get_value(oven)
bagel_dough.set_position_orientation([0, 0, 0.5], [0, 0, 0, 1])
raw_egg.set_position_orientation([0.02, 0, 0.55], [0, 0, 0, 1])
og.sim.step()
assert bagel_dough.states[OnTop].get_value(baking_sheet)
assert raw_egg.states[OnTop].get_value(bagel_dough)
assert bagel_dough.states[Cooked].set_value(False)
assert raw_egg.states[Cooked].set_value(False)
og.sim.step()
assert bagel_dough.states[Covered].set_value(sesame_seed, True)
og.sim.step()
assert oven.states[ToggledOn].set_value(True)
og.sim.step()
final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy()
# Recipe should execute successfully: new bagels should be created, and the ingredients should be deleted
assert len(final_bagels) > len(initial_bagels)
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
# Need to step again for the new bagels to be initialized, placed in the container, and cooked.
og.sim.step()
# All new bagels should be cooked
new_bagels = final_bagels - initial_bagels
for bagel in new_bagels:
assert bagel.states[Cooked].get_value()
# This assertion occasionally fails, because when four bagels are sampled on top of the baking sheet one by one,
# there is no guarantee that all four of them will be on top of the baking sheet at the end.
# assert bagel.states[OnTop].get_value(baking_sheet)
assert bagel.states[Inside].get_value(oven)
# Clean up
remove_all_systems()
og.sim.remove_object(new_bagels)
og.sim.step()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_single_toggleable_machine_rule_output_system_failure_wrong_container():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
food_processor = og.sim.scene.object_registry("name", "food_processor")
ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream")
milk = get_system("whole_milk")
chocolate_sauce = get_system("chocolate_sauce")
milkshake = get_system("milkshake")
sludge = get_system("sludge")
deleted_objs = [ice_cream]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
# This fails the recipe because it requires the blender to be the container, not the food processor
place_obj_on_floor_plane(food_processor)
og.sim.step()
milk.generate_particles(positions=np.array([[0.02, 0.06, 0.22]]))
chocolate_sauce.generate_particles(positions=np.array([[-0.05, -0.04, 0.22]]))
ice_cream.set_position_orientation([0.03, -0.02, 0.23], [0, 0, 0, 1])
og.sim.step()
assert food_processor.states[Contains].get_value(milk)
assert food_processor.states[Contains].get_value(chocolate_sauce)
assert ice_cream.states[Inside].get_value(food_processor)
assert milkshake.n_particles == 0
assert sludge.n_particles == 0
food_processor.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should fail: no milkshake should be created, and sludge should be created.
assert milkshake.n_particles == 0
assert sludge.n_particles > 0
assert milk.n_particles == 0
assert chocolate_sauce.n_particles == 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_single_toggleable_machine_rule_output_system_failure_recipe_systems():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
blender = og.sim.scene.object_registry("name", "blender")
ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream")
milk = get_system("whole_milk")
chocolate_sauce = get_system("chocolate_sauce")
milkshake = get_system("milkshake")
sludge = get_system("sludge")
deleted_objs = [ice_cream]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
place_obj_on_floor_plane(blender)
og.sim.step()
# This fails the recipe because it requires the milk to be in the blender
milk.generate_particles(positions=np.array([[0.02, 0, 1.57]]))
chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]]))
ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1])
og.sim.step()
assert not blender.states[Contains].get_value(milk)
assert blender.states[Contains].get_value(chocolate_sauce)
assert ice_cream.states[Inside].get_value(blender)
assert milkshake.n_particles == 0
assert sludge.n_particles == 0
blender.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should fail: no milkshake should be created, and sludge should be created.
assert milkshake.n_particles == 0
assert sludge.n_particles > 0
assert chocolate_sauce.n_particles == 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_single_toggleable_machine_rule_output_system_failure_recipe_objects():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
blender = og.sim.scene.object_registry("name", "blender")
ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream")
milk = get_system("whole_milk")
chocolate_sauce = get_system("chocolate_sauce")
milkshake = get_system("milkshake")
sludge = get_system("sludge")
place_obj_on_floor_plane(blender)
og.sim.step()
milk.generate_particles(positions=np.array([[0.02, 0, 0.57]]))
chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]]))
# This fails the recipe because it requires the ice cream to be inside the blender
ice_cream.set_position_orientation([0, 0, 1.51], [0, 0, 0, 1])
og.sim.step()
assert blender.states[Contains].get_value(milk)
assert blender.states[Contains].get_value(chocolate_sauce)
assert not ice_cream.states[Inside].get_value(blender)
assert milkshake.n_particles == 0
assert sludge.n_particles == 0
blender.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should fail: no milkshake should be created, and sludge should be created.
assert milkshake.n_particles == 0
assert sludge.n_particles > 0
assert milk.n_particles == 0
assert chocolate_sauce.n_particles == 0
# Clean up
remove_all_systems()
@og_test
def test_single_toggleable_machine_rule_output_system_failure_nonrecipe_systems():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
blender = og.sim.scene.object_registry("name", "blender")
ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream")
milk = get_system("whole_milk")
chocolate_sauce = get_system("chocolate_sauce")
milkshake = get_system("milkshake")
sludge = get_system("sludge")
water = get_system("water")
deleted_objs = [ice_cream]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
place_obj_on_floor_plane(blender)
og.sim.step()
milk.generate_particles(positions=np.array([[0.02, 0, 0.57]]))
chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]]))
# This fails the recipe because water (nonrecipe system) is in the blender
water.generate_particles(positions=np.array([[0, 0, 0.57]]))
ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1])
og.sim.step()
assert blender.states[Contains].get_value(milk)
assert blender.states[Contains].get_value(chocolate_sauce)
assert blender.states[Contains].get_value(water)
assert ice_cream.states[Inside].get_value(blender)
assert milkshake.n_particles == 0
assert sludge.n_particles == 0
blender.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should fail: no milkshake should be created, and sludge should be created.
assert milkshake.n_particles == 0
assert sludge.n_particles > 0
assert milk.n_particles == 0
assert chocolate_sauce.n_particles == 0
assert water.n_particles == 0
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_single_toggleable_machine_rule_output_system_failure_nonrecipe_objects():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
blender = og.sim.scene.object_registry("name", "blender")
ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream")
bowl = og.sim.scene.object_registry("name", "bowl")
milk = get_system("whole_milk")
chocolate_sauce = get_system("chocolate_sauce")
milkshake = get_system("milkshake")
sludge = get_system("sludge")
deleted_objs = [ice_cream, bowl]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
place_obj_on_floor_plane(blender)
og.sim.step()
milk.generate_particles(positions=np.array([[0.02, 0, 0.57]]))
chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]]))
ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1])
# This fails the recipe because the bowl (nonrecipe object) is in the blender
bowl.set_position_orientation([0, 0, 0.58], [0, 0, 0, 1])
og.sim.step()
assert blender.states[Contains].get_value(milk)
assert blender.states[Contains].get_value(chocolate_sauce)
assert ice_cream.states[Inside].get_value(blender)
assert bowl.states[Inside].get_value(blender)
assert milkshake.n_particles == 0
assert sludge.n_particles == 0
blender.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should fail: no milkshake should be created, and sludge should be created.
assert milkshake.n_particles == 0
assert sludge.n_particles > 0
assert milk.n_particles == 0
assert chocolate_sauce.n_particles == 0
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_single_toggleable_machine_rule_output_system_success():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
blender = og.sim.scene.object_registry("name", "blender")
ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream")
milk = get_system("whole_milk")
chocolate_sauce = get_system("chocolate_sauce")
milkshake = get_system("milkshake")
sludge = get_system("sludge")
deleted_objs = [ice_cream]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
place_obj_on_floor_plane(blender)
og.sim.step()
milk.generate_particles(positions=np.array([[0.02, 0, 0.57]]))
chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]]))
ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1])
og.sim.step()
assert blender.states[Contains].get_value(milk)
assert blender.states[Contains].get_value(chocolate_sauce)
assert ice_cream.states[Inside].get_value(blender)
assert milkshake.n_particles == 0
assert sludge.n_particles == 0
blender.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should execute successfully: new milkshake should be created, and the ingredients should be deleted
assert milkshake.n_particles > 0
assert sludge.n_particles == 0
assert milk.n_particles == 0
assert chocolate_sauce.n_particles == 0
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_single_toggleable_machine_rule_output_object_failure_unary_states():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
electric_mixer = og.sim.scene.object_registry("name", "electric_mixer")
raw_egg = og.sim.scene.object_registry("name", "raw_egg")
another_raw_egg = og.sim.scene.object_registry("name", "another_raw_egg")
flour = get_system("flour")
granulated_sugar = get_system("granulated_sugar")
vanilla = get_system("vanilla")
melted_butter = get_system("melted__butter")
baking_powder = get_system("baking_powder")
salt = get_system("salt")
sludge = get_system("sludge")
initial_doughs = og.sim.scene.object_registry("category", "sugar_cookie_dough", set()).copy()
deleted_objs = [raw_egg, another_raw_egg]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
place_obj_on_floor_plane(electric_mixer)
og.sim.step()
another_raw_egg.set_position_orientation([-0.01, -0.14, 0.50], [0, 0, 0, 1])
raw_egg.set_position_orientation([-0.01, -0.14, 0.47], [0, 0, 0, 1])
flour.generate_particles(positions=np.array([[-0.01, -0.15, 0.43]]))
granulated_sugar.generate_particles(positions=np.array([[0.01, -0.15, 0.43]]))
vanilla.generate_particles(positions=np.array([[0.03, -0.15, 0.43]]))
melted_butter.generate_particles(positions=np.array([[-0.01, -0.13, 0.43]]))
baking_powder.generate_particles(positions=np.array([[0.01, -0.13, 0.43]]))
salt.generate_particles(positions=np.array([[0.03, -0.13, 0.43]]))
# This fails the recipe because the egg should not be cooked
raw_egg.states[Cooked].set_value(True)
og.sim.step()
assert electric_mixer.states[Contains].get_value(flour)
assert electric_mixer.states[Contains].get_value(granulated_sugar)
assert electric_mixer.states[Contains].get_value(vanilla)
assert electric_mixer.states[Contains].get_value(melted_butter)
assert electric_mixer.states[Contains].get_value(baking_powder)
assert electric_mixer.states[Contains].get_value(salt)
assert raw_egg.states[Inside].get_value(electric_mixer)
assert raw_egg.states[Cooked].get_value()
assert another_raw_egg.states[Inside].get_value(electric_mixer)
assert not another_raw_egg.states[Cooked].get_value()
assert sludge.n_particles == 0
electric_mixer.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should fail: no dough should be created, and sludge should be created.
    final_doughs = og.sim.scene.object_registry("category", "sugar_cookie_dough", set()).copy()
    assert len(final_doughs) == len(initial_doughs)
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
assert flour.n_particles == 0
assert granulated_sugar.n_particles == 0
assert vanilla.n_particles == 0
assert melted_butter.n_particles == 0
assert baking_powder.n_particles == 0
assert salt.n_particles == 0
assert sludge.n_particles > 0
# Clean up
remove_all_systems()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
@og_test
def test_single_toggleable_machine_rule_output_object_success():
assert len(REGISTERED_RULES) > 0, "No rules registered!"
electric_mixer = og.sim.scene.object_registry("name", "electric_mixer")
raw_egg = og.sim.scene.object_registry("name", "raw_egg")
another_raw_egg = og.sim.scene.object_registry("name", "another_raw_egg")
flour = get_system("flour")
granulated_sugar = get_system("granulated_sugar")
vanilla = get_system("vanilla")
melted_butter = get_system("melted__butter")
baking_powder = get_system("baking_powder")
salt = get_system("salt")
sludge = get_system("sludge")
initial_doughs = og.sim.scene.object_registry("category", "sugar_cookie_dough", set()).copy()
deleted_objs = [raw_egg, another_raw_egg]
deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs]
place_obj_on_floor_plane(electric_mixer)
og.sim.step()
another_raw_egg.set_position_orientation([-0.01, -0.14, 0.50], [0, 0, 0, 1])
raw_egg.set_position_orientation([-0.01, -0.14, 0.47], [0, 0, 0, 1])
flour.generate_particles(positions=np.array([[-0.01, -0.15, 0.43]]))
granulated_sugar.generate_particles(positions=np.array([[0.01, -0.15, 0.43]]))
vanilla.generate_particles(positions=np.array([[0.03, -0.15, 0.43]]))
melted_butter.generate_particles(positions=np.array([[-0.01, -0.13, 0.43]]))
baking_powder.generate_particles(positions=np.array([[0.01, -0.13, 0.43]]))
salt.generate_particles(positions=np.array([[0.03, -0.13, 0.43]]))
og.sim.step()
assert electric_mixer.states[Contains].get_value(flour)
assert electric_mixer.states[Contains].get_value(granulated_sugar)
assert electric_mixer.states[Contains].get_value(vanilla)
assert electric_mixer.states[Contains].get_value(melted_butter)
assert electric_mixer.states[Contains].get_value(baking_powder)
assert electric_mixer.states[Contains].get_value(salt)
assert raw_egg.states[Inside].get_value(electric_mixer)
assert not raw_egg.states[Cooked].get_value()
assert another_raw_egg.states[Inside].get_value(electric_mixer)
assert not another_raw_egg.states[Cooked].get_value()
assert sludge.n_particles == 0
electric_mixer.states[ToggledOn].set_value(True)
og.sim.step()
# Recipe should execute successfully: new dough should be created, and the ingredients should be deleted
final_doughs = og.sim.scene.object_registry("category", "sugar_cookie_dough", set()).copy()
assert len(final_doughs) > len(initial_doughs)
for obj in deleted_objs:
assert og.sim.scene.object_registry("name", obj.name) is None
assert flour.n_particles == 0
assert granulated_sugar.n_particles == 0
assert vanilla.n_particles == 0
assert melted_butter.n_particles == 0
assert baking_powder.n_particles == 0
assert salt.n_particles == 0
    # Need to step again for the new dough to be initialized and placed in the container
    og.sim.step()
    # None of the new doughs should be cooked
new_doughs = final_doughs - initial_doughs
for dough in new_doughs:
assert not dough.states[Cooked].get_value()
assert dough.states[OnTop].get_value(electric_mixer)
# Clean up
og.sim.remove_object(new_doughs)
og.sim.step()
for obj_cfg in deleted_objs_cfg:
obj = DatasetObject(**obj_cfg)
og.sim.import_object(obj)
og.sim.step()
| 58,046 | Python | 37.340158 | 131 | 0.68127 |
StanfordVL/OmniGibson/tests/test_object_removal.py | from omnigibson.objects import DatasetObject
import omnigibson as og
from omnigibson.utils.python_utils import NAMES
from utils import og_test
import pytest
@og_test
def test_removal_and_readdition():
# Make a copy of NAMES
initial_names = NAMES.copy()
# Add an apple
apple = DatasetObject(
name="apple_unique",
category="apple",
model="agveuv",
)
# Import it into the scene
og.sim.import_object(apple)
# Check that NAMES has changed
assert NAMES != initial_names
# Step a few times
for _ in range(5):
og.sim.step()
# Remove the apple
og.sim.remove_object(obj=apple)
# Check that NAMES is the same as before
extra_names = NAMES - initial_names
assert len(extra_names) == 0, f"Extra names: {extra_names}"
# Importing should work now
apple2 = DatasetObject(
name="apple_unique",
category="apple",
model="agveuv",
)
og.sim.import_object(apple2)
og.sim.step()
# Clear the stuff we added
og.sim.remove_object(apple2)
@og_test
def test_readdition():
# Make a copy of NAMES
initial_names = NAMES.copy()
# Add an apple
apple = DatasetObject(
name="apple_unique",
category="apple",
model="agveuv",
)
# Import it into the scene
og.sim.import_object(apple)
# Check that NAMES has changed
new_names = NAMES.copy()
assert new_names != initial_names
# Step a few times
for _ in range(5):
og.sim.step()
# Creating and importing a new apple should fail
with pytest.raises(AssertionError):
apple2 = DatasetObject(
name="apple_unique",
category="apple",
model="agveuv",
)
og.sim.import_object(apple2)
# Check that NAMES has not changed
assert NAMES == new_names
# Clear the stuff we added
og.sim.remove_object(apple)
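# Editor's note: both tests above hinge on NAMES acting as a global registry of live
# object names; a minimal sketch of the leak check they perform (hypothetical helper):
def _no_leaked_names(names_before):
    # True iff removal returned the registry to its pre-import contents
    return len(NAMES - names_before) == 0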
| 1,931 | Python | 20.954545 | 63 | 0.617815 |
StanfordVL/OmniGibson/tests/test_dump_load_states.py | import omnigibson as og
from omnigibson.systems import *
from omnigibson.object_states import Covered
from utils import og_test, SYSTEM_EXAMPLES
import pytest
@og_test
def test_dump_load():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
for system_name, system_class in SYSTEM_EXAMPLES.items():
system = get_system(system_name)
assert issubclass(system, system_class)
if issubclass(system_class, VisualParticleSystem):
assert breakfast_table.states[Covered].set_value(system, True)
else:
system.generate_particles(positions=[[0, 0, 1]])
assert system.n_particles > 0
system.remove_all_particles()
state = og.sim.dump_state()
og.sim.load_state(state)
for system_name, system_class in SYSTEM_EXAMPLES.items():
system = get_system(system_name)
system.clear()
@og_test
def test_dump_load_serialized():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
for system_name, system_class in SYSTEM_EXAMPLES.items():
system = get_system(system_name)
assert issubclass(system, system_class)
if issubclass(system_class, VisualParticleSystem):
assert breakfast_table.states[Covered].set_value(system, True)
else:
system.generate_particles(positions=[[0, 0, 1]])
assert system.n_particles > 0
state = og.sim.dump_state(serialized=True)
og.sim.load_state(state, serialized=True)
for system_name, system_class in SYSTEM_EXAMPLES.items():
system = get_system(system_name)
system.clear()
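# Editor's note: both tests above exercise the same dump/load round-trip; a minimal
# sketch of the shared pattern (hypothetical helper, not part of the suite):
def _roundtrip_sim_state(serialized=False):
    # Dump the full simulator state and immediately restore it
    state = og.sim.dump_state(serialized=serialized)
    og.sim.load_state(state, serialized=serialized)
    return state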
| 1,636 | Python | 33.829787 | 77 | 0.675428 |
StanfordVL/OmniGibson/tests/test_envs.py | import omnigibson as og
from omnigibson.macros import gm
def task_tester(task_type):
cfg = {
"scene": {
"type": "InteractiveTraversableScene",
"scene_model": "Rs_int",
"load_object_categories": ["floors", "breakfast_table"],
},
"robots": [
{
"type": "Fetch",
"obs_modalities": [],
}
],
# Task kwargs
"task": {
"type": task_type,
# BehaviorTask-specific
"activity_name": "assembling_gift_baskets",
"online_object_sampling": True
},
}
# Make sure sim is stopped
if og.sim is not None:
og.sim.stop()
    # Make sure object states and GPU dynamics are enabled (GPU dynamics needed for cloth); flatcache is enabled here as well
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_FLATCACHE = True
# Create the environment
env = og.Environment(configs=cfg)
env.reset()
for _ in range(5):
env.step(env.robots[0].action_space.sample())
# Clear the sim
og.sim.clear()
def test_dummy_task():
task_tester("DummyTask")
def test_point_reaching_task():
task_tester("PointReachingTask")
def test_point_navigation_task():
task_tester("PointNavigationTask")
def test_behavior_task():
task_tester("BehaviorTask")
def test_rs_int_full_load():
cfg = {
"scene": {
"type": "InteractiveTraversableScene",
"scene_model": "Rs_int",
},
"robots": [
{
"type": "Fetch",
"obs_modalities": [],
}
],
# Task kwargs
"task": {
"type": "DummyTask",
},
}
# Make sure sim is stopped
og.sim.stop()
# Make sure GPU dynamics are enabled (GPU dynamics needed for cloth)
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_FLATCACHE = True
# Create the environment
env = og.Environment(configs=cfg)
env.reset()
for _ in range(5):
env.step(env.robots[0].action_space.sample())
# Clear the sim
og.sim.clear()
| 2,175 | Python | 20.979798 | 89 | 0.538391 |
StanfordVL/OmniGibson/tests/create_tests_of_examples.py | import importlib
import os
import pkgutil
import shutil
from string import Template
import omnigibson
from omnigibson import examples
from omnigibson.utils.asset_utils import download_assets
download_assets()
def main():
examples_list = []
    # Strip the full package prefix (e.g. "omnigibson.examples.") rather than a hard-coded character count
    prefix_len = len(examples.__name__) + 1
    for package in pkgutil.walk_packages(examples.__path__, examples.__name__ + "."):
        rel_name = package.name[prefix_len:]
        if (
            not package.ispkg
            and rel_name != "example_selector"
            and "web_ui" not in rel_name  # The WebUI examples require additional server setup
            and "vr_" not in rel_name  # The VR examples require additional dependencies
            and "ray_" not in rel_name  # The Ray/RLLib example does not run in a subprocess
        ):  # Consider removing the last condition if we have runnable VR tests
            examples_list += [rel_name]
temp_folder_of_test = os.path.join("/", "tmp", "tests_of_examples")
shutil.rmtree(temp_folder_of_test, ignore_errors=True)
os.makedirs(temp_folder_of_test, exist_ok=True)
for example in examples_list:
template_file_name = os.path.join(omnigibson.__path__[0], "..", "tests", "test_of_example_template.txt")
with open(template_file_name, "r") as f:
substitutes = dict()
substitutes["module"] = example
name = example.rsplit(".", 1)[-1]
substitutes["name"] = name
src = Template(f.read())
dst = src.substitute(substitutes)
            with open(os.path.join(temp_folder_of_test, name + "_test.py"), "w") as test_file:
                test_file.write(dst)
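# Editor's note: the substitution above is plain string.Template behavior; in
# miniature (standard-library semantics, independent of the actual template file):
#
#   >>> from string import Template
#   >>> Template("module=$module, name=$name").substitute({"module": "a.b", "name": "b"})
#   'module=a.b, name=b'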
if __name__ == "__main__":
main()
| 1,687 | Python | 35.695651 | 112 | 0.617072 |
StanfordVL/OmniGibson/tests/utils.py | import omnigibson as og
from omnigibson.macros import gm
from omnigibson.object_states import *
from omnigibson.utils.constants import PrimType, ParticleModifyCondition, ParticleModifyMethod
from omnigibson.systems import *
import omnigibson.utils.transform_utils as T
import numpy as np
TEMP_RELATED_ABILITIES = {"cookable": {}, "freezable": {}, "burnable": {}, "heatable": {}}
SYSTEM_EXAMPLES = {
"water": FluidSystem,
"white_rice": GranularSystem,
"diced__apple": MacroPhysicalParticleSystem,
"stain": MacroVisualParticleSystem,
}
def og_test(func):
def wrapper():
assert_test_scene()
try:
func()
finally:
og.sim.scene.reset()
return wrapper
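# Editor's note: example usage of the decorator above (a sketch; the real tests live
# in the test_*.py modules): the wrapped test runs against the shared scene built by
# assert_test_scene(), and the scene is reset afterwards even if the test raises.
#
# @og_test
# def test_example():
#     table = og.sim.scene.object_registry("name", "breakfast_table")
#     assert table is not None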
num_objs = 0
def retrieve_obj_cfg(obj):
return {
"name": obj.name,
"category": obj.category,
"model": obj.model,
"prim_type": obj.prim_type,
"position": obj.get_position(),
"scale": obj.scale,
"abilities": obj.abilities,
"visual_only": obj.visual_only,
}
def get_obj_cfg(name, category, model, prim_type=PrimType.RIGID, scale=None, bounding_box=None, abilities=None, visual_only=False):
global num_objs
num_objs += 1
return {
"type": "DatasetObject",
"fit_avg_dim_volume": scale is None and bounding_box is None,
"name": name,
"category": category,
"model": model,
"prim_type": prim_type,
"position": [150, 150, 150 + num_objs * 5],
"scale": scale,
"bounding_box": bounding_box,
"abilities": abilities,
"visual_only": visual_only,
}
def assert_test_scene():
if og.sim is None or og.sim.scene is None:
cfg = {
"scene": {
"type": "Scene",
},
"objects": [
get_obj_cfg("breakfast_table", "breakfast_table", "skczfi"),
get_obj_cfg("bottom_cabinet", "bottom_cabinet", "immwzb"),
get_obj_cfg("dishtowel", "dishtowel", "dtfspn", prim_type=PrimType.CLOTH, abilities={"cloth": {}}),
get_obj_cfg("carpet", "carpet", "ctclvd", prim_type=PrimType.CLOTH, abilities={"cloth": {}}),
get_obj_cfg("bowl", "bowl", "ajzltc"),
get_obj_cfg("bagel", "bagel", "zlxkry", abilities=TEMP_RELATED_ABILITIES),
get_obj_cfg("cookable_dishtowel", "dishtowel", "dtfspn", prim_type=PrimType.CLOTH, abilities={**TEMP_RELATED_ABILITIES, **{"cloth": {}}}),
get_obj_cfg("microwave", "microwave", "hjjxmi"),
get_obj_cfg("stove", "stove", "yhjzwg"),
get_obj_cfg("fridge", "fridge", "dszchb"),
get_obj_cfg("plywood", "plywood", "fkmkqa", abilities={"flammable": {}}),
get_obj_cfg("shelf_back_panel", "shelf_back_panel", "gjsnrt", abilities={"attachable": {}}),
get_obj_cfg("shelf_shelf", "shelf_shelf", "ymtnqa", abilities={"attachable": {}}),
get_obj_cfg("shelf_baseboard", "shelf_baseboard", "hlhneo", abilities={"attachable": {}}),
get_obj_cfg("bracelet", "bracelet", "thqqmo"),
get_obj_cfg("oyster", "oyster", "enzocs"),
get_obj_cfg("sink", "sink", "egwapq", scale=np.ones(3)),
get_obj_cfg("stockpot", "stockpot", "dcleem", abilities={"fillable": {}, "heatable": {}}),
get_obj_cfg("applier_dishtowel", "dishtowel", "dtfspn", abilities={"particleApplier": {"method": ParticleModifyMethod.ADJACENCY, "conditions": {"water": []}}}),
get_obj_cfg("remover_dishtowel", "dishtowel", "dtfspn", abilities={"particleRemover": {"method": ParticleModifyMethod.ADJACENCY, "conditions": {"water": []}}}),
get_obj_cfg("spray_bottle", "spray_bottle", "asztxi", visual_only=True, abilities={"toggleable": {}, "particleApplier": {"method": ParticleModifyMethod.PROJECTION, "conditions": {"water": [(ParticleModifyCondition.TOGGLEDON, True)]}}}),
get_obj_cfg("vacuum", "vacuum", "bdmsbr", visual_only=True, abilities={"toggleable": {}, "particleRemover": {"method": ParticleModifyMethod.PROJECTION, "conditions": {"water": [(ParticleModifyCondition.TOGGLEDON, True)]}}}),
get_obj_cfg("blender", "blender", "cwkvib", bounding_box=[0.316, 0.318, 0.649], abilities={"fillable": {}, "toggleable": {}, "heatable": {}}),
get_obj_cfg("oven", "oven", "cgtaer", bounding_box=[0.943, 0.837, 1.297]),
get_obj_cfg("baking_sheet", "baking_sheet", "yhurut", bounding_box=[0.41607812, 0.43617093, 0.02281223]),
get_obj_cfg("bagel_dough", "bagel_dough", "iuembm", scale=np.ones(3) * 0.8),
get_obj_cfg("raw_egg", "raw_egg", "ydgivr"),
get_obj_cfg("scoop_of_ice_cream", "scoop_of_ice_cream", "dodndj", bounding_box=[0.076, 0.077, 0.065]),
get_obj_cfg("food_processor", "food_processor", "gamkbo"),
get_obj_cfg("electric_mixer", "electric_mixer", "qornxa"),
get_obj_cfg("another_raw_egg", "raw_egg", "ydgivr"),
get_obj_cfg("chicken", "chicken", "nppsmz", scale=np.ones(3) * 0.7),
get_obj_cfg("tablespoon", "tablespoon", "huudhe"),
get_obj_cfg("swiss_cheese", "swiss_cheese", "hwxeto"),
get_obj_cfg("apple", "apple", "agveuv"),
get_obj_cfg("table_knife", "table_knife", "jxdfyy"),
get_obj_cfg("half_apple", "half_apple", "sguztn"),
get_obj_cfg("washer", "washer", "dobgmu"),
get_obj_cfg("carpet_sweeper", "carpet_sweeper", "xboreo"),
],
"robots": [
{
"type": "Fetch",
"obs_modalities": ["seg_semantic", "seg_instance", "seg_instance_id"],
"position": [150, 150, 100],
"orientation": [0, 0, 0, 1],
}
]
}
# Make sure sim is stopped
if og.sim is not None:
og.sim.stop()
# Make sure GPU dynamics are enabled (GPU dynamics needed for cloth) and no flatcache
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_FLATCACHE = False
# Create the environment
env = og.Environment(configs=cfg)
# Additional processing for the tests to pass more deterministically
og.sim.stop()
bounding_box_object_names = ["bagel_dough", "raw_egg"]
for name in bounding_box_object_names:
obj = og.sim.scene.object_registry("name", name)
for collision_mesh in obj.root_link.collision_meshes.values():
collision_mesh.set_collision_approximation("boundingCube")
og.sim.play()
def get_random_pose(pos_low=10.0, pos_hi=20.0):
pos = np.random.uniform(pos_low, pos_hi, 3)
orn = T.euler2quat(np.random.uniform(-np.pi, np.pi, 3))
return pos, orn
def place_objA_on_objB_bbox(objA, objB, x_offset=0.0, y_offset=0.0, z_offset=0.001):
objA.keep_still()
objB.keep_still()
# Reset pose if cloth object
if objA.prim_type == PrimType.CLOTH:
objA.root_link.reset()
objA_aabb_center, objA_aabb_extent = objA.aabb_center, objA.aabb_extent
objB_aabb_center, objB_aabb_extent = objB.aabb_center, objB.aabb_extent
objA_aabb_offset = objA.get_position() - objA_aabb_center
target_objA_aabb_pos = objB_aabb_center + np.array([0, 0, (objB_aabb_extent[2] + objA_aabb_extent[2]) / 2.0]) + \
np.array([x_offset, y_offset, z_offset])
objA.set_position(target_objA_aabb_pos + objA_aabb_offset)
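# Editor's note: worked example of the height computation above: with objB AABB
# extent_z = 0.8 and objA AABB extent_z = 0.2, objA's AABB center is placed
# (0.8 + 0.2) / 2 = 0.5 above objB's AABB center, plus z_offset, so the two boxes
# just touch before settling.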
def place_obj_on_floor_plane(obj, x_offset=0.0, y_offset=0.0, z_offset=0.01):
obj.keep_still()
# Reset pose if cloth object
if obj.prim_type == PrimType.CLOTH:
obj.root_link.reset()
obj_aabb_center, obj_aabb_extent = obj.aabb_center, obj.aabb_extent
obj_aabb_offset = obj.get_position() - obj_aabb_center
target_obj_aabb_pos = np.array([0, 0, obj_aabb_extent[2] / 2.0]) + np.array([x_offset, y_offset, z_offset])
obj.set_position(target_obj_aabb_pos + obj_aabb_offset)
def remove_all_systems():
for system in ParticleRemover.supported_active_systems.values():
system.remove_all_particles()
og.sim.step() | 8,329 | Python | 46.329545 | 252 | 0.581102 |
StanfordVL/OmniGibson/tests/test_robot_teleoperation.py | import omnigibson as og
import numpy as np
from omnigibson.macros import gm
from telemoma.human_interface.teleop_core import TeleopAction
from omnigibson.utils.transform_utils import quat2euler
import pytest
@pytest.mark.skip(reason="test hangs on CI")
def test_teleop():
cfg = {
"env": {"action_timestep": 1 / 60., "physics_timestep": 1 / 120.},
"scene": {"type": "Scene"},
"robots": [
{
"type": "Fetch",
"action_normalize": False,
"controller_config": {
"arm_0": {
"name": "InverseKinematicsController",
"command_input_limits": None,
},
}
}
],
}
# Make sure sim is stopped
if og.sim is not None:
og.sim.stop()
    # GPU dynamics and flatcache are not needed for this teleoperation test, so disable both
gm.USE_GPU_DYNAMICS = False
gm.ENABLE_FLATCACHE = False
# Create the environment
env = og.Environment(configs=cfg)
robot = env.robots[0]
env.reset()
teleop_action = TeleopAction()
start_base_pose = robot.get_position_orientation()
start_eef_pose = robot.links[robot.eef_link_names[robot.default_arm]].get_position_orientation()
# test moving robot arm
teleop_action.right = np.concatenate(([0.01], np.zeros(6)))
for _ in range(50):
action = robot.teleop_data_to_action(teleop_action)
env.step(action)
cur_eef_pose = robot.links[robot.eef_link_names[robot.default_arm]].get_position_orientation()
assert cur_eef_pose[0][0] - start_eef_pose[0][0] > 0.02, "Robot arm not moving forward"
# test moving robot base
teleop_action.right = np.zeros(7)
teleop_action.base = np.array([0.1, 0, 0.1])
for _ in range(50):
action = robot.teleop_data_to_action(teleop_action)
env.step(action)
cur_base_pose = robot.get_position_orientation()
assert cur_base_pose[0][0] - start_base_pose[0][0] > 0.02, "robot base not moving forward"
assert quat2euler(cur_base_pose[1])[2] - quat2euler(start_base_pose[1])[2] > 0.02, "robot base not rotating counter-clockwise"
# Clear the sim
og.sim.clear()
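# Editor's note (inferred from the assertions above, not a documented contract):
# teleop_action.right is a 7-dim end-effector command whose first entry drives the
# arm along +x, and teleop_action.base reads as a [forward, lateral, yaw]-style
# input, since [0.1, 0, 0.1] is expected to move the base forward and rotate it
# counter-clockwise.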
| 2,248 | Python | 34.698412 | 130 | 0.606317 |
StanfordVL/OmniGibson/tests/test_robot_states.py | import numpy as np
import omnigibson as og
from omnigibson.macros import gm
import omnigibson.lazy as lazy
from omnigibson.sensors import VisionSensor
from omnigibson.object_states import ObjectsInFOVOfRobot
from omnigibson.utils.transform_utils import pose2mat, mat2pose, relative_pose_transform
from omnigibson.utils.usd_utils import PoseAPI
from omnigibson.utils.constants import semantic_class_name_to_id
def setup_environment(flatcache=True):
"""
Sets up the environment with or without flatcache based on the flatcache parameter.
"""
# Ensure any existing simulation is stopped
if og.sim is not None:
og.sim.stop()
# Set global flags
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_FLATCACHE = flatcache # Set based on function parameter
# Define the environment configuration
config = {
"scene": {
"type": "Scene",
},
"robots": [
{
"type": "Fetch",
"obs_modalities": 'all',
"position": [150, 150, 100],
"orientation": [0, 0, 0, 1],
"controller_config": {
"arm_0": {
"name": "NullJointController",
"motor_type": "position",
},
},
}
]
}
env = og.Environment(configs=config)
return env
def camera_pose_test(flatcache):
env = setup_environment(flatcache)
robot = env.robots[0]
env.reset()
sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)]
assert len(sensors) > 0
vision_sensor = sensors[0]
# Get vision sensor world pose via directly calling get_position_orientation
robot_world_pos, robot_world_ori = robot.get_position_orientation()
sensor_world_pos, sensor_world_ori = vision_sensor.get_position_orientation()
robot_to_sensor_mat = pose2mat(relative_pose_transform(sensor_world_pos, sensor_world_ori, robot_world_pos, robot_world_ori))
sensor_world_pos_gt = np.array([150.16513062, 150., 101.38952637])
sensor_world_ori_gt = np.array([-0.29444987, 0.29444981, 0.64288363, -0.64288352])
assert np.allclose(sensor_world_pos, sensor_world_pos_gt, atol=1e-3)
assert np.allclose(sensor_world_ori, sensor_world_ori_gt, atol=1e-3)
# Now, we want to move the robot and check if the sensor pose has been updated
old_camera_local_pose = vision_sensor.get_local_pose()
robot.set_position_orientation(position=[100, 100, 100])
new_camera_local_pose = vision_sensor.get_local_pose()
new_camera_world_pose = vision_sensor.get_position_orientation()
robot_pose_mat = pose2mat(robot.get_position_orientation())
expected_camera_world_pos, expected_camera_world_ori = mat2pose(robot_pose_mat @ robot_to_sensor_mat)
assert np.allclose(old_camera_local_pose[0], new_camera_local_pose[0], atol=1e-3)
assert np.allclose(new_camera_world_pose[0], expected_camera_world_pos, atol=1e-3)
assert np.allclose(new_camera_world_pose[1], expected_camera_world_ori, atol=1e-3)
# Then, we want to move the local pose of the camera and check
# 1) if the world pose is updated 2) if the robot stays in the same position
old_camera_local_pose = vision_sensor.get_local_pose()
vision_sensor.set_local_pose(position=[10, 10, 10], orientation=[0, 0, 0, 1])
new_camera_world_pose = vision_sensor.get_position_orientation()
camera_parent_prim = lazy.omni.isaac.core.utils.prims.get_prim_parent(vision_sensor.prim)
camera_parent_path = str(camera_parent_prim.GetPath())
camera_parent_world_transform = PoseAPI.get_world_pose_with_scale(camera_parent_path)
expected_new_camera_world_pos, expected_new_camera_world_ori = mat2pose(camera_parent_world_transform @ pose2mat([[10, 10, 10], [0, 0, 0, 1]]))
assert np.allclose(new_camera_world_pose[0], expected_new_camera_world_pos, atol=1e-3)
assert np.allclose(new_camera_world_pose[1], expected_new_camera_world_ori, atol=1e-3)
assert np.allclose(robot.get_position(), [100, 100, 100], atol=1e-3)
# Finally, we want to move the world pose of the camera and check
# 1) if the local pose is updated 2) if the robot stays in the same position
robot.set_position_orientation(position=[150, 150, 100])
old_camera_local_pose = vision_sensor.get_local_pose()
vision_sensor.set_position_orientation([150, 150, 101.36912537], [-0.29444987, 0.29444981, 0.64288363, -0.64288352])
new_camera_local_pose = vision_sensor.get_local_pose()
assert not np.allclose(old_camera_local_pose[0], new_camera_local_pose[0], atol=1e-3)
assert not np.allclose(old_camera_local_pose[1], new_camera_local_pose[1], atol=1e-3)
assert np.allclose(robot.get_position(), [150, 150, 100], atol=1e-3)
    # One more test: set the camera's parent scale and check that the world pose is updated accordingly
camera_parent_prim.GetAttribute('xformOp:scale').Set(lazy.pxr.Gf.Vec3d([2.0, 2.0, 2.0]))
camera_parent_world_transform = PoseAPI.get_world_pose_with_scale(camera_parent_path)
camera_local_pose = vision_sensor.get_local_pose()
expected_new_camera_world_pos, _ = mat2pose(camera_parent_world_transform @ pose2mat(camera_local_pose))
new_camera_world_pose = vision_sensor.get_position_orientation()
assert np.allclose(new_camera_world_pose[0], expected_new_camera_world_pos, atol=1e-3)
og.sim.clear()
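# Editor's note: every world-pose check in camera_pose_test reduces to composing a
# parent pose with a local pose; a minimal sketch using the pose2mat/mat2pose helpers
# imported above (hypothetical helper, mirroring how the expected values are built):
def _compose_world_pose(parent_pose, local_pose):
    # world = parent @ local, each pose given as (position, xyzw quaternion)
    return mat2pose(pose2mat(parent_pose) @ pose2mat(local_pose))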
def test_camera_pose_flatcache_on():
camera_pose_test(True)
def test_camera_pose_flatcache_off():
camera_pose_test(False)
def test_camera_semantic_segmentation():
env = setup_environment(False)
robot = env.robots[0]
env.reset()
sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)]
assert len(sensors) > 0
vision_sensor = sensors[0]
env.reset()
all_observation, all_info = vision_sensor.get_obs()
seg_semantic = all_observation['seg_semantic']
seg_semantic_info = all_info['seg_semantic']
agent_label = semantic_class_name_to_id()['agent']
background_label = semantic_class_name_to_id()['background']
assert np.all(np.isin(seg_semantic, [agent_label, background_label]))
assert set(seg_semantic_info.keys()) == {agent_label, background_label}
og.sim.clear()
def test_object_in_FOV_of_robot():
env = setup_environment(False)
robot = env.robots[0]
env.reset()
assert robot.states[ObjectsInFOVOfRobot].get_value() == [robot]
sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)]
assert len(sensors) > 0
vision_sensor = sensors[0]
vision_sensor.set_position_orientation(position=[100, 150, 100])
og.sim.step()
og.sim.step()
assert robot.states[ObjectsInFOVOfRobot].get_value() == []
og.sim.clear()
| 6,867 | Python | 43.888889 | 147 | 0.678462 |
StanfordVL/OmniGibson/tests/conftest.py | import omnigibson as og
def pytest_unconfigure(config):
og.shutdown()
| 75 | Python | 14.199997 | 31 | 0.746667 |
StanfordVL/OmniGibson/tests/test_sensors.py | from omnigibson.systems import get_system, is_physical_particle_system, is_visual_particle_system
import omnigibson.utils.transform_utils as T
import omnigibson as og
from omnigibson.sensors import VisionSensor
from utils import og_test, place_obj_on_floor_plane, SYSTEM_EXAMPLES
import pytest
import numpy as np
@og_test
def test_seg():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
robot = og.sim.scene.robots[0]
place_obj_on_floor_plane(breakfast_table)
dishtowel.set_position_orientation([-0.4, 0.0, 0.55], [0, 0, 0, 1])
robot.set_position_orientation([0, 0.8, 0.0], T.euler2quat([0, 0, -np.pi/2]))
robot.reset()
systems = [get_system(system_name) for system_name, system_class in SYSTEM_EXAMPLES.items()]
for i, system in enumerate(systems):
# Sample two particles for each system
pos = np.array([-0.2 + i * 0.2, 0, 0.55])
if is_physical_particle_system(system_name=system.name):
system.generate_particles(positions=[pos, pos + np.array([0.1, 0.0, 0.0])])
else:
if system.get_group_name(breakfast_table) not in system.groups:
system.create_attachment_group(breakfast_table)
system.generate_group_particles(
group=system.get_group_name(breakfast_table),
positions=np.array([pos, pos + np.array([0.1, 0.0, 0.0])]),
link_prim_paths=[breakfast_table.root_link.prim_path],
)
og.sim.step()
og.sim.render()
sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)]
assert len(sensors) > 0
vision_sensor = sensors[0]
all_observation, all_info = vision_sensor.get_obs()
seg_semantic = all_observation['seg_semantic']
seg_semantic_info = all_info['seg_semantic']
assert set(np.unique(seg_semantic)) == set(seg_semantic_info.keys())
expected_dict = {
335706086: 'diced__apple',
825831922: 'floors',
884110082: 'stain',
1949122937: 'breakfast_table',
2814990211: 'agent',
3051938632: 'white_rice',
3330677804: 'water',
4207839377: 'dishtowel'
}
assert set(seg_semantic_info.values()) == set(expected_dict.values())
seg_instance = all_observation['seg_instance']
seg_instance_info = all_info['seg_instance']
assert set(np.unique(seg_instance)) == set(seg_instance_info.keys())
expected_dict = {
2: 'robot0',
3: 'groundPlane',
4: 'dishtowel',
5: 'breakfast_table',
6: 'stain',
7: 'water',
8: 'white_rice',
9: 'diced__apple'
}
assert set(seg_instance_info.values()) == set(expected_dict.values())
seg_instance_id = all_observation['seg_instance_id']
seg_instance_id_info = all_info['seg_instance_id']
assert set(np.unique(seg_instance_id)) == set(seg_instance_id_info.keys())
expected_dict = {
3: '/World/robot0/gripper_link/visuals',
4: '/World/robot0/wrist_roll_link/visuals',
5: '/World/robot0/forearm_roll_link/visuals',
6: '/World/robot0/wrist_flex_link/visuals',
8: '/World/groundPlane/geom',
9: '/World/dishtowel/base_link_cloth',
10: '/World/robot0/r_gripper_finger_link/visuals',
11: '/World/robot0/l_gripper_finger_link/visuals',
12: '/World/breakfast_table/base_link/visuals',
13: 'stain',
14: 'white_rice',
15: 'diced__apple',
16: 'water'
}
# Temporarily disable this test because og_assets are outdated on CI machines
# assert set(seg_instance_id_info.values()) == set(expected_dict.values())
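# Editor's note: all three segmentation modalities above follow one contract: the
# observation is an integer label image and the paired info dict maps exactly the
# ids present in that image to readable names, hence the recurring check
# set(np.unique(obs)) == set(info.keys()).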
def test_clear_sim():
og.sim.clear()
| 3,768 | Python | 37.85567 | 97 | 0.628185 |
StanfordVL/OmniGibson/tests/test_primitives.py | import numpy as np
import pytest
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives, StarterSemanticActionPrimitiveSet
import omnigibson.utils.transform_utils as T
from omnigibson.objects.dataset_object import DatasetObject
def execute_controller(ctrl_gen, env):
for action in ctrl_gen:
env.step(action)
def primitive_tester(load_object_categories, objects, primitives, primitives_args):
cfg = {
"scene": {
"type": "InteractiveTraversableScene",
"scene_model": "Rs_int",
"load_object_categories": load_object_categories,
},
"robots": [
{
"type": "Fetch",
"obs_modalities": ["scan", "rgb", "depth"],
"scale": 1.0,
"self_collisions": True,
"action_normalize": False,
"action_type": "continuous",
"grasping_mode": "sticky",
"rigid_trunk": False,
"default_arm_pose": "diagonal30",
"default_trunk_offset": 0.365,
"controller_config": {
"base": {
"name": "DifferentialDriveController",
},
"arm_0": {
"name": "InverseKinematicsController",
"command_input_limits": "default",
"command_output_limits": [[-0.2, -0.2, -0.2, -0.5, -0.5, -0.5], [0.2, 0.2, 0.2, 0.5, 0.5, 0.5]],
"mode": "pose_absolute_ori",
"kp": 300.0
},
"gripper_0": {
"name": "JointController",
"motor_type": "position",
"command_input_limits": [-1, 1],
"command_output_limits": None,
"use_delta_commands": True
},
"camera": {
"name": "JointController",
"use_delta_commands": False
}
}
}
],
}
# Make sure sim is stopped
if og.sim is not None:
og.sim.stop()
    # Enable object states; GPU dynamics and flatcache are not needed for these tests
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = False
gm.ENABLE_FLATCACHE = False
# Create the environment
env = og.Environment(configs=cfg)
robot = env.robots[0]
env.reset()
for obj in objects:
og.sim.import_object(obj['object'])
obj['object'].set_position_orientation(obj['position'], obj['orientation'])
og.sim.step()
controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False)
try:
for primitive, args in zip(primitives, primitives_args):
try:
execute_controller(controller.apply_ref(primitive, *args), env)
            except Exception:
                # Any failure while executing a primitive means the test case fails
                return False
finally:
# Clear the sim
og.sim.clear()
return True
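# Editor's note: the tests below all feed (categories, objects, primitives, args)
# into primitive_tester; sketch of adding a new case (hypothetical, mirroring
# test_navigate below):
#
# def test_my_navigate():
#     obj = {"object": DatasetObject(name="cologne", category="bottle_of_cologne",
#                                    model="lyipur"),
#            "position": [-0.3, -0.8, 0.5], "orientation": [0, 0, 0, 1]}
#     assert primitive_tester(["floors", "ceilings", "walls"], [obj],
#                             [StarterSemanticActionPrimitiveSet.NAVIGATE_TO],
#                             [(obj["object"],)])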
def test_navigate():
categories = ["floors", "ceilings", "walls"]
objects = []
obj_1 = {
"object": DatasetObject(
name="cologne",
category="bottle_of_cologne",
model="lyipur"
),
"position": [-0.3, -0.8, 0.5],
"orientation": [0, 0, 0, 1]
}
objects.append(obj_1)
primitives = [StarterSemanticActionPrimitiveSet.NAVIGATE_TO]
primitives_args = [(obj_1['object'],)]
assert primitive_tester(categories, objects, primitives, primitives_args)
def test_grasp():
categories = ["floors", "ceilings", "walls", "coffee_table"]
objects = []
obj_1 = {
"object": DatasetObject(
name="cologne",
category="bottle_of_cologne",
model="lyipur"
),
"position": [-0.3, -0.8, 0.5],
"orientation": [0, 0, 0, 1]
}
objects.append(obj_1)
primitives = [StarterSemanticActionPrimitiveSet.GRASP]
primitives_args = [(obj_1['object'],)]
assert primitive_tester(categories, objects, primitives, primitives_args)
def test_place():
categories = ["floors", "ceilings", "walls", "coffee_table"]
objects = []
obj_1 = {
"object": DatasetObject(
name="table",
category="breakfast_table",
model="rjgmmy",
scale=[0.3, 0.3, 0.3]
),
"position": [-0.7, 0.5, 0.2],
"orientation": [0, 0, 0, 1]
}
obj_2 = {
"object": DatasetObject(
name="cologne",
category="bottle_of_cologne",
model="lyipur"
),
"position": [-0.3, -0.8, 0.5],
"orientation": [0, 0, 0, 1]
}
objects.append(obj_1)
objects.append(obj_2)
primitives = [StarterSemanticActionPrimitiveSet.GRASP, StarterSemanticActionPrimitiveSet.PLACE_ON_TOP]
primitives_args = [(obj_2['object'],), (obj_1['object'],)]
assert primitive_tester(categories, objects, primitives, primitives_args)
@pytest.mark.skip(reason="primitives are broken")
def test_open_prismatic():
categories = ["floors"]
objects = []
obj_1 = {
"object": DatasetObject(
name="bottom_cabinet",
category="bottom_cabinet",
model="bamfsz",
scale=[0.7, 0.7, 0.7]
),
"position": [-1.2, -0.4, 0.5],
"orientation": [0, 0, 0, 1]
}
objects.append(obj_1)
primitives = [StarterSemanticActionPrimitiveSet.OPEN]
primitives_args = [(obj_1['object'],)]
assert primitive_tester(categories, objects, primitives, primitives_args)
@pytest.mark.skip(reason="primitives are broken")
def test_open_revolute():
categories = ["floors"]
objects = []
obj_1 = {
"object": DatasetObject(
name="fridge",
category="fridge",
model="dszchb",
scale=[0.7, 0.7, 0.7]
),
"position": [-1.2, -0.4, 0.5],
"orientation": [0, 0, 0, 1]
}
objects.append(obj_1)
primitives = [StarterSemanticActionPrimitiveSet.OPEN]
primitives_args = [(obj_1['object'],)]
assert primitive_tester(categories, objects, primitives, primitives_args) | 6,423 | Python | 30.336585 | 142 | 0.528414 |
StanfordVL/OmniGibson/tests/test_object_states.py | from omnigibson.macros import macros as m
from omnigibson.object_states import *
from omnigibson.systems import get_system, is_physical_particle_system, is_visual_particle_system, VisualParticleSystem
from omnigibson.utils.constants import PrimType
from omnigibson.utils.physx_utils import apply_force_at_pos, apply_torque
import omnigibson.utils.transform_utils as T
import omnigibson as og
from utils import og_test, get_random_pose, place_objA_on_objB_bbox, place_obj_on_floor_plane, SYSTEM_EXAMPLES
import pytest
import numpy as np
@og_test
def test_on_top():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
bowl = og.sim.scene.object_registry("name", "bowl")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
place_obj_on_floor_plane(breakfast_table)
for i, obj in enumerate((bowl, dishtowel)):
place_objA_on_objB_bbox(obj, breakfast_table)
for _ in range(5):
og.sim.step()
assert obj.states[OnTop].get_value(breakfast_table)
obj.set_position(np.ones(3) * 10 * (i + 1))
og.sim.step()
assert not obj.states[OnTop].get_value(breakfast_table)
assert bowl.states[OnTop].set_value(breakfast_table, True)
assert dishtowel.states[OnTop].set_value(breakfast_table, True)
with pytest.raises(NotImplementedError):
bowl.states[OnTop].set_value(breakfast_table, False)
@og_test
def test_inside():
bottom_cabinet = og.sim.scene.object_registry("name", "bottom_cabinet")
bowl = og.sim.scene.object_registry("name", "bowl")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
place_obj_on_floor_plane(bottom_cabinet)
bowl.set_position([0., 0., 0.08])
dishtowel.set_position([0, 0., 0.5])
for _ in range(5):
og.sim.step()
assert bowl.states[Inside].get_value(bottom_cabinet)
assert dishtowel.states[Inside].get_value(bottom_cabinet)
bowl.set_position([10., 10., 1.])
dishtowel.set_position([20., 20., 1.])
for _ in range(5):
og.sim.step()
assert not bowl.states[Inside].get_value(bottom_cabinet)
assert not dishtowel.states[Inside].get_value(bottom_cabinet)
assert bowl.states[Inside].set_value(bottom_cabinet, True)
assert dishtowel.states[Inside].set_value(bottom_cabinet, True)
with pytest.raises(NotImplementedError):
        bowl.states[Inside].set_value(bottom_cabinet, False)
@og_test
def test_under():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
bowl = og.sim.scene.object_registry("name", "bowl")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
place_obj_on_floor_plane(breakfast_table)
for i, obj in enumerate((bowl, dishtowel)):
place_obj_on_floor_plane(obj)
for _ in range(5):
og.sim.step()
assert obj.states[Under].get_value(breakfast_table)
obj.set_position(np.ones(3) * 10 * (i + 1))
og.sim.step()
assert not obj.states[Under].get_value(breakfast_table)
assert bowl.states[Under].set_value(breakfast_table, True)
assert dishtowel.states[Under].set_value(breakfast_table, True)
with pytest.raises(NotImplementedError):
bowl.states[Under].set_value(breakfast_table, False)
@og_test
def test_touching():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
bowl = og.sim.scene.object_registry("name", "bowl")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
place_obj_on_floor_plane(breakfast_table)
for i, obj in enumerate((bowl, dishtowel)):
place_objA_on_objB_bbox(obj, breakfast_table)
for _ in range(5):
og.sim.step()
assert obj.states[Touching].get_value(breakfast_table)
assert breakfast_table.states[Touching].get_value(obj)
obj.set_position(np.ones(3) * 10 * (i + 1))
og.sim.step()
assert not obj.states[Touching].get_value(breakfast_table)
assert not breakfast_table.states[Touching].get_value(obj)
with pytest.raises(NotImplementedError):
bowl.states[Touching].set_value(breakfast_table, None)
@og_test
def test_contact_bodies():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
bowl = og.sim.scene.object_registry("name", "bowl")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
place_obj_on_floor_plane(breakfast_table)
for i, obj in enumerate((bowl, dishtowel)):
place_objA_on_objB_bbox(obj, breakfast_table)
for _ in range(5):
og.sim.step()
# TODO: rigid body's ContactBodies should include cloth
if obj.prim_type != PrimType.CLOTH:
assert obj.root_link in breakfast_table.states[ContactBodies].get_value()
assert breakfast_table.root_link in obj.states[ContactBodies].get_value()
obj.set_position(np.ones(3) * 10 * (i + 1))
og.sim.step()
assert obj.root_link not in breakfast_table.states[ContactBodies].get_value()
assert breakfast_table.root_link not in obj.states[ContactBodies].get_value()
with pytest.raises(NotImplementedError):
bowl.states[ContactBodies].set_value(None)
@og_test
def test_next_to():
bottom_cabinet = og.sim.scene.object_registry("name", "bottom_cabinet")
bowl = og.sim.scene.object_registry("name", "bowl")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
place_obj_on_floor_plane(bottom_cabinet)
for i, (axis, obj) in enumerate(zip(("x", "y"), (bowl, dishtowel))):
place_obj_on_floor_plane(obj, **{f"{axis}_offset": 0.3})
for _ in range(5):
og.sim.step()
assert obj.states[NextTo].get_value(bottom_cabinet)
assert bottom_cabinet.states[NextTo].get_value(obj)
obj.set_position(np.ones(3) * 10 * (i + 1))
og.sim.step()
assert not obj.states[NextTo].get_value(bottom_cabinet)
assert not bottom_cabinet.states[NextTo].get_value(obj)
with pytest.raises(NotImplementedError):
bowl.states[NextTo].set_value(bottom_cabinet, None)
@og_test
def test_overlaid():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
carpet = og.sim.scene.object_registry("name", "carpet")
place_obj_on_floor_plane(breakfast_table)
place_objA_on_objB_bbox(carpet, breakfast_table)
for _ in range(5):
og.sim.step()
assert carpet.states[Overlaid].get_value(breakfast_table)
carpet.set_position(np.ones(3) * 20.0)
og.sim.step()
assert not carpet.states[Overlaid].get_value(breakfast_table)
assert carpet.states[Overlaid].set_value(breakfast_table, True)
with pytest.raises(NotImplementedError):
carpet.states[Overlaid].set_value(breakfast_table, False)
@og_test
def test_pose():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
pos1, orn1 = get_random_pose()
breakfast_table.set_position_orientation(pos1, orn1)
pos2, orn2 = get_random_pose()
dishtowel.set_position_orientation(pos2, orn2)
assert np.allclose(breakfast_table.states[Pose].get_value()[0], pos1)
assert np.allclose(breakfast_table.states[Pose].get_value()[1], orn1) or np.allclose(breakfast_table.states[Pose].get_value()[1], -orn1)
assert np.allclose(dishtowel.states[Pose].get_value()[0], pos2)
assert np.allclose(dishtowel.states[Pose].get_value()[1], orn2) or np.allclose(dishtowel.states[Pose].get_value()[1], -orn2)
with pytest.raises(NotImplementedError):
breakfast_table.states[Pose].set_value(None)
@og_test
def test_aabb():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
pos1, orn1 = get_random_pose()
breakfast_table.set_position_orientation(pos1, orn1)
pos2, orn2 = get_random_pose()
dishtowel.set_position_orientation(pos2, orn2)
# Need to take one sim step
og.sim.step()
assert np.allclose(breakfast_table.states[AABB].get_value(), breakfast_table.aabb)
assert np.all((breakfast_table.states[AABB].get_value()[0] < pos1) & (pos1 < breakfast_table.states[AABB].get_value()[1]))
pp = dishtowel.root_link.compute_particle_positions()
offset = dishtowel.root_link.cloth_system.particle_contact_offset
assert np.allclose(dishtowel.states[AABB].get_value(), (pp.min(axis=0) - offset, pp.max(axis=0) + offset))
assert np.all((dishtowel.states[AABB].get_value()[0] < pos2) & (pos2 < dishtowel.states[AABB].get_value()[1]))
with pytest.raises(NotImplementedError):
breakfast_table.states[AABB].set_value(None)
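# Editor's note: for cloth, the AABB asserted above comes from particle positions
# padded by the system's particle contact offset; a minimal sketch of that bound
# (mirrors the assertion in test_aabb, hypothetical helper):
def _cloth_aabb(particle_positions, contact_offset):
    lo = particle_positions.min(axis=0) - contact_offset
    hi = particle_positions.max(axis=0) + contact_offset
    return lo, hi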
@og_test
def test_adjacency():
bottom_cabinet = og.sim.scene.object_registry("name", "bottom_cabinet")
bowl = og.sim.scene.object_registry("name", "bowl")
dishtowel = og.sim.scene.object_registry("name", "dishtowel")
place_obj_on_floor_plane(bottom_cabinet)
for i, (axis, obj) in enumerate(zip(("x", "y"), (bowl, dishtowel))):
place_obj_on_floor_plane(obj, **{f"{axis}_offset": 0.4})
og.sim.step()
assert bottom_cabinet in set.union(
*(axis.positive_neighbors | axis.negative_neighbors
for coordinate in obj.states[HorizontalAdjacency].get_value() for axis in coordinate)
)
bowl.set_position([0., 0., 1.])
dishtowel.set_position([0., 0., 2.0])
# Need to take one sim step
og.sim.step()
assert bowl in bottom_cabinet.states[VerticalAdjacency].get_value().positive_neighbors
# TODO: adjacency relies on raytest, which doesn't take particle systems into account
# assert dishtowel in bottom_cabinet.states[VerticalAdjacency].get_value().positive_neighbors
assert bottom_cabinet in bowl.states[VerticalAdjacency].get_value().negative_neighbors
# TODO: adjacency relies on raytest, which doesn't take particle systems into account
# assert dishtowel in bowl.states[VerticalAdjacency].get_value().positive_neighbors
assert bottom_cabinet in dishtowel.states[VerticalAdjacency].get_value().negative_neighbors
assert bowl in dishtowel.states[VerticalAdjacency].get_value().negative_neighbors
with pytest.raises(NotImplementedError):
bottom_cabinet.states[HorizontalAdjacency].set_value(None)
bottom_cabinet.states[VerticalAdjacency].set_value(None)
@og_test
def test_temperature():
microwave = og.sim.scene.object_registry("name", "microwave")
stove = og.sim.scene.object_registry("name", "stove")
fridge = og.sim.scene.object_registry("name", "fridge")
plywood = og.sim.scene.object_registry("name", "plywood")
bagel = og.sim.scene.object_registry("name", "bagel")
dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel")
place_obj_on_floor_plane(microwave)
place_obj_on_floor_plane(stove, x_offset=1.0)
place_obj_on_floor_plane(fridge, x_offset=2.0)
place_obj_on_floor_plane(plywood, x_offset=3.0)
# Set the objects to be far away
place_obj_on_floor_plane(bagel, x_offset=-0.5)
place_obj_on_floor_plane(dishtowel, x_offset=-1.0)
for _ in range(5):
og.sim.step()
# Not affected by any heat source
assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
# Open the microwave
microwave.joints["j_link_0"].set_pos(np.pi / 2)
# Set the objects to be inside the microwave
bagel.set_position_orientation([0, 0, 0.11], [0, 0, 0, 1])
dishtowel.set_position_orientation([-0.15, 0, 0.11], [0, 0, 0, 1])
for _ in range(5):
og.sim.step()
# Not affected by any heat source (the microwave is NOT toggled on)
assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
microwave.states[ToggledOn].set_value(True)
for _ in range(5):
og.sim.step()
# Not affected by any heat source (the microwave is open)
assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
microwave.joints["j_link_0"].set_pos(0.)
for _ in range(5):
og.sim.step()
# Affected by the microwave
bagel_new_temp = bagel.states[Temperature].get_value()
dishtowel_new_temp = dishtowel.states[Temperature].get_value()
assert bagel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE
# Set the objects to be far away
place_obj_on_floor_plane(bagel, x_offset=-0.5)
place_obj_on_floor_plane(dishtowel, x_offset=-1.0)
for _ in range(5):
og.sim.step()
# Not affected by any heat source (should cool down by itself towards the default temp)
assert bagel.states[Temperature].get_value() < bagel_new_temp
assert dishtowel.states[Temperature].get_value() < dishtowel_new_temp
# Setter should work
assert bagel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE)
assert dishtowel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE)
assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
# Set the objects to be on top of the stove
bagel.set_position_orientation([0.71, 0.11, 0.88], [0, 0, 0, 1])
dishtowel.set_position_orientation([0.84, 0.11, 0.88], [0, 0, 0, 1])
for _ in range(5):
og.sim.step()
# Not affected by any heat source (the stove is off)
assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
stove.states[ToggledOn].set_value(True)
for _ in range(5):
og.sim.step()
# Affected by the stove
assert bagel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE
# Reset
assert bagel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE)
assert dishtowel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE)
# Set the objects to be inside the fridge
bagel.set_position_orientation([1.9, 0, 0.89], [0, 0, 0, 1])
dishtowel.set_position_orientation([2.1, 0, 0.89], [0, 0, 0, 1])
for _ in range(5):
og.sim.step()
# Affected by the fridge
assert bagel.states[Temperature].get_value() < m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() < m.object_states.temperature.DEFAULT_TEMPERATURE
# Reset temp
assert bagel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE)
assert dishtowel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE)
# Set the objects to be near the plywood
place_obj_on_floor_plane(bagel, x_offset=2.9)
place_obj_on_floor_plane(dishtowel, x_offset=3.1)
for _ in range(5):
og.sim.step()
# Not affected by any heat source (the plywood is NOT onfire)
assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
plywood.states[OnFire].set_value(True)
for _ in range(5):
og.sim.step()
assert bagel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE
@og_test
def test_max_temperature():
bagel = og.sim.scene.object_registry("name", "bagel")
dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel")
assert bagel.states[MaxTemperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[MaxTemperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE
assert bagel.states[MaxTemperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE - 1)
assert dishtowel.states[MaxTemperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE - 1)
assert bagel.states[MaxTemperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE - 1
assert dishtowel.states[MaxTemperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE - 1
bagel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE + 1)
dishtowel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE + 1)
og.sim.step()
assert bagel.states[MaxTemperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE
assert dishtowel.states[MaxTemperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE
@og_test
def test_heat_source_or_sink():
microwave = og.sim.scene.object_registry("name", "microwave")
stove = og.sim.scene.object_registry("name", "stove")
fridge = og.sim.scene.object_registry("name", "fridge")
assert microwave.states[HeatSourceOrSink].requires_inside
assert microwave.states[HeatSourceOrSink].requires_closed
assert microwave.states[HeatSourceOrSink].requires_toggled_on
microwave.joints["j_link_0"].set_pos(np.pi / 2)
microwave.states[ToggledOn].set_value(False)
og.sim.step()
assert not microwave.states[HeatSourceOrSink].get_value()
microwave.joints["j_link_0"].set_pos(0.0)
og.sim.step()
assert not microwave.states[HeatSourceOrSink].get_value()
microwave.states[ToggledOn].set_value(True)
og.sim.step()
assert microwave.states[HeatSourceOrSink].get_value()
assert fridge.states[HeatSourceOrSink].requires_inside
assert fridge.states[HeatSourceOrSink].requires_closed
assert not fridge.states[HeatSourceOrSink].requires_toggled_on
fridge.joints["j_link_0"].set_pos(np.pi / 2)
og.sim.step()
assert not fridge.states[HeatSourceOrSink].get_value()
fridge.joints["j_link_0"].set_pos(0.0)
og.sim.step()
assert fridge.states[HeatSourceOrSink].get_value()
assert not stove.states[HeatSourceOrSink].requires_inside
assert not stove.states[HeatSourceOrSink].requires_closed
assert stove.states[HeatSourceOrSink].requires_toggled_on
stove.states[ToggledOn].set_value(False)
og.sim.step()
assert not stove.states[HeatSourceOrSink].get_value()
stove.states[ToggledOn].set_value(True)
og.sim.step()
assert stove.states[HeatSourceOrSink].get_value()
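# Editor's note: summary of the heat source/sink requirements asserted above:
#
#   object      requires_inside   requires_closed   requires_toggled_on
#   microwave   yes               yes               yes
#   fridge      yes               yes               no
#   stove       no                no                yes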
@og_test
def test_cooked():
bagel = og.sim.scene.object_registry("name", "bagel")
dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel")
assert not bagel.states[Cooked].get_value()
assert not dishtowel.states[Cooked].get_value()
bagel.states[MaxTemperature].set_value(bagel.states[Cooked].cook_temperature)
dishtowel.states[MaxTemperature].set_value(dishtowel.states[Cooked].cook_temperature)
og.sim.step()
assert bagel.states[Cooked].get_value()
assert dishtowel.states[Cooked].get_value()
assert bagel.states[Cooked].set_value(False)
assert dishtowel.states[Cooked].set_value(False)
assert not bagel.states[Cooked].get_value()
assert not dishtowel.states[Cooked].get_value()
assert bagel.states[MaxTemperature].get_value() < bagel.states[Cooked].cook_temperature
assert dishtowel.states[MaxTemperature].get_value() < dishtowel.states[Cooked].cook_temperature
assert bagel.states[Cooked].set_value(True)
assert dishtowel.states[Cooked].set_value(True)
assert bagel.states[Cooked].get_value()
assert dishtowel.states[Cooked].get_value()
assert bagel.states[MaxTemperature].get_value() >= bagel.states[Cooked].cook_temperature
assert dishtowel.states[MaxTemperature].get_value() >= dishtowel.states[Cooked].cook_temperature
@og_test
def test_burnt():
bagel = og.sim.scene.object_registry("name", "bagel")
dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel")
assert not bagel.states[Burnt].get_value()
assert not dishtowel.states[Burnt].get_value()
bagel.states[MaxTemperature].set_value(bagel.states[Burnt].burn_temperature)
dishtowel.states[MaxTemperature].set_value(dishtowel.states[Burnt].burn_temperature)
og.sim.step()
assert bagel.states[Burnt].get_value()
assert dishtowel.states[Burnt].get_value()
assert bagel.states[Burnt].set_value(False)
assert dishtowel.states[Burnt].set_value(False)
assert not bagel.states[Burnt].get_value()
assert not dishtowel.states[Burnt].get_value()
assert bagel.states[MaxTemperature].get_value() < bagel.states[Burnt].burn_temperature
assert dishtowel.states[MaxTemperature].get_value() < dishtowel.states[Burnt].burn_temperature
assert bagel.states[Burnt].set_value(True)
assert dishtowel.states[Burnt].set_value(True)
assert bagel.states[Burnt].get_value()
assert dishtowel.states[Burnt].get_value()
assert bagel.states[MaxTemperature].get_value() >= bagel.states[Burnt].burn_temperature
assert dishtowel.states[MaxTemperature].get_value() >= dishtowel.states[Burnt].burn_temperature
@og_test
def test_frozen():
bagel = og.sim.scene.object_registry("name", "bagel")
dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel")
assert not bagel.states[Frozen].get_value()
assert not dishtowel.states[Frozen].get_value()
bagel.states[Temperature].set_value(bagel.states[Frozen].freeze_temperature - 1)
dishtowel.states[Temperature].set_value(dishtowel.states[Frozen].freeze_temperature - 1)
og.sim.step()
assert bagel.states[Frozen].get_value()
assert dishtowel.states[Frozen].get_value()
assert bagel.states[Frozen].set_value(False)
assert dishtowel.states[Frozen].set_value(False)
assert not bagel.states[Frozen].get_value()
assert not dishtowel.states[Frozen].get_value()
assert bagel.states[Temperature].get_value() > bagel.states[Frozen].freeze_temperature
assert dishtowel.states[Temperature].get_value() > dishtowel.states[Frozen].freeze_temperature
assert bagel.states[Frozen].set_value(True)
assert dishtowel.states[Frozen].set_value(True)
assert bagel.states[Frozen].get_value()
assert dishtowel.states[Frozen].get_value()
assert bagel.states[Temperature].get_value() <= bagel.states[Frozen].freeze_temperature
assert dishtowel.states[Temperature].get_value() <= dishtowel.states[Frozen].freeze_temperature
@og_test
def test_heated():
bagel = og.sim.scene.object_registry("name", "bagel")
dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel")
assert not bagel.states[Heated].get_value()
assert not dishtowel.states[Heated].get_value()
bagel.states[Temperature].set_value(bagel.states[Heated].heat_temperature + 1)
dishtowel.states[Temperature].set_value(dishtowel.states[Heated].heat_temperature + 1)
og.sim.step()
assert bagel.states[Heated].get_value()
assert dishtowel.states[Heated].get_value()
assert bagel.states[Heated].set_value(False)
assert dishtowel.states[Heated].set_value(False)
assert not bagel.states[Heated].get_value()
assert not dishtowel.states[Heated].get_value()
assert bagel.states[Temperature].get_value() < bagel.states[Heated].heat_temperature
assert dishtowel.states[Temperature].get_value() < dishtowel.states[Heated].heat_temperature
assert bagel.states[Heated].set_value(True)
assert dishtowel.states[Heated].set_value(True)
assert bagel.states[Heated].get_value()
assert dishtowel.states[Heated].get_value()
assert bagel.states[Temperature].get_value() >= bagel.states[Heated].heat_temperature
assert dishtowel.states[Temperature].get_value() >= dishtowel.states[Heated].heat_temperature
@og_test
def test_on_fire():
plywood = og.sim.scene.object_registry("name", "plywood")
assert not plywood.states[OnFire].get_value()
plywood.states[Temperature].set_value(plywood.states[OnFire].ignition_temperature + 1)
og.sim.step()
assert plywood.states[OnFire].get_value()
assert plywood.states[OnFire].set_value(False)
assert not plywood.states[OnFire].get_value()
assert plywood.states[Temperature].get_value() < plywood.states[OnFire].ignition_temperature
assert plywood.states[OnFire].set_value(True)
assert plywood.states[OnFire].get_value()
assert plywood.states[Temperature].get_value() == plywood.states[OnFire].temperature
for _ in range(5):
og.sim.step()
assert plywood.states[Temperature].get_value() == plywood.states[OnFire].temperature
@og_test
def test_toggled_on():
stove = og.sim.scene.object_registry("name", "stove")
robot = og.sim.scene.object_registry("name", "robot0")
stove.set_position_orientation([1.48, 0.3, 0.443], T.euler2quat([0, 0, -np.pi / 2.0]))
robot.set_position_orientation([0.0, 0.38, 0.0], [0, 0, 0, 1])
assert not stove.states[ToggledOn].get_value()
q = robot.get_joint_positions()
jnt_idxs = {name: i for i, name in enumerate(robot.joints.keys())}
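    # Pose the arm with the shoulder panned 90 degrees so the end-effector starts away from the stove button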
q[jnt_idxs["torso_lift_joint"]] = 0.0
q[jnt_idxs["shoulder_pan_joint"]] = np.deg2rad(90.0)
q[jnt_idxs["shoulder_lift_joint"]] = np.deg2rad(9.0)
q[jnt_idxs["upperarm_roll_joint"]] = 0.0
q[jnt_idxs["elbow_flex_joint"]] = 0.0
q[jnt_idxs["forearm_roll_joint"]] = 0.0
q[jnt_idxs["wrist_flex_joint"]] = 0.0
q[jnt_idxs["wrist_roll_joint"]] = 0.0
q[jnt_idxs["l_gripper_finger_joint"]] = 0.0
q[jnt_idxs["r_gripper_finger_joint"]] = 0.0
robot.set_joint_positions(q, drive=False)
steps = m.object_states.toggle.CAN_TOGGLE_STEPS
for _ in range(steps):
og.sim.step()
# End-effector not close to the button, stays False
assert not stove.states[ToggledOn].get_value()
q[jnt_idxs["shoulder_pan_joint"]] = 0.0
robot.set_joint_positions(q, drive=False)
for _ in range(steps - 1):
og.sim.step()
# End-effector close to the button, but not enough time has passed, still False
assert not stove.states[ToggledOn].get_value()
og.sim.step()
# Enough time has passed, turns True
assert stove.states[ToggledOn].get_value()
# Setter should work
assert stove.states[ToggledOn].set_value(False)
assert not stove.states[ToggledOn].get_value()
@pytest.mark.skip(reason="skipping attachment for now")
@og_test
def test_attached_to():
shelf_back_panel = og.sim.scene.object_registry("name", "shelf_back_panel")
shelf_shelf = og.sim.scene.object_registry("name", "shelf_shelf")
shelf_baseboard = og.sim.scene.object_registry("name", "shelf_baseboard")
shelf_back_panel.set_position_orientation([0, 0, 0.01], [0, 0, 0, 1])
shelf_back_panel.keep_still()
shelf_shelf.set_position_orientation([0, 0.03, 0.17], [0, 0, 0, 1])
shelf_shelf.keep_still()
# The shelf should not be attached to the back panel (no contact yet)
assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel)
# Let the shelf fall
for _ in range(10):
og.sim.step()
# The shelf should be attached to the back panel
assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel)
assert shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, True)
# The shelf should still be attached to the back panel
assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel)
assert shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, False)
# The shelf should not be attached to the back panel
assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel)
assert shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, True)
# shelf should be attached to the back panel
assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel)
force_dir = np.array([0, 0, 1])
# A small force will not break the attachment
force_mag = 10
apply_force_at_pos(shelf_shelf.root_link, force_dir * force_mag, shelf_shelf.get_position())
og.sim.step()
assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel)
# A large force will break the attachment
force_mag = 1000
apply_force_at_pos(shelf_shelf.root_link, force_dir * force_mag, shelf_shelf.get_position())
og.sim.step()
assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel)
shelf_shelf.set_position_orientation([0, 0, 10], [0, 0, 0, 1])
assert not shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, True)
# The shelf should not be attached to the back panel because the alignment is wrong
assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel)
assert shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, True, bypass_alignment_checking=True)
# The shelf should be attached to the back panel because the alignment checking is bypassed
assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel)
# The shelf baseboard should NOT be attached because the attachment has the wrong type
shelf_baseboard.set_position_orientation([0.37, -0.93, 0.03], [0, 0, 0, 1])
assert not shelf_baseboard.states[AttachedTo].set_value(shelf_back_panel, True, bypass_alignment_checking=True)
assert not shelf_baseboard.states[AttachedTo].get_value(shelf_back_panel)
@og_test
def test_particle_source():
sink = og.sim.scene.object_registry("name", "sink")
place_obj_on_floor_plane(sink)
for _ in range(3):
og.sim.step()
assert not sink.states[ToggledOn].get_value()
water_system = get_system("water")
# Sink is toggled off, no water should be present
assert water_system.n_particles == 0
sink.states[ToggledOn].set_value(True)
for _ in range(sink.states[ParticleSource].n_steps_per_modification):
og.sim.step()
# Sink is toggled on, some water should be present
assert water_system.n_particles > 0
# Cannot set this state
with pytest.raises(NotImplementedError):
sink.states[ParticleSource].set_value(True)
water_system.remove_all_particles()
@og_test
def test_particle_sink():
sink = og.sim.scene.object_registry("name", "sink")
place_obj_on_floor_plane(sink)
for _ in range(3):
og.sim.step()
water_system = get_system("water")
# There should be no water particles.
assert water_system.n_particles == 0
sink_pos = sink.states[ParticleSink].link.get_position()
water_system.generate_particles(positions=[sink_pos + np.array([0, 0, 0.05])])
# There should be exactly 1 water particle.
assert water_system.n_particles == 1
for _ in range(sink.states[ParticleSink].n_steps_per_modification):
og.sim.step()
    # There should be no water particles because the particle sink absorbs them.
assert water_system.n_particles == 0
# Cannot set this state
with pytest.raises(NotImplementedError):
sink.states[ParticleSink].set_value(True)
water_system.remove_all_particles()
@og_test
def test_particle_applier():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
spray_bottle = og.sim.scene.object_registry("name", "spray_bottle")
applier_dishtowel = og.sim.scene.object_registry("name", "applier_dishtowel")
# Test projection
place_obj_on_floor_plane(breakfast_table)
place_objA_on_objB_bbox(spray_bottle, breakfast_table, z_offset=0.1)
spray_bottle.set_orientation(np.array([0.707, 0, 0, 0.707]))
for _ in range(3):
og.sim.step()
assert not spray_bottle.states[ToggledOn].get_value()
water_system = get_system("water")
# Spray bottle is toggled off, no water should be present
assert water_system.n_particles == 0
# Take number of steps for water to be generated, make sure there is still no water
n_applier_steps = spray_bottle.states[ParticleApplier].n_steps_per_modification
for _ in range(n_applier_steps):
og.sim.step()
assert water_system.n_particles == 0
# Turn particle applier on, and verify particles are generated after the same number of steps are taken
spray_bottle.states[ToggledOn].set_value(True)
for _ in range(n_applier_steps):
og.sim.step()
# Some water should be present
assert water_system.n_particles > 0
# Test adjacency
water_system.remove_all_particles()
spray_bottle.set_position_orientation(position=np.ones(3) * 50.0, orientation=np.array([0, 0, 0, 1.0]))
place_objA_on_objB_bbox(applier_dishtowel, breakfast_table)
og.sim.step()
# no water should be present
assert water_system.n_particles == 0
# Take number of steps for water to be generated
n_applier_steps = applier_dishtowel.states[ParticleApplier].n_steps_per_modification
for _ in range(n_applier_steps):
og.sim.step()
# Some water should be present
assert water_system.n_particles > 0
# Cannot set this state
with pytest.raises(NotImplementedError):
spray_bottle.states[ParticleApplier].set_value(True)
water_system.remove_all_particles()
@og_test
def test_particle_remover():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
vacuum = og.sim.scene.object_registry("name", "vacuum")
remover_dishtowel = og.sim.scene.object_registry("name", "remover_dishtowel")
# Test projection
place_obj_on_floor_plane(breakfast_table)
place_objA_on_objB_bbox(vacuum, breakfast_table, z_offset=0.02)
for _ in range(3):
og.sim.step()
assert not vacuum.states[ToggledOn].get_value()
water_system = get_system("water")
# Place single particle of water on middle of table
water_system.generate_particles(positions=[np.array([0, 0, breakfast_table.aabb[1][2] + water_system.particle_radius])])
assert water_system.n_particles > 0
# Take number of steps for water to be removed, make sure there is still water
n_remover_steps = vacuum.states[ParticleRemover].n_steps_per_modification
for _ in range(n_remover_steps):
og.sim.step()
assert water_system.n_particles > 0
    # Turn particle remover on, and verify particles are removed after the same number of steps are taken
vacuum.states[ToggledOn].set_value(True)
for _ in range(n_remover_steps):
og.sim.step()
# No water should be present
assert water_system.n_particles == 0
# Test adjacency
vacuum.set_position(np.ones(3) * 50.0)
place_objA_on_objB_bbox(remover_dishtowel, breakfast_table, z_offset=0.03)
og.sim.step()
# Place single particle of water on middle of table
water_system.generate_particles(positions=[np.array([0, 0, breakfast_table.aabb[1][2] + water_system.particle_radius])])
# Water should be present
assert water_system.n_particles > 0
# Take number of steps for water to be removed
n_remover_steps = remover_dishtowel.states[ParticleRemover].n_steps_per_modification
for _ in range(n_remover_steps):
og.sim.step()
# No water should be present
assert water_system.n_particles == 0
# Cannot set this state
with pytest.raises(NotImplementedError):
vacuum.states[ParticleRemover].set_value(True)
water_system.remove_all_particles()
@og_test
def test_saturated():
remover_dishtowel = og.sim.scene.object_registry("name", "remover_dishtowel")
place_obj_on_floor_plane(remover_dishtowel)
for _ in range(5):
og.sim.step()
water_system = get_system("water")
# Place single row of water above dishtowel
n_particles = 5
remover_dishtowel.states[Saturated].set_limit(water_system, n_particles)
water_system.generate_particles(positions=[np.array([0, 0, remover_dishtowel.aabb[1][2] + water_system.particle_radius * (1 + 2 * i)]) for i in range(n_particles)])
# Take a few steps
for _ in range(20):
og.sim.step()
# Make sure Saturated is True, and no particles exist
assert water_system.n_particles == 0
assert remover_dishtowel.states[Saturated].get_value(water_system)
# Make sure we can toggle saturated to be true and false
assert remover_dishtowel.states[Saturated].set_value(water_system, False)
assert remover_dishtowel.states[Saturated].set_value(water_system, True)
water_system.remove_all_particles()
@og_test
def test_open():
microwave = og.sim.scene.object_registry("name", "microwave")
bottom_cabinet = og.sim.scene.object_registry("name", "bottom_cabinet")
# By default, objects should not be open.
assert not microwave.states[Open].get_value()
assert not bottom_cabinet.states[Open].get_value()
# Set the joints to their upper limits.
microwave.joints["j_link_0"].set_pos(microwave.joints["j_link_0"].upper_limit)
bottom_cabinet.joints["j_link_2"].set_pos(bottom_cabinet.joints["j_link_2"].upper_limit)
og.sim.step()
# The objects should be open.
assert microwave.states[Open].get_value()
assert bottom_cabinet.states[Open].get_value()
# Set the joints to their lower limits.
microwave.joints["j_link_0"].set_pos(microwave.joints["j_link_0"].lower_limit)
bottom_cabinet.joints["j_link_2"].set_pos(bottom_cabinet.joints["j_link_2"].lower_limit)
og.sim.step()
# The objects should not be open.
assert not microwave.states[Open].get_value()
assert not bottom_cabinet.states[Open].get_value()
# Setters should work.
assert microwave.states[Open].set_value(True)
assert bottom_cabinet.states[Open].set_value(True)
# The objects should be open.
assert microwave.states[Open].get_value()
assert bottom_cabinet.states[Open].get_value()
# Setters should work.
assert microwave.states[Open].set_value(False)
assert bottom_cabinet.states[Open].set_value(False)
# The objects should not be open.
assert not microwave.states[Open].get_value()
assert not bottom_cabinet.states[Open].get_value()
@og_test
def test_folded_unfolded():
carpet = og.sim.scene.object_registry("name", "carpet")
place_obj_on_floor_plane(carpet)
for _ in range(5):
og.sim.step()
assert not carpet.states[Folded].get_value()
assert carpet.states[Unfolded].get_value()
pos = carpet.root_link.compute_particle_positions()
x_min, x_max = np.min(pos, axis=0)[0], np.max(pos, axis=0)[0]
x_extent = x_max - x_min
    # Get indices of the 10 percent of vertices with the smallest x-coordinates
indices = np.argsort(pos, axis=0)[:, 0][:(pos.shape[0] // 10)]
start = np.copy(pos[indices])
# lift up a bit
mid = np.copy(start)
mid[:, 2] += x_extent * 0.2
# move towards x_max
end = np.copy(mid)
end[:, 0] += x_extent * 0.9
increments = 25
for ctrl_pts in np.concatenate([np.linspace(start, mid, increments), np.linspace(mid, end, increments)]):
carpet.root_link.set_particle_positions(ctrl_pts, idxs=indices)
og.sim.step()
assert carpet.states[Folded].get_value()
assert not carpet.states[Unfolded].get_value()
assert carpet.states[Unfolded].set_value(True)
with pytest.raises(NotImplementedError):
carpet.states[Unfolded].set_value(False)
with pytest.raises(NotImplementedError):
carpet.states[Folded].set_value(True)
@og_test
def test_draped():
breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
carpet = og.sim.scene.object_registry("name", "carpet")
place_obj_on_floor_plane(breakfast_table)
place_objA_on_objB_bbox(carpet, breakfast_table)
for _ in range(10):
og.sim.step()
assert carpet.states[Draped].get_value(breakfast_table)
carpet.set_position([20., 20., 1.])
for _ in range(5):
og.sim.step()
assert not carpet.states[Draped].get_value(breakfast_table)
assert carpet.states[Draped].set_value(breakfast_table, True)
with pytest.raises(NotImplementedError):
carpet.states[Draped].set_value(breakfast_table, False)
@og_test
def test_filled():
stockpot = og.sim.scene.object_registry("name", "stockpot")
systems = [get_system(system_name) for system_name, system_class in SYSTEM_EXAMPLES.items() if not issubclass(system_class, VisualParticleSystem)]
for system in systems:
stockpot.set_position_orientation(position=np.ones(3) * 50.0, orientation=[0, 0, 0, 1.0])
place_obj_on_floor_plane(stockpot)
for _ in range(5):
og.sim.step()
assert stockpot.states[Filled].set_value(system, True)
og.sim.step()
assert stockpot.states[Filled].get_value(system)
# Cannot set Filled state False
with pytest.raises(NotImplementedError):
stockpot.states[Filled].set_value(system, False)
system.remove_all_particles()
og.sim.step()
assert not stockpot.states[Filled].get_value(system)
@og_test
def test_contains():
stockpot = og.sim.scene.object_registry("name", "stockpot")
systems = [get_system(system_name) for system_name, system_class in SYSTEM_EXAMPLES.items()]
for system in systems:
print(f"Testing Contains {stockpot.name} with {system.name}")
stockpot.set_position_orientation(position=np.ones(3) * 50.0, orientation=[0, 0, 0, 1.0])
place_obj_on_floor_plane(stockpot)
for _ in range(5):
og.sim.step()
# Sample single particle
if is_physical_particle_system(system_name=system.name):
system.generate_particles(positions=[np.array([0, 0, stockpot.aabb[1][2] - 0.1])])
else:
if system.get_group_name(stockpot) not in system.groups:
system.create_attachment_group(stockpot)
system.generate_group_particles(
group=system.get_group_name(stockpot),
positions=np.array([np.array([0, 0, stockpot.aabb[1][2] - 0.1])]),
link_prim_paths=[stockpot.root_link.prim_path],
)
og.sim.step()
assert stockpot.states[Contains].get_value(system)
# Remove all particles and make sure contains returns False
stockpot.states[Contains].set_value(system, False)
og.sim.step()
assert not stockpot.states[Contains].get_value(system)
# Cannot set Contains state
with pytest.raises(NotImplementedError):
stockpot.states[Contains].set_value(system, True)
system.remove_all_particles()
@og_test
def test_covered():
bracelet = og.sim.scene.object_registry("name", "bracelet")
bowl = og.sim.scene.object_registry("name", "bowl")
microwave = og.sim.scene.object_registry("name", "microwave")
systems = [get_system(system_name) for system_name, system_class in SYSTEM_EXAMPLES.items()]
for obj in (bracelet, bowl, microwave):
for system in systems:
# bracelet is too small to sample physical particles on it
sampleable = is_visual_particle_system(system.name) or obj != bracelet
if sampleable:
print(f"Testing Covered {obj.name} with {system.name}")
obj.set_position_orientation(position=np.ones(3) * 50.0, orientation=[0, 0, 0, 1.0])
place_obj_on_floor_plane(obj)
for _ in range(5):
og.sim.step()
assert obj.states[Covered].set_value(system, True)
og.sim.step()
assert obj.states[Covered].get_value(system)
assert obj.states[Covered].set_value(system, False)
# We don't call og.sim.step() here because it's possible for the "second" layer of particles to fall down
# and make Covered to be True again. Instead, we clear the caches and check that Covered is False.
obj.states[Covered].clear_cache()
obj.states[ContactParticles].clear_cache()
assert not obj.states[Covered].get_value(system)
system.remove_all_particles()
obj.set_position_orientation(position=np.ones(3) * 75.0, orientation=[0, 0, 0, 1.0])
def test_clear_sim():
og.sim.clear()
| 44,690 | Python | 37.593264 | 168 | 0.69237 |
StanfordVL/OmniGibson/tests/benchmark/benchmark_object_count.py | """
Script to benchmark speed vs. no. of objects in the scene.
"""
import os
import time
import matplotlib.pyplot as plt
import numpy as np
from omnigibson import app, launch_simulator
from omnigibson.objects.primitive_object import PrimitiveObject
from omnigibson.scenes.scene_base import Scene
from omnigibson.utils.asset_utils import get_og_assets_version
# Params to be set as needed.
MAX_NUM_OBJS = 400 # Maximum no. of objects to add.
NUM_OBJS_PER_ITER = 20 # No. of objects to add per iteration.
NUM_STEPS_PER_ITER = 30  # No. of steps to take for each number of objects.
OBJ_SCALE = 0.05  # Object scale; set appropriately for stable collision simulation.
RAND_POSITION = True # True to randomize positions.
OUTPUT_DIR = os.path.join(os.path.expanduser("~"), "Desktop")
# Internal constants.
_N_PER_ROW = int(np.sqrt(MAX_NUM_OBJS))
_MIN_VAL = -2.0
_MAX_VAL = 2.0
_STEP_SIZE = (_MAX_VAL - _MIN_VAL) / _N_PER_ROW
def _get_position(obj_idx, is_random=False):
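    # Either sample a random position, or lay objects out on a fixed grid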
if is_random:
pos_arange = np.arange(_MIN_VAL, _MAX_VAL, step=0.1, dtype=np.float32)
x, y, z = np.random.choice(pos_arange, size=3)
return x, y, z
x = _MIN_VAL + _STEP_SIZE * (obj_idx % _N_PER_ROW)
y = _MIN_VAL + _STEP_SIZE * (obj_idx // _N_PER_ROW)
return x, y, 0.1
def benchmark_scene(sim):
assets_version = get_og_assets_version()
print("assets_version", assets_version)
scene = Scene(floor_plane_visible=True)
sim.import_scene(scene)
sim.play()
xs = []
ys = []
yerrs = []
for i in range(MAX_NUM_OBJS // NUM_OBJS_PER_ITER):
new_objs = []
for j in range(NUM_OBJS_PER_ITER):
obj_idx = i * NUM_OBJS_PER_ITER + j
obj = PrimitiveObject(
prim_path=f"/World/obj{obj_idx}",
primitive_type="Sphere",
name=f"obj{obj_idx}",
scale=OBJ_SCALE,
visual_only=False,
)
sim.import_object(obj=obj, auto_initialize=False)
# x, y, z = _get_position(obj_idx, RAND_POSITION)
x, y = 0, 0
z = 0.5 + j * OBJ_SCALE * 2.25
obj.set_position(position=np.array([x, y, z]))
new_objs.append(obj)
# Take a step to initialize the new objects (done in _non_physics_step()).
sim.step()
step_freqs = []
for _ in range(NUM_STEPS_PER_ITER):
start = time.time()
sim.step()
end = time.time()
step_freqs.append(1 / (end - start))
xs.append(i * NUM_OBJS_PER_ITER)
max_freq, min_freq = np.max(step_freqs), np.min(step_freqs)
ys.append(np.mean((max_freq, min_freq)))
yerrs.append(max_freq - ys[-1])
plt.figure(figsize=(9, 6))
ax = plt.subplot(1, 1, 1)
plt.errorbar(xs, ys, yerr=yerrs, elinewidth=0.75)
ax.set_xlabel("No. of objects")
ax.set_ylabel("Step fps")
ax.set_title(f"Version {assets_version}")
plt.tight_layout()
plt.savefig(os.path.join(
OUTPUT_DIR, f"scene_objs_benchmark_{MAX_NUM_OBJS}_{OBJ_SCALE}.png"))
def main():
assert MAX_NUM_OBJS <= 1000
sim = launch_simulator()
benchmark_scene(sim)
app.close()
if __name__ == "__main__":
main()
| 3,243 | Python | 29.603773 | 82 | 0.590811 |
StanfordVL/OmniGibson/tests/benchmark/profiling.py | import os
import argparse
import json
import omnigibson as og
import numpy as np
import omnigibson.utils.transform_utils as T
import time
from omnigibson.macros import gm
from omnigibson.systems import get_system
from omnigibson.object_states import Covered
from omnigibson.utils.profiling_utils import ProfilingEnv
from omnigibson.utils.constants import PrimType
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--robot", type=int, default=0)
parser.add_argument("-s", "--scene", default="")
parser.add_argument("-c", "--cloth", action='store_true')
parser.add_argument("-w", "--fluids", action='store_true')
parser.add_argument("-g", "--gpu_denamics", action='store_true')
parser.add_argument("-p", "--macro_particle_system", action='store_true')
PROFILING_FIELDS = ["FPS", "Omni step time", "Non-omni step time", "Memory usage", "Vram usage"]
NUM_CLOTH = 5
NUM_SLICE_OBJECT = 3
SCENE_OFFSET = {
"": [0, 0],
"Rs_int": [0, 0],
"Pomaria_0_garden": [0.3, 0],
"grocery_store_cafe": [-3.5, 3.5],
"house_single_floor": [-3, -1],
"Ihlen_0_int": [-1, 2]
}
def main():
args = parser.parse_args()
# Modify macros settings
gm.ENABLE_HQ_RENDERING = args.fluids
gm.ENABLE_OBJECT_STATES = True
gm.ENABLE_TRANSITION_RULES = True
gm.ENABLE_FLATCACHE = not args.cloth
    gm.USE_GPU_DYNAMICS = args.gpu_dynamics
cfg = {
"env": {
"action_frequency": 30,
"physics_frequency": 120,
}
}
if args.robot > 0:
cfg["robots"] = []
for i in range(args.robot):
cfg["robots"].append({
"type": "Fetch",
"obs_modalities": "all",
"position": [-1.3 + 0.75 * i + SCENE_OFFSET[args.scene][0], 0.5 + SCENE_OFFSET[args.scene][1], 0],
"orientation": [0., 0., 0.7071, -0.7071]
})
if args.scene:
assert args.scene in SCENE_OFFSET, f"Scene {args.scene} not found in SCENE_OFFSET"
cfg["scene"] = {
"type": "InteractiveTraversableScene",
"scene_model": args.scene,
}
else:
cfg["scene"] = {"type": "Scene"}
cfg["objects"] = [{
"type": "DatasetObject",
"name": "table",
"category": "breakfast_table",
"model": "rjgmmy",
"fixed_base": True,
"scale": [0.75] * 3,
"position": [0.5 + SCENE_OFFSET[args.scene][0], -1 + SCENE_OFFSET[args.scene][1], 0.3],
"orientation": [0., 0., 0.7071, -0.7071]
}]
if args.cloth:
cfg["objects"].extend([{
"type": "DatasetObject",
"name": f"cloth_{n}",
"category": "t_shirt",
"model": "kvidcx",
"prim_type": PrimType.CLOTH,
"abilities": {"cloth": {}},
"bounding_box": [0.3, 0.5, 0.7],
"position": [-0.4, -1, 0.7 + n * 0.4],
"orientation": [0.7071, 0., 0.7071, 0.],
} for n in range(NUM_CLOTH)])
cfg["objects"].extend([{
"type": "DatasetObject",
"name": f"apple_{n}",
"category": "apple",
"model": "agveuv",
"scale": [1.5] * 3,
"position": [0.5 + SCENE_OFFSET[args.scene][0], -1.25 + SCENE_OFFSET[args.scene][1] + n * 0.2, 0.5],
"abilities": {"diceable": {}} if args.macro_particle_system else {}
} for n in range(NUM_SLICE_OBJECT)])
cfg["objects"].extend([{
"type": "DatasetObject",
"name": f"knife_{n}",
"category": "table_knife",
"model": "jxdfyy",
"scale": [2.5] * 3
} for n in range(NUM_SLICE_OBJECT)])
load_start = time.time()
env = ProfilingEnv(configs=cfg)
table = env.scene.object_registry("name", "table")
apples = [env.scene.object_registry("name", f"apple_{n}") for n in range(NUM_SLICE_OBJECT)]
knifes = [env.scene.object_registry("name", f"knife_{n}") for n in range(NUM_SLICE_OBJECT)]
if args.cloth:
clothes = [env.scene.object_registry("name", f"cloth_{n}") for n in range(NUM_CLOTH)]
for cloth in clothes:
cloth.root_link.mass = 1.0
env.reset()
for n, knife in enumerate(knifes):
knife.set_position_orientation(
position=apples[n].get_position() + np.array([-0.15, 0.0, 0.1 * (n + 2)]),
orientation=T.euler2quat([-np.pi / 2, 0, 0]),
)
knife.keep_still()
if args.fluids:
table.states[Covered].set_value(get_system("water"), True)
output, results = [], []
# Update the simulator's viewer camera's pose so it points towards the robot
og.sim.viewer_camera.set_position([SCENE_OFFSET[args.scene][0], -3 + SCENE_OFFSET[args.scene][1], 1])
# record total load time
total_load_time = time.time() - load_start
for i in range(300):
if args.robot:
result = env.step(np.array([np.random.uniform(-0.3, 0.3, env.robots[i].action_dim) for i in range(args.robot)]).flatten())[4]
else:
result = env.step(None)[4]
results.append(result)
field = f"{args.scene}" if args.scene else "Empty scene"
if args.robot:
field += f", with {args.robot} Fetch"
if args.cloth:
field += ", cloth"
if args.fluids:
field += ", fluids"
if args.macro_particle_system:
field += ", macro particles"
output.append({
"name": field,
"unit": "time (ms)",
"value": total_load_time,
"extra": ["Loading time", "Loading time"]
})
results = np.array(results)
for i, title in enumerate(PROFILING_FIELDS):
unit = "time (ms)" if 'time' in title else "GB"
value = np.mean(results[:, i])
if title == "FPS":
value = 1000 / value
unit = "fps"
output.append({"name": field, "unit": unit, "value": value, "extra": [title, title]})
ret = []
if os.path.exists("output.json"):
with open("output.json", "r") as f:
ret = json.load(f)
ret.extend(output)
with open("output.json", "w") as f:
json.dump(ret, f, indent=4)
og.shutdown()
if __name__ == "__main__":
main()
| 6,127 | Python | 32.670329 | 137 | 0.555247 |
StanfordVL/OmniGibson/tests/benchmark/benchmark_interactive_scene.py | #!/usr/bin/env python
import os
import time
import matplotlib.pyplot as plt
import numpy as np
import omnigibson as og
from omnigibson.objects import DatasetObject
from omnigibson.macros import gm
from omnigibson.robots.turtlebot import Turtlebot
from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
from omnigibson.simulator import launch_simulator
from omnigibson.utils.asset_utils import get_og_assets_version
from omnigibson.utils.constants import PrimType
from omnigibson.systems import get_system
# Params to be set as needed.
SCENES = ["Rs_int"] # house_single_floor
OUTPUT_DIR = os.path.join(os.path.expanduser("~"), "Desktop")
NUM_STEPS = 2000
gm.HEADLESS = False
gm.GUI_VIEWPORT_ONLY = True
gm.RENDER_VIEWER_CAMERA = False
gm.ENABLE_FLATCACHE = True
gm.USE_GPU_DYNAMICS = False
gm.ENABLE_OBJECT_STATES = False
gm.ENABLE_TRANSITION_RULES = False
gm.DEFAULT_VIEWER_WIDTH = 128
gm.DEFAULT_VIEWER_HEIGHT = 128
# Launch the simulator
launch_simulator(physics_dt=1/60., rendering_dt=1/60.)
def benchmark_scene(scene_name, non_rigid_simulation=False, import_robot=True):
assets_version = get_og_assets_version()
print("assets_version", assets_version)
scene = InteractiveTraversableScene(scene_name)
start = time.time()
og.sim.import_scene(scene)
if gm.RENDER_VIEWER_CAMERA:
og.sim.viewer_camera.set_position_orientation([0, 0, 0.2], [0.5, -0.5, -0.5, 0.5])
print(time.time() - start)
if import_robot:
turtlebot = Turtlebot(prim_path="/World/robot", name="agent", obs_modalities=['rgb'])
og.sim.import_object(turtlebot)
og.sim.step()
if non_rigid_simulation:
cloth = DatasetObject(
name="cloth",
prim_path="/World/cloth",
category="t_shirt",
model="kvidcx",
prim_type=PrimType.CLOTH,
abilities={"cloth": {}},
bounding_box=[0.3, 0.5, 0.7],
)
og.sim.import_object(cloth)
og.sim.step()
water_system = get_system("water")
for i in range(100):
water_system.generate_particles(
positions=[np.array([0.5, 0, 0.5]) + np.random.randn(3) * 0.1]
)
og.sim.step()
og.sim.play()
if non_rigid_simulation:
cloth.set_position([1, 0, 1])
og.sim.step()
fps = []
physics_fps = []
render_fps = []
print(len(og.sim.scene.objects))
for i in range(NUM_STEPS):
start = time.time()
if import_robot:
            # Apply zero actions to drive the robot's controllers each step.
turtlebot.apply_action(np.zeros(2))
og.sim.step(render=False)
physics_end = time.time()
og.sim.render()
end = time.time()
if i % 100 == 0:
print("Elapsed time: ", end - start)
print("Render Frequency: ", 1 / (end - physics_end))
print("Physics Frequency: ", 1 / (physics_end - start))
print("Step Frequency: ", 1 / (end - start))
fps.append(1 / (end - start))
physics_fps.append(1 / (physics_end - start))
render_fps.append(1 / (end - physics_end))
plt.figure(figsize=(7, 25))
ax = plt.subplot(6, 1, 1)
plt.hist(render_fps)
ax.set_xlabel("Render fps")
ax.set_title(
"Scene {} version {}\nnon_physics {} num_obj {}\n import_robot {}".format(
scene_name, assets_version, non_rigid_simulation, scene.n_objects, import_robot
)
)
ax = plt.subplot(6, 1, 2)
plt.hist(physics_fps)
ax.set_xlabel("Physics fps")
ax = plt.subplot(6, 1, 3)
plt.hist(fps)
ax.set_xlabel("Step fps")
ax = plt.subplot(6, 1, 4)
plt.plot(render_fps)
ax.set_xlabel("Render fps with time, converge to {}".format(np.mean(render_fps[-100:])))
ax.set_ylabel("fps")
ax = plt.subplot(6, 1, 5)
plt.plot(physics_fps)
ax.set_xlabel("Physics fps with time, converge to {}".format(np.mean(physics_fps[-100:])))
ax.set_ylabel("fps")
ax = plt.subplot(6, 1, 6)
plt.plot(fps)
ax.set_xlabel("Overall fps with time, converge to {}".format(np.mean(fps[-100:])))
ax.set_ylabel("fps")
plt.tight_layout()
plt.savefig(os.path.join(
OUTPUT_DIR,
"scene_benchmark_{}_np_{}_r_{}.pdf".format(scene_name, non_rigid_simulation, import_robot)))
def main():
for scene in SCENES:
benchmark_scene(scene, non_rigid_simulation=False, import_robot=True)
og.shutdown()
if __name__ == "__main__":
main()
| 4,494 | Python | 30.215278 | 100 | 0.62016 |
StanfordVL/OmniGibson/docs/gen_ref_pages.py | """Generate the code reference pages."""
from pathlib import Path
import mkdocs_gen_files
nav = mkdocs_gen_files.Nav()
src = "omnigibson"
for path in sorted(Path(src).rglob("*.py")):
    module_path = path.relative_to(src).with_suffix("")
    doc_path = path.relative_to(src).with_suffix(".md")
    full_doc_path = Path("reference", doc_path)
parts = tuple(module_path.parts)
if parts[-1] == "__init__":
parts = parts[:-1]
doc_path = doc_path.with_name("index.md")
full_doc_path = full_doc_path.with_name("index.md")
elif parts[-1] == "__main__":
continue
# print(f"parts: {parts}")
if parts == ():
continue
# parts = (src,)
# input(f"parts: {parts}")
nav[parts] = doc_path.as_posix()
with mkdocs_gen_files.open(full_doc_path, "w") as fd:
ident = ".".join(parts)
fd.write(f"# {parts[-1]}\n\n::: {ident}")
mkdocs_gen_files.set_edit_path(full_doc_path, Path("../../") / path)
with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file:
nav_file.writelines(nav.build_literate_nav())
| 1,520 | Python | 27.698113 | 75 | 0.554605 |
StanfordVL/OmniGibson/docs/modules/scene.md | ---
icon: material/home-outline
---
# 🏠 **Scene**
Scenes are one level higher than objects: a scene consists of multiple objects that interact with each other. OmniGibson currently supports two types of scenes:
- `EmptyScene`: This is an empty scene that can be used to create custom scenes. It does not contain any pre-defined objects.
- `InteractiveTraversableScene`: This type of scene is interactive and traversable. It comes with traversability maps that enable robots to perform navigation tasks. Users can choose from the 51 predefined scenes in the OmniGibson dataset (a config sketch follows below).
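As a minimal sketch (a config fragment following the same conventions used in the other module docs; `Rs_int` is one of the models listed below), the scene is selected via the `scene` key of the config passed to the `Environment` constructor:
``` yaml
scene:
  type: InteractiveTraversableScene
  scene_model: Rs_int
```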
Here's a list of all the `InteractiveTraversableScene` scenes available in OmniGibson:
<table markdown="span">
<tr>
<td valign="top" width="30%">
**`Beechwood_0_garden`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Beechwood_0_garden.png" alt="Beechwood_0_garden">
</td>
<td>
<img src="../assets/scenes/scene-views/Beechwood_0_garden.png" alt="Beechwood_0_garden">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Beechwood_0_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Beechwood_0_int.png" alt="Beechwood_0_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Beechwood_0_int.png" alt="Beechwood_0_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Beechwood_1_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Beechwood_1_int.png" alt="Beechwood_1_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Beechwood_1_int.png" alt="Beechwood_1_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Benevolence_0_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Benevolence_0_int.png" alt="Benevolence_0_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Benevolence_0_int.png" alt="Benevolence_0_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Benevolence_1_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Benevolence_1_int.png" alt="Benevolence_1_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Benevolence_1_int.png" alt="Benevolence_1_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
            **`Benevolence_2_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Benevolence_2_int.png" alt="Benevolence_2_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Benevolence_2_int.png" alt="Benevolence_2_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Ihlen_0_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Ihlen_0_int.png" alt="Ihlen_0_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Ihlen_0_int.png" alt="Ihlen_0_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Ihlen_1_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Ihlen_1_int.png" alt="Ihlen_1_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Ihlen_1_int.png" alt="Ihlen_1_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Merom_0_garden`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Merom_0_garden.png" alt="Merom_0_garden">
</td>
<td>
<img src="../assets/scenes/scene-views/Merom_0_garden.png" alt="Merom_0_garden">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Merom_0_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Merom_0_int.png" alt="Merom_0_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Merom_0_int.png" alt="Merom_0_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Merom_1_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Merom_1_int.png" alt="Merom_1_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Merom_1_int.png" alt="Merom_1_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Pomaria_0_garden`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Pomaria_0_garden.png" alt="Pomaria_0_garden">
</td>
<td>
<img src="../assets/scenes/scene-views/Pomaria_0_garden.png" alt="Pomaria_0_garden">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Pomaria_0_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Pomaria_0_int.png" alt="Pomaria_0_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Pomaria_0_int.png" alt="Pomaria_0_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Pomaria_1_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Pomaria_1_int.png" alt="Pomaria_1_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Pomaria_1_int.png" alt="Pomaria_1_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Pomaria_2_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Pomaria_2_int.png" alt="Pomaria_2_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Pomaria_2_int.png" alt="Pomaria_2_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Rs_garden`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Rs_garden.png" alt="Rs_garden">
</td>
<td>
<img src="../assets/scenes/scene-views/Rs_garden.png" alt="Rs_garden">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Rs_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Rs_int.png" alt="Rs_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Rs_int.png" alt="Rs_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`Wainscott_0_garden`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Wainscott_0_garden.png" alt="Wainscott_0_garden">
</td>
<td>
<img src="../assets/scenes/scene-views/Wainscott_0_garden.png" alt="Wainscott_0_garden">
</td>
</tr>
<tr>
<td valign="top" width="30%">
            **`Wainscott_0_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Wainscott_0_int.png" alt="Wainscott_0_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Wainscott_0_int.png" alt="Wainscott_0_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
            **`Wainscott_1_int`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/Wainscott_1_int.png" alt="Wainscott_1_int">
</td>
<td>
<img src="../assets/scenes/scene-views/Wainscott_1_int.png" alt="Wainscott_1_int">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`grocery_store_asian`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/grocery_store_asian.png" alt="grocery_store_asian">
</td>
<td>
<img src="../assets/scenes/scene-views/grocery_store_asian.png" alt="grocery_store_asian">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`grocery_store_cafe`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/grocery_store_cafe.png" alt="grocery_store_cafe">
</td>
<td>
<img src="../assets/scenes/scene-views/grocery_store_cafe.png" alt="grocery_store_cafe">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`grocery_store_convenience`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/grocery_store_convenience.png" alt="grocery_store_convenience">
</td>
<td>
<img src="../assets/scenes/scene-views/grocery_store_convenience.png" alt="grocery_store_convenience">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`grocery_store_half_stocked`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/grocery_store_half_stocked.png" alt="grocery_store_half_stocked">
</td>
<td>
<img src="../assets/scenes/scene-views/grocery_store_half_stocked.png" alt="grocery_store_half_stocked">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`hall_arch_wood`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/hall_arch_wood.png" alt="hall_arch_wood">
</td>
<td>
<img src="../assets/scenes/scene-views/hall_arch_wood.png" alt="hall_arch_wood">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`hall_conference_large`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/hall_conference_large.png" alt="hall_conference_large">
</td>
<td>
<img src="../assets/scenes/scene-views/hall_conference_large.png" alt="hall_conference_large">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`hall_glass_ceiling`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/hall_glass_ceiling.png
" alt="hall_glass_ceiling">
</td>
<td>
<img src="../assets/scenes/scene-views/hall_glass_ceiling.png" alt="hall_glass_ceiling">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`hall_train_station`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/hall_train_station.png" alt="hall_train_station">
</td>
<td>
<img src="../assets/scenes/scene-views/hall_train_station.png" alt="hall_train_station">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`hotel_gym_spa`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/hotel_gym_spa.png" alt="hotel_gym_spa">
</td>
<td>
<img src="../assets/scenes/scene-views/hotel_gym_spa.png" alt="hotel_gym_spa">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`hotel_suite_large`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/hotel_suite_large.png" alt="hotel_suite_large">
</td>
<td>
<img src="../assets/scenes/scene-views/hotel_suite_large.png" alt="hotel_suite_large">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`hotel_suite_small`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/hotel_suite_small.png" alt="hotel_suite_small">
</td>
<td>
<img src="../assets/scenes/scene-views/hotel_suite_small.png" alt="hotel_suite_small">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`house_double_floor_lower`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/house_double_floor_lower.png" alt="house_double_floor_lower">
</td>
<td>
<img src="../assets/scenes/scene-views/house_double_floor_lower.png" alt="house_double_floor_lower">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`house_double_floor_upper`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/house_double_floor_upper.png" alt="house_double_floor_upper">
</td>
<td>
<img src="../assets/scenes/scene-views/house_double_floor_upper.png" alt="house_double_floor_upper">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`house_single_floor`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/house_single_floor.png" alt="house_single_floor">
</td>
<td>
<img src="../assets/scenes/scene-views/house_single_floor.png" alt="house_single_floor">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`office_bike`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/office_bike.png" alt="office_bike">
</td>
<td>
<img src="../assets/scenes/scene-views/office_bike.png" alt="office_bike">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`office_cubicles_left`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/office_cubicles_left.png" alt="office_cubicles_left">
</td>
<td>
<img src="../assets/scenes/scene-views/office_cubicles_left.png" alt="office_cubicles_left">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`office_cubicles_right`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/office_cubicles_right.png" alt="office_cubicles_right">
</td>
<td>
<img src="../assets/scenes/scene-views/office_cubicles_right.png" alt="office_cubicles_right">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`office_large`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/office_large.png" alt="office_large">
</td>
<td>
<img src="../assets/scenes/scene-views/office_large.png" alt="office_large">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`office_vendor_machine`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/office_vendor_machine.png" alt="office_vendor_machine">
</td>
<td>
<img src="../assets/scenes/scene-views/office_vendor_machine.png" alt="office_vendor_machine">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`restaurant_asian`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/restaurant_asian.png" alt="restaurant_asian">
</td>
<td>
<img src="../assets/scenes/scene-views/restaurant_asian.png" alt="restaurant_asian">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`restaurant_brunch`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/restaurant_brunch.png" alt="restaurant_brunch">
</td>
<td>
<img src="../assets/scenes/scene-views/restaurant_brunch.png" alt="restaurant_brunch">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`restaurant_cafeteria`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/restaurant_cafeteria.png" alt="restaurant_cafeteria">
</td>
<td>
<img src="../assets/scenes/scene-views/restaurant_cafeteria.png" alt="restaurant_cafeteria">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`restaurant_diner`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/restaurant_diner.png" alt="restaurant_diner">
</td>
<td>
<img src="../assets/scenes/scene-views/restaurant_diner.png" alt="restaurant_diner">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`restaurant_hotel`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/restaurant_hotel.png" alt="restaurant_hotel">
</td>
<td>
<img src="../assets/scenes/scene-views/restaurant_hotel.png" alt="restaurant_hotel">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`restaurant_urban`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/restaurant_urban.png" alt="restaurant_urban">
</td>
<td>
<img src="../assets/scenes/scene-views/restaurant_urban.png" alt="restaurant_urban">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`school_biology`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/school_biology.png" alt="school_biology">
</td>
<td>
<img src="../assets/scenes/scene-views/school_biology.png" alt="school_biology">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`school_chemistry`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/school_chemistry.png" alt="school_chemistry">
</td>
<td>
<img src="../assets/scenes/scene-views/school_chemistry.png" alt="school_chemistry">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`school_computer_lab_and_infirmary`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/school_computer_lab_and_infirmary.png" alt="school_computer_lab_and_infirmary">
</td>
<td>
<img src="../assets/scenes/scene-views/school_computer_lab_and_infirmary.png" alt="school_computer_lab_and_infirmary">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`school_geography`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/school_geography.png" alt="school_geography">
</td>
<td>
<img src="../assets/scenes/scene-views/school_geography.png" alt="school_geography">
</td>
</tr>
<tr>
<td valign="top" width="30%">
**`school_gym`**<br><br>
</td>
<td>
<img src="../assets/scenes/birds-eye-views/school_gym.png" alt="school_gym">
</td>
<td>
<img src="../assets/scenes/scene-views/school_gym.png" alt="school_gym">
</td>
</tr>
</table>
| 19,299 | Markdown | 32.919156 | 238 | 0.498109 |
StanfordVL/OmniGibson/docs/modules/controllers.md | ---
icon: material/knob
---
# 🎛️ **Controllers**
## Description
In **`OmniGibson`**, `Controller`s convert high-level actions into low-level joint motor (position, velocity, or effort) controls for a subset of an individual [`Robot`](./robots.md)'s joints.
In an [`Environment`](./environment.md) instance, actions are passed to controllers via the `env.step(action)` call, resulting in the following behavior:
<div class="annotate" markdown>
- When `env.step(action)` is called, actions are parsed and passed to the respective robots owned by the environment (`env.robots`) via `robot.apply_action(action)`
- For a given robot, its `action` is parsed and passed to the respective controllers owned by the robot (`robot.controllers`) via `controller.update_goal(command)`
- For a given controller, the inputted `command` is preprocessed (re-scaled and shifted) and then converted into an internally tracked `goal`
- Each time a physics step occurs (1), all controllers compute and deploy their desired joint controls via `controller.compute_control()` towards reaching their respective `goal`s
</div>
1. Note that because environments operate at `action_frequency <= physics_frequency`, this means that a controller may take _multiple_ control steps per single `env.step(action)` call!
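As a minimal sketch of this flow (assuming an already-constructed `env` containing at least one robot; the uniform random action is purely illustrative):
``` python
import numpy as np
robot = env.robots[0]
# One env.step() call: the action is parsed and routed to the robot's controllers,
# each of which may take multiple control steps before the call returns
action = np.random.uniform(-1.0, 1.0, robot.action_dim)
env.step(action)
```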
**`OmniGibson`** supports multiple types of controllers, which are intended to control a specific subset of a robot's set of joints. Some are more general (such as the `JointController`, which can broadly be applied to any part of a robot), while others are more specific to a robot's morphology (such as the `InverseKinematicsController`, which is intended to be used to control a manipulation robot's end-effector pose).
It is important to note that a single robot can potentially own multiple controllers. For example, `Turtlebot` only owns a single controller (to control its two-wheeled base), whereas the mobile-manipulator `Fetch` robot owns four (one each for its base, head, trunk + arm, and gripper). This allows for modular action space composition, where fine-grained modification of the action space can be achieved by modifying / swapping out individual controllers. For more information about the specific number of controllers each robot has, please see our [list of robots](./robots.md#models).
## Usage
### Definition
Controllers can be specified in the config that is passed to the `Environment` constructor via the `['robots'][i]['controller_config']` key. This is expected to be a nested dictionary, mapping controller name (1) to the desired controller configuration. For each individual controller dict, the `name` key is required and specifies the desired controller class. Additional keys can be specified and will be passed directly to the specific controller class constructor. An example of a robot controller configuration is shown below in `.yaml` form:
{ .annotate }
1. See `robot.controller_order` for the full list of expected controller names for a given robot
??? code "single_fetch_controller_config_example.yaml"
``` yaml linenums="1"
robots:
- type: Fetch
controller_config:
base:
name: DifferentialDriveController
arm_0:
name: InverseKinematicsController
kv: 2.0
gripper_0:
name: MultiFingerGripperController
mode: binary
camera:
name: JointController
use_delta_commands: False
```
### Runtime
Usually, actions are passed to robots, parsed, and passed to individual controllers via `env.step(action)` --> `robot.apply_action(action)` --> `controller.update_goal(command)`. However, specific controller commands can be directly deployed with this API outside of the `env.step()` loop. A controller's internal state can be cleared by calling `controller.reset()`, and no-op goals can be computed via `compute_no_op_goal`.
Relevant properties, such as `control_type`, `control_dim`, and `command_dim`, are all queryable at runtime as well.
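For example (a sketch assuming a robot whose controller set includes an `arm_0` entry, as in the Fetch config above; commands could likewise be deployed directly via `controller.update_goal(command)` as described above):
``` python
robot = env.robots[0]
arm = robot.controllers["arm_0"]
# Query runtime properties
print(arm.control_type, arm.control_dim, arm.command_dim)
# Clear the controller's internally tracked state
arm.reset()
```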
## Types
**`OmniGibson`** currently supports 6 controllers, consisting of 2 general joint controllers, 1 locomotion-specific controller, 2 arm manipulation-specific controllers, and 1 gripper-specific controller. Below, we provide a brief overview of each controller type:
### General Controllers
These are general-purpose controllers that are agnostic to a robot's morphology, and therefore can be used on any robot.
<table markdown="span">
<tr>
<td valign="top">
[**`JointController`**](../reference/controllers/joint_controller.html)<br><br>
Directly controls individual joints. Either outputs low-level joint position or velocity controls if `use_impedance=False`, otherwise will internally compensate the desired gains with the robot's mass matrix and output joint effort controls.<br><br>
<ul>
<li>_Command Dim_: n_joints</li>
<li>_Command Description_: desired per-joint `[q_0, q_1, ...q_n]` position / velocity / effort setpoints, which are assumed to be absolute joint values unless `use_delta` is set</li>
<li>_Control Dim_: n_joints</li>
<li>_Control Type_: position / velocity / effort</li>
</ul>
</td>
</tr>
<tr>
<td valign="top">
[**`NullJointController`**](../reference/controllers/null_joint_controller.html)<br><br>
Directly controls individual joints via an internally stored `default_command`. Inputted commands will be ignored unless `default_command` is updated.<br><br>
<ul>
<li>_Command Dim_: n_joints</li>
<li>_Command Description_: `[q_0, ..., q_n]` N/A </li>
<li>_Control Dim_: n_joints</li>
<li>_Control Type_: position / velocity / effort</li>
</ul>
</td>
</tr>
</table>
### Locomotion Controllers
These are controllers specifically meant for robots with navigation capabilities.
<table markdown="span" width="100%">
<tr>
<td valign="top" width="100%">
[**`DifferentialDriveController`**](../reference/controllers/dd_controller.html)<br><br>
Commands 2-wheeled robots by setting linear / angular velocity setpoints and converting them into per-joint velocity control.<br><br>
<ul>
<li>_Command Dim_: n_joints</li>
<li>_Command Description_: desired `[lin_vel, ang_vel]` setpoints </li>
<li>_Control Dim_: 2</li>
<li>_Control Type_: velocity</li>
</ul>
</td>
</tr>
</table>
### Manipulation Arm Controllers
These are controllers specifically meant for robots with manipulation capabilities, and are intended to control a robot's end-effector pose
<table markdown="span">
<tr>
<td valign="top">
[**`InverseKinematicsController`**](../reference/controllers/ik_controller.html)<br><br>
Controls a robot's end-effector by iteratively solving inverse kinematics to output a desired joint configuration to reach the desired end effector pose, and then runs an underlying `JointController` to reach the target joint configuration. Multiple modes are available, and dictate both the command dimension and behavior of the controller. `condition_on_current_position` can be set to seed the IK solver with the robot's current joint state, and `use_impedance` can be set if the robot's per-joint inertia should be taken into account when attempting to reach the target joint configuration.<br><br>
Note: Orientation convention is axis-angle `[ax,ay,az]` representation, and commands are expressed in the robot base frame unless otherwise noted.<br><br>
<ul>
<li>_Command Dim_: 3 / 6</li>
<li>_Command Description_: desired pose command, depending on `mode`: <ul>
<li>`absolute_pose`: 6DOF `[x,y,z,ax,ay,az]` absolute position, absolute orientation</li>
<li>`pose_absolute_ori`: 6DOF `[dx,dy,dz,ax,ay,az]` delta position, absolute orientation</li>
<li>`pose_delta_ori`: 6DOF `[dx,dy,dz,dax,day,daz]` delta position, delta orientation</li>
<li>`position_fixed_ori`: 3DOF `[dx,dy,dz]` delta position, orientation setpoint is kept as fixed initial absolute orientation</li>
<li>`position_compliant_ori`: 3DOF `[dx,dy,dz]` delta position, delta orientation setpoint always kept as 0s (so can drift over time)</li>
</ul></li>
<li>_Control Dim_: n_arm_joints</li>
<li>_Control Type_: position / effort</li>
</ul>
</td>
</tr>
<tr>
<td valign="top">
[**`OperationalSpaceController`**](../reference/controllers/osc_controller.html)<br><br>
Controls a robot's end-effector by applying the [operational space control](https://khatib.stanford.edu/publications/pdfs/Khatib_1987_RA.pdf) algorithm to apply per-joint efforts to perturb the robot's end effector with impedances ("force") along all six (x,y,z,ax,ay,az) axes. Unlike `InverseKinematicsController`, this controller is inherently compliant and especially useful for contact-rich tasks or settings where fine-grained forces are required. For robots with >6 arm joints, an additional null command is used as a secondary objective and is defined as joint state `reset_joint_pos`.<br><br>
Note: Orientation convention is axis-angle `[ax,ay,az]` representation, and commands are expressed in the robot base frame unless otherwise noted.<br><br>
<ul>
<li>_Command Dim_: 3 / 6</li>
<li>_Command Description_: desired pose command, depending on `mode`: <ul>
<li>`absolute_pose`: 6DOF `[x,y,z,ax,ay,az]` absolute position, absolute orientation</li>
<li>`pose_absolute_ori`: 6DOF `[dx,dy,dz,ax,ay,az]` delta position, absolute orientation</li>
<li>`pose_delta_ori`: 6DOF `[dx,dy,dz,dax,day,daz]` delta position, delta orientation</li>
<li>`position_fixed_ori`: 3DOF `[dx,dy,dz]` delta position, orientation setpoint is kept as fixed initial absolute orientation</li>
<li>`position_compliant_ori`: 3DOF `[dx,dy,dz]` delta position, delta orientation setpoint always kept as 0s (so can drift over time)</li>
</ul></li>
<li>_Control Dim_: n_arm_joints</li>
<li>_Control Type_: effort</li>
</ul>
</td>
</tr>
</table>
### Manipulation Gripper Controllers
These are controllers specifically meant for robots with manipulation capabilities, and are intended to control a robot's end-effector gripper
<table markdown="span" width="100%">
<tr>
<td valign="top" width="100%">
[**`MultiFingerGripperController`**](../reference/controllers/multi_finger_gripper_controller.html)<br><br>
Commands a robot's gripper joints, with behavior defined via `mode`. By default, <closed, open> is assumed to correspond to <q_lower_limit, q_upper_limit> for each joint, though this can be manually set via the `closed_qpos` and `open_qpos` arguments.<br><br>
<ul>
<li>_Command Dim_: 1 / n_gripper_joints</li>
<li>_Command Description_: desired gripper command, depending on `mode`: <ul>
<li>`binary`: 1DOF `[open / close]` binary command, where >0 corresponds to open unless `inverted` is set, in which case <0 corresponds to open</li>
<li>`smooth`: 1DOF `[q]` command, which gets broadcasted across all finger joints</li>
<li>`independent`: NDOF `[q_0, ..., q_n]` per-finger joint commands</li>
</ul></li>
<li>_Control Dim_: n_gripper_joints</li>
<li>_Control Type_: position / velocity / effort</li>
</ul>
</td>
</tr>
</table>
| 12,148 | Markdown | 66.871508 | 616 | 0.670975 |
StanfordVL/OmniGibson/docs/modules/object.md | ---
icon: material/food-apple-outline
---
# 🍎 **Object**
Objects, such as furniture, are essential to building manipulation environments. OmniGibson provides a standardized set of object classes that simplify the procedure for importing 3D models into the scene or procedurally generating new objects. Object assets are defined in the USD format: they can either be stored as USD files on disk and loaded into the simulator (e.g., `USDObject` and `DatasetObject`), or be created on the fly by code prior to simulation (e.g., `PrimitiveObject` and `LightObject`).
## Usage
### Importing Objects
Objects can be added to a given `Environment` instance by specifying them in the config that is passed to the environment constructor via the `objects` key. This is expected to be a list of dictionaries, each of which specifies the desired configuration for a single object to be created. For each dict, the `type` key is required and specifies the desired object class, and global `position` and `orientation` (in (x,y,z,w) quaternion form) can also be specified. Additional keys can be specified and will be passed directly to the specific object class constructor. An example of an object configuration is shown below in `.yaml` form:
??? code "single_object_config_example.yaml"
``` yaml linenums="1"
objects:
- type: USDObject
name: some_usd_object
usd_path: your_path_to_model.usd
visual_only: False
position: [0, 0, 0]
orientation: [0, 0, 0, 1]
scale: [0.5, 0.6, 0.7]
```
`OmniGibson` supports 6 types of objects, as follows:
- `ControllableObject`: This class represents objects that can be controlled through joint controllers. It is used as the parent class of the robot classes and provides functionality for applying control actions to the objects. In general, users should not create objects of this class, but rather directly spawn the desired robot type in the `robots` section of the config.
- `StatefulObject`: This class represents objects that come with object states. For more information regarding object states, please take a look at `object_states`. This is also meant to be a parent class, and should generally not be instantiated directly.
- `PrimitiveObject`: This class represents primitive-shape objects (cubes, spheres, cones, etc.). These are usually used as visual objects in the scene. For example, users can instantiate a sphere object to visualize the target location of a robot reaching task, and set its `visual_only` property to true to disable its kinematics and collisions with other objects.
- `LightObject`: This class specifically represents lights in the scene, and provides functionality to modify their properties. There are 7 types of lights users can instantiate in OmniGibson: cylinder light, disk light, distant light, dome light, geometry light, rectangle light, and sphere light. Users can choose whichever type of light works best, and set the `intensity` property to control its brightness.
- `USDObject`: This class represents objects loaded through a USD file. This is useful when users want to load a custom USD asset into the simulator. Users should specify the `usd_path` parameter of the `USDObject` in order to load the desired file of their choice.
- `DatasetObject`: This class inherits from `USDObject` and represents object from the OmniGibson dataset. Users should specify the category of objects they want to load, as well as the model id, which is a 6 character string unique to each dataset object. For the possible categories and models, please refer to our [Knowledgebase Dashboard](https://behavior.stanford.edu/knowledgebase/)
### Runtime
Usually, objects are instantiated upon startup. We can modify certain properties of an object while the simulator is running. For example, to teleport an object from one place to another, simply call `object.set_position_orientation(new_pos, new_orn)`. Another example is highlighting an object: setting `object.highlighted = True` will render the object highlighted in pink in the scene.
To access the objects from the environment, one can call `env.scene.object_registry`. Here are a couple examples:
- `env.scene.object_registry("name", OBJECT_NAME)`: get the object by its name
- `env.scene.object_registry("category", CATEGORY)`: get the object by its category
- `env.scene.object_registry("prim_path", PRIM_PATH)`: get the object by its prim path
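Putting these together, here is a minimal sketch (assuming the environment was built from a config like the one above, containing an object named `some_usd_object`):

```python
import omnigibson as og

env = og.Environment(configs=cfg)  # cfg includes the "objects" list shown earlier

# Fetch the object by name, teleport it, and highlight it
obj = env.scene.object_registry("name", "some_usd_object")
obj.set_position_orientation([1.0, 2.0, 0.5], [0, 0, 0, 1])  # (x,y,z), (x,y,z,w) quaternion
obj.highlighted = True
```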
| 4,438 | Markdown | 79.709089 | 634 | 0.770392 |
StanfordVL/OmniGibson/docs/modules/robots.md | ---
icon: material/robot-outline
---
# 🤖 **Robots**
## Description
In **`OmniGibson`**, `Robot`s define agents that can interact with other objects in a given environment. Each robot can _interact_ by deploying joint
commands via its set of [`Controller`](./controllers.md)s, and can _perceive_ its surroundings via its set of [`Sensor`](./sensor.md)s.
**`OmniGibson`** supports both navigation and manipulation robots, and allows for modular specification of individual controllers for controlling the different components of a given robot. For example, the `Fetch` robot is a mobile manipulator composed of a mobile (two-wheeled) base, two head joints, a trunk, seven arm joints, and two gripper finger joints. `Fetch` owns 4 controllers, one for controlling the base, the head, the trunk + arm, and the gripper. There are multiple options for each controller depending on the desired action space. For more information, check out our [robot examples](../getting_started/examples.md#robots).
It is important to note that robots are full-fledged `StatefulObject`s, and thus leverage the same APIs as normal scene objects and can be treated as such. Robots can be thought of as `StatefulObject`s that additionally own controllers (`robot.controllers`) and sensors (`robot.sensors`).
## Usage
### Importing
Robots can be added to a given `Environment` instance by specifying them in the config that is passed to the environment constructor via the `robots` key. This is expected to be a list of dictionaries, where each dictionary specifies the desired configuration for a single robot to be created. For each dict, the `type` key is required and specifies the desired robot class, and global `position` and `orientation` (in (x,y,z,w) quaternion form) can also be specified. Additional keys can be specified and will be passed directly to the specific robot class constructor. An example of a robot configuration is shown below in `.yaml` form:
??? code "single_fetch_config_example.yaml"
``` yaml linenums="1"
robots:
- type: Fetch
position: [0, 0, 0]
orientation: [0, 0, 0, 1]
obs_modalities: [scan, rgb, depth]
scale: 1.0
self_collision: false
action_normalize: true
action_type: continuous
grasping_mode: physical
rigid_trunk: false
default_trunk_offset: 0.365
default_arm_pose: diagonal30
reset_joint_pos: tuck
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 128
image_width: 128
ScanSensor:
sensor_kwargs:
min_range: 0.05
max_range: 10.0
controller_config:
base:
name: DifferentialDriveController
arm_0:
name: InverseKinematicsController
kv: 2.0
gripper_0:
name: MultiFingerGripperController
mode: binary
camera:
name: JointController
use_delta_commands: False
```
### Runtime
Usually, actions are passed to robots and observations retrieved via `obs, reward, done, info = env.step(action)`. However, actions can also be directly deployed and observations retrieved from the robot using the following APIs:
<div class="annotate" markdown>
- **Applying actions**: `robot.apply_action(action)` (1)
- **Retrieving observations**: `obs, info = robot.get_obs()` (2)
</div>
1. `action` is a 1D-numpy array. For more information, please see the [Controller](./controllers.md) section!
2. `obs` is a dict mapping observation name to observation data, and `info` is a dict of relevant metadata about the observations. For more information, please see the [Sensor](./sensor.md) section!
Controllers and sensors can be accessed directly via the `controllers` and `sensors` properties, respectively. And, like all objects in **`OmniGibson`**, common information such as joint data and object states can also be directly accessed from the `robot` class.
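As a minimal sketch (assuming `import omnigibson as og` and an `Environment` instance `env` built from a config like the one above):

```python
import numpy as np

robot = env.robots[0]                # the robot instance created by the config
action = np.zeros(robot.action_dim)  # a zero (no-op) action in this robot's action space
robot.apply_action(action)
og.sim.step()                        # advance the simulation so the command takes effect
obs, info = robot.get_obs()          # sensor readings + proprioception
```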
## Models
**`OmniGibson`** currently supports 9 robots, consisting of 4 mobile robots, 2 manipulation robots, 2 mobile manipulation robots, and 1 anthropomorphic "robot" (a bimanual agent proxy used for VR teleoperation). Below, we provide a brief overview of each model:
### Mobile Robots
These are navigation-only robots (an instance of [`LocomotionRobot`](../reference/robots/locomotion_robot.html)) that solely consist of a base that can move.
<table markdown="span">
<tr>
<td valign="top" width="60%">
[**`Turtlebot`**](../reference/robots/turtlebot.html)<br><br>
The two-wheeled <a href="https://www.turtlebot.com/turtlebot2/">Turtlebot 2</a> model with the Kobuki base.<br><br>
<ul>
<li>_Controllers_: Base</li>
<li>_Sensors_: Camera, LIDAR</li>
</ul>
</td>
<td>
<img src="../assets/robots/Turtlebot.png" alt="rgb">
</td>
</tr>
<tr>
<td valign="top" width="60%">
[**`Locobot`**](../reference/robots/locobot.html)<br><br>
The two-wheeled, open-source <a href="http://www.locobot.org/">LoCoBot</a> model.<br><br> Note that in our model the arm is disabled and is fixed to the base.<br><br>
<ul>
<li>_Controllers_: Base</li>
<li>_Sensors_: Camera, LIDAR</li>
</ul>
</td>
<td>
<img src="../assets/robots/Locobot.png" alt="rgb">
</td>
</tr>
<tr>
<td valign="top" width="60%">
[**`Husky`**](../reference/robots/husky.html)<br><br>
The four-wheeled <a href="https://clearpathrobotics.com/husky-unmanned-ground-vehicle-robot/">Husky UGV</a> model from Clearpath Robotics.<br><br>
<ul>
<li>_Controllers_: Base</li>
<li>_Sensors_: Camera, LIDAR</li>
</ul>
</td>
<td>
<img src="../assets/robots/Husky.png" alt="rgb">
</td>
</tr>
<tr>
<td valign="top" width="60%">
[**`Freight`**](../reference/robots/freight.html)<br><br>
The two-wheeled <a href="https://docs.fetchrobotics.com/">Freight</a> model which serves as the base for the Fetch robot.<br><br>
<ul>
<li>_Controllers_: Base</li>
<li>_Sensors_: Camera, LIDAR</li>
</ul>
</td>
<td>
<img src="../assets/robots/Freight.png" alt="rgb">
</td>
</tr>
</table>
### Manipulation Robots
These are manipulation-only robots (an instance of [`ManipulationRobot`](../reference/robots/manipulation_robot.html)) that cannot move and solely consist of an actuated arm with a gripper attached to its end effector.
<table markdown="span">
<tr>
<td valign="top" width="60%">
[**`Franka`**](../reference/robots/franka.html)<br><br>
The popular 7-DOF <a href="https://franka.de/">Franka Research 3</a> model equipped with a parallel jaw gripper. Note that OmniGibson also includes two alternative versions of Franka: FrankaAllegro (equipped with an Allegro hand) and FrankaLeap (equipped with a Leap hand).<br><br>
<ul>
<li>_Controllers_: Arm, Gripper</li>
<li>_Sensors_: Wrist Camera</li>
</ul>
</td>
<td>
<img src="../assets/robots/FrankaPanda.png" alt="rgb">
</td>
</tr>
<tr>
<td valign="top" width="60%">
[**`VX300S`**](../reference/robots/vx300s.html)<br><br>
The 6-DOF <a href="https://www.trossenrobotics.com/viperx-300-robot-arm-6dof.aspx">ViperX 300 6DOF</a> model from Trossen Robotics equipped with a parallel jaw gripper.<br><br>
<ul>
<li>_Controllers_: Arm, Gripper</li>
<li>_Sensors_: Wrist Camera</li>
</ul>
</td>
<td>
<img src="../assets/robots/VX300S.png" alt="rgb">
</td>
</tr>
</table>
### Mobile Manipulation Robots
These are robots that can both navigate and manipulate (and inherit from both [`LocomotionRobot`](../reference/robots/locomotion_robot.html) and [`ManipulationRobot`](../reference/robots/manipulation_robot.html)), and are equipped with both a base that can move as well as one or more gripper-equipped arms that can actuate.
<table markdown="span">
<tr>
<td valign="top" width="60%">
[**`Fetch`**](../reference/robots/fetch.html)<br><br>
The <a href="https://docs.fetchrobotics.com/">Fetch</a> model, composed of a two-wheeled base, linear trunk, 2-DOF head, 7-DOF arm, and 2-DOF parallel jaw gripper.<br><br>
<ul>
<li>_Controllers_: Base, Head, Arm, Gripper</li>
<li>_Sensors_: Head Camera, LIDAR</li>
</ul>
</td>
<td>
<img src="../assets/robots/Fetch.png" alt="rgb">
</td>
</tr>
<tr>
<td valign="top" width="60%">
[**`Tiago`**](../reference/robots/tiago.html)<br><br>
The bimanual <a href="https://pal-robotics.com/robots/tiago/">Tiago</a> model from PAL robotics, composed of a holonomic base (which we model as a 3-DOF (x,y,rz) set of joints), linear trunk, 2-DOF head, x2 7-DOF arm, and x2 2-DOF parallel jaw grippers.<br><br>
<ul>
<li>_Controllers_: Base, Head, Left Arm, Right Arm, Left Gripper, Right Gripper</li>
<li>_Sensors_: Head Camera, Rear LIDAR, Front LIDAR</li>
</ul>
</td>
<td>
<img src="../assets/robots/Tiago.png" alt="rgb">
</td>
</tr>
</table>
### Additional Robots
<table markdown="span">
<tr>
<td valign="top" width="60%">
[**`BehaviorRobot`**](../reference/robots/behavior_robot.html#robots.behavior_robot.BehaviorRobot)<br><br>
A hand-designed model intended to be used exclusively for VR teleoperation.<br><br>
<ul>
<li>_Controllers_: Base, Head, Left Arm, Right Arm, Left Gripper, Right Gripper</li>
<li>_Sensors_: Head Camera</li>
</ul>
</td>
<td>
<img src="../assets/robots/BehaviorRobot.png" alt="rgb">
</td>
</tr>
</table>
| 10,379 | Markdown | 45.756757 | 640 | 0.613161 |
StanfordVL/OmniGibson/docs/modules/prim.md | ---
icon: material/cube-outline
---
# 🧱 **Prim**
A Prim, short for "primitive," is a fundamental building block of a scene, representing an individual object or entity within the scene's hierarchy. It is essentially a container that encapsulates data, attributes, and relationships, allowing it to represent various scene components like models, cameras, lights, or groups of prims. These prims are systematically organized into a hierarchical framework, creating a scene graph that depicts the relationships and transformations between objects.
Every prim is uniquely identified by a path, which serves as a locator within the scene graph. This path includes the names of all parent prims leading up to it. For example, a prim's path might be `/World/robot0/gripper_link`, indicating that the `gripper_link` is a child of `robot0`.
Additionally, prims carry a range of attributes, including position, rotation, scale, and material properties. These attributes define the properties and characteristics of the objects they represent.
| 1,040 | Markdown | 79.076917 | 496 | 0.796154 |
StanfordVL/OmniGibson/docs/modules/sensor.md | ---
icon: material/camera-outline
---
# 📷 **Sensor**
## Description
Sensors play a crucial role in OmniGibson, as they facilitate the robots' observation of their environment. We offer two main classes of sensors:
- `ScanSensor`: This includes a 2D LiDAR range sensor and an occupancy grid sensor.
- `VisionSensor`: This sensor type features a camera equipped with various modalities, including RGB, depth, normals, three types of segmentation, optical flow, 2D and 3D bounding boxes.
## Usage
To obtain sensor readings, the `get_obs()` function can be invoked at multiple levels within our hierarchy:
- From `Environment`: Provides
1. All observations from all robots
2. All task-related observations
3. Observations from external sensors, if available
- From `Robot`: Provides
1. Readings from all sensors associated with the robot
2. Proprioceptive observations for the robot (e.g., base pose, joint position, joint velocity)
- From `Sensor`: Delivers all sensor readings based on the sensor's modalities. Additionally, our API allows for the simulation of real-world sensor behaviors by:
1. Adding noise
2. Dropping out sensor values to emulate missing data in sensor readings
Besides the actual data, `get_obs()` also returns a secondary dictionary containing information about the data, such as segmentation labels for vision sensors.
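In code, these levels correspond to the following calls (a sketch, assuming `env`, `robot`, and `sensor` instances are already in scope):

```python
obs, info = env.get_obs()     # all robots + task + external sensors
obs, info = robot.get_obs()   # a single robot: all of its sensors + proprioception
obs, info = sensor.get_obs()  # a single sensor: its enabled modalities only
```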
For instance, calling `get_obs()` on an environment with a single robot, which has all modalities enabled, might produce results similar to this:
<details>
<summary>Click to see code!</summary>
<pre><code>
data:
{
"robot0": {
"robot0:laser_link:Lidar:0": {
"scan": np.array(...),
"occupancy_grid": np.array(...)
},
"robot0:eyes:Camera:0": {
"rgb": np.array(...),
"depth": np.array(...),
"depth_linear": np.array(...),
"normal": np.array(...),
"flow": np.array(...),
"bbox_2d_tight": np.array(...),
"bbox_2d_loose": np.array(...),
"bbox_3d": np.array(...),
"seg_semantic": np.array(...),
"seg_instance": np.array(...),
"seg_instance_id": np.array(...)
},
"proprio": np.array(...)
}
"task": {
"low_dim": np.array(...)
}
}
info:
{
'robot0': {
'robot0:laser_link:Lidar:0': {},
'robot0:eyes:Camera:0': {
'seg_semantic': {'298104422': 'object', '764121901': 'background', '2814990211': 'agent'},
'seg_instance': {...},
'seg_instance_id': {...}
},
'proprio': {}
}
}
</code></pre>
</details>
## Observations
### Vision Sensor
<table markdown="span">
<tr>
<td valign="top" width="60%">
<strong>RGB</strong><br><br>
RGB image of the scene from the camera perspective.<br><br>
Size: (height, width, 4), numpy.uint8<br><br>
</td>
<td>
<img src="../assets/sensor_asset/rgb.png" alt="rgb">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>Depth</strong><br><br>
Distance between the camera and everything else in the scene.<br><br>
Size: (height, width), numpy.float32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/depth.png" alt="Depth Map">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>Depth Linear</strong><br><br>
Distance between the camera and everything else in the scene, where distance measurement is linearly proportional to the actual distance.<br><br>
Size: (height, width), numpy.float32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/depth_linear.png" alt="Depth Map Linear">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>Normal</strong><br><br>
Surface normals - vectors perpendicular to the surface of objects in the scene.<br><br>
Size: (height, width, 4), numpy.float32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/normal.png" alt="Normal">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>Semantic Segmentation</strong><br><br>
Each pixel is assigned a label, indicating the object category it belongs to (e.g., table, chair).<br><br>
Size: (height, width), numpy.uint32<br><br>
We also provide a dictionary containing the mapping of semantic IDs to object categories. You can get this here: <br><br>
from omnigibson.utils.constants import semantic_class_id_to_name
</td>
<td>
<img src="../assets/sensor_asset/seg_semantic.png" alt="Semantic Segmentation">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>Instance Segmentation</strong><br><br>
Each pixel is assigned a label, indicating the specific object instance it belongs to (e.g., table1, chair2).<br><br>
Size: (height, width), numpy.uint32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/seg_instance.png" alt="Instance Segmentation">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>Instance Segmentation ID</strong><br><br>
Each pixel is assigned a label, indicating the specific object instance it belongs to (e.g., /World/table1/visuals, /World/chair2/visuals).<br><br>
Size: (height, width), numpy.uint32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/seg_instance_id.png" alt="Instance Segmentation ID">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>Optical Flow</strong><br><br>
Optical flow - motion of pixels belonging to objects caused by the relative motion between the camera and the scene.<br><br>
Size: (height, width, 4), numpy.float32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/optical_flow.png" alt="Optical Flow">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>2D Bounding Box Tight</strong><br><br>
2D bounding boxes wrapping individual objects, excluding any parts that are occluded.<br><br>
Size: a list of <br>
semanticID, numpy.uint32;<br>
x_min, numpy.int32;<br>
y_min, numpy.int32;<br>
x_max, numpy.int32;<br>
y_max, numpy.int32;<br>
occlusion_ratio, numpy.float32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/bbox_2d_tight.png" alt="2D Bounding Box Tight">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>2D Bounding Box Loose</strong><br><br>
2D bounding boxes wrapping individual objects, including occluded parts.<br><br>
Size: a list of <br>
semanticID, numpy.uint32;<br>
x_min, numpy.int32;<br>
y_min, numpy.int32;<br>
x_max, numpy.int32;<br>
y_max, numpy.int32;<br>
occlusion_ratio, numpy.float32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/bbox_2d_loose.png" alt="2D Bounding Box Loose">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>3D Bounding Box</strong><br><br>
3D bounding boxes wrapping individual objects.<br><br>
Size: a list of <br>
semanticID, numpy.uint32;<br>
x_min, numpy.float32;<br>
y_min, numpy.float32;<br>
z_min, numpy.float32;<br>
x_max, numpy.float32;<br>
y_max, numpy.float32;<br>
z_max, numpy.float32;<br>
transform (4x4), numpy.float32;<br>
occlusion_ratio, numpy.float32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/bbox_3d.png" alt="3D Bounding Box">
</td>
</tr>
</table>
### Range Sensor
<table markdown="span">
<tr>
<td valign="top" width="60%">
<strong>2D LiDAR</strong><br><br>
Distances to surrounding objects by emitting laser beams and detecting the reflected light.<br><br>
Size: # of rays, numpy.float32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/lidar.png" alt="2D LiDAR">
</td>
</tr>
<tr>
<td valign="top" width="60%">
<strong>Occupancy Grid</strong><br><br>
A representation of the environment as a 2D grid where each cell indicates the presence (or absence) of an obstacle.<br><br>
Size: (grid resolution, grid resolution), numpy.float32<br><br>
</td>
<td>
<img src="../assets/sensor_asset/occupancy_grid.png" alt="Occupancy Grid">
</td>
</tr>
</table>
### Proprioception
<table markdown="span">
<tr>
<td valign="top" width="100%">
<strong>Joint Positions</strong><br><br>
Joint positions.<br><br>
Size: # of joints, numpy.float64<br><br>
</td>
<td>
</td>
</tr>
<tr>
<td valign="top" width="100%">
<strong>Joint Velocities</strong><br><br>
Joint velocities.<br><br>
Size: # of joints, numpy.float64<br><br>
</td>
<td>
</td>
</tr>
<tr>
<td valign="top" width="100%">
<strong>Joint Efforts</strong><br><br>
Torque measured at each joint.<br><br>
Size: # of joints, numpy.float64<br><br>
</td>
<td>
</td>
</tr>
<tr>
<td valign="top" width="100%">
<strong>Robot Position</strong><br><br>
Robot position in the world frame.<br><br>
Size: (x, y, z), numpy.float64<br><br>
</td>
<td>
</td>
</tr>
<tr>
<td valign="top" width="100%">
<strong>Robot Orientation</strong><br><br>
Robot global euler orientation.<br><br>
Size: (roll, pitch, yaw), numpy.float64<br><br>
</td>
<td>
</td>
</tr>
<tr>
<td>
<strong>Robot 2D Orientation</strong><br><br>
Robot orientation on the XY plane of the world frame.<br><br>
Size: angle, numpy.float64<br><br>
</td>
<td>
</td>
</tr>
<tr>
<td valign="top" width="100%">
<strong>Robot Linear Velocity</strong><br><br>
Robot linear velocity.<br><br>
Size: (x_vel, y_vel, z_vel), numpy.float64<br><br>
</td>
<td>
</td>
</tr>
<tr>
<td valign="top" width="100%">
<strong>Robot Angular Velocity</strong><br><br>
Robot angular velocity.<br><br>
Size: (x_vel, y_vel, z_vel), numpy.float64<br><br>
</td>
<td>
</td>
</tr>
</table>
### Task Observation
<table markdown="span" style="width: 100%;">
<tr>
<td valign="top" width="100%">
<strong>Low-dim task observation</strong><br><br>
Task-specific observation, e.g. navigation goal position.<br><br>
Size: # of low-dim observation, numpy.float64<br><br>
</td>
<td>
</td>
</tr>
</table>
| 11,560 | Markdown | 33.822289 | 187 | 0.54109 |
StanfordVL/OmniGibson/docs/modules/overview.md | ---
icon: material/graph-outline
---
# **Overview**
<figure markdown="span">
*(Figure: overview of **`OmniGibson`**'s framework and simulation loop.)*
</figure>
**`OmniGibson`**'s framework provides **modular APIs** for (a) quickly interacting with different components of a created environment and (b) prototyping and developing custom environments.
**`OmniGibson`** is built upon NVIDIA's [IsaacSim](https://docs.omniverse.nvidia.com/isaacsim/latest/index.html), a powerful simulation platform that uses [PhysX](https://nvidia-omniverse.github.io/PhysX/physx/5.3.1/index.html) as the physics backend.
We build upon IsaacSim's `Simulator` interface to construct our `Environment` class, which is an [OpenAI gym-compatible](https://gymnasium.farama.org/content/gym_compatibility/) interface and the main entry point into **`OmniGibson`**. An `Environment` instance generally consists of the following:
- A [`Scene`](./scene.md) instance, which by default is either a "dummy" (empty) scene or a fully-populated (`InteractiveTraversableScene`) instance,
- A [`BaseTask`](./task.md) instance, which can range from a complex `BehaviorTask`, navigation `PointNavigationTask`, or no-op `DummyTask`,
- Optionally, one or more [`BaseRobot`](./robots.md)s, which define the action space for the given environment instance,
- Optionally, one or more additional [`BaseObject`](./object.md)s, which are additional object models not explicitly defined in the environment's scene
The above figure describes **`OmniGibson`**'s simulation loop:
1. **Action Execution:** An externally defined `action` is passed to `Robot` instances in the `Environment`, which is processed by each robot's own set of `Controller`s and converted into low-level joint commands that are then deployed on the robot.
2. **Simulation Stepping:** The simulator takes at least one (and potentially multiple) physics steps, updating its internal state.
3. **Observation Retrieval:** Sensors on each `Robot` instance grab observations from the updated simulator state, and the loaded `Task` instance also computes its task-relevant observations and updates its internal state. The observations as well as task-relevant data is then returned from the `Environment` instance.
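In code, one iteration of this loop reduces to the familiar Gym pattern (a sketch; `policy` is a hypothetical stand-in for any external source of actions):

```python
action = policy(obs)                        # 1. action execution (processed by robot controllers)
obs, reward, done, info = env.step(action)  # 2. simulation stepping + 3. observation retrieval
```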
Each of the modules in **`OmniGibson`** can be extended by the user, and allow for custom subclass implementations to be used without needing to directly modify **`OmniGibson`** source code. This section provides high-level overviews of each of the modules, as well as general insight into the purpose and intended use-cases of each module.
| 2,584 | Markdown | 88.137928 | 340 | 0.772446 |
StanfordVL/OmniGibson/docs/modules/environment.md | ---
icon: material/earth
---
# 🌎 **Environment**
The OpenAI Gym Environment serves as a top-level simulation object, offering a suite of common interfaces. These include methods such as `step`, `reset`, `render`, and properties like `observation_space` and `action_space`. The OmniGibson Environment builds upon this foundation by also supporting the loading of scenes, robots, and tasks. Following the OpenAI Gym interface, the OmniGibson environment further provides access to both the action space and observation space of the robots and external sensors.
Creating a minimal environment requires the definition of a config dictionary. This dictionary should contain details about the scene, objects, robots, and specific characteristics of the environment:
<details>
<summary>Click to see code!</summary>
<pre><code>
import omnigibson as og
cfg = {
"env": {
"action_frequency": 10,
"physics_frequency": 120,
},
"scene": {
"type": "Scene",
},
"objects": [],
"robots": [
{
"type": "Fetch",
"obs_modalities": 'all',
"controller_config": {
"arm_0": {
"name": "NullJointController",
"motor_type": "position",
},
},
}
]
}
env = og.Environment(configs=cfg)
</code></pre>
</details>
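Once constructed, the environment can be stepped like any other OpenAI Gym environment. A minimal sketch, reusing the `env` instance created above (the 4-tuple return shown follows the classic Gym API):

```python
for _ in range(100):
    action = env.action_space.sample()          # random action for the Fetch robot
    obs, reward, done, info = env.step(action)  # gym-style stepping
```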
| 1,373 | Markdown | 30.953488 | 509 | 0.631464 |
StanfordVL/OmniGibson/docs/tutorials/demo_collection.md | ---
icon: octicons/rocket-16
---
# 🕹️ **Collecting Demonstrations**
## Devices
I/O Devices can be used to read user input and teleoperate simulated robots in real-time. OmniGibson leverages [TeleMoMa](https://robin-lab.cs.utexas.edu/telemoma-web/), a modular and versatile library for manipulating mobile robots in the scene. This is achieved by using devices such as keyboards, SpaceMouse, cameras, VR devices, mobile phones, or any combination thereof. More generally, we support any interface that implements the `telemoma.human_interface.teleop_core.BaseTeleopInterface` class. In order to support your own custom device, simply subclass this base class and implement the required methods. For more information on this, checkout the [TeleMoMa codebase](https://github.com/UT-Austin-RobIn/telemoma).
## Teleoperation
The following section will go through `robot_teleoperation_example.py`, which lets users choose a robot to complete a simple pick-and-place task. Users are also encouraged to take a look at `vr_simple_demo.py`, which shows how to render to a VR headset and teleoperate the `BehaviorRobot` with VR controllers (HTC VIVE).
We assume that we already have the scene and task set up. To initialize a teleoperation system, we first need to specify its configuration.
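For example, a keyboard-only setup might look like the sketch below. The field names are illustrative rather than TeleMoMa's authoritative schema; consult the [TeleMoMa codebase](https://github.com/UT-Austin-RobIn/telemoma) for the exact config structure:
```
# Illustrative only -- the real schema is defined by TeleMoMa
teleop_config = {
    "arm_left_controller": "keyboard",   # device driving the left arm
    "arm_right_controller": "keyboard",  # device driving the right arm
    "base_controller": "keyboard",       # device driving the base
}
```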
After defining the config, simply instantiate the teleoperation system:
```
teleop_sys = TeleopSystem(config=teleop_config, robot=robot, show_control_marker=True)
```
`TeleopSystem` takes in the config dictionary we just created, as well as the robot instance we want to teleoperate and `show_control_marker`, which, if set to `True`, will create a green visual marker indicating the desired end-effector pose, i.e. where the user wants the robot to go.
After the `TeleopSystem` is created, start by calling
```
teleop_sys.start()
```
Then, within the simulation loop, simply call
```
action = teleop_sys.get_action(teleop_sys.get_obs())
```
to get the action based on the user teleoperation input, and pass the action to the `env.step` function.
## (Optional) Saving and Loading Simulation State
You can save the current state of the simulator to a json file by calling `save`:
```
og.sim.save(JSON_PATH)
```
To restore any saved state, simply call `restore`
```
og.sim.restore(JSON_PATH)
```
Alternatively, if you just want to save all the scene and object info at the current timeframe, you can call `self.scene.dump_state(serialized=True)`, which will return a numpy array containing all the relevant information. You can then stack these arrays together to get the full trajectory of states.
| 2,630 | Markdown | 49.596153 | 722 | 0.774525 |
StanfordVL/OmniGibson/docs/miscellaneous/known_issues.md | # **Known Issues & Troubleshooting**
## 🤔 **Known Issues**
??? question "How can I parallelize running multiple scenes in OmniGibson?"
Currently, to run multiple scenes in parallel, you will need to launch separate instances of the OmniGibson environment. While this introduces some overhead due to running multiple instances of IsaacSim, we are actively working on implementing parallelization capabilities. Our goal is to enable running multiple scenes within a single instance, streamlining the process and reducing the associated overhead.
## 🧯 **Troubleshooting**
??? question "I cannot open Omniverse Launcher AppImage on Linux"
You probably need to [install FUSE](https://github.com/AppImage/AppImageKit/wiki/FUSE) to run the Omniverse Launcher AppImage.
??? question "OmniGibson is stuck at `HydraEngine rtx failed creating scene renderer.`"
`OmniGibson` is likely using an unsupported GPU (default is id 0). Run `nvidia-smi` to see the active list of GPUs, and select an NVIDIA-supported GPU and set its corresponding ID when running `OmniGibson` with `export OMNIGIBSON_GPU_ID=<ID NUMBER>`. | 1,120 | Markdown | 64.941173 | 412 | 0.776786 |
StanfordVL/OmniGibson/docs/miscellaneous/contact.md | # **Contact**
If you have any questions, comments, or concerns, please feel free to reach out to us by joining our Discord server:
<a href="https://discord.gg/bccR5vGFEx"><img src="https://discordapp.com/api/guilds/1166422812160966707/widget.png?style=banner3"></a> | 268 | Markdown | 52.799989 | 134 | 0.757463 |
StanfordVL/OmniGibson/docs/getting_started/examples.md | ---
icon: material/laptop
---
# 💻 **Examples**
**`OmniGibson`** ships with many demo scripts highlighting its modularity and diverse feature set intended as a set of building blocks enabling your research. Let's try them out!
***
## ⚙️ **A quick word about macros**
??? question annotate "Why macros?"
Macros enforce global behavior that is consistent within an individual python process but can differ between processes. This is useful because globally enabling all of **`OmniGibson`**'s features can cause unnecessary slowdowns, and so configuring the macros for your specific use case can optimize performance.
For example, Omniverse provides a so-called `flatcache` feature which provides significant performance boosts, but cannot be used when fluids or soft bodies are present. So, we ideally should always have `gm.USE_FLATCACHE=True` unless we have fluids or soft bodies in our environment.
`macros` define a globally available set of magic numbers or flags set throughout **`OmniGibson`**. These can either be directly set in `omnigibson.macros.py`, or can be programmatically modified at runtime via:
```{.python .annotate}
from omnigibson.macros import gm, macros
gm.<GLOBAL_MACRO> = <VALUE> # (1)!
macros.<OG_DIRECTORY>.<OG_MODULE>.<MODULE_MACRO> = <VALUE> # (2)!
```
1. `gm` refers to the "global" macros -- i.e.: settings that generally impact the entire **`OmniGibson`** stack. These are usually the only settings you may need to modify.
2. `macros` captures all remaining macros defined throughout **`OmniGibson`**'s codebase -- these are often hardcoded default settings or magic numbers defined in a specific module. These can also be overridden, but we recommend inspecting the module first to understand how it is used.
Many of our examples set various `macros` settings at the beginning of the script, and is a good way to understand use cases for modifying them!
***
## 🌎 **Environments**
These examples showcase the full **`OmniGibson`** stack in use, and the types of environments immediately supported.
### **BEHAVIOR Task Demo**
!!! abstract "This demo is useful for..."
* Understanding how to instantiate a BEHAVIOR task
* Understanding how a pre-defined configuration file is used
```{.python .annotate}
python -m omnigibson.examples.environments.behavior_env_demo
```
This demo instantiates one of our BEHAVIOR tasks (optionally sampling object locations online) in a fully-populated scene and loads a `Fetch` robot. The robot executes random actions and the environment is reset periodically.
??? code "behavior_env_demo.py"
``` py linenums="1"
--8<-- "examples/environments/behavior_env_demo.py"
```
### **Navigation Task Demo**
!!! abstract "This demo is useful for..."
* Understanding how to instantiate a navigation task
* Understanding how a pre-defined configuration file is used
```{.python .annotate}
python -m omnigibson.examples.environments.navigation_env_demo
```
This demo instantiates one of our navigation tasks in a fully-populated scene and loads a `Turtlebot` robot. The robot executes random actions and the environment is reset periodically.
??? code "navigation_env_demo.py"
``` py linenums="1"
--8<-- "examples/environments/navigation_env_demo.py"
```
## 🧑🏫 **Learning**
These examples showcase how **`OmniGibson`** can be used to train embodied AI agents.
### **Reinforcement Learning Demo**
!!! abstract "This demo is useful for..."
* Understanding how to hook up **`OmniGibson`** to an external algorithm
* Understanding how to train and evaluate a policy
```{.python .annotate}
python -m omnigibson.examples.learning.navigation_policy_demo
```
This demo loads a BEHAVIOR task with a `Fetch` robot, and trains / evaluates the agent using [Stable Baseline3](https://stable-baselines3.readthedocs.io/en/master/)'s PPO algorithm.
??? code "navigation_policy_demo.py"
``` py linenums="1"
--8<-- "examples/learning/navigation_policy_demo.py"
```
## 🏔️ **Scenes**
These examples showcase how to leverage **`OmniGibson`**'s large-scale, diverse scenes shipped with the BEHAVIOR dataset.
### **Scene Selector Demo**
!!! abstract "This demo is useful for..."
* Understanding how to load a scene into **`OmniGibson`**
* Accessing all BEHAVIOR dataset scenes
```{.python .annotate}
python -m omnigibson.examples.scenes.scene_selector
```
This demo lets you choose a scene from the BEHAVIOR dataset, loads it along with a `Turtlebot` robot, and cycles the resulting environment periodically.
??? code "scene_selector.py"
``` py linenums="1"
--8<-- "examples/scenes/scene_selector.py"
```
### **Scene Tour Demo**
!!! abstract "This demo is useful for..."
* Understanding how to load a scene into **`OmniGibson`**
* Understanding how to generate a trajectory from a set of waypoints
```{.python .annotate}
python -m omnigibson.examples.scenes.scene_tour_demo
```
This demo lets you choose a scene from the BEHAVIOR dataset. It allows you to move the camera using the keyboard, select waypoints, and then programmatically generates a video trajectory from the selected waypoints.
??? code "scene_tour_demo.py"
``` py linenums="1"
--8<-- "examples/scenes/scene_tour_demo.py"
```
### **Traversability Map Demo**
!!! abstract "This demo is useful for..."
* Understanding how to leverage traversability map information from BEHAVIOR dataset scenes
```{.python .annotate}
python -m omnigibson.examples.scenes.traversability_map_example
```
This demo lets you choose a scene from the BEHAVIOR dataset, and generates its corresponding traversability map.
??? code "traversability_map_example.py"
``` py linenums="1"
--8<-- "examples/scenes/traversability_map_example.py"
```
## 🍎 **Objects**
These examples showcase how to leverage objects in **`OmniGibson`**.
### **Load Object Demo**
!!! abstract "This demo is useful for..."
* Understanding how to load an object into **`OmniGibson`**
* Accessing all BEHAVIOR dataset asset categories and models
```{.python .annotate}
python -m omnigibson.examples.objects.load_object_selector
```
This demo lets you choose a specific object from the BEHAVIOR dataset, and loads the requested object into an environment.
??? code "load_object_selector.py"
``` py linenums="1"
--8<-- "examples/objects/load_object_selector.py"
```
### **Object Visualizer Demo**
!!! abstract "This demo is useful for..."
* Viewing objects' textures as rendered in **`OmniGibson`**
* Viewing articulated objects' range of motion
* Understanding how to reference object instances from the environment
* Understanding how to set object poses and joint states
```{.python .annotate}
python -m omnigibson.examples.objects.visualize_object
```
This demo lets you choose a specific object from the BEHAVIOR dataset, and rotates the object in-place. If the object is articulated, it additionally moves its joints through its full range of motion.
??? code "visualize_object.py"
``` py linenums="1"
--8<-- "examples/objects/visualize_object.py"
```
### **Highlight Object**
!!! abstract "This demo is useful for..."
* Understanding how to highlight individual objects within a cluttered scene
* Understanding how to access groups of objects from the environment
```{.python .annotate}
python -m omnigibson.examples.objects.highlight_objects
```
This demo loads the Rs_int scene and highlights windows on/off repeatedly.
??? code "highlight_objects.py"
``` py linenums="1"
--8<-- "examples/objects/highlight_objects.py"
```
### **Draw Object Bounding Box Demo**
!!! abstract annotate "This demo is useful for..."
* Understanding how to access observations from a `GymObservable` object
* Understanding how to access objects' bounding box information
* Understanding how to dynamically modify vision modalities
*[GymObservable]: [`Environment`](../reference/envs/env_base.md), all sensors extending from [`BaseSensor`](../reference/sensors/sensor_base.md), and all objects extending from [`BaseObject`](../reference/objects/object_base.md) (which includes all robots extending from [`BaseRobot`](../reference/robots/robot_base.md)!) are [`GymObservable`](../reference/utils/gym_utils.md#utils.gym_utils.GymObservable) objects!
```{.python .annotate}
python -m omnigibson.examples.objects.draw_bounding_box
```
This demo loads a door object and banana object, and partially obscures the banana with the door. It generates both "loose" and "tight" bounding boxes (where the latter respects occlusions) for both objects, and dumps them to an image on disk.
??? code "draw_bounding_box.py"
``` py linenums="1"
--8<-- "examples/objects/draw_bounding_box.py"
```
## 🌡️ **Object States**
These examples showcase **`OmniGibson`**'s powerful object states functionality, which captures both individual and relational kinematic and non-kinematic states.
### **Slicing Demo**
!!! abstract "This demo is useful for..."
* Understanding how slicing works in **`OmniGibson`**
* Understanding how to access individual objects once the environment is created
```{.python .annotate}
python -m omnigibson.examples.object_states.slicing_demo
```
This demo spawns an apple on a table with a knife above it, and lets the knife fall to "cut" the apple in half.
??? code "slicing_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/slicing_demo.py"
```
### **Dicing Demo**
!!! abstract "This demo is useful for..."
* Understanding how to leverage the `Dicing` state
* Understanding how to enable objects to be `diceable`
```{.python .annotate}
python -m omnigibson.examples.object_states.dicing_demo
```
This demo loads an apple and a knife, and showcases how apple can be diced into smaller chunks with the knife.
??? code "dicing_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/dicing_demo.py"
```
### **Folded and Unfolded Demo**
!!! abstract "This demo is useful for..."
* Understanding how to load a softbody (cloth) version of a BEHAVIOR dataset object
* Understanding how to enable cloth objects to be `foldable`
* Understanding the current heuristics used for gauging a cloth's "foldness"
```{.python .annotate}
python -m omnigibson.examples.object_states.folded_unfolded_state_demo
```
This demo loads in three different cloth objects, and allows you to manipulate them while printing out their `Folded` state status in real-time. Try manipulating the object by holding down **`Shift`** and then **`Left-click + Drag`**!
??? code "folded_unfolded_state_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/folded_unfolded_state_demo.py"
```
### **Overlaid Demo**
!!! abstract "This demo is useful for..."
* Understanding how cloth objects can be overlaid on rigid objects
* Understanding current heuristics used for gauging a cloth's "overlaid" status
```{.python .annotate}
python -m omnigibson.examples.object_states.overlaid_demo
```
This demo loads in a carpet on top of a table. The demo allows you to manipulate the carpet while printing out its `Overlaid` state status in real-time. Try manipulating the object by holding down **`Shift`** and then **`Left-click + Drag`**!
??? code "overlaid_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/overlaid_demo.py"
```
### **Heat Source or Sink Demo**
!!! abstract "This demo is useful for..."
* Understanding how a heat source (or sink) is visualized in **`OmniGibson`**
* Understanding how dynamic fire visuals are generated in real-time
```{.python .annotate}
python -m omnigibson.examples.object_states.heat_source_or_sink_demo
```
This demo loads in a stove and toggles its `HeatSource` on and off, showcasing the dynamic fire visuals available in **`OmniGibson`**.
??? code "heat_source_or_sink_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/heat_source_or_sink_demo.py"
```
### **Temperature Demo**
!!! abstract "This demo is useful for..."
* Understanding how to dynamically sample kinematic states for BEHAVIOR dataset objects
* Understanding how temperature changes are propagated to individual objects from individual heat sources or sinks
```{.python .annotate}
python -m omnigibson.examples.object_states.temperature_demo
```
This demo loads in various heat sources and sinks, and places an apple within close proximity to each of them. As the environment steps, each apple's temperature is printed in real-time, showcasing **`OmniGibson`**'s rudimentary temperature dynamics.
??? code "temperature_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/temperature_demo.py"
```
### **Heated Demo**
!!! abstract "This demo is useful for..."
* Understanding how temperature modifications can cause objects' visual changes
* Understanding how dynamic steam visuals are generated in real-time
```{.python .annotate}
python -m omnigibson.examples.object_states.heated_state_demo
```
This demo loads in three bowls, and immediately sets their temperatures past their `Heated` threshold. Steam is generated in real-time from these objects, and then disappears once the temperature of the objects drops below their `Heated` threshold.
??? code "heated_state_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/heated_state_demo.py"
```
### **Onfire Demo**
!!! abstract "This demo is useful for..."
* Understanding how changing onfire state can cause objects' visual changes
* Understanding how onfire can be triggered by nearby onfire objects
```{.python .annotate}
python -m omnigibson.examples.object_states.onfire_demo
```
This demo loads in a stove (toggled on) and two apples. The first apple will be ignited by the stove first, then the second apple will be ignited by the first apple.
??? code "onfire_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/onfire_demo.py"
```
### **Particle Applier and Remover Demo**
!!! abstract "This demo is useful for..."
* Understanding how a `ParticleRemover` or `ParticleApplier` object can be generated
* Understanding how particles can be dynamically generated on objects
* Understanding different methods for applying and removing particles via the `ParticleRemover` or `ParticleApplier` object
```{.python .annotate}
python -m omnigibson.examples.object_states.particle_applier_remover_demo
```
This demo loads in a washtowel and table and lets you choose the ability configuration to enable the washtowel with. The washtowel will then proceed to either remove or generate particles dynamically on the table while moving.
??? code "particle_applier_remover_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/particle_applier_remover_demo.py"
```
### **Particle Source and Sink Demo**
!!! abstract "This demo is useful for..."
* Understanding how a `ParticleSource` or `ParticleSink` object can be generated
* Understanding how particles can be dynamically generated and destroyed via such objects
```{.python .annotate}
python -m omnigibson.examples.object_states.particle_source_sink_demo
```
This demo loads in a sink, which is enabled with both the ParticleSource and ParticleSink states. The sink's particle source is located at the faucet spout and spawns a continuous stream of water particles, which is then destroyed ("sunk") by the sink's particle sink located at the drain.
??? note "Difference between `ParticleApplier/Removers` and `ParticleSource/Sinks`"
The key difference between `ParticleApplier/Removers` and `ParticleSource/Sinks` is that `Applier/Removers`
requires contact (if using `ParticleProjectionMethod.ADJACENCY`) or overlap
(if using `ParticleProjectionMethod.PROJECTION`) in order to spawn / remove particles, and generally only spawn
particles at the contact points. `ParticleSource/Sinks` are special cases of `ParticleApplier/Removers` that
always use `ParticleProjectionMethod.PROJECTION` and always spawn / remove particles within their projection volume,
regardless of overlap with other objects.
??? code "particle_source_sink_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/particle_source_sink_demo.py"
```
### **Kinematics Demo**
!!! abstract "This demo is useful for..."
* Understanding how to dynamically sample kinematic states for BEHAVIOR dataset objects
* Understanding how to import additional objects after the environment is created
```{.python .annotate}
python -m omnigibson.examples.object_states.sample_kinematics_demo
```
This demo procedurally generates a mini populated scene, spawning in a cabinet and placing boxes in its shelves, and then generating a microwave on a cabinet with a plate and apples sampled both inside and on top of it.
??? code "sample_kinematics_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/sample_kinematics_demo.py"
```
### **Attachment Demo**
!!! abstract "This demo is useful for..."
* Understanding how to leverage the `Attached` state
* Understanding how to enable objects to be `attachable`
```{.python .annotate}
python -m omnigibson.examples.object_states.attachment_demo
```
This demo loads an assembled shelf, and showcases how it can be manipulated to attach and detach parts.
??? code "attachment_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/attachment_demo.py"
```
### **Object Texture Demo**
!!! abstract "This demo is useful for..."
* Understanding how different object states can result in texture changes
* Understanding how to enable objects with texture-changing states
* Understanding how to dynamically modify object states
```{.python .annotate}
python -m omnigibson.examples.object_states.object_state_texture_demo
```
This demo loads in a single object, and then dynamically modifies its state so that its texture changes with each modification.
??? code "object_state_texture_demo.py"
``` py linenums="1"
--8<-- "examples/object_states/object_state_texture_demo.py"
```
## 🤖 **Robots**
These examples showcase how to interact and leverage robot objects in **`OmniGibson`**.
### **Robot Visualizer Demo**
!!! abstract "This demo is useful for..."
* Understanding how to load a robot into **`OmniGibson`** after an environment is created
* Accessing all **`OmniGibson`** robot models
* Viewing robots' low-level joint motion
```{.python .annotate}
python -m omnigibson.examples.robots.all_robots_visualizer
```
This demo iterates over all robots in **`OmniGibson`**, loading each one into an empty scene and randomly moving its joints for a brief amount of time.
??? code "all_robots_visualizer.py"
``` py linenums="1"
--8<-- "examples/robots/all_robots_visualizer.py"
```
### **Robot Control Demo**
!!! abstract "This demo is useful for..."
* Understanding how different controllers can be used to control robots
* Understanding how to teleoperate a robot through external commands
```{.python .annotate}
python -m omnigibson.examples.robots.robot_control_example
```
This demo lets you choose a robot and the set of controllers to control the robot, and then lets you teleoperate the robot using your keyboard.
??? code "robot_control_example.py"
``` py linenums="1"
--8<-- "examples/robots/robot_control_example.py"
```
### **Robot Grasping Demo**
!!! abstract annotate "This demo is useful for..."
* Understanding the difference between `physical` and `sticky` grasping
* Understanding how to teleoperate a robot through external commands
```{.python .annotate}
python -m omnigibson.examples.robots.grasping_mode_example
```
This demo lets you choose a grasping mode and then loads a `Fetch` robot and a cube on a table. You can then teleoperate the robot to grasp the cube, observing the difference in grasping behavior based on the grasping mode chosen. Here, `physical` means natural friction is required to hold objects, while `sticky` means that objects are constrained to the robot's gripper once contact is made.
??? code "grasping_mode_example.py"
``` py linenums="1"
--8<-- "examples/robots/grasping_mode_example.py"
```
### **Advanced: IK Demo**
!!! abstract "This demo is useful for..."
* Understanding how to construct your own IK functionality using Omniverse's native Lula library without explicitly utilizing all of OmniGibson's class abstractions
* Understanding how to manipulate the simulator at a lower-level than the main Environment entry point
```{.python .annotate}
python -m omnigibson.examples.robots.advanced.ik_example
```
This demo loads in a `Fetch` robot and an IK solver to control the robot, and then lets you teleoperate the robot using your keyboard.
??? code "ik_example.py"
``` py linenums="1"
--8<-- "examples/robots/advanced/ik_example.py"
```
## 🧰 **Simulator**
These examples showcase useful functionality from **`OmniGibson`**'s monolithic `Simulator` object.
??? question "What's the difference between `Environment` and `Simulator`?"
The [`Simulator`](../../reference/simulator) class is a lower-level object that:
* handles importing scenes and objects into the actual simulation
* directly interfaces with the underlying physics engine
The [`Environment`](../../reference/environemnts/base_env) class thinly wraps the `Simulator`'s core functionality, by:
* providing convenience functions for automatically importing a predefined scene, object(s), and robot(s) (via the `cfg` argument), as well as a [`task`](../../reference/tasks/task_base)
* providing an OpenAI Gym interface for stepping through the simulation
While most of the core functionality in `Environment` (as well as more fine-grained physics control) can be replicated via direct calls to `Simulator` (`og.sim`), it requires deeper understanding of **`OmniGibson`**'s infrastructure and is not recommended for new users.
### **State Saving and Loading Demo**
!!! abstract "This demo is useful for..."
* Understanding how to interact with objects using the mouse
* Understanding how to save the active simulator state to a file
* Understanding how to restore the simulator state from a given file
```{.python .annotate}
python -m omnigibson.examples.simulator.sim_save_load_example
```
This demo loads a stripped-down scene with the `Turtlebot` robot, and lets you interact with objects to modify the scene. The state is then saved, written to a `.json` file, and then restored in the simulation.
??? code "sim_save_load_example.py"
``` py linenums="1"
--8<-- "examples/simulator/sim_save_load_example.py"
```
## 🖼️ **Rendering**
These examples showcase how to change renderer settings in **`OmniGibson`**.
### **Renderer Settings Demo**
!!! abstract "This demo is useful for..."
* Understanding how to use RendererSettings class
```{.python .annotate}
python -m omnigibson.examples.renderer_settings.renderer_settings_example
```
This demo iterates over different renderer settings and shows how they can be programmatically set via the **`OmniGibson`** interface.
??? code "renderer_settings_example.py"
``` py linenums="1"
--8<-- "examples/renderer_settings/renderer_settings_example.py"
```
| 23,454 | Markdown | 37.45082 | 415 | 0.725377 |
StanfordVL/OmniGibson/docs/getting_started/quickstart.md | ---
icon: octicons/rocket-16
---
# 🚀 **Quickstart**
Let's quickly create an environment programmatically!
**`OmniGibson`**'s workflow is straightforward: define the configuration of scene, object(s), robot(s), and task you'd like to load, and then instantiate our `Environment` class with that config.
Let's start with the following:
```{.python .annotate}
import omnigibson as og # (1)!
from omnigibson.macros import gm # (2)!
# Start with an empty configuration
cfg = dict()
```
1. All python scripts should start with this line! This allows access to key global variables through the top-level package.
2. Global macros (`gm`) can always be accessed directly and modified on the fly!
## 🏔️ **Defining a scene**
Next, let's define a scene:
```{.python .annotate}
cfg["scene"] = {
"type": "Scene", # (1)!
"floor_plane_visible": True, # (2)!
}
```
1. Our configuration gets parsed automatically and generates the appropriate class instance based on `type` (the string form of the class name). In this case, we're generating the most basic scene, which only consists of a floor plane. Check out [all of our available `Scene` classes](../reference/scenes/scene_base.md)!
2. In addition to specifying `type`, the remaining keyword-arguments get passed directly into the class constructor. So for the base [`Scene`](../reference/scenes/scene_base.md) class, you could optionally specify `"use_floor_plane"` and `"floor_plane_visible"`, whereas for the more powerful [`InteractiveTraversableScene`](../reference/scenes/interactive_traversable_scene.md) class (which loads a curated, preconfigured scene) you can additionally specify options for filtering objects, such as `"load_object_categories"` and `"load_room_types"`. You can see all available keyword-arguments by viewing the [individual `Scene` class](../reference/scenes/scene_base.md) you'd like to load!
## 🎾 **Defining objects**
We can optionally define some objects to load into our scene:
```{.python .annotate}
cfg["objects"] = [ # (1)!
{
"type": "USDObject", # (2)!
"name": "ghost_stain", # (3)!
"usd_path": f"{gm.ASSET_PATH}/models/stain/stain.usd",
"category": "stain", # (4)!
"visual_only": True, # (5)!
"scale": [1.0, 1.0, 1.0], # (6)!
"position": [1.0, 2.0, 0.001], # (7)!
"orientation": [0, 0, 0, 1.0], # (8)!
},
{
"type": "DatasetObject", # (9)!
"name": "delicious_apple",
"category": "apple",
"model": "agveuv", # (10)!
"position": [0, 0, 1.0],
},
{
"type": "PrimitiveObject", # (11)!
"name": "incredible_box",
"primitive_type": "Cube", # (12)!
"rgba": [0, 1.0, 1.0, 1.0], # (13)!
"scale": [0.5, 0.5, 0.1],
"fixed_base": True, # (14)!
"position": [-1.0, 0, 1.0],
"orientation": [0, 0, 0.707, 0.707],
},
{
"type": "LightObject", # (15)!
"name": "brilliant_light",
"light_type": "Sphere", # (16)!
"intensity": 50000, # (17)!
"radius": 0.1, # (18)!
"position": [3.0, 3.0, 4.0],
},
]
```
1. Unlike the `"scene"` sub-config, we can define an arbitrary number of objects to load, so this is a `list` of `dict` instead of a single nested `dict`.
2. **`OmniGibson`** supports multiple object classes, and we showcase an instance of each core class here. A [`USDObject`](../reference/objects/usd_object.md) is our most generic object class, and generates an object sourced from the `usd_path` argument.
3. All objects **must** define the `name` argument! This is because **`OmniGibson`** enforces a global unique naming scheme, and so any created objects must have unique names assigned to them.
4. `category` is used by all object classes to assign semantic segmentation IDs.
5. `visual_only` is used by all object classes and defines whether the object is subject to both gravity and collisions.
6. `scale` is used by all object classes and defines the global (x,y,z) relative scale of the object.
7. `position` is used by all object classes and defines the initial (x,y,z) position of the object in the global frame.
8. `orientation` is used by all object classes and defines the initial (x,y,z,w) quaternion orientation of the object in the global frame.
9. A [`DatasetObject`](../reference/objects/dataset_object.md) is an object pulled directly from our **BEHAVIOR** dataset. It includes metadata and annotations not found on a generic `USDObject`. Note that these assets are encrypted, and thus cannot be created via the `USDObject` class.
10. Instead of explicitly hardcoding the path to the dataset USD model, `model` (in conjunction with `category`) is used to infer the exact dataset object to load.
11. A [`PrimitiveObject`](../reference/objects/primitive_object.md) is a programmatically generated object defining a convex primitive shape.
12. `primitive_type` defines what primitive shape to load -- see [`PrimitiveObject`](../reference/objects/primitive_object.md) for available options!
13. Because this object is programmatically generated, we can also specify the color to assign to this primitive object.
14. `fixed_base` is used by all object classes and determines whether the generated object is fixed relative to the world frame. Useful for fixing large objects in place, such as furniture or structures.
15. A [`LightObject`](../reference/objects/light_object.md) is a programmatically generated light source. It is used to directly illuminate the given scene.
16. `light_type` defines what light shape to load -- see [`LightObject`](../reference/objects/light_object.md) for available options!
17. `intensity` defines how bright the generated light source should be.
18. `radius` is used by `Sphere` lights and determines their relative size.
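As a quick sanity check on the orientation values above: the `[0, 0, 0.707, 0.707]` quaternion given to the box is just a 90° rotation about the z-axis. Here is a minimal sketch (plain NumPy, no **`OmniGibson`** APIs involved) of how to build such (x, y, z, w) quaternions yourself:
```python
import numpy as np

def z_rotation_quat(angle_rad):
    """Return the (x, y, z, w) quaternion for a rotation about the z-axis."""
    half = angle_rad / 2.0
    return np.array([0.0, 0.0, np.sin(half), np.cos(half)])

print(z_rotation_quat(np.pi / 2))  # -> [0. 0. 0.7071 0.7071], the box's orientation above
```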
## 🤖 **Defining robots**
We can also optionally define robots to load into our scene:
```{.python .annotate}
cfg["robots"] = [ # (1)!
{
"type": "Fetch", # (2)!
"name": "baby_robot",
"obs_modalities": ["scan", "rgb", "depth"], # (3)!
},
]
```
1. Like the `"objects"` sub-config, we can define an arbitrary number of robots to load, so this is a `list` of `dict`.
2. **`OmniGibson`** supports multiple robot classes, where each class represents a specific robot model. Check out our [`robots`](../reference/robots/robot_base.md) to view all available robot classes!
3. Execute `print(og.ALL_SENSOR_MODALITIES)` for a list of all available observation modalities!
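Before filling in `obs_modalities`, it can be worth validating the requested strings against the full list. A minimal sketch (assuming `og.ALL_SENSOR_MODALITIES` is iterable, as the printout above suggests):
```python
import omnigibson as og

wanted = {"scan", "rgb", "depth"}
available = set(og.ALL_SENSOR_MODALITIES)
assert wanted <= available, f"unknown modalities: {wanted - available}"
```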
## 📋 **Defining a task**
Lastly, we can optionally define a task to load into our scene. Since we're just getting started, let's load a "Dummy" task (which is the task loaded by default, even if we don't explicitly define one in our config):
```{.python .annotate}
cfg["task"] = {
"type": "DummyTask", # (1)!
"termination_config": dict(), # (2)!
"reward_config": dict(), # (3)!
}
```
1. Check out all of **`OmniGibson`**'s [available tasks](../reference/tasks/task_base.md)!
2. `termination_config` configures the termination conditions for this task. It maps specific [`TerminationCondition`](../reference/termination_conditions/termination_condition_base.md) arguments to their corresponding values to set.
3. `reward_config` configures the reward functions for this task. It maps specific [`RewardFunction`](../reference/reward_functions/reward_function_base.md) arguments to their corresponding values to set.
## 🌀 **Creating the environment**
We're all set! Let's load the config and create our environment:
```{.python .annotate}
env = og.Environment(cfg)
```
Once the environment loads, we can interact with it via an interface similar to OpenAI's Gym:
```{.python .annotate}
obs, rew, done, info = env.step(env.action_space.sample())
```
??? question "What happens if we have no robot loaded?"
    Even if we have no robot loaded, we still need to pass an "action" into the environment. In this case, our action space has size 0, so you can simply pass `[]` or `np.array([])` into the `env.step()` call!
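    For example, with no robots in the config, a step looks like this (a minimal sketch):

    ```python
    import numpy as np
    obs, rew, done, info = env.step(np.array([]))  # empty action: nothing to actuate
    ```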
??? code "my_first_env.py"
``` py linenums="1"
import omnigibson as og
from omnigibson.macros import gm
cfg = dict()
# Define scene
cfg["scene"] = {
"type": "Scene",
"floor_plane_visible": True,
}
# Define objects
cfg["objects"] = [
{
"type": "USDObject",
"name": "ghost_stain",
"usd_path": f"{gm.ASSET_PATH}/models/stain/stain.usd",
"category": "stain",
"visual_only": True,
"scale": [1.0, 1.0, 1.0],
"position": [1.0, 2.0, 0.001],
"orientation": [0, 0, 0, 1.0],
},
{
"type": "DatasetObject",
"name": "delicious_apple",
"category": "apple",
"model": "agveuv",
"position": [0, 0, 1.0],
},
{
"type": "PrimitiveObject",
"name": "incredible_box",
"primitive_type": "Cube",
"rgba": [0, 1.0, 1.0, 1.0],
"scale": [0.5, 0.5, 0.1],
"fixed_base": True,
"position": [-1.0, 0, 1.0],
"orientation": [0, 0, 0.707, 0.707],
},
{
"type": "LightObject",
"name": "brilliant_light",
"light_type": "Sphere",
"intensity": 50000,
"radius": 0.1,
"position": [3.0, 3.0, 4.0],
},
]
# Define robots
cfg["robots"] = [
{
"type": "Fetch",
"name": "skynet_robot",
"obs_modalities": ["scan", "rgb", "depth"],
},
]
# Define task
cfg["task"] = {
"type": "DummyTask",
"termination_config": dict(),
"reward_config": dict(),
}
# Create the environment
env = og.Environment(cfg)
# Allow camera teleoperation
og.sim.enable_viewer_camera_teleoperation()
# Step!
for _ in range(10000):
obs, rew, done, info = env.step(env.action_space.sample())
og.shutdown()
```
## 👀 **Looking around**
Look around by:
* `Left-CLICK + Drag`: Tilt
* `Scroll-Wheel-CLICK + Drag`: Pan
* `Scroll-Wheel UP / DOWN`: Zoom
Interact with objects by:
* `Shift + Left-CLICK + Drag`: Apply force on selected object
Or, for more fine-grained control, run:
```{.python .annotate}
og.sim.enable_viewer_camera_teleoperation() # (1)!
```
1. This allows you to move the camera precisely with your keyboard, record camera poses, and dynamically modify lights!
Or, for programmatic control, directly set the viewer camera's global pose:
```{.python .annotate}
og.sim.viewer_camera.set_position_orientation(<POSITION>, <ORIENTATION>)
```
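For example, to place the camera above and behind the scene origin (the values below are purely illustrative; the position is a global-frame (x, y, z) vector and the orientation an (x, y, z, w) quaternion):
```python
import numpy as np

og.sim.viewer_camera.set_position_orientation(
    np.array([1.5, -1.5, 2.0]),          # illustrative (x, y, z) position in the global frame
    np.array([0.0, 0.0, 0.707, 0.707]),  # illustrative (x, y, z, w) quaternion (90° about z)
)
```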
***
**Next:** Check out some of **`OmniGibson`**'s breadth of features from our [Building Block](./building_blocks.md) examples!
| 10,980 | Markdown | 41.727626 | 690 | 0.643443 |
StanfordVL/OmniGibson/docs/getting_started/installation.md | ---
icon: material/hammer-wrench
---
# 🛠️ **Installation**
## 🗒️ **Requirements**
Please make sure your system meets the following specs:
- [x] **OS:** Ubuntu 20.04+ / Windows 10+
- [x] **RAM:** 32GB+
- [x] **GPU:** NVIDIA RTX 2070+
- [x] **VRAM:** 8GB+
??? question "Why these specs?"
**`OmniGibson`** is built upon NVIDIA's [Omniverse](https://www.nvidia.com/en-us/omniverse/) and [Isaac Sim](https://developer.nvidia.com/isaac-sim) platforms, so we inherit their dependencies. For more information, please see [Isaac Sim's Requirements](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/requirements.html).
## 💻 **Setup**
There are two ways to setup **`OmniGibson`**:
- **🐳 Install with Docker (Linux only)**: You can quickly get **`OmniGibson`** immediately up and running from our pre-built docker image.
- **🧪 Install from source (Linux / Windows)**: This method is recommended for deeper users looking to develop upon **`OmniGibson`** or use it extensively for research.
!!! tip ""
=== "🐳 Install with Docker (Linux only)"
        Installing **`OmniGibson`** with Docker is supported on **🐧 Linux** only.
??? info "Need to install docker or NVIDIA docker?"
```{.shell .annotate}
# Install docker
curl https://get.docker.com | sh && sudo systemctl --now enable docker
# Install nvidia-docker runtime
distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \
&& curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | \
sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \
&& curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
sudo apt-get update
sudo apt-get install -y nvidia-docker2 # install
sudo systemctl restart docker # restart docker engine
```
1. Install our docker launching scripts:
```shell
curl -LJO https://raw.githubusercontent.com/StanfordVL/OmniGibson/main/docker/run_docker.sh
chmod a+x run_docker.sh
```
??? question annotate "What is being installed?"
            Our docker image ships with a pre-configured conda virtual environment named `omnigibson`, with Isaac Sim and **`OmniGibson`** pre-installed. The first time you run it, our scene and object assets will be downloaded automatically as well.
2. Then, simply launch the shell script:
=== "Headless"
```{.shell .annotate}
sudo ./run_docker.sh -h <ABS_DATA_PATH> # (1)!
```
1. `<ABS_DATA_PATH>` specifies the **absolute** path data will be stored on your machine (if no `<ABS_DATA_PATH>` is specified, it defaults to `./omnigibson_data`). This needs to be called each time the docker container is run!
=== "GUI"
```{.shell .annotate}
sudo ./run_docker.sh <ABS_DATA_PATH> # (1)!
```
1. `<ABS_DATA_PATH>` specifies the **absolute** path data will be stored on your machine (if no `<ABS_DATA_PATH>` is specified, it defaults to `./omnigibson_data`). This needs to be called each time the docker container is run!
??? warning annotate "Are you using NFS or AFS?"
            Docker containers are unable to access NFS or AFS drives, so if `run_docker.sh` is located on an NFS / AFS partition, please set `<ABS_DATA_PATH>` to an alternative data directory located on a non-NFS / AFS partition.
=== "🧪 Install from source (Linux / Windows)"
        Installing **`OmniGibson`** from source is supported on both **🐧 Linux (bash)** and **📁 Windows (powershell/cmd)**.
!!! example ""
=== "🐧 Linux (bash)"
<div class="annotate" markdown>
1. Install [Conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) and NVIDIA's [Omniverse Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html)
!!! warning "Please make sure you have the latest version of Isaac Sim (2023.1.1) installed."
For Ubuntu 22.04, you need to [install FUSE](https://github.com/AppImage/AppImageKit/wiki/FUSE) to run the Omniverse Launcher AppImage.
2. Clone [**`OmniGibson`**](https://github.com/StanfordVL/OmniGibson) and move into the directory:
```shell
git clone https://github.com/StanfordVL/OmniGibson.git
cd OmniGibson
```
??? note "Nightly build"
The main branch contains the stable version of **`OmniGibson`**. For our latest developed (yet not fully tested) features and bug fixes, please clone from the `og-develop` branch.
3. Setup a virtual conda environment to run **`OmniGibson`**:
```{.shell .annotate}
./scripts/setup.sh # (1)!
```
1. The script will ask you which Isaac Sim to use. If you installed it in the default location, it should be `~/.local/share/ov/pkg/isaac_sim-2023.1.1`
This will create a conda env with `omnigibson` installed. Simply call `conda activate` to activate it.
4. Download **`OmniGibson`** dataset (within the conda env):
```shell
python scripts/download_datasets.py
```
</div>
=== "📁 Windows (powershell/cmd)"
<div class="annotate" markdown>
1. Install [Conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) and NVIDIA's [Omniverse Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html)
!!! warning "Please make sure you have the latest version of Isaac Sim (2023.1.1) installed."
2. Clone [**`OmniGibson`**](https://github.com/StanfordVL/OmniGibson) and move into the directory:
```shell
git clone https://github.com/StanfordVL/OmniGibson.git
cd OmniGibson
```
??? note "Nightly build"
The main branch contains the stable version of **`OmniGibson`**. For our latest developed (yet not fully tested) features and bug fixes, please clone from the `og-develop` branch.
3. Setup a virtual conda environment to run **`OmniGibson`**:
```{.powershell .annotate}
.\scripts\setup.bat # (1)!
```
1. The script will ask you which Isaac Sim to use. If you installed it in the default location, it should be `C:\Users\<USER_NAME>\AppData\Local\ov\pkg\isaac_sim-2023.1.1`
This will create a conda env with `omnigibson` installed. Simply call `conda activate` to activate it.
4. Download **`OmniGibson`** dataset (within the conda env):
```powershell
python scripts\download_datasets.py
```
</div>
## 🌎 **Explore `OmniGibson`!**
!!! warning annotate "Expect slowdown during first execution"
Omniverse requires some one-time startup setup when **`OmniGibson`** is imported for the first time.
The process could take up to 5 minutes. This is expected behavior, and should only occur once!
**`OmniGibson`** is now successfully installed! Try exploring some of our new scenes interactively:
```{.shell .annotate}
python -m omnigibson.examples.scenes.scene_selector # (1)!
```
1. This demo lets you choose a scene and interactively move around using your keyboard and mouse. Hold down **`Shift`** and then **`Left-click + Drag`** an object to apply forces!
You can also try teleoperating one of our robots:
```{.shell .annotate}
python -m omnigibson.examples.robots.robot_control_example # (1)!
```
1. This demo lets you choose a scene, robot, and set of controllers, and then teleoperate the robot using your keyboard.
***
**Next:** Get quickly familiarized with **`OmniGibson`** from our [Quickstart Guide](./quickstart.md)!
## 🧯 **Troubleshooting**
??? question "I cannot open Omniverse Launcher AppImage on Linux"
You probably need to [install FUSE](https://github.com/AppImage/AppImageKit/wiki/FUSE) to run the Omniverse Launcher AppImage.
??? question "OmniGibson is stuck at `HydraEngine rtx failed creating scene renderer.`"
`OmniGibson` is likely using an unsupported GPU (default is id 0). Run `nvidia-smi` to see the active list of GPUs, and select an NVIDIA-supported GPU and set its corresponding ID when running `OmniGibson` with `export OMNIGIBSON_GPU_ID=<ID NUMBER>`. | 9,239 | Markdown | 44.294117 | 337 | 0.613486 |
StanfordVL/OmniGibson/docs/getting_started/slurm.md | ---
icon: material/server-network
---
# 🔌 **Running on a SLURM cluster**
_This documentation is a work in progress._
OmniGibson can be run on a SLURM cluster using the _enroot_ container software, which is a replacement
for Docker that allows containers to be run as the current user rather than as root. _enroot_ needs
to be installed on your SLURM cluster by an administrator.
With enroot installed, you can follow the below steps to run OmniGibson on SLURM:
1. Download the dataset to a location that is accessible by cluster nodes. To do this, you can use
the download_dataset.py script inside OmniGibson's scripts directory and move the data to the right
spot later. In the below example, /cvgl/ is a networked drive that is accessible by the cluster nodes.
**For Stanford users, this step is already done for SVL and Viscam nodes**
```{.shell .annotate}
OMNIGIBSON_NO_OMNIVERSE=1 python scripts/download_dataset.py
mv omnigibson/data /cvgl/group/Gibson/og-data-0-2-1
```
2. (Optional) Distribute the dataset to the individual nodes.
This will make load times much better than reading from a network drive.
To do this, run the below command on your SLURM head node (replace `svl` with your partition
name and `cvgl` with your account name, as well as the paths with the respective network
and local paths). Confirm via `squeue -u $USER` that all jobs have finished. **This step is already done for SVL and Viscam nodes**
```{.shell .annotate}
sinfo -p svl -o "%N,%n" -h | \
sed s/,.*//g | \
xargs -L1 -I{} \
sbatch \
--account=cvgl --partition=svl --nodelist={} --mem=8G --cpus-per-task=4 \
--wrap 'cp -R /cvgl/group/Gibson/og-data-0-2-1 /scr-ssd/og-data-0-2-1'
```
3. Download your desired image to a location that is accessible by the cluster nodes. (Replace the path with your own path, and feel free to replace the image tag with your desired branch tag.) You have the option to mount code (meaning you don't need the container to come with all the code you want to run, just the right dependencies / environment setup)
```{.shell .annotate}
enroot import --output /cvgl2/u/cgokmen/omnigibson.sqsh docker://stanfordvl/omnigibson:action-primitives
```
4. (Optional) If you intend to mount code onto the container, make it available at a location that is accessible by the cluster nodes. You can mount arbitrary code, and you can also mount a custom version of OmniGibson (for the latter, you need to make sure you mount your copy of OmniGibson at /omnigibson-src inside the container). For example:
```{.shell .annotate}
git clone https://github.com/StanfordVL/OmniGibson.git /cvgl2/u/cgokmen/OmniGibson
```
5. Create your launch script. You can start with a copy of the script below. If you want to launch multiple workers, increase the job array option. You should keep at least 1 GPU per node, but feel free to edit the other settings. You can mount any additional code you'd like, and you can change the entrypoint so that the container runs your mounted code upon launch (see the mounts section for an example). A copy of this script can be found in docker/sbatch_example.sh
```{.shell .annotate}
#!/usr/bin/env bash
#SBATCH --account=cvgl
#SBATCH --partition=svl --qos=normal
#SBATCH --nodes=1
#SBATCH --cpus-per-task=8
#SBATCH --mem=30G
#SBATCH --gres=gpu:2080ti:1
IMAGE_PATH="/cvgl2/u/cgokmen/omnigibson.sqsh"
GPU_ID=$(nvidia-smi -L | grep -oP '(?<=GPU-)[a-fA-F0-9\-]+' | head -n 1)
ISAAC_CACHE_PATH="/scr-ssd/${SLURM_JOB_USER}/isaac_cache_${GPU_ID}"
# Define env kwargs to pass
declare -A ENVS=(
[NVIDIA_DRIVER_CAPABILITIES]=all
[NVIDIA_VISIBLE_DEVICES]=0
[DISPLAY]=""
[OMNIGIBSON_HEADLESS]=1
)
for env_var in "${!ENVS[@]}"; do
# Add to env kwargs we'll pass to enroot command later
ENV_KWARGS="${ENV_KWARGS} --env ${env_var}=${ENVS[${env_var}]}"
done
# Define mounts to create (maps local directory to container directory)
declare -A MOUNTS=(
[/scr-ssd/og-data-0-2-1]=/data
[${ISAAC_CACHE_PATH}/isaac-sim/kit/cache/Kit]=/isaac-sim/kit/cache/Kit
[${ISAAC_CACHE_PATH}/isaac-sim/cache/ov]=/root/.cache/ov
[${ISAAC_CACHE_PATH}/isaac-sim/cache/pip]=/root/.cache/pip
[${ISAAC_CACHE_PATH}/isaac-sim/cache/glcache]=/root/.cache/nvidia/GLCache
[${ISAAC_CACHE_PATH}/isaac-sim/cache/computecache]=/root/.nv/ComputeCache
[${ISAAC_CACHE_PATH}/isaac-sim/logs]=/root/.nvidia-omniverse/logs
[${ISAAC_CACHE_PATH}/isaac-sim/config]=/root/.nvidia-omniverse/config
[${ISAAC_CACHE_PATH}/isaac-sim/data]=/root/.local/share/ov/data
[${ISAAC_CACHE_PATH}/isaac-sim/documents]=/root/Documents
# Feel free to include lines like the below to mount a workspace or a custom OG version
# [/cvgl2/u/cgokmen/OmniGibson]=/omnigibson-src
# [/cvgl2/u/cgokmen/my-project]=/my-project
)
MOUNT_KWARGS=""
for mount in "${!MOUNTS[@]}"; do
# Verify mount path in local directory exists, otherwise, create it
if [ ! -e "$mount" ]; then
mkdir -p ${mount}
fi
# Add to mount kwargs we'll pass to enroot command later
MOUNT_KWARGS="${MOUNT_KWARGS} --mount ${mount}:${MOUNTS[${mount}]}"
done
# Create the image if it doesn't already exist
CONTAINER_NAME=omnigibson_${GPU_ID}
enroot create --force --name ${CONTAINER_NAME} ${IMAGE_PATH}
# Remove leading space in string
ENV_KWARGS="${ENV_KWARGS:1}"
MOUNT_KWARGS="${MOUNT_KWARGS:1}"
# The last line here is the command you want to run inside the container.
# Here I'm running some unit tests.
enroot start \
--root \
--rw \
${ENV_KWARGS} \
${MOUNT_KWARGS} \
${CONTAINER_NAME} \
    /bin/bash -c "source /isaac-sim/setup_conda_env.sh && pytest tests/test_object_states.py"
# Clean up the image if possible.
enroot remove -f ${CONTAINER_NAME}
```
6. Launch your job using `sbatch your_script.sh` - and profit! | 5,782 | Markdown | 46.01626 | 490 | 0.717226 |
lucasapchagas/Omniverse/README.md |
# OmniVerse API 🌌
OmniVerse API is a straightforward API that exposes only the basic CRUD routes, enabling efficient and consistent data manipulation.
Our API uses the ViaCEP API, a well-known service that returns the address data for a given Brazilian postal code (CEP).
## Setup 🔧
OmniVerse API is an API built on top of the Java Spring Boot framework, designed to be easily installed and deployed.
For an easy setup you'll need a MySQL server, but the API itself is prepared to accept any DB you want. Follow the [MySQL documentation](https://dev.mysql.com/doc/mysql-getting-started/en) to set up a working server.
1. Once your MySQL server is running, configure the API so it can connect to it by modifying the [**application.properties**](https://github.com/lucasapchagas/Omniverse/blob/main/src/main/resources/application.properties) file to your needs.
- `spring.datasource.url`, you must provide your MySQL server url.
- `spring.datasource.username`, you must provide your MySQL server username.
- `spring.datasource.password`, you must provide your MySQL server password.
❗**If you provide a URL for a database that has not been created, the API will not start. Use `CREATE DATABASE <db_name>;` to create it first.**
2. Building it 🔨
To build the project, you need to have Java 17 installed, although you can easily change the version by modifying the application's [**pom.xml**](https://github.com/lucasapchagas/Omniverse/blob/main/pom.xml) file. The project uses Maven as its build platform, with all the conveniences that brings.
- You can build it by running `./mvnw package` in the project root folder; the artifact will be generated in the `/target/` folder.
3. Using it 😯
Using the API is as simple as understanding, modifying, and building it. Since Java runs on the JVM, deployment is effortless: simply run the compiled JAR on any cloud service.
- You can just use a [RELEASE](https://github.com/lucasapchagas/Omniverse/releases/tag/RELEASE) instead of compiling it. Please, always use the latest one.
- To run it, use the following command: `java -jar OmniVerse-0.0.1-SNAPSHOT.jar`. By default, the API will be served at [`http://localhost:8080/`](http://localhost:8080/).
- Use the OmniverseCLI to test the API. https://github.com/lucasapchagas/OmniverseCLI
## Features 🪶
- Uses the **ViaCEP API** to resolve users' addresses during registration.
- Migrations with flyway library.
- Data validation with spring boot data validation.
- JPA design pattern.
## API Usage 🍪
The OmniVerse API is user-friendly and comprises only 5 possible routes that align with the CRUD standard.
You can use popular API testing tools like Insomnia. We have created a configuration that can be accessed on pastebin by [clicking here](https://pastebin.com/f1rBDfZP). Import it into your Insomnia to streamline your testing process.
### What is a user?
Example:
```json
{
"id": 8,
"name": "Lucas",
"email": "[email protected]",
"address": {
"cep": "69050500",
"place": "Rua Peru",
"complement": "",
"neighborhood": "Parque 10 de Novembro",
"locality": "Manaus",
"uf": "AM"
}
}
```
#### Register a user
```http
POST /user
```
| Parameter | Type | Description |
| :---------- | :--------- | :---------------------------------- |
| `name` | `string` | User name |
| `email` | `string` | Valid email |
| `cep` | `string` | Valid cep, just numbers. |
#### Return a user
```http
GET /user/{id}
```
#### Return all users
```http
GET /user
```
#### Delete a user
```http
DELETE /user/{id}
```
#### Update a user
Only the fields you want to modify are needed as parameters; the user `id` is **required**.
```http
PUT /user
```
| Parameter | Type | Description |
| :---------- | :--------- | :---------------------------------- |
| `id` | `int` | User id |
| `name` | `string` | User name |
| `email` | `string` | Valid email |
| `cep` | `string` | Valid cep, just numbers. |
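For a quick end-to-end test outside of Insomnia, here is a minimal Python sketch using the `requests` library. It assumes the API is running locally at the default `http://localhost:8080/` and that parameters are sent as a JSON body, as is conventional for Spring `@RequestBody` endpoints:
```python
import requests

BASE_URL = "http://localhost:8080"

# Register a user (POST /user)
payload = {"name": "Lucas", "email": "[email protected]", "cep": "69050500"}
user = requests.post(f"{BASE_URL}/user", json=payload).json()
print(user["id"], user["address"]["locality"])

# Update only the name (PUT /user); just the fields to change plus the id are needed
requests.put(f"{BASE_URL}/user", json={"id": user["id"], "name": "Lucas A."})

# Fetch and then delete the user
print(requests.get(f"{BASE_URL}/user/{user['id']}").json())
requests.delete(f"{BASE_URL}/user/{user['id']}")
```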
## Roadmap
- [x] Implement JPA pattern.
- [x] Usage of **ViaCEP API** in order to generate user's adress.
- [x] Implement Flyway migrations to our database.
- [x] Implement Spring boot data validation.
- [ ] Implement Spring boot security module.
- [ ] Implement JSON Web Token usage.
| 4,499 | Markdown | 35.290322 | 302 | 0.673038 |
Toni-SM/skrl/pyproject.toml | [project]
name = "skrl"
version = "1.1.0"
description = "Modular and flexible library for reinforcement learning on PyTorch and JAX"
readme = "README.md"
requires-python = ">=3.6"
license = {text = "MIT License"}
authors = [
{name = "Toni-SM"},
]
maintainers = [
{name = "Toni-SM"},
]
keywords = ["reinforcement-learning", "machine-learning", "reinforcement", "machine", "learning", "rl"]
classifiers = [
"License :: OSI Approved :: MIT License",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
]
# dependencies / optional-dependencies
dependencies = [
"gym",
"gymnasium",
"tqdm",
"packaging",
"tensorboard",
]
[project.optional-dependencies]
torch = [
"torch>=1.9",
]
jax = [
"jax>=0.4.3",
"jaxlib>=0.4.3",
"flax",
"optax",
]
all = [
"torch>=1.9",
"jax>=0.4.3",
"jaxlib>=0.4.3",
"flax",
"optax",
]
# urls
[project.urls]
"Homepage" = "https://github.com/Toni-SM/skrl"
"Documentation" = "https://skrl.readthedocs.io"
"Discussions" = "https://github.com/Toni-SM/skrl/discussions"
"Bug Reports" = "https://github.com/Toni-SM/skrl/issues"
"Say Thanks!" = "https://github.com/Toni-SM"
"Source" = "https://github.com/Toni-SM/skrl"
[tool.yapf]
# run: yapf -p -m -i -r <folder>
based_on_style = "pep8"
blank_line_before_nested_class_or_def = false
blank_lines_between_top_level_imports_and_variables = 2
column_limit = 120
join_multiple_lines = false
space_between_ending_comma_and_closing_bracket = false
spaces_around_power_operator = true
split_all_top_level_comma_separated_values = true
split_before_arithmetic_operator = true
split_before_dict_set_generator = false
split_before_dot = true
split_complex_comprehension = true
coalesce_brackets = true
[tool.codespell]
# run: codespell <folder>
skip = "./docs/_build,./docs/source/_static"
quiet-level = 3
count = ""
[tool.isort]
use_parentheses = false
line_length = 120
multi_line_output = 3
lines_after_imports = 2
known_annotation = ["typing"]
known_framework = [
"torch",
"jax",
"jaxlib",
"flax",
"optax",
"numpy",
]
sections = [
"FUTURE",
"ANNOTATION",
"STDLIB",
"THIRDPARTY",
"FRAMEWORK",
"FIRSTPARTY",
"LOCALFOLDER",
]
no_lines_before = "THIRDPARTY"
skip = ["docs"]
| 2,365 | TOML | 21.112149 | 103 | 0.671036 |
Toni-SM/skrl/CONTRIBUTING.md |
First of all, **thank you**... For what? Because you are dedicating some time to reading these guidelines and possibly thinking about contributing
<hr>
### I just want to ask a question!
If you have a question, please do not open an issue for this. Instead, use the following resources for it (you will get a faster response):
- [skrl's GitHub discussions](https://github.com/Toni-SM/skrl/discussions), a place to ask questions and discuss about the project
- [Isaac Gym's forum](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/isaac-gym/322), a place to post your questions, find past answers, or just chat with other members of the community about Isaac Gym topics
- [Omniverse Isaac Sim's forum](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/simulation/69), a place to post your questions, find past answers, or just chat with other members of the community about Omniverse Isaac Sim/Gym topics
### I have found a (good) bug. What can I do?
Open an issue on [skrl's GitHub issues](https://github.com/Toni-SM/skrl/issues) and describe the bug. If possible, please provide some of the following items:
- Minimum code that reproduces the bug...
- or the exact steps to reproduce it
- The error log or a screenshot of it
- A link to the source code of the library that you are using (some problems may be due to the use of older versions. If possible, always use the latest version)
- Any other information that you think may be useful or help to reproduce/describe the problem
### I want to contribute, but I don't know how
There is a [board](https://github.com/users/Toni-SM/projects/2/views/8) containing relevant future implementations which can be a good starting place to identify contributions. Please consider the following points
#### Notes about contributing
- Try to **communicate your change first** to [discuss](https://github.com/Toni-SM/skrl/discussions) the implementation if you want to add a new feature or change an existing one
- Modify only the minimum amount of code required and the files needed to make the change
- Use the provided [pre-commit](https://pre-commit.com/) hooks to format the code. Install them by running `pre-commit install` in the root of the repository; running them periodically with `pre-commit run --all` helps reduce commit errors
- Changes that are cosmetic in nature (code formatting, removing whitespace, etc.) or that correct grammatical, spelling or typo errors, and that do not add anything substantial to the functionality of the library will generally not be accepted as a pull request
- The only exception are changes that results from the use of the pre-commit hooks
#### Coding conventions
**skrl** is designed with a focus on modularity, readability, simplicity and transparency of algorithm implementation. The file system structure groups components according to their functionality. Library components only inherit (and must inherit) from a single base class (no multilevel or multiple inheritance) that provides a uniform interface and implements common functionality that is not tied to the implementation details of the algorithms
Read the code a little bit and you will understand it at first glance... Also
- Use 4 indentation spaces
- Follow, as much as possible, the PEP8 Style Guide for Python code
- Document each module, class, function or method using the reStructuredText format
- Annotate all functions, both for the parameters and for the return value
- Follow the commit message style guide for Git described in https://commit.style
- Capitalize (the first letter) and omit any trailing punctuation
- Write it in the imperative tense
- Aim for about 50 (or 72) characters
- Add import statements at the top of each module as follows:
```ini
function annotation (e.g. typing)
# insert an empty line
python libraries and other libraries (e.g. gym, numpy, time, etc.)
# insert an empty line
machine learning framework modules (e.g. torch, torch.nn)
# insert an empty line
skrl components
```
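For instance, a module header following this convention might look like the following (illustrative imports only):
```python
from typing import Optional, Union

import sys
import gym
import numpy as np

import torch
import torch.nn as nn

from skrl.agents.torch import Agent
```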
<hr>
Thank you once again,
Toni
| 4,086 | Markdown | 58.231883 | 447 | 0.773128 |
Toni-SM/skrl/CHANGELOG.md | # Changelog
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [1.1.0] - 2024-02-12
### Added
- MultiCategorical mixin to operate MultiDiscrete action spaces
### Changed (breaking changes)
- Rename the `ManualTrainer` to `StepTrainer`
- Output training/evaluation progress messages to system's stdout
- Get single observation/action spaces for vectorized environments
- Update Isaac Orbit environment wrapper
## [1.0.0] - 2023-08-16
Transition from pre-release versions (`1.0.0-rc.1` and`1.0.0-rc.2`) to a stable version.
This release also announces the publication of the **skrl** paper in the Journal of Machine Learning Research (JMLR): https://www.jmlr.org/papers/v24/23-0112.html
Summary of the most relevant features:
- JAX support
- New documentation theme and structure
- Multi-agent Reinforcement Learning (MARL)
## [1.0.0-rc.2] - 2023-08-11
### Added
- Get truncation from `time_outs` info in Isaac Gym, Isaac Orbit and Omniverse Isaac Gym environments
- Time-limit (truncation) bootstrapping in on-policy actor-critic agents
- Model instantiators `initial_log_std` parameter to set the log standard deviation's initial value
### Changed (breaking changes)
- Structure environment loaders and wrappers file hierarchy coherently
Import statements now follow this convention:
- Wrappers (e.g.):
- `from skrl.envs.wrappers.torch import wrap_env`
- `from skrl.envs.wrappers.jax import wrap_env`
- Loaders (e.g.):
- `from skrl.envs.loaders.torch import load_omniverse_isaacgym_env`
- `from skrl.envs.loaders.jax import load_omniverse_isaacgym_env`
### Changed
- Drop support for versions prior to PyTorch 1.9 (1.8.0 and 1.8.1)
## [1.0.0-rc.1] - 2023-07-25
### Added
- JAX support (with Flax and Optax)
- RPO agent
- IPPO and MAPPO multi-agent
- Multi-agent base class
- Bi-DexHands environment loader
- Wrapper for PettingZoo and Bi-DexHands environments
- Parameters `num_envs`, `headless` and `cli_args` for configuring Isaac Gym, Isaac Orbit
and Omniverse Isaac Gym environments when they are loaded
### Changed
- Migrate to `pyproject.toml` Python package development
- Define ML framework dependencies as optional dependencies in the library installer
- Move agent implementations with recurrent models to a separate file
- Allow closing the environment at the end of execution instead of after training/evaluation
- Documentation theme from *sphinx_rtd_theme* to *furo*
- Update documentation structure and examples
### Fixed
- Compatibility for Isaac Sim or OmniIsaacGymEnvs (2022.2.0 or earlier)
- Disable PyTorch gradient computation during the environment stepping
- Get categorical models' entropy
- Typo in `KLAdaptiveLR` learning rate scheduler
(keep the old name for compatibility with the examples of previous versions.
The old name will be removed in future releases)
## [0.10.2] - 2023-03-23
### Changed
- Update loader and utils for OmniIsaacGymEnvs 2022.2.1.0
- Update Omniverse Isaac Gym real-world examples
## [0.10.1] - 2023-01-26
### Fixed
- Tensorboard writer instantiation when `write_interval` is zero
## [0.10.0] - 2023-01-22
### Added
- Isaac Orbit environment loader
- Wrap an Isaac Orbit environment
- Gaussian-Deterministic shared model instantiator
## [0.9.1] - 2023-01-17
### Added
- Utility for downloading models from Hugging Face Hub
### Fixed
- Initialization of agent components if they have not been defined
- Manual trainer `train`/`eval` method default arguments
## [0.9.0] - 2023-01-13
### Added
- Support for Farama Gymnasium interface
- Wrapper for robosuite environments
- Weights & Biases integration
- Set the running mode (training or evaluation) of the agents
- Allow clipping the gradient norm for DDPG, TD3 and SAC agents
- Initialize model biases
- Add RNN (RNN, LSTM, GRU and any other variant) support for A2C, DDPG, PPO, SAC, TD3 and TRPO agents
- Allow disabling training/evaluation progressbar
- Farama Shimmy and robosuite examples
- KUKA LBR iiwa real-world example
### Changed (breaking changes)
- Forward model inputs as a Python dictionary
- Returns a Python dictionary with extra output values in model calls
### Changed
- Adopt the implementation of `terminated` and `truncated` over `done` for all environments
### Fixed
- Omniverse Isaac Gym simulation speed for the Franka Emika real-world example
- Call agents' method `record_transition` instead of parent method
to allow storing samples in memories during evaluation
- Move TRPO policy optimization out of the value optimization loop
- Access to the categorical model distribution
- Call reset only once for Gym/Gymnasium vectorized environments
### Removed
- Deprecated method `start` in trainers
## [0.8.0] - 2022-10-03
### Added
- AMP agent for physics-based character animation
- Manual trainer
- Gaussian model mixin
- Support for creating shared models
- Parameter `role` to model methods
- Wrapper compatibility with the new OpenAI Gym environment API
- Internal library colored logger
- Migrate checkpoints/models from other RL libraries to skrl models/agents
- Configuration parameter `store_separately` to agent configuration dict
- Save/load agent modules (models, optimizers, preprocessors)
- Set random seed and configure deterministic behavior for reproducibility
- Benchmark results for Isaac Gym and Omniverse Isaac Gym on the GitHub discussion page
- Franka Emika real-world example
### Changed (breaking changes)
- Models implementation as Python mixin
### Changed
- Multivariate Gaussian model (`GaussianModel` until 0.7.0) to `MultivariateGaussianMixin`
- Trainer's `cfg` parameter position and default values
- Show training/evaluation display progress using `tqdm`
- Update Isaac Gym and Omniverse Isaac Gym examples
### Fixed
- Missing recursive arguments during model weights initialization
- Tensor dimension when computing preprocessor parallel variance
- Models' clip tensors dtype to `float32`
### Removed
- Parameter `inference` from model methods
- Configuration parameter `checkpoint_policy_only` from agent configuration dict
## [0.7.0] - 2022-07-11
### Added
- A2C agent
- Isaac Gym (preview 4) environment loader
- Wrap an Isaac Gym (preview 4) environment
- Support for OpenAI Gym vectorized environments
- Running standard scaler for input preprocessing
- Installation from PyPI (`pip install skrl`)
## [0.6.0] - 2022-06-09
### Added
- Omniverse Isaac Gym environment loader
- Wrap an Omniverse Isaac Gym environment
- Save best models during training
## [0.5.0] - 2022-05-18
### Added
- TRPO agent
- DeepMind environment wrapper
- KL Adaptive learning rate scheduler
- Handle `gym.spaces.Dict` observation spaces (OpenAI Gym and DeepMind environments)
- Forward environment info to agent `record_transition` method
- Expose and document the random seeding mechanism
- Define rewards shaping function in agents' config
- Define learning rate scheduler in agents' config
- Improve agent's algorithm description in documentation (PPO and TRPO at the moment)
### Changed
- Compute the Generalized Advantage Estimation (GAE) in agent `_update` method
- Move noises definition to `resources` folder
- Update the Isaac Gym examples
### Removed
- `compute_functions` for computing the GAE from memory base class
## [0.4.1] - 2022-03-22
### Added
- Examples of all Isaac Gym environments (preview 3)
- Tensorboard file iterator for data post-processing
### Fixed
- Init and evaluate agents in ParallelTrainer
## [0.4.0] - 2022-03-09
### Added
- CEM, SARSA and Q-learning agents
- Tabular model
- Parallel training using multiprocessing
- Isaac Gym utilities
### Changed
- Initialize agents in a separate method
- Change the name of the `networks` argument to `models`
### Fixed
- Reset environments after post-processing
## [0.3.0] - 2022-02-07
### Added
- DQN and DDQN agents
- Export memory to files
- Postprocessing utility to iterate over memory files
- Model instantiator utility to allow fast development
- More examples and contents in the documentation
### Fixed
- Clip actions using the whole space's limits
## [0.2.0] - 2022-01-18
### Added
- First official release
| 8,132 | Markdown | 34.207792 | 162 | 0.764142 |
Toni-SM/skrl/README.md | [](https://pypi.org/project/skrl)
[<img src="https://img.shields.io/badge/%F0%9F%A4%97%20models-hugging%20face-F8D521">](https://huggingface.co/skrl)

<br>
[](https://github.com/Toni-SM/skrl)
<span> </span>
[](https://skrl.readthedocs.io/en/latest/?badge=latest)
[](https://github.com/Toni-SM/skrl/actions/workflows/python-test.yml)
[](https://github.com/Toni-SM/skrl/actions/workflows/pre-commit.yml)
<br>
<p align="center">
<a href="https://skrl.readthedocs.io">
<img width="300rem" src="https://raw.githubusercontent.com/Toni-SM/skrl/main/docs/source/_static/data/logo-light-mode.png">
</a>
</p>
<h2 align="center" style="border-bottom: 0 !important;">SKRL - Reinforcement Learning library</h2>
<br>
**skrl** is an open-source modular library for Reinforcement Learning written in Python (on top of [PyTorch](https://pytorch.org/) and [JAX](https://jax.readthedocs.io)) and designed with a focus on modularity, readability, simplicity, and transparency of algorithm implementation. In addition to supporting the OpenAI [Gym](https://www.gymlibrary.dev) / Farama [Gymnasium](https://gymnasium.farama.org) and [DeepMind](https://github.com/deepmind/dm_env) and other environment interfaces, it allows loading and configuring [NVIDIA Isaac Gym](https://developer.nvidia.com/isaac-gym/), [NVIDIA Isaac Orbit](https://isaac-orbit.github.io/orbit/index.html) and [NVIDIA Omniverse Isaac Gym](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_gym_isaac_gym.html) environments, enabling agents' simultaneous training by scopes (subsets of environments among all available environments), which may or may not share resources, in the same run.
<br>
### Please, visit the documentation for usage details and examples
<strong>https://skrl.readthedocs.io</strong>
<br>
> **Note:** This project is under **active continuous development**. Please make sure you always have the latest version. Visit the [develop](https://github.com/Toni-SM/skrl/tree/develop) branch or its [documentation](https://skrl.readthedocs.io/en/develop) to access the latest updates to be released.
<br>
### Citing this library
To cite this library in publications, please use the following reference:
```bibtex
@article{serrano2023skrl,
author = {Antonio Serrano-Muñoz and Dimitrios Chrysostomou and Simon Bøgh and Nestor Arana-Arexolaleiba},
title = {skrl: Modular and Flexible Library for Reinforcement Learning},
journal = {Journal of Machine Learning Research},
year = {2023},
volume = {24},
number = {254},
pages = {1--9},
url = {http://jmlr.org/papers/v24/23-0112.html}
}
```
| 3,043 | Markdown | 59.879999 | 942 | 0.744003 |
Toni-SM/skrl/skrl/__init__.py | from typing import Union
import logging
import sys
import numpy as np
__all__ = ["__version__", "logger", "config"]
# read library version from metadata
try:
import importlib.metadata
__version__ = importlib.metadata.version("skrl")
except ImportError:
__version__ = "unknown"
# logger with format
class _Formatter(logging.Formatter):
_format = "[%(name)s:%(levelname)s] %(message)s"
_formats = {logging.DEBUG: f"\x1b[38;20m{_format}\x1b[0m",
logging.INFO: f"\x1b[38;20m{_format}\x1b[0m",
logging.WARNING: f"\x1b[33;20m{_format}\x1b[0m",
logging.ERROR: f"\x1b[31;20m{_format}\x1b[0m",
logging.CRITICAL: f"\x1b[31;1m{_format}\x1b[0m"}
def format(self, record):
return logging.Formatter(self._formats.get(record.levelno)).format(record)
_handler = logging.StreamHandler()
_handler.setLevel(logging.DEBUG)
_handler.setFormatter(_Formatter())
logger = logging.getLogger("skrl")
logger.setLevel(logging.DEBUG)
logger.addHandler(_handler)
# machine learning framework configuration
class _Config(object):
def __init__(self) -> None:
"""Machine learning framework specific configuration
"""
class JAX(object):
def __init__(self) -> None:
"""JAX configuration
"""
self._backend = "numpy"
self._key = np.array([0, 0], dtype=np.uint32)
@property
def backend(self) -> str:
"""Backend used by the different components to operate and generate arrays
This configuration excludes models and optimizers.
                Supported backends are: ``"numpy"`` and ``"jax"``
"""
return self._backend
@backend.setter
def backend(self, value: str) -> None:
if value not in ["numpy", "jax"]:
raise ValueError("Invalid jax backend. Supported values are: numpy, jax")
self._backend = value
@property
def key(self) -> "jax.Array":
"""Pseudo-random number generator (PRNG) key
"""
if isinstance(self._key, np.ndarray):
try:
import jax
self._key = jax.random.PRNGKey(self._key[1])
except ImportError:
pass
return self._key
@key.setter
def key(self, value: Union[int, "jax.Array"]) -> None:
if type(value) is int:
# don't import JAX if it has not been imported before
if "jax" in sys.modules:
import jax
value = jax.random.PRNGKey(value)
else:
value = np.array([0, value], dtype=np.uint32)
self._key = value
self.jax = JAX()
config = _Config()
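
# Illustrative usage (a sketch, not part of the module; kept commented out so that
# importing skrl has no side effects):
#   from skrl import config, logger
#   config.jax.backend = "jax"  # components operate on jax arrays instead of numpy
#   config.jax.key = 42         # seed the pseudo-random number generator key
#   logger.info(f"jax backend: {config.jax.backend}")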
| 2,993 | Python | 30.851064 | 93 | 0.529569 |
Toni-SM/skrl/skrl/envs/jax.py | # TODO: Delete this file in future releases
from skrl import logger # isort: skip
logger.warning("Using `from skrl.envs.jax import ...` is deprecated and will be removed in future versions.")
logger.warning(" - Import loaders using `from skrl.envs.loaders.jax import ...`")
logger.warning(" - Import wrappers using `from skrl.envs.wrappers.jax import ...`")
from skrl.envs.loaders.jax import (
load_bidexhands_env,
load_isaac_orbit_env,
load_isaacgym_env_preview2,
load_isaacgym_env_preview3,
load_isaacgym_env_preview4,
load_omniverse_isaacgym_env
)
from skrl.envs.wrappers.jax import MultiAgentEnvWrapper, Wrapper, wrap_env
| 654 | Python | 35.388887 | 109 | 0.740061 |
Toni-SM/skrl/skrl/envs/loaders/torch/bidexhands_envs.py | from typing import Optional, Sequence
import os
import sys
from contextlib import contextmanager
from skrl import logger
__all__ = ["load_bidexhands_env"]
@contextmanager
def cwd(new_path: str) -> None:
"""Context manager to change the current working directory
This function restores the current working directory after the context manager exits
:param new_path: The new path to change to
:type new_path: str
"""
current_path = os.getcwd()
os.chdir(new_path)
try:
yield
finally:
os.chdir(current_path)
def _print_cfg(d, indent=0) -> None:
"""Print the environment configuration
:param d: The dictionary to print
:type d: dict
:param indent: The indentation level (default: ``0``)
:type indent: int, optional
"""
for key, value in d.items():
if isinstance(value, dict):
_print_cfg(value, indent + 1)
else:
print(" | " * indent + f" |-- {key}: {value}")
def load_bidexhands_env(task_name: str = "",
num_envs: Optional[int] = None,
headless: Optional[bool] = None,
cli_args: Sequence[str] = [],
bidexhands_path: str = "",
show_cfg: bool = True):
"""Load a Bi-DexHands environment
:param task_name: The name of the task (default: ``""``).
If not specified, the task name is taken from the command line argument (``--task TASK_NAME``).
Command line argument has priority over function parameter if both are specified
:type task_name: str, optional
:param num_envs: Number of parallel environments to create (default: ``None``).
If not specified, the default number of environments defined in the task configuration is used.
Command line argument has priority over function parameter if both are specified
:type num_envs: int, optional
:param headless: Whether to use headless mode (no rendering) (default: ``None``).
If not specified, the default task configuration is used.
Command line argument has priority over function parameter if both are specified
:type headless: bool, optional
:param cli_args: Isaac Gym environment configuration and command line arguments (default: ``[]``)
:type cli_args: list of str, optional
:param bidexhands_path: The path to the ``bidexhands`` directory (default: ``""``).
If empty, the path will obtained from bidexhands package metadata
:type bidexhands_path: str, optional
:param show_cfg: Whether to print the configuration (default: ``True``)
:type show_cfg: bool, optional
:raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments
:raises RuntimeError: The bidexhands package is not installed or the path is wrong
:return: Bi-DexHands environment (preview 4)
:rtype: isaacgymenvs.tasks.base.vec_task.VecTask
"""
import isaacgym # isort:skip
import bidexhands
# check task from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("--task"):
defined = True
break
# get task name from command line arguments
if defined:
arg_index = sys.argv.index("--task") + 1
if arg_index >= len(sys.argv):
raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument")
if task_name and task_name != sys.argv[arg_index]:
logger.warning(f"Overriding task ({task_name}) with command line argument ({sys.argv[arg_index]})")
# get task name from function arguments
else:
if task_name:
sys.argv.append("--task")
sys.argv.append(task_name)
else:
raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument")
# check num_envs from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("--num_envs"):
defined = True
break
# get num_envs from command line arguments
if defined:
if num_envs is not None:
logger.warning("Overriding num_envs with command line argument --num_envs")
# get num_envs from function arguments
elif num_envs is not None and num_envs > 0:
sys.argv.append("--num_envs")
sys.argv.append(str(num_envs))
# check headless from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("--headless"):
defined = True
break
# get headless from command line arguments
if defined:
if headless is not None:
logger.warning("Overriding headless with command line argument --headless")
# get headless from function arguments
elif headless is not None:
sys.argv.append("--headless")
# others command line arguments
sys.argv += cli_args
# get bidexhands path from bidexhands package metadata
if not bidexhands_path:
if not hasattr(bidexhands, "__path__"):
raise RuntimeError("bidexhands package is not installed")
path = list(bidexhands.__path__)[0]
else:
path = bidexhands_path
sys.path.append(path)
status = True
try:
from utils.config import get_args, load_cfg, parse_sim_params # type: ignore
from utils.parse_task import parse_task # type: ignore
from utils.process_marl import get_AgentIndex # type: ignore
except Exception as e:
status = False
logger.error(f"Failed to import required packages: {e}")
if not status:
raise RuntimeError(f"The path ({path}) is not valid")
args = get_args()
# print config
if show_cfg:
print(f"\nBi-DexHands environment ({args.task})")
_print_cfg(vars(args))
# update task arguments
args.task_type = "MultiAgent" # TODO: get from parameters
args.cfg_train = os.path.join(path, args.cfg_train)
args.cfg_env = os.path.join(path, args.cfg_env)
# load environment
with cwd(path):
cfg, cfg_train, _ = load_cfg(args)
agent_index = get_AgentIndex(cfg)
sim_params = parse_sim_params(args, cfg, cfg_train)
task, env = parse_task(args, cfg, cfg_train, sim_params, agent_index)
return env
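
# Illustrative usage (a sketch; assumes the bidexhands package and its assets are
# installed, and that "ShadowHandOver" is among the available Bi-DexHands tasks):
#   env = load_bidexhands_env(task_name="ShadowHandOver", num_envs=64, headless=True)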
| 6,552 | Python | 36.445714 | 132 | 0.628205 |
Toni-SM/skrl/skrl/envs/loaders/torch/__init__.py | from skrl.envs.loaders.torch.bidexhands_envs import load_bidexhands_env
from skrl.envs.loaders.torch.isaac_orbit_envs import load_isaac_orbit_env
from skrl.envs.loaders.torch.isaacgym_envs import (
load_isaacgym_env_preview2,
load_isaacgym_env_preview3,
load_isaacgym_env_preview4
)
from skrl.envs.loaders.torch.omniverse_isaacgym_envs import load_omniverse_isaacgym_env
| 383 | Python | 41.666662 | 87 | 0.804178 |
Toni-SM/skrl/skrl/envs/loaders/torch/isaacgym_envs.py | from typing import Optional, Sequence
import os
import sys
from contextlib import contextmanager
from skrl import logger
__all__ = ["load_isaacgym_env_preview2",
"load_isaacgym_env_preview3",
"load_isaacgym_env_preview4"]
@contextmanager
def cwd(new_path: str) -> None:
"""Context manager to change the current working directory
This function restores the current working directory after the context manager exits
:param new_path: The new path to change to
:type new_path: str
"""
current_path = os.getcwd()
os.chdir(new_path)
try:
yield
finally:
os.chdir(current_path)
def _omegaconf_to_dict(config) -> dict:
"""Convert OmegaConf config to dict
:param config: The OmegaConf config
:type config: OmegaConf.Config
:return: The config as dict
:rtype: dict
"""
# return config.to_container(dict)
from omegaconf import DictConfig
d = {}
for k, v in config.items():
d[k] = _omegaconf_to_dict(v) if isinstance(v, DictConfig) else v
return d
def _print_cfg(d, indent=0) -> None:
"""Print the environment configuration
:param d: The dictionary to print
:type d: dict
:param indent: The indentation level (default: ``0``)
:type indent: int, optional
"""
for key, value in d.items():
if isinstance(value, dict):
_print_cfg(value, indent + 1)
else:
print(" | " * indent + f" |-- {key}: {value}")
def load_isaacgym_env_preview2(task_name: str = "",
num_envs: Optional[int] = None,
headless: Optional[bool] = None,
cli_args: Sequence[str] = [],
isaacgymenvs_path: str = "",
show_cfg: bool = True):
"""Load an Isaac Gym environment (preview 2)
:param task_name: The name of the task (default: ``""``).
If not specified, the task name is taken from the command line argument (``--task TASK_NAME``).
Command line argument has priority over function parameter if both are specified
:type task_name: str, optional
:param num_envs: Number of parallel environments to create (default: ``None``).
If not specified, the default number of environments defined in the task configuration is used.
Command line argument has priority over function parameter if both are specified
:type num_envs: int, optional
:param headless: Whether to use headless mode (no rendering) (default: ``None``).
If not specified, the default task configuration is used.
Command line argument has priority over function parameter if both are specified
:type headless: bool, optional
:param cli_args: Isaac Gym environment configuration and command line arguments (default: ``[]``)
:type cli_args: list of str, optional
:param isaacgymenvs_path: The path to the ``rlgpu`` directory (default: ``""``).
If empty, the path will obtained from isaacgym package metadata
:type isaacgymenvs_path: str, optional
:param show_cfg: Whether to print the configuration (default: ``True``)
:type show_cfg: bool, optional
:raises ValueError: The task name has not been defined,
neither by the function parameter nor by the command line arguments
:raises RuntimeError: The isaacgym package is not installed or the path is wrong
:return: Isaac Gym environment (preview 2)
:rtype: tasks.base.vec_task.VecTask
"""
import isaacgym
# check task from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("--task"):
defined = True
break
# get task name from command line arguments
if defined:
arg_index = sys.argv.index("--task") + 1
if arg_index >= len(sys.argv):
raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument")
if task_name and task_name != sys.argv[arg_index]:
logger.warning(f"Overriding task ({task_name}) with command line argument ({sys.argv[arg_index]})")
# get task name from function arguments
else:
if task_name:
sys.argv.append("--task")
sys.argv.append(task_name)
else:
raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument")
# check num_envs from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("--num_envs"):
defined = True
break
# get num_envs from command line arguments
if defined:
if num_envs is not None:
logger.warning("Overriding num_envs with command line argument --num_envs")
# get num_envs from function arguments
elif num_envs is not None and num_envs > 0:
sys.argv.append("--num_envs")
sys.argv.append(str(num_envs))
# check headless from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("--headless"):
defined = True
break
# get headless from command line arguments
if defined:
if headless is not None:
logger.warning("Overriding headless with command line argument --headless")
# get headless from function arguments
elif headless is not None:
sys.argv.append("--headless")
# others command line arguments
sys.argv += cli_args
# get isaacgym envs path from isaacgym package metadata
if not isaacgymenvs_path:
if not hasattr(isaacgym, "__path__"):
raise RuntimeError("isaacgym package is not installed or could not be accessed by the current Python environment")
path = isaacgym.__path__
path = os.path.join(path[0], "..", "rlgpu")
else:
path = isaacgymenvs_path
# import required packages
sys.path.append(path)
status = True
try:
from utils.config import get_args, load_cfg, parse_sim_params # type: ignore
from utils.parse_task import parse_task # type: ignore
except Exception as e:
status = False
logger.error(f"Failed to import required packages: {e}")
if not status:
raise RuntimeError(f"Path ({path}) is not valid or the isaacgym package is not installed in editable mode (pip install -e .)")
args = get_args()
# print config
if show_cfg:
print(f"\nIsaac Gym environment ({args.task})")
_print_cfg(vars(args))
# update task arguments
args.cfg_train = os.path.join(path, args.cfg_train)
args.cfg_env = os.path.join(path, args.cfg_env)
# load environment
with cwd(path):
cfg, cfg_train, _ = load_cfg(args)
sim_params = parse_sim_params(args, cfg, cfg_train)
task, env = parse_task(args, cfg, cfg_train, sim_params)
return env
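
# Illustrative usage (a sketch; assumes the Isaac Gym preview 2 "rlgpu" examples are installed):
#   env = load_isaacgym_env_preview2(task_name="Ant", num_envs=512, headless=True)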
def load_isaacgym_env_preview3(task_name: str = "",
num_envs: Optional[int] = None,
headless: Optional[bool] = None,
cli_args: Sequence[str] = [],
isaacgymenvs_path: str = "",
show_cfg: bool = True):
"""Load an Isaac Gym environment (preview 3)
Isaac Gym benchmark environments: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs
:param task_name: The name of the task (default: ``""``).
If not specified, the task name is taken from the command line argument (``task=TASK_NAME``).
Command line argument has priority over function parameter if both are specified
:type task_name: str, optional
:param num_envs: Number of parallel environments to create (default: ``None``).
If not specified, the default number of environments defined in the task configuration is used.
Command line argument has priority over function parameter if both are specified
:type num_envs: int, optional
:param headless: Whether to use headless mode (no rendering) (default: ``None``).
If not specified, the default task configuration is used.
Command line argument has priority over function parameter if both are specified
:type headless: bool, optional
:param cli_args: IsaacGymEnvs configuration and command line arguments (default: ``[]``)
:type cli_args: list of str, optional
:param isaacgymenvs_path: The path to the ``isaacgymenvs`` directory (default: ``""``).
        If empty, the path will be obtained from the isaacgymenvs package metadata
:type isaacgymenvs_path: str, optional
:param show_cfg: Whether to print the configuration (default: ``True``)
:type show_cfg: bool, optional
:raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments
:raises RuntimeError: The isaacgymenvs package is not installed or the path is wrong
:return: Isaac Gym environment (preview 3)
:rtype: isaacgymenvs.tasks.base.vec_task.VecTask
"""
import isaacgym
import isaacgymenvs
from hydra._internal.hydra import Hydra
from hydra._internal.utils import create_automatic_config_search_path, get_args_parser
from hydra.types import RunMode
from omegaconf import OmegaConf
# check task from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("task="):
defined = True
break
# get task name from command line arguments
if defined:
if task_name and task_name != arg.split("task=")[1].split(" ")[0]:
logger.warning("Overriding task name ({}) with command line argument ({})" \
.format(task_name, arg.split("task=")[1].split(" ")[0]))
# get task name from function arguments
else:
if task_name:
sys.argv.append(f"task={task_name}")
else:
raise ValueError("No task name defined. Set task_name parameter or use task=<task_name> as command line argument")
# check num_envs from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("num_envs="):
defined = True
break
# get num_envs from command line arguments
if defined:
if num_envs is not None and num_envs != int(arg.split("num_envs=")[1].split(" ")[0]):
logger.warning("Overriding num_envs ({}) with command line argument (num_envs={})" \
.format(num_envs, arg.split("num_envs=")[1].split(" ")[0]))
# get num_envs from function arguments
elif num_envs is not None and num_envs > 0:
sys.argv.append(f"num_envs={num_envs}")
# check headless from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("headless="):
defined = True
break
# get headless from command line arguments
if defined:
if headless is not None and str(headless).lower() != arg.split("headless=")[1].split(" ")[0].lower():
logger.warning("Overriding headless ({}) with command line argument (headless={})" \
.format(headless, arg.split("headless=")[1].split(" ")[0]))
# get headless from function arguments
elif headless is not None:
sys.argv.append(f"headless={headless}")
    # other command line arguments
sys.argv += cli_args
# get isaacgymenvs path from isaacgymenvs package metadata
if isaacgymenvs_path == "":
if not hasattr(isaacgymenvs, "__path__"):
raise RuntimeError("isaacgymenvs package is not installed")
isaacgymenvs_path = list(isaacgymenvs.__path__)[0]
config_path = os.path.join(isaacgymenvs_path, "cfg")
# set omegaconf resolvers
try:
OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower())
    except Exception:
pass
try:
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
    except Exception:
pass
try:
OmegaConf.register_new_resolver('if', lambda condition, a, b: a if condition else b)
    except Exception:
pass
try:
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg)
    except Exception:
pass
# get hydra config without use @hydra.main
config_file = "config"
args = get_args_parser().parse_args()
search_path = create_automatic_config_search_path(config_file, None, config_path)
hydra_object = Hydra.create_main_hydra2(task_name='load_isaacgymenv', config_search_path=search_path)
config = hydra_object.compose_config(config_file, args.overrides, run_mode=RunMode.RUN)
cfg = _omegaconf_to_dict(config.task)
# print config
if show_cfg:
print(f"\nIsaac Gym environment ({config.task.name})")
_print_cfg(cfg)
# load environment
sys.path.append(isaacgymenvs_path)
from tasks import isaacgym_task_map # type: ignore
try:
env = isaacgym_task_map[config.task.name](cfg=cfg,
sim_device=config.sim_device,
graphics_device_id=config.graphics_device_id,
headless=config.headless)
except TypeError as e:
env = isaacgym_task_map[config.task.name](cfg=cfg,
rl_device=config.rl_device,
sim_device=config.sim_device,
graphics_device_id=config.graphics_device_id,
headless=config.headless,
virtual_screen_capture=config.capture_video, # TODO: check
force_render=config.force_render)
return env
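# Usage sketch (illustrative, not part of the original module): preview-3 tasks are
# configured through Hydra, so additional options can be passed as "key=value"
# overrides via ``cli_args``. "Cartpole" and the override below are assumptions.
#
#     env = load_isaacgym_env_preview3(task_name="Cartpole",
#                                      num_envs=64,
#                                      cli_args=["sim_device=cpu"])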
def load_isaacgym_env_preview4(task_name: str = "",
num_envs: Optional[int] = None,
headless: Optional[bool] = None,
cli_args: Sequence[str] = [],
isaacgymenvs_path: str = "",
show_cfg: bool = True):
"""Load an Isaac Gym environment (preview 4)
Isaac Gym benchmark environments: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs
:param task_name: The name of the task (default: ``""``).
If not specified, the task name is taken from the command line argument (``task=TASK_NAME``).
Command line argument has priority over function parameter if both are specified
:type task_name: str, optional
:param num_envs: Number of parallel environments to create (default: ``None``).
If not specified, the default number of environments defined in the task configuration is used.
Command line argument has priority over function parameter if both are specified
:type num_envs: int, optional
:param headless: Whether to use headless mode (no rendering) (default: ``None``).
If not specified, the default task configuration is used.
Command line argument has priority over function parameter if both are specified
:type headless: bool, optional
:param cli_args: IsaacGymEnvs configuration and command line arguments (default: ``[]``)
:type cli_args: list of str, optional
:param isaacgymenvs_path: The path to the ``isaacgymenvs`` directory (default: ``""``).
        If empty, the path will be obtained from the isaacgymenvs package metadata
:type isaacgymenvs_path: str, optional
:param show_cfg: Whether to print the configuration (default: ``True``)
:type show_cfg: bool, optional
:raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments
:raises RuntimeError: The isaacgymenvs package is not installed or the path is wrong
:return: Isaac Gym environment (preview 4)
:rtype: isaacgymenvs.tasks.base.vec_task.VecTask
"""
return load_isaacgym_env_preview3(task_name, num_envs, headless, cli_args, isaacgymenvs_path, show_cfg)
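# Usage sketch (illustrative): since preview 4 reuses the preview-3 code path, the
# call signature is identical. "Cartpole" is an assumed task name.
#
#     env = load_isaacgym_env_preview4(task_name="Cartpole", headless=True)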
| 16,639 | Python | 42.446475 | 134 | 0.615241 |
Toni-SM/skrl/skrl/envs/loaders/torch/isaac_orbit_envs.py | from typing import Optional, Sequence
import os
import sys
from skrl import logger
__all__ = ["load_isaac_orbit_env"]
def _print_cfg(d, indent=0) -> None:
"""Print the environment configuration
:param d: The dictionary to print
:type d: dict
:param indent: The indentation level (default: ``0``)
:type indent: int, optional
"""
for key, value in d.items():
if isinstance(value, dict):
_print_cfg(value, indent + 1)
else:
print(" | " * indent + f" |-- {key}: {value}")
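# For illustration (hypothetical input, not part of the original module): only leaf
# key/value pairs are printed; each nested dictionary contributes one indentation
# level, e.g.
#
#     _print_cfg({"sim": {"dt": 0.0083, "gravity": -9.81}, "seed": 42})
#     #  |  |-- dt: 0.0083
#     #  |  |-- gravity: -9.81
#     #  |-- seed: 42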
def load_isaac_orbit_env(task_name: str = "",
num_envs: Optional[int] = None,
headless: Optional[bool] = None,
cli_args: Sequence[str] = [],
show_cfg: bool = True):
"""Load an Isaac Orbit environment
Isaac Orbit: https://isaac-orbit.github.io/orbit/index.html
This function includes the definition and parsing of command line arguments used by Isaac Orbit:
- ``--headless``: Force display off at all times
- ``--cpu``: Use CPU pipeline
- ``--num_envs``: Number of environments to simulate
- ``--task``: Name of the task
    - ``--seed``: Seed used for the environment
:param task_name: The name of the task (default: ``""``).
If not specified, the task name is taken from the command line argument (``--task TASK_NAME``).
Command line argument has priority over function parameter if both are specified
:type task_name: str, optional
:param num_envs: Number of parallel environments to create (default: ``None``).
If not specified, the default number of environments defined in the task configuration is used.
Command line argument has priority over function parameter if both are specified
:type num_envs: int, optional
:param headless: Whether to use headless mode (no rendering) (default: ``None``).
If not specified, the default task configuration is used.
Command line argument has priority over function parameter if both are specified
:type headless: bool, optional
:param cli_args: Isaac Orbit configuration and command line arguments (default: ``[]``)
:type cli_args: list of str, optional
:param show_cfg: Whether to print the configuration (default: ``True``)
:type show_cfg: bool, optional
:raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments
:return: Isaac Orbit environment
:rtype: gym.Env
"""
import argparse
import atexit
import gym
# check task from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("--task"):
defined = True
break
# get task name from command line arguments
if defined:
arg_index = sys.argv.index("--task") + 1
if arg_index >= len(sys.argv):
raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument")
if task_name and task_name != sys.argv[arg_index]:
logger.warning(f"Overriding task ({task_name}) with command line argument ({sys.argv[arg_index]})")
# get task name from function arguments
else:
if task_name:
sys.argv.append("--task")
sys.argv.append(task_name)
else:
raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument")
# check num_envs from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("--num_envs"):
defined = True
break
# get num_envs from command line arguments
if defined:
if num_envs is not None:
logger.warning("Overriding num_envs with command line argument (--num_envs)")
# get num_envs from function arguments
elif num_envs is not None and num_envs > 0:
sys.argv.append("--num_envs")
sys.argv.append(str(num_envs))
# check headless from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("--headless"):
defined = True
break
# get headless from command line arguments
if defined:
if headless is not None:
logger.warning("Overriding headless with command line argument (--headless)")
# get headless from function arguments
elif headless is not None:
sys.argv.append("--headless")
    # other command line arguments
sys.argv += cli_args
# parse arguments
parser = argparse.ArgumentParser("Welcome to Orbit: Omniverse Robotics Environments!")
parser.add_argument("--headless", action="store_true", default=False, help="Force display off at all times.")
parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
args = parser.parse_args()
# load the most efficient kit configuration in headless mode
if args.headless:
app_experience = f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.gym.headless.kit"
else:
app_experience = f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit"
# launch the simulator
from omni.isaac.kit import SimulationApp # type: ignore
config = {"headless": args.headless}
simulation_app = SimulationApp(config, experience=app_experience)
@atexit.register
def close_the_simulator():
simulation_app.close()
# import orbit extensions
import omni.isaac.contrib_envs # type: ignore
import omni.isaac.orbit_envs # type: ignore
from omni.isaac.orbit_envs.utils import parse_env_cfg # type: ignore
cfg = parse_env_cfg(args.task, use_gpu=not args.cpu, num_envs=args.num_envs)
# print config
if show_cfg:
print(f"\nIsaac Orbit environment ({args.task})")
try:
_print_cfg(cfg)
        except AttributeError:
pass
# load environment
env = gym.make(args.task, cfg=cfg, headless=args.headless)
return env
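# Usage sketch (illustrative, not part of the original module): "Isaac-Cartpole-v0"
# is an assumed Orbit task id, and a working Isaac Sim / Orbit installation (with
# the EXP_PATH environment variable set) is required.
#
#     env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0", num_envs=64)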
| 6,481 | Python | 37.814371 | 132 | 0.636013 |
Toni-SM/skrl/skrl/envs/loaders/torch/omniverse_isaacgym_envs.py | from typing import Optional, Sequence, Union
import os
import queue
import sys
from skrl import logger
__all__ = ["load_omniverse_isaacgym_env"]
def _omegaconf_to_dict(config) -> dict:
"""Convert OmegaConf config to dict
:param config: The OmegaConf config
:type config: OmegaConf.Config
:return: The config as dict
:rtype: dict
"""
# return config.to_container(dict)
from omegaconf import DictConfig
d = {}
for k, v in config.items():
d[k] = _omegaconf_to_dict(v) if isinstance(v, DictConfig) else v
return d
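# For illustration (requires omegaconf; the values are hypothetical):
#
#     from omegaconf import OmegaConf
#
#     config = OmegaConf.create({"task": {"name": "Cartpole"}, "seed": 42})
#     _omegaconf_to_dict(config)  # -> {"task": {"name": "Cartpole"}, "seed": 42}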
def _print_cfg(d, indent=0) -> None:
"""Print the environment configuration
:param d: The dictionary to print
:type d: dict
:param indent: The indentation level (default: ``0``)
:type indent: int, optional
"""
for key, value in d.items():
if isinstance(value, dict):
_print_cfg(value, indent + 1)
else:
print(" | " * indent + f" |-- {key}: {value}")
def load_omniverse_isaacgym_env(task_name: str = "",
num_envs: Optional[int] = None,
headless: Optional[bool] = None,
cli_args: Sequence[str] = [],
omniisaacgymenvs_path: str = "",
show_cfg: bool = True,
multi_threaded: bool = False,
timeout: int = 30) -> Union["VecEnvBase", "VecEnvMT"]:
"""Load an Omniverse Isaac Gym environment (OIGE)
Omniverse Isaac Gym benchmark environments: https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs
:param task_name: The name of the task (default: ``""``).
If not specified, the task name is taken from the command line argument (``task=TASK_NAME``).
Command line argument has priority over function parameter if both are specified
:type task_name: str, optional
:param num_envs: Number of parallel environments to create (default: ``None``).
If not specified, the default number of environments defined in the task configuration is used.
Command line argument has priority over function parameter if both are specified
:type num_envs: int, optional
:param headless: Whether to use headless mode (no rendering) (default: ``None``).
If not specified, the default task configuration is used.
Command line argument has priority over function parameter if both are specified
:type headless: bool, optional
:param cli_args: OIGE configuration and command line arguments (default: ``[]``)
:type cli_args: list of str, optional
:param omniisaacgymenvs_path: The path to the ``omniisaacgymenvs`` directory (default: ``""``).
        If empty, the path will be obtained from the omniisaacgymenvs package metadata
:type omniisaacgymenvs_path: str, optional
:param show_cfg: Whether to print the configuration (default: ``True``)
:type show_cfg: bool, optional
:param multi_threaded: Whether to use multi-threaded environment (default: ``False``)
:type multi_threaded: bool, optional
:param timeout: Seconds to wait for data when queue is empty in multi-threaded environment (default: ``30``)
:type timeout: int, optional
:raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments
:raises RuntimeError: The omniisaacgymenvs package is not installed or the path is wrong
:return: Omniverse Isaac Gym environment
:rtype: omni.isaac.gym.vec_env.vec_env_base.VecEnvBase or omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT
"""
import omegaconf
import omniisaacgymenvs # type: ignore
from hydra._internal.hydra import Hydra
from hydra._internal.utils import create_automatic_config_search_path, get_args_parser
from hydra.types import RunMode
from omegaconf import OmegaConf
from omni.isaac.gym.vec_env import TaskStopException, VecEnvBase, VecEnvMT # type: ignore
from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT # type: ignore
import torch
# check task from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("task="):
defined = True
break
# get task name from command line arguments
if defined:
if task_name and task_name != arg.split("task=")[1].split(" ")[0]:
logger.warning("Overriding task name ({}) with command line argument (task={})" \
.format(task_name, arg.split("task=")[1].split(" ")[0]))
# get task name from function arguments
else:
if task_name:
sys.argv.append(f"task={task_name}")
else:
raise ValueError("No task name defined. Set task_name parameter or use task=<task_name> as command line argument")
# check num_envs from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("num_envs="):
defined = True
break
# get num_envs from command line arguments
if defined:
if num_envs is not None and num_envs != int(arg.split("num_envs=")[1].split(" ")[0]):
logger.warning("Overriding num_envs ({}) with command line argument (num_envs={})" \
.format(num_envs, arg.split("num_envs=")[1].split(" ")[0]))
# get num_envs from function arguments
elif num_envs is not None and num_envs > 0:
sys.argv.append(f"num_envs={num_envs}")
# check headless from command line arguments
defined = False
for arg in sys.argv:
if arg.startswith("headless="):
defined = True
break
# get headless from command line arguments
if defined:
if headless is not None and str(headless).lower() != arg.split("headless=")[1].split(" ")[0].lower():
logger.warning("Overriding headless ({}) with command line argument (headless={})" \
.format(headless, arg.split("headless=")[1].split(" ")[0]))
# get headless from function arguments
elif headless is not None:
sys.argv.append(f"headless={headless}")
    # other command line arguments
sys.argv += cli_args
# get omniisaacgymenvs path from omniisaacgymenvs package metadata
if omniisaacgymenvs_path == "":
if not hasattr(omniisaacgymenvs, "__path__"):
raise RuntimeError("omniisaacgymenvs package is not installed")
omniisaacgymenvs_path = list(omniisaacgymenvs.__path__)[0]
config_path = os.path.join(omniisaacgymenvs_path, "cfg")
# set omegaconf resolvers
OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower())
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
OmegaConf.register_new_resolver('if', lambda condition, a, b: a if condition else b)
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg)
# get hydra config without use @hydra.main
config_file = "config"
args = get_args_parser().parse_args()
search_path = create_automatic_config_search_path(config_file, None, config_path)
hydra_object = Hydra.create_main_hydra2(task_name='load_omniisaacgymenv', config_search_path=search_path)
config = hydra_object.compose_config(config_file, args.overrides, run_mode=RunMode.RUN)
del config.hydra
cfg = _omegaconf_to_dict(config)
cfg["train"] = {}
# print config
if show_cfg:
print(f"\nOmniverse Isaac Gym environment ({config.task.name})")
_print_cfg(cfg)
# internal classes
class _OmniIsaacGymVecEnv(VecEnvBase):
def step(self, actions):
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone()
self._task.pre_physics_step(actions)
for _ in range(self._task.control_frequency_inv):
self._world.step(render=self._render)
self.sim_frame_count += 1
observations, rewards, dones, info = self._task.post_physics_step()
return {"obs": torch.clamp(observations, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()}, \
rewards.to(self._task.rl_device).clone(), dones.to(self._task.rl_device).clone(), info.copy()
def reset(self):
self._task.reset()
actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device)
return self.step(actions)[0]
class _OmniIsaacGymTrainerMT(TrainerMT):
def run(self):
pass
def stop(self):
pass
class _OmniIsaacGymVecEnvMT(VecEnvMT):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.action_queue = queue.Queue(1)
self.data_queue = queue.Queue(1)
def run(self, trainer=None):
super().run(_OmniIsaacGymTrainerMT() if trainer is None else trainer)
def _parse_data(self, data):
self._observations = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
self._rewards = data["rew"].to(self._task.rl_device).clone()
self._dones = data["reset"].to(self._task.rl_device).clone()
self._info = data["extras"].copy()
def step(self, actions):
if self._stop:
raise TaskStopException()
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).clone()
self.send_actions(actions)
            self.get_data()  # blocking call that populates the data buffers via _parse_data()
return {"obs": self._observations}, self._rewards, self._dones, self._info
def reset(self):
self._task.reset()
actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device)
return self.step(actions)[0]
def close(self):
            # send a stop signal (a ``None`` action) to the main thread
self.send_actions(None)
self.stop = True
# load environment
sys.path.append(omniisaacgymenvs_path)
from utils.task_util import initialize_task # type: ignore
try:
if config.multi_gpu:
rank = int(os.getenv("LOCAL_RANK", "0"))
config.device_id = rank
config.rl_device = f"cuda:{rank}"
except omegaconf.errors.ConfigAttributeError:
logger.warning("Using an older version of OmniIsaacGymEnvs (2022.2.0 or earlier)")
enable_viewport = "enable_cameras" in config.task.sim and config.task.sim.enable_cameras
if multi_threaded:
try:
env = _OmniIsaacGymVecEnvMT(headless=config.headless,
sim_device=config.device_id,
enable_livestream=config.enable_livestream,
enable_viewport=enable_viewport)
except (TypeError, omegaconf.errors.ConfigAttributeError):
logger.warning("Using an older version of Isaac Sim or OmniIsaacGymEnvs (2022.2.0 or earlier)")
env = _OmniIsaacGymVecEnvMT(headless=config.headless) # Isaac Sim 2022.2.0 and earlier
task = initialize_task(cfg, env, init_sim=False)
env.initialize(env.action_queue, env.data_queue, timeout=timeout)
else:
try:
env = _OmniIsaacGymVecEnv(headless=config.headless,
sim_device=config.device_id,
enable_livestream=config.enable_livestream,
enable_viewport=enable_viewport)
except (TypeError, omegaconf.errors.ConfigAttributeError):
logger.warning("Using an older version of Isaac Sim or OmniIsaacGymEnvs (2022.2.0 or earlier)")
env = _OmniIsaacGymVecEnv(headless=config.headless) # Isaac Sim 2022.2.0 and earlier
task = initialize_task(cfg, env, init_sim=True)
return env
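# Usage sketch (illustrative, not part of the original module): "Cartpole" is an
# assumed task name. With ``multi_threaded=True`` the returned environment is a
# VecEnvMT whose ``run()`` method is expected to be called from the main thread
# while the RL loop runs in a separate one.
#
#     env = load_omniverse_isaacgym_env(task_name="Cartpole", num_envs=64)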
| 12,134 | Python | 42.808664 | 133 | 0.619829 |
Toni-SM/skrl/skrl/envs/loaders/jax/bidexhands_envs.py | # since Bi-DexHands environments are implemented on top of PyTorch, the loader is the same
from skrl.envs.loaders.torch import load_bidexhands_env
| 148 | Python | 36.249991 | 90 | 0.810811 |
Toni-SM/skrl/skrl/envs/loaders/jax/__init__.py | from skrl.envs.loaders.jax.bidexhands_envs import load_bidexhands_env
from skrl.envs.loaders.jax.isaac_orbit_envs import load_isaac_orbit_env
from skrl.envs.loaders.jax.isaacgym_envs import (
load_isaacgym_env_preview2,
load_isaacgym_env_preview3,
load_isaacgym_env_preview4
)
from skrl.envs.loaders.jax.omniverse_isaacgym_envs import load_omniverse_isaacgym_env
| 375 | Python | 40.777773 | 85 | 0.8 |
Toni-SM/skrl/skrl/envs/loaders/jax/isaacgym_envs.py | # since Isaac Gym (preview) environments are implemented on top of PyTorch, the loaders are the same
from skrl.envs.loaders.torch import ( # isort:skip
load_isaacgym_env_preview2,
load_isaacgym_env_preview3,
load_isaacgym_env_preview4,
)
| 252 | Python | 30.624996 | 100 | 0.746032 |
Toni-SM/skrl/skrl/envs/loaders/jax/isaac_orbit_envs.py | # since Isaac Orbit environments are implemented on top of PyTorch, the loader is the same
from skrl.envs.loaders.torch import load_isaac_orbit_env
| 149 | Python | 36.499991 | 90 | 0.805369 |
Toni-SM/skrl/skrl/envs/loaders/jax/omniverse_isaacgym_envs.py | # since Omniverse Isaac Gym environments are implemented on top of PyTorch, the loader is the same
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
| 164 | Python | 40.24999 | 98 | 0.817073 |
Toni-SM/skrl/skrl/envs/wrappers/torch/gym_envs.py | from typing import Any, Optional, Tuple
import gym
from packaging import version
import numpy as np
import torch
from skrl import logger
from skrl.envs.wrappers.torch.base import Wrapper
class GymWrapper(Wrapper):
def __init__(self, env: Any) -> None:
"""OpenAI Gym environment wrapper
:param env: The environment to wrap
:type env: Any supported OpenAI Gym environment
"""
super().__init__(env)
self._vectorized = False
try:
if isinstance(env, gym.vector.SyncVectorEnv) or isinstance(env, gym.vector.AsyncVectorEnv):
self._vectorized = True
self._reset_once = True
self._obs_tensor = None
self._info_dict = None
except Exception as e:
logger.warning(f"Failed to check for a vectorized environment: {e}")
self._deprecated_api = version.parse(gym.__version__) < version.parse("0.25.0")
if self._deprecated_api:
logger.warning(f"Using a deprecated version of OpenAI Gym's API: {gym.__version__}")
@property
def state_space(self) -> gym.Space:
"""State space
An alias for the ``observation_space`` property
"""
if self._vectorized:
return self._env.single_observation_space
return self._env.observation_space
@property
def observation_space(self) -> gym.Space:
"""Observation space
"""
if self._vectorized:
return self._env.single_observation_space
return self._env.observation_space
@property
def action_space(self) -> gym.Space:
"""Action space
"""
if self._vectorized:
return self._env.single_action_space
return self._env.action_space
def _observation_to_tensor(self, observation: Any, space: Optional[gym.Space] = None) -> torch.Tensor:
"""Convert the OpenAI Gym observation to a flat tensor
:param observation: The OpenAI Gym observation to convert to a tensor
:type observation: Any supported OpenAI Gym observation space
:raises: ValueError if the observation space type is not supported
:return: The observation as a flat tensor
:rtype: torch.Tensor
"""
observation_space = self._env.observation_space if self._vectorized else self.observation_space
space = space if space is not None else observation_space
if self._vectorized and isinstance(space, gym.spaces.MultiDiscrete):
return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1)
elif isinstance(observation, int):
return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1)
elif isinstance(observation, np.ndarray):
return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
elif isinstance(space, gym.spaces.Discrete):
return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
elif isinstance(space, gym.spaces.Box):
return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
elif isinstance(space, gym.spaces.Dict):
tmp = torch.cat([self._observation_to_tensor(observation[k], space[k]) \
for k in sorted(space.keys())], dim=-1).view(self.num_envs, -1)
return tmp
else:
raise ValueError(f"Observation space type {type(space)} not supported. Please report this issue")
def _tensor_to_action(self, actions: torch.Tensor) -> Any:
"""Convert the action to the OpenAI Gym expected format
:param actions: The actions to perform
:type actions: torch.Tensor
:raise ValueError: If the action space type is not supported
:return: The action in the OpenAI Gym format
:rtype: Any supported OpenAI Gym action space
"""
space = self._env.action_space if self._vectorized else self.action_space
if self._vectorized:
if isinstance(space, gym.spaces.MultiDiscrete):
return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape)
elif isinstance(space, gym.spaces.Tuple):
if isinstance(space[0], gym.spaces.Box):
return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(space.shape)
elif isinstance(space[0], gym.spaces.Discrete):
return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(-1)
elif isinstance(space, gym.spaces.Discrete):
return actions.item()
elif isinstance(space, gym.spaces.MultiDiscrete):
return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape)
elif isinstance(space, gym.spaces.Box):
return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape)
raise ValueError(f"Action space type {type(space)} not supported. Please report this issue")
def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: torch.Tensor
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of torch.Tensor and any other info
"""
if self._deprecated_api:
observation, reward, terminated, info = self._env.step(self._tensor_to_action(actions))
# truncated: https://gymnasium.farama.org/tutorials/handling_time_limits
if type(info) is list:
truncated = np.array([d.get("TimeLimit.truncated", False) for d in info], dtype=terminated.dtype)
terminated *= np.logical_not(truncated)
else:
truncated = info.get("TimeLimit.truncated", False)
if truncated:
terminated = False
else:
observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions))
# convert response to torch
observation = self._observation_to_tensor(observation)
reward = torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
terminated = torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1)
truncated = torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1)
# save observation and info for vectorized envs
if self._vectorized:
self._obs_tensor = observation
self._info_dict = info
return observation, reward, terminated, truncated, info
def reset(self) -> Tuple[torch.Tensor, Any]:
"""Reset the environment
:return: Observation, info
:rtype: torch.Tensor and any other info
"""
# handle vectorized envs
if self._vectorized:
if not self._reset_once:
return self._obs_tensor, self._info_dict
self._reset_once = False
# reset the env/envs
if self._deprecated_api:
observation = self._env.reset()
info = {}
else:
observation, info = self._env.reset()
return self._observation_to_tensor(observation), info
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
self._env.render(*args, **kwargs)
def close(self) -> None:
"""Close the environment
"""
self._env.close()
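# Usage sketch (illustrative, not part of the original module; assumes the classic
# "CartPole-v1" task is available in the installed gym version):
#
#     import gym
#     import torch
#
#     env = GymWrapper(gym.make("CartPole-v1"))
#     observation, info = env.reset()
#     actions = torch.randint(env.action_space.n, (1, 1), device=env.device)
#     observation, reward, terminated, truncated, info = env.step(actions)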
| 7,739 | Python | 40.612903 | 113 | 0.625275 |
Toni-SM/skrl/skrl/envs/wrappers/torch/bidexhands_envs.py | from typing import Any, Mapping, Sequence, Tuple
import gym
import torch
from skrl.envs.wrappers.torch.base import MultiAgentEnvWrapper
class BiDexHandsWrapper(MultiAgentEnvWrapper):
def __init__(self, env: Any) -> None:
"""Bi-DexHands wrapper
:param env: The environment to wrap
:type env: Any supported Bi-DexHands environment
"""
super().__init__(env)
self._reset_once = True
self._obs_buf = None
self._shared_obs_buf = None
self.possible_agents = [f"agent_{i}" for i in range(self.num_agents)]
@property
def agents(self) -> Sequence[str]:
"""Names of all current agents
These may be changed as an environment progresses (i.e. agents can be added or removed)
"""
return self.possible_agents
@property
def observation_spaces(self) -> Mapping[str, gym.Space]:
"""Observation spaces
"""
return {uid: space for uid, space in zip(self.possible_agents, self._env.observation_space)}
@property
def action_spaces(self) -> Mapping[str, gym.Space]:
"""Action spaces
"""
return {uid: space for uid, space in zip(self.possible_agents, self._env.action_space)}
@property
def shared_observation_spaces(self) -> Mapping[str, gym.Space]:
"""Shared observation spaces
"""
return {uid: space for uid, space in zip(self.possible_agents, self._env.share_observation_space)}
def step(self, actions: Mapping[str, torch.Tensor]) -> \
Tuple[Mapping[str, torch.Tensor], Mapping[str, torch.Tensor],
Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, Any]]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: dictionary of torch.Tensor
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of dictionaries torch.Tensor and any other info
"""
actions = [actions[uid] for uid in self.possible_agents]
obs_buf, shared_obs_buf, reward_buf, terminated_buf, info, _ = self._env.step(actions)
self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)}
self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)}
reward = {uid: reward_buf[:,i].view(-1, 1) for i, uid in enumerate(self.possible_agents)}
terminated = {uid: terminated_buf[:,i].view(-1, 1) for i, uid in enumerate(self.possible_agents)}
truncated = {uid: torch.zeros_like(value) for uid, value in terminated.items()}
info = {"shared_states": self._shared_obs_buf}
return self._obs_buf, reward, terminated, truncated, info
def reset(self) -> Tuple[Mapping[str, torch.Tensor], Mapping[str, Any]]:
"""Reset the environment
:return: Observation, info
:rtype: tuple of dictionaries of torch.Tensor and any other info
"""
if self._reset_once:
obs_buf, shared_obs_buf, _ = self._env.reset()
self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)}
self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)}
self._reset_once = False
return self._obs_buf, {"shared_states": self._shared_obs_buf}
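# Usage sketch (illustrative, not part of the original module; ``env`` stands for an
# already-created Bi-DexHands vectorized environment):
#
#     env = BiDexHandsWrapper(env)
#     observations, infos = env.reset()
#     actions = {uid: torch.zeros((env.num_envs, env.action_space(uid).shape[0]),
#                                 device=env.device)
#                for uid in env.agents}
#     observations, rewards, terminated, truncated, infos = env.step(actions)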
| 3,394 | Python | 38.476744 | 107 | 0.629641 |
Toni-SM/skrl/skrl/envs/wrappers/torch/robosuite_envs.py | from typing import Any, Optional, Tuple
import collections
import gym
import numpy as np
import torch
from skrl.envs.wrappers.torch.base import Wrapper
class RobosuiteWrapper(Wrapper):
def __init__(self, env: Any) -> None:
"""Robosuite environment wrapper
:param env: The environment to wrap
:type env: Any supported robosuite environment
"""
super().__init__(env)
# observation and action spaces
self._observation_space = self._spec_to_space(self._env.observation_spec())
self._action_space = self._spec_to_space(self._env.action_spec)
@property
def state_space(self) -> gym.Space:
"""State space
An alias for the ``observation_space`` property
"""
return self._observation_space
@property
def observation_space(self) -> gym.Space:
"""Observation space
"""
return self._observation_space
@property
def action_space(self) -> gym.Space:
"""Action space
"""
return self._action_space
def _spec_to_space(self, spec: Any) -> gym.Space:
"""Convert the robosuite spec to a Gym space
:param spec: The robosuite spec to convert
:type spec: Any supported robosuite spec
:raises: ValueError if the spec type is not supported
:return: The Gym space
:rtype: gym.Space
"""
if type(spec) is tuple:
return gym.spaces.Box(shape=spec[0].shape,
dtype=np.float32,
low=spec[0],
high=spec[1])
elif isinstance(spec, np.ndarray):
return gym.spaces.Box(shape=spec.shape,
dtype=np.float32,
low=np.full(spec.shape, float("-inf")),
high=np.full(spec.shape, float("inf")))
elif isinstance(spec, collections.OrderedDict):
return gym.spaces.Dict({k: self._spec_to_space(v) for k, v in spec.items()})
else:
raise ValueError(f"Spec type {type(spec)} not supported. Please report this issue")
def _observation_to_tensor(self, observation: Any, spec: Optional[Any] = None) -> torch.Tensor:
"""Convert the observation to a flat tensor
:param observation: The observation to convert to a tensor
:type observation: Any supported observation
:raises: ValueError if the observation spec type is not supported
:return: The observation as a flat tensor
:rtype: torch.Tensor
"""
spec = spec if spec is not None else self._env.observation_spec()
if isinstance(spec, np.ndarray):
return torch.tensor(observation, device=self.device, dtype=torch.float32).reshape(self.num_envs, -1)
elif isinstance(spec, collections.OrderedDict):
return torch.cat([self._observation_to_tensor(observation[k], spec[k]) \
for k in sorted(spec.keys())], dim=-1).reshape(self.num_envs, -1)
else:
raise ValueError(f"Observation spec type {type(spec)} not supported. Please report this issue")
def _tensor_to_action(self, actions: torch.Tensor) -> Any:
"""Convert the action to the robosuite expected format
:param actions: The actions to perform
:type actions: torch.Tensor
:raise ValueError: If the action space type is not supported
:return: The action in the robosuite expected format
:rtype: Any supported robosuite action
"""
spec = self._env.action_spec
if type(spec) is tuple:
return np.array(actions.cpu().numpy(), dtype=np.float32).reshape(spec[0].shape)
else:
raise ValueError(f"Action spec type {type(spec)} not supported. Please report this issue")
def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: torch.Tensor
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of torch.Tensor and any other info
"""
observation, reward, terminated, info = self._env.step(self._tensor_to_action(actions))
        truncated = False
        info = {}  # note: the info dict returned by robosuite is discarded here
# convert response to torch
return self._observation_to_tensor(observation), \
torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1), \
torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \
torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \
info
def reset(self) -> Tuple[torch.Tensor, Any]:
"""Reset the environment
:return: The state of the environment
:rtype: torch.Tensor
"""
observation = self._env.reset()
return self._observation_to_tensor(observation), {}
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
self._env.render(*args, **kwargs)
def close(self) -> None:
"""Close the environment
"""
self._env.close()
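# Usage sketch (illustrative, not part of the original module; "Lift" and "Panda"
# are assumed to exist in the installed robosuite version):
#
#     import robosuite
#
#     env = RobosuiteWrapper(robosuite.make("Lift", robots="Panda", has_renderer=False))
#     observation, info = env.reset()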
| 5,343 | Python | 35.108108 | 112 | 0.600786 |
Toni-SM/skrl/skrl/envs/wrappers/torch/base.py | from typing import Any, Mapping, Sequence, Tuple
import gym
import torch
class Wrapper(object):
def __init__(self, env: Any) -> None:
"""Base wrapper class for RL environments
:param env: The environment to wrap
:type env: Any supported RL environment
"""
self._env = env
# device (faster than @property)
if hasattr(self._env, "device"):
self.device = torch.device(self._env.device)
else:
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# spaces
try:
self._action_space = self._env.single_action_space
self._observation_space = self._env.single_observation_space
except AttributeError:
self._action_space = self._env.action_space
self._observation_space = self._env.observation_space
self._state_space = self._env.state_space if hasattr(self._env, "state_space") else self._observation_space
def __getattr__(self, key: str) -> Any:
"""Get an attribute from the wrapped environment
:param key: The attribute name
:type key: str
:raises AttributeError: If the attribute does not exist
:return: The attribute value
:rtype: Any
"""
if hasattr(self._env, key):
return getattr(self._env, key)
raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'")
def reset(self) -> Tuple[torch.Tensor, Any]:
"""Reset the environment
:raises NotImplementedError: Not implemented
:return: Observation, info
:rtype: torch.Tensor and any other info
"""
raise NotImplementedError
def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: torch.Tensor
:raises NotImplementedError: Not implemented
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of torch.Tensor and any other info
"""
raise NotImplementedError
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
pass
def close(self) -> None:
"""Close the environment
"""
pass
@property
def num_envs(self) -> int:
"""Number of environments
If the wrapped environment does not have the ``num_envs`` property, it will be set to 1
"""
return self._env.num_envs if hasattr(self._env, "num_envs") else 1
@property
def num_agents(self) -> int:
"""Number of agents
If the wrapped environment does not have the ``num_agents`` property, it will be set to 1
"""
return self._env.num_agents if hasattr(self._env, "num_agents") else 1
@property
def state_space(self) -> gym.Space:
"""State space
If the wrapped environment does not have the ``state_space`` property,
the value of the ``observation_space`` property will be used
"""
return self._state_space
@property
def observation_space(self) -> gym.Space:
"""Observation space
"""
return self._observation_space
@property
def action_space(self) -> gym.Space:
"""Action space
"""
return self._action_space
class MultiAgentEnvWrapper(object):
def __init__(self, env: Any) -> None:
"""Base wrapper class for multi-agent environments
:param env: The multi-agent environment to wrap
:type env: Any supported multi-agent environment
"""
self._env = env
# device (faster than @property)
if hasattr(self._env, "device"):
self.device = torch.device(self._env.device)
else:
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.possible_agents = []
def __getattr__(self, key: str) -> Any:
"""Get an attribute from the wrapped environment
:param key: The attribute name
:type key: str
:raises AttributeError: If the attribute does not exist
:return: The attribute value
:rtype: Any
"""
if hasattr(self._env, key):
return getattr(self._env, key)
raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'")
def reset(self) -> Tuple[Mapping[str, torch.Tensor], Mapping[str, Any]]:
"""Reset the environment
:raises NotImplementedError: Not implemented
:return: Observation, info
:rtype: tuple of dictionaries of torch.Tensor and any other info
"""
raise NotImplementedError
def step(self, actions: Mapping[str, torch.Tensor]) -> \
Tuple[Mapping[str, torch.Tensor], Mapping[str, torch.Tensor],
Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, Any]]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: dictionary of torch.Tensor
:raises NotImplementedError: Not implemented
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of dictionaries of torch.Tensor and any other info
"""
raise NotImplementedError
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
pass
def close(self) -> None:
"""Close the environment
"""
pass
@property
def num_envs(self) -> int:
"""Number of environments
If the wrapped environment does not have the ``num_envs`` property, it will be set to 1
"""
return self._env.num_envs if hasattr(self._env, "num_envs") else 1
@property
def num_agents(self) -> int:
"""Number of agents
If the wrapped environment does not have the ``num_agents`` property, it will be set to 1
"""
return self._env.num_agents if hasattr(self._env, "num_agents") else 1
@property
def agents(self) -> Sequence[str]:
"""Names of all current agents
These may be changed as an environment progresses (i.e. agents can be added or removed)
"""
raise NotImplementedError
@property
def state_spaces(self) -> Mapping[str, gym.Space]:
"""State spaces
An alias for the ``observation_spaces`` property
"""
return self.observation_spaces
@property
def observation_spaces(self) -> Mapping[str, gym.Space]:
"""Observation spaces
"""
raise NotImplementedError
@property
def action_spaces(self) -> Mapping[str, gym.Space]:
"""Action spaces
"""
raise NotImplementedError
@property
def shared_state_spaces(self) -> Mapping[str, gym.Space]:
"""Shared state spaces
An alias for the ``shared_observation_spaces`` property
"""
return self.shared_observation_spaces
@property
def shared_observation_spaces(self) -> Mapping[str, gym.Space]:
"""Shared observation spaces
"""
raise NotImplementedError
def state_space(self, agent: str) -> gym.Space:
"""State space
:param agent: Name of the agent
:type agent: str
:return: The state space for the specified agent
:rtype: gym.Space
"""
return self.state_spaces[agent]
def observation_space(self, agent: str) -> gym.Space:
"""Observation space
:param agent: Name of the agent
:type agent: str
:return: The observation space for the specified agent
:rtype: gym.Space
"""
return self.observation_spaces[agent]
def action_space(self, agent: str) -> gym.Space:
"""Action space
:param agent: Name of the agent
:type agent: str
:return: The action space for the specified agent
:rtype: gym.Space
"""
return self.action_spaces[agent]
def shared_state_space(self, agent: str) -> gym.Space:
"""Shared state space
:param agent: Name of the agent
:type agent: str
:return: The shared state space for the specified agent
:rtype: gym.Space
"""
return self.shared_state_spaces[agent]
def shared_observation_space(self, agent: str) -> gym.Space:
"""Shared observation space
:param agent: Name of the agent
:type agent: str
:return: The shared observation space for the specified agent
:rtype: gym.Space
"""
return self.shared_observation_spaces[agent]
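# A minimal sketch of a custom single-agent wrapper built on the base class above
# (hypothetical; the concrete wrappers in this package follow the same pattern of
# overriding ``reset`` and ``step`` and converting data to torch tensors):
#
#     class CustomWrapper(Wrapper):
#         def reset(self):
#             observation, info = self._env.reset()
#             observation = torch.as_tensor(observation, device=self.device).view(self.num_envs, -1)
#             return observation, info
#
#         def step(self, actions):
#             observation, reward, terminated, truncated, info = self._env.step(actions)
#             # ...convert each item to a torch tensor of shape (num_envs, -1)...
#             return observation, reward, terminated, truncated, info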
| 8,836 | Python | 28.85473 | 117 | 0.601517 |
Toni-SM/skrl/skrl/envs/wrappers/torch/__init__.py | from typing import Any, Union
import gym
import gymnasium
from skrl import logger
from skrl.envs.wrappers.torch.base import MultiAgentEnvWrapper, Wrapper
from skrl.envs.wrappers.torch.bidexhands_envs import BiDexHandsWrapper
from skrl.envs.wrappers.torch.deepmind_envs import DeepMindWrapper
from skrl.envs.wrappers.torch.gym_envs import GymWrapper
from skrl.envs.wrappers.torch.gymnasium_envs import GymnasiumWrapper
from skrl.envs.wrappers.torch.isaac_orbit_envs import IsaacOrbitWrapper
from skrl.envs.wrappers.torch.isaacgym_envs import IsaacGymPreview2Wrapper, IsaacGymPreview3Wrapper
from skrl.envs.wrappers.torch.omniverse_isaacgym_envs import OmniverseIsaacGymWrapper
from skrl.envs.wrappers.torch.pettingzoo_envs import PettingZooWrapper
from skrl.envs.wrappers.torch.robosuite_envs import RobosuiteWrapper
__all__ = ["wrap_env", "Wrapper", "MultiAgentEnvWrapper"]
def wrap_env(env: Any, wrapper: str = "auto", verbose: bool = True) -> Union[Wrapper, MultiAgentEnvWrapper]:
"""Wrap an environment to use a common interface
Example::
>>> from skrl.envs.wrappers.torch import wrap_env
>>>
>>> # assuming that there is an environment called "env"
>>> env = wrap_env(env)
:param env: The environment to be wrapped
:type env: gym.Env, gymnasium.Env, dm_env.Environment or VecTask
:param wrapper: The type of wrapper to use (default: ``"auto"``).
If ``"auto"``, the wrapper will be automatically selected based on the environment class.
The supported wrappers are described in the following table:
+--------------------+-------------------------+
|Environment |Wrapper tag |
+====================+=========================+
|OpenAI Gym |``"gym"`` |
+--------------------+-------------------------+
|Gymnasium |``"gymnasium"`` |
+--------------------+-------------------------+
|Petting Zoo |``"pettingzoo"`` |
+--------------------+-------------------------+
|DeepMind |``"dm"`` |
+--------------------+-------------------------+
|Robosuite |``"robosuite"`` |
+--------------------+-------------------------+
|Bi-DexHands |``"bidexhands"`` |
+--------------------+-------------------------+
|Isaac Gym preview 2 |``"isaacgym-preview2"`` |
+--------------------+-------------------------+
|Isaac Gym preview 3 |``"isaacgym-preview3"`` |
+--------------------+-------------------------+
|Isaac Gym preview 4 |``"isaacgym-preview4"`` |
+--------------------+-------------------------+
|Omniverse Isaac Gym |``"omniverse-isaacgym"`` |
+--------------------+-------------------------+
|Isaac Sim (orbit) |``"isaac-orbit"`` |
+--------------------+-------------------------+
:type wrapper: str, optional
:param verbose: Whether to print the wrapper type (default: ``True``)
:type verbose: bool, optional
:raises ValueError: Unknown wrapper type
:return: Wrapped environment
:rtype: Wrapper or MultiAgentEnvWrapper
"""
if verbose:
logger.info("Environment class: {}".format(", ".join([str(base).replace("<class '", "").replace("'>", "") \
for base in env.__class__.__bases__])))
if wrapper == "auto":
base_classes = [str(base) for base in env.__class__.__bases__]
if "<class 'omni.isaac.gym.vec_env.vec_env_base.VecEnvBase'>" in base_classes or \
"<class 'omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT'>" in base_classes:
if verbose:
logger.info("Environment wrapper: Omniverse Isaac Gym")
return OmniverseIsaacGymWrapper(env)
elif isinstance(env, gym.core.Env) or isinstance(env, gym.core.Wrapper):
# isaac-orbit
if hasattr(env, "sim") and hasattr(env, "env_ns"):
if verbose:
logger.info("Environment wrapper: Isaac Orbit")
return IsaacOrbitWrapper(env)
# gym
if verbose:
logger.info("Environment wrapper: Gym")
return GymWrapper(env)
elif isinstance(env, gymnasium.core.Env) or isinstance(env, gymnasium.core.Wrapper):
if verbose:
logger.info("Environment wrapper: Gymnasium")
return GymnasiumWrapper(env)
elif "<class 'pettingzoo.utils.env" in base_classes[0] or "<class 'pettingzoo.utils.wrappers" in base_classes[0]:
if verbose:
logger.info("Environment wrapper: Petting Zoo")
return PettingZooWrapper(env)
elif "<class 'dm_env._environment.Environment'>" in base_classes:
if verbose:
logger.info("Environment wrapper: DeepMind")
return DeepMindWrapper(env)
elif "<class 'robosuite.environments." in base_classes[0]:
if verbose:
logger.info("Environment wrapper: Robosuite")
return RobosuiteWrapper(env)
elif "<class 'rlgpu.tasks.base.vec_task.VecTask'>" in base_classes:
if verbose:
logger.info("Environment wrapper: Isaac Gym (preview 2)")
return IsaacGymPreview2Wrapper(env)
if verbose:
logger.info("Environment wrapper: Isaac Gym (preview 3/4)")
return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3
elif wrapper == "gym":
if verbose:
logger.info("Environment wrapper: Gym")
return GymWrapper(env)
elif wrapper == "gymnasium":
if verbose:
logger.info("Environment wrapper: gymnasium")
return GymnasiumWrapper(env)
elif wrapper == "pettingzoo":
if verbose:
logger.info("Environment wrapper: Petting Zoo")
return PettingZooWrapper(env)
elif wrapper == "dm":
if verbose:
logger.info("Environment wrapper: DeepMind")
return DeepMindWrapper(env)
elif wrapper == "robosuite":
if verbose:
logger.info("Environment wrapper: Robosuite")
return RobosuiteWrapper(env)
elif wrapper == "bidexhands":
if verbose:
logger.info("Environment wrapper: Bi-DexHands")
return BiDexHandsWrapper(env)
elif wrapper == "isaacgym-preview2":
if verbose:
logger.info("Environment wrapper: Isaac Gym (preview 2)")
return IsaacGymPreview2Wrapper(env)
elif wrapper == "isaacgym-preview3":
if verbose:
logger.info("Environment wrapper: Isaac Gym (preview 3)")
return IsaacGymPreview3Wrapper(env)
elif wrapper == "isaacgym-preview4":
if verbose:
logger.info("Environment wrapper: Isaac Gym (preview 4)")
return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3
elif wrapper == "omniverse-isaacgym":
if verbose:
logger.info("Environment wrapper: Omniverse Isaac Gym")
return OmniverseIsaacGymWrapper(env)
elif wrapper == "isaac-orbit":
if verbose:
logger.info("Environment wrapper: Isaac Orbit")
return IsaacOrbitWrapper(env)
else:
raise ValueError(f"Unknown wrapper type: {wrapper}")
| 7,723 | Python | 46.975155 | 121 | 0.537356 |
Toni-SM/skrl/skrl/envs/wrappers/torch/isaacgym_envs.py | from typing import Any, Tuple
import torch
from skrl.envs.wrappers.torch.base import Wrapper
class IsaacGymPreview2Wrapper(Wrapper):
def __init__(self, env: Any) -> None:
"""Isaac Gym environment (preview 2) wrapper
:param env: The environment to wrap
:type env: Any supported Isaac Gym environment (preview 2) environment
"""
super().__init__(env)
self._reset_once = True
self._obs_buf = None
def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: torch.Tensor
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of torch.Tensor and any other info
"""
self._obs_buf, reward, terminated, info = self._env.step(actions)
truncated = info["time_outs"] if "time_outs" in info else torch.zeros_like(terminated)
return self._obs_buf, reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info
def reset(self) -> Tuple[torch.Tensor, Any]:
"""Reset the environment
:return: Observation, info
:rtype: torch.Tensor and any other info
"""
if self._reset_once:
self._obs_buf = self._env.reset()
self._reset_once = False
return self._obs_buf, {}
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
pass
def close(self) -> None:
"""Close the environment
"""
pass
class IsaacGymPreview3Wrapper(Wrapper):
def __init__(self, env: Any) -> None:
"""Isaac Gym environment (preview 3) wrapper
:param env: The environment to wrap
:type env: Any supported Isaac Gym environment (preview 3) environment
"""
super().__init__(env)
self._reset_once = True
self._obs_dict = None
def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: torch.Tensor
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of torch.Tensor and any other info
"""
self._obs_dict, reward, terminated, info = self._env.step(actions)
truncated = info["time_outs"] if "time_outs" in info else torch.zeros_like(terminated)
return self._obs_dict["obs"], reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info
def reset(self) -> Tuple[torch.Tensor, Any]:
"""Reset the environment
:return: Observation, info
:rtype: torch.Tensor and any other info
"""
if self._reset_once:
self._obs_dict = self._env.reset()
self._reset_once = False
return self._obs_dict["obs"], {}
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
pass
def close(self) -> None:
"""Close the environment
"""
pass
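# Usage sketch (illustrative, not part of the original module): pairing this wrapper
# with the preview-4 loader, whose environments share the preview-3 interface.
# "Cartpole" is an assumed task name.
#
#     from skrl.envs.loaders.torch import load_isaacgym_env_preview4
#
#     env = IsaacGymPreview3Wrapper(load_isaacgym_env_preview4(task_name="Cartpole"))
#     observation, info = env.reset()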
| 3,182 | Python | 30.83 | 112 | 0.595223 |
Toni-SM/skrl/skrl/envs/wrappers/torch/gymnasium_envs.py | from typing import Any, Optional, Tuple
import gymnasium
import numpy as np
import torch
from skrl import logger
from skrl.envs.wrappers.torch.base import Wrapper
class GymnasiumWrapper(Wrapper):
def __init__(self, env: Any) -> None:
"""Gymnasium environment wrapper
:param env: The environment to wrap
:type env: Any supported Gymnasium environment
"""
super().__init__(env)
self._vectorized = False
try:
if isinstance(env, gymnasium.vector.SyncVectorEnv) or isinstance(env, gymnasium.vector.AsyncVectorEnv):
self._vectorized = True
self._reset_once = True
self._obs_tensor = None
self._info_dict = None
except Exception as e:
logger.warning(f"Failed to check for a vectorized environment: {e}")
@property
def state_space(self) -> gymnasium.Space:
"""State space
An alias for the ``observation_space`` property
"""
if self._vectorized:
return self._env.single_observation_space
return self._env.observation_space
@property
def observation_space(self) -> gymnasium.Space:
"""Observation space
"""
if self._vectorized:
return self._env.single_observation_space
return self._env.observation_space
@property
def action_space(self) -> gymnasium.Space:
"""Action space
"""
if self._vectorized:
return self._env.single_action_space
return self._env.action_space
def _observation_to_tensor(self, observation: Any, space: Optional[gymnasium.Space] = None) -> torch.Tensor:
"""Convert the Gymnasium observation to a flat tensor
:param observation: The Gymnasium observation to convert to a tensor
:type observation: Any supported Gymnasium observation space
:raises: ValueError if the observation space type is not supported
:return: The observation as a flat tensor
:rtype: torch.Tensor
"""
observation_space = self._env.observation_space if self._vectorized else self.observation_space
space = space if space is not None else observation_space
if self._vectorized and isinstance(space, gymnasium.spaces.MultiDiscrete):
return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1)
elif isinstance(observation, int):
return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1)
elif isinstance(observation, np.ndarray):
return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
elif isinstance(space, gymnasium.spaces.Discrete):
return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
elif isinstance(space, gymnasium.spaces.Box):
return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
elif isinstance(space, gymnasium.spaces.Dict):
tmp = torch.cat([self._observation_to_tensor(observation[k], space[k]) \
for k in sorted(space.keys())], dim=-1).view(self.num_envs, -1)
return tmp
else:
raise ValueError(f"Observation space type {type(space)} not supported. Please report this issue")
def _tensor_to_action(self, actions: torch.Tensor) -> Any:
"""Convert the action to the Gymnasium expected format
:param actions: The actions to perform
:type actions: torch.Tensor
:raise ValueError: If the action space type is not supported
:return: The action in the Gymnasium format
:rtype: Any supported Gymnasium action space
"""
space = self._env.action_space if self._vectorized else self.action_space
if self._vectorized:
if isinstance(space, gymnasium.spaces.MultiDiscrete):
return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape)
elif isinstance(space, gymnasium.spaces.Tuple):
if isinstance(space[0], gymnasium.spaces.Box):
return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(space.shape)
elif isinstance(space[0], gymnasium.spaces.Discrete):
return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(-1)
if isinstance(space, gymnasium.spaces.Discrete):
return actions.item()
elif isinstance(space, gymnasium.spaces.MultiDiscrete):
return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape)
elif isinstance(space, gymnasium.spaces.Box):
return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape)
raise ValueError(f"Action space type {type(space)} not supported. Please report this issue")
def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: torch.Tensor
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of torch.Tensor and any other info
"""
observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions))
# convert response to torch
observation = self._observation_to_tensor(observation)
reward = torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
terminated = torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1)
truncated = torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1)
# save observation and info for vectorized envs
if self._vectorized:
self._obs_tensor = observation
self._info_dict = info
return observation, reward, terminated, truncated, info
def reset(self) -> Tuple[torch.Tensor, Any]:
"""Reset the environment
:return: Observation, info
:rtype: torch.Tensor and any other info
"""
# handle vectorized envs
if self._vectorized:
if not self._reset_once:
return self._obs_tensor, self._info_dict
self._reset_once = False
# reset the env/envs
observation, info = self._env.reset()
return self._observation_to_tensor(observation), info
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
self._env.render(*args, **kwargs)
def close(self) -> None:
"""Close the environment
"""
self._env.close()
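# --- Usage sketch (editor's addition, hedged) ---
# Even for a single (non-vectorized) environment, the wrapper returns 2D tensors
# of shape (num_envs, -1); num_envs is assumed to be 1 here, as exposed by the
# base Wrapper class.
#
# env = GymnasiumWrapper(gymnasium.make("CartPole-v1"))
# obs, info = env.reset()       # torch.Tensor of shape (1, 4)
# action = torch.tensor([[1]])  # Discrete action; .item() is applied internally
# obs, reward, terminated, truncated, info = env.step(action)
# env.close()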
| 6,882 | Python | 40.463855 | 115 | 0.639494 |
Toni-SM/skrl/skrl/envs/wrappers/torch/pettingzoo_envs.py | from typing import Any, Mapping, Sequence, Tuple
import collections
import gymnasium
import numpy as np
import torch
from skrl.envs.wrappers.torch.base import MultiAgentEnvWrapper
class PettingZooWrapper(MultiAgentEnvWrapper):
def __init__(self, env: Any) -> None:
"""PettingZoo (parallel) environment wrapper
:param env: The environment to wrap
:type env: Any supported PettingZoo (parallel) environment
"""
super().__init__(env)
self.possible_agents = self._env.possible_agents
self._shared_observation_space = self._compute_shared_observation_space(self._env.observation_spaces)
def _compute_shared_observation_space(self, observation_spaces):
space = next(iter(observation_spaces.values()))
shape = (len(self.possible_agents),) + space.shape
return gymnasium.spaces.Box(low=np.stack([space.low for _ in self.possible_agents], axis=0),
high=np.stack([space.high for _ in self.possible_agents], axis=0),
dtype=space.dtype,
shape=shape)
@property
def num_agents(self) -> int:
"""Number of agents
"""
return len(self.possible_agents)
@property
def agents(self) -> Sequence[str]:
"""Names of all current agents
These may be changed as an environment progresses (i.e. agents can be added or removed)
"""
return self._env.agents
@property
def observation_spaces(self) -> Mapping[str, gymnasium.Space]:
"""Observation spaces
"""
return {uid: self._env.observation_space(uid) for uid in self.possible_agents}
@property
def action_spaces(self) -> Mapping[str, gymnasium.Space]:
"""Action spaces
"""
return {uid: self._env.action_space(uid) for uid in self.possible_agents}
@property
def shared_observation_spaces(self) -> Mapping[str, gymnasium.Space]:
"""Shared observation spaces
"""
return {uid: self._shared_observation_space for uid in self.possible_agents}
def _observation_to_tensor(self, observation: Any, space: gymnasium.Space) -> torch.Tensor:
"""Convert the Gymnasium observation to a flat tensor
:param observation: The Gymnasium observation to convert to a tensor
:type observation: Any supported Gymnasium observation space
:raises: ValueError if the observation space type is not supported
:return: The observation as a flat tensor
:rtype: torch.Tensor
"""
if isinstance(observation, int):
return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1)
elif isinstance(observation, np.ndarray):
return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
elif isinstance(space, gymnasium.spaces.Discrete):
return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
elif isinstance(space, gymnasium.spaces.Box):
return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1)
elif isinstance(space, gymnasium.spaces.Dict):
tmp = torch.cat([self._observation_to_tensor(observation[k], space[k]) \
for k in sorted(space.keys())], dim=-1).view(self.num_envs, -1)
return tmp
else:
raise ValueError(f"Observation space type {type(space)} not supported. Please report this issue")
def _tensor_to_action(self, actions: torch.Tensor, space: gymnasium.Space) -> Any:
"""Convert the action to the Gymnasium expected format
:param actions: The actions to perform
:type actions: torch.Tensor
:raise ValueError: If the action space type is not supported
:return: The action in the Gymnasium format
:rtype: Any supported Gymnasium action space
"""
if isinstance(space, gymnasium.spaces.Discrete):
return actions.item()
elif isinstance(space, gymnasium.spaces.Box):
return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape)
raise ValueError(f"Action space type {type(space)} not supported. Please report this issue")
def step(self, actions: Mapping[str, torch.Tensor]) -> \
Tuple[Mapping[str, torch.Tensor], Mapping[str, torch.Tensor],
Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, Any]]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: dictionary of torch.Tensor
:return: Observation, reward, terminated, truncated, info
        :rtype: tuple of dictionaries of torch.Tensor and any other info
"""
actions = {uid: self._tensor_to_action(action, self._env.action_space(uid)) for uid, action in actions.items()}
observations, rewards, terminated, truncated, infos = self._env.step(actions)
# build shared observation
shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0)
shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space)
infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents}
# convert response to torch
observations = {uid: self._observation_to_tensor(value, self._env.observation_space(uid)) for uid, value in observations.items()}
rewards = {uid: torch.tensor(value, device=self.device, dtype=torch.float32).view(self.num_envs, -1) for uid, value in rewards.items()}
terminated = {uid: torch.tensor(value, device=self.device, dtype=torch.bool).view(self.num_envs, -1) for uid, value in terminated.items()}
truncated = {uid: torch.tensor(value, device=self.device, dtype=torch.bool).view(self.num_envs, -1) for uid, value in truncated.items()}
return observations, rewards, terminated, truncated, infos
def reset(self) -> Tuple[Mapping[str, torch.Tensor], Mapping[str, Any]]:
"""Reset the environment
:return: Observation, info
:rtype: tuple of dictionaries of torch.Tensor and any other info
"""
outputs = self._env.reset()
if isinstance(outputs, collections.abc.Mapping):
observations = outputs
infos = {uid: {} for uid in self.possible_agents}
else:
observations, infos = outputs
# build shared observation
shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0)
shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space)
infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents}
# convert response to torch
observations = {uid: self._observation_to_tensor(observation, self._env.observation_space(uid)) for uid, observation in observations.items()}
return observations, infos
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
self._env.render(*args, **kwargs)
def close(self) -> None:
"""Close the environment
"""
self._env.close()
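# --- Usage sketch (editor's addition, hedged) ---
# Per-agent dictionaries in, per-agent dictionaries out; the stacked joint
# observation travels in infos["shared_states"]. The MPE import below is an
# assumption about an external PettingZoo package.
#
# from pettingzoo.mpe import simple_spread_v3
# env = PettingZooWrapper(simple_spread_v3.parallel_env())
# observations, infos = env.reset()
# actions = {uid: torch.tensor(env.action_spaces[uid].sample()) for uid in env.agents}
# observations, rewards, terminated, truncated, infos = env.step(actions)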
| 7,391 | Python | 44.07317 | 149 | 0.652686 |
Toni-SM/skrl/skrl/envs/wrappers/torch/omniverse_isaacgym_envs.py | from typing import Any, Optional, Tuple
import torch
from skrl.envs.wrappers.torch.base import Wrapper
class OmniverseIsaacGymWrapper(Wrapper):
def __init__(self, env: Any) -> None:
"""Omniverse Isaac Gym environment wrapper
:param env: The environment to wrap
:type env: Any supported Omniverse Isaac Gym environment
"""
super().__init__(env)
self._reset_once = True
self._obs_dict = None
def run(self, trainer: Optional["omni.isaac.gym.vec_env.vec_env_mt.TrainerMT"] = None) -> None:
"""Run the simulation in the main thread
This method is valid only for the Omniverse Isaac Gym multi-threaded environments
:param trainer: Trainer which should implement a ``run`` method that initiates the RL loop on a new thread
:type trainer: omni.isaac.gym.vec_env.vec_env_mt.TrainerMT, optional
"""
self._env.run(trainer)
def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: torch.Tensor
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of torch.Tensor and any other info
"""
self._obs_dict, reward, terminated, info = self._env.step(actions)
truncated = info["time_outs"] if "time_outs" in info else torch.zeros_like(terminated)
return self._obs_dict["obs"], reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info
def reset(self) -> Tuple[torch.Tensor, Any]:
"""Reset the environment
:return: Observation, info
:rtype: torch.Tensor and any other info
"""
if self._reset_once:
self._obs_dict = self._env.reset()
self._reset_once = False
return self._obs_dict["obs"], {}
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
pass
def close(self) -> None:
"""Close the environment
"""
self._env.close()
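# --- Usage sketch (editor's addition, hedged) ---
# In the multi-threaded Omniverse setup, the trainer's RL loop runs on a worker
# thread while the simulation keeps the main thread. The trainer below is an
# assumption: any object implementing a ``run`` method that starts the RL loop
# on a new thread can be passed.
#
# env = OmniverseIsaacGymWrapper(env)  # env created via the OIGE utilities
# env.run(trainer)                     # blocks the main thread with the simulation loop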
| 2,133 | Python | 32.873015 | 114 | 0.619316 |
Toni-SM/skrl/skrl/envs/wrappers/jax/gym_envs.py | from typing import Any, Optional, Tuple, Union
import gym
from packaging import version
import jax
import numpy as np
from skrl import logger
from skrl.envs.wrappers.jax.base import Wrapper
class GymWrapper(Wrapper):
def __init__(self, env: Any) -> None:
"""OpenAI Gym environment wrapper
:param env: The environment to wrap
:type env: Any supported OpenAI Gym environment
"""
super().__init__(env)
self._vectorized = False
try:
if isinstance(env, gym.vector.SyncVectorEnv) or isinstance(env, gym.vector.AsyncVectorEnv):
self._vectorized = True
self._reset_once = True
self._obs_tensor = None
self._info_dict = None
except Exception as e:
logger.warning(f"Failed to check for a vectorized environment: {e}")
self._deprecated_api = version.parse(gym.__version__) < version.parse("0.25.0")
if self._deprecated_api:
logger.warning(f"Using a deprecated version of OpenAI Gym's API: {gym.__version__}")
@property
def state_space(self) -> gym.Space:
"""State space
An alias for the ``observation_space`` property
"""
if self._vectorized:
return self._env.single_observation_space
return self._env.observation_space
@property
def observation_space(self) -> gym.Space:
"""Observation space
"""
if self._vectorized:
return self._env.single_observation_space
return self._env.observation_space
@property
def action_space(self) -> gym.Space:
"""Action space
"""
if self._vectorized:
return self._env.single_action_space
return self._env.action_space
def _observation_to_tensor(self, observation: Any, space: Optional[gym.Space] = None) -> np.ndarray:
"""Convert the OpenAI Gym observation to a flat tensor
:param observation: The OpenAI Gym observation to convert to a tensor
:type observation: Any supported OpenAI Gym observation space
:raises: ValueError if the observation space type is not supported
:return: The observation as a flat tensor
:rtype: np.ndarray
"""
observation_space = self._env.observation_space if self._vectorized else self.observation_space
space = space if space is not None else observation_space
if self._vectorized and isinstance(space, gym.spaces.MultiDiscrete):
return observation.reshape(self.num_envs, -1).astype(np.int32)
elif isinstance(observation, int):
return np.array(observation, dtype=np.int32).reshape(self.num_envs, -1)
elif isinstance(observation, np.ndarray):
return observation.reshape(self.num_envs, -1).astype(np.float32)
elif isinstance(space, gym.spaces.Discrete):
return np.array(observation, dtype=np.float32).reshape(self.num_envs, -1)
elif isinstance(space, gym.spaces.Box):
return observation.reshape(self.num_envs, -1).astype(np.float32)
elif isinstance(space, gym.spaces.Dict):
tmp = np.concatenate([self._observation_to_tensor(observation[k], space[k]) \
for k in sorted(space.keys())], axis=-1).reshape(self.num_envs, -1)
return tmp
else:
raise ValueError(f"Observation space type {type(space)} not supported. Please report this issue")
def _tensor_to_action(self, actions: np.ndarray) -> Any:
"""Convert the action to the OpenAI Gym expected format
:param actions: The actions to perform
:type actions: np.ndarray
:raise ValueError: If the action space type is not supported
:return: The action in the OpenAI Gym format
:rtype: Any supported OpenAI Gym action space
"""
space = self._env.action_space if self._vectorized else self.action_space
if self._vectorized:
if isinstance(space, gym.spaces.MultiDiscrete):
return actions.astype(space.dtype).reshape(space.shape)
elif isinstance(space, gym.spaces.Tuple):
if isinstance(space[0], gym.spaces.Box):
return actions.astype(space[0].dtype).reshape(space.shape)
elif isinstance(space[0], gym.spaces.Discrete):
return actions.astype(space[0].dtype).reshape(-1)
elif isinstance(space, gym.spaces.Discrete):
return actions.item()
elif isinstance(space, gym.spaces.MultiDiscrete):
return actions.astype(space.dtype).reshape(space.shape)
elif isinstance(space, gym.spaces.Box):
return actions.astype(space.dtype).reshape(space.shape)
raise ValueError(f"Action space type {type(space)} not supported. Please report this issue")
def step(self, actions: Union[np.ndarray, jax.Array]) -> \
Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array],
Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: np.ndarray or jax.Array
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of np.ndarray or jax.Array and any other info
"""
if self._jax:
actions = jax.device_get(actions)
if self._deprecated_api:
observation, reward, terminated, info = self._env.step(self._tensor_to_action(actions))
# truncated: https://gymnasium.farama.org/tutorials/handling_time_limits
if type(info) is list:
truncated = np.array([d.get("TimeLimit.truncated", False) for d in info], dtype=terminated.dtype)
terminated *= np.logical_not(truncated)
else:
truncated = info.get("TimeLimit.truncated", False)
if truncated:
terminated = False
else:
observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions))
# convert response to numpy or jax
observation = self._observation_to_tensor(observation)
reward = np.array(reward, dtype=np.float32).reshape(self.num_envs, -1)
terminated = np.array(terminated, dtype=np.int8).reshape(self.num_envs, -1)
truncated = np.array(truncated, dtype=np.int8).reshape(self.num_envs, -1)
# save observation and info for vectorized envs
if self._vectorized:
self._obs_tensor = observation
self._info_dict = info
return observation, reward, terminated, truncated, info
def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]:
"""Reset the environment
:return: Observation, info
:rtype: np.ndarray or jax.Array and any other info
"""
# handle vectorized envs
if self._vectorized:
if not self._reset_once:
return self._obs_tensor, self._info_dict
self._reset_once = False
# reset the env/envs
if self._deprecated_api:
observation = self._env.reset()
info = {}
else:
observation, info = self._env.reset()
return self._observation_to_tensor(observation), info
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
self._env.render(*args, **kwargs)
def close(self) -> None:
"""Close the environment
"""
self._env.close()
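# --- Usage sketch (editor's addition, hedged) ---
# Unlike the torch wrapper, data is kept as np.ndarray (promoted to jax.Array by
# downstream skrl components when the JAX backend is active).
#
# env = GymWrapper(gym.make("CartPole-v1"))
# obs, info = env.reset()  # np.ndarray of shape (1, 4)
# obs, reward, terminated, truncated, info = env.step(np.array([[1]]))
# env.close()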
| 7,637 | Python | 39.2 | 113 | 0.618437 |
Toni-SM/skrl/skrl/envs/wrappers/jax/bidexhands_envs.py | from typing import Any, Mapping, Sequence, Tuple, Union
import gym
import jax
import jax.dlpack
import numpy as np
try:
    import torch
    import torch.utils.dlpack
except ImportError:
    pass  # PyTorch is an optional import here; the DLPack helpers below will raise if it is missing
from skrl.envs.wrappers.jax.base import MultiAgentEnvWrapper
def _jax2torch(array, device, from_jax=True):
return torch.utils.dlpack.from_dlpack(jax.dlpack.to_dlpack(array)) if from_jax else torch.tensor(array, device=device)
def _torch2jax(tensor, to_jax=True):
return jax.dlpack.from_dlpack(torch.utils.dlpack.to_dlpack(tensor.contiguous())) if to_jax else tensor.cpu().numpy()
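# Editor's note (hedged): the helpers above move data between PyTorch and JAX via
# DLPack, sharing the underlying buffer (zero-copy) instead of round-tripping
# through NumPy. A minimal round trip, assuming both frameworks see the same GPU:
#
# t = torch.ones((2, 3), device="cuda:0")
# a = _torch2jax(t, to_jax=True)               # jax.Array backed by the same memory
# t2 = _jax2torch(a, t.device, from_jax=True)  # back to torch without a copy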
class BiDexHandsWrapper(MultiAgentEnvWrapper):
def __init__(self, env: Any) -> None:
"""Bi-DexHands wrapper
:param env: The environment to wrap
:type env: Any supported Bi-DexHands environment
"""
super().__init__(env)
self._reset_once = True
self._obs_buf = None
self._shared_obs_buf = None
self.possible_agents = [f"agent_{i}" for i in range(self.num_agents)]
@property
def agents(self) -> Sequence[str]:
"""Names of all current agents
These may be changed as an environment progresses (i.e. agents can be added or removed)
"""
return self.possible_agents
@property
def observation_spaces(self) -> Mapping[str, gym.Space]:
"""Observation spaces
"""
return {uid: space for uid, space in zip(self.possible_agents, self._env.observation_space)}
@property
def action_spaces(self) -> Mapping[str, gym.Space]:
"""Action spaces
"""
return {uid: space for uid, space in zip(self.possible_agents, self._env.action_space)}
@property
def shared_observation_spaces(self) -> Mapping[str, gym.Space]:
"""Shared observation spaces
"""
return {uid: space for uid, space in zip(self.possible_agents, self._env.share_observation_space)}
def step(self, actions: Mapping[str, Union[np.ndarray, jax.Array]]) -> \
Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]],
Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]],
Mapping[str, Any]]:
"""Perform a step in the environment
:param actions: The actions to perform
        :type actions: dict of np.ndarray or jax.Array
        :return: Observation, reward, terminated, truncated, info
        :rtype: tuple of dict of np.ndarray or jax.Array and any other info
"""
actions = [_jax2torch(actions[uid], self.device, self._jax) for uid in self.possible_agents]
with torch.no_grad():
obs_buf, shared_obs_buf, reward_buf, terminated_buf, info, _ = self._env.step(actions)
obs_buf = _torch2jax(obs_buf, self._jax)
shared_obs_buf = _torch2jax(shared_obs_buf, self._jax)
reward_buf = _torch2jax(reward_buf, self._jax)
terminated_buf = _torch2jax(terminated_buf.to(dtype=torch.int8), self._jax)
self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)}
self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)}
reward = {uid: reward_buf[:,i].reshape(-1, 1) for i, uid in enumerate(self.possible_agents)}
terminated = {uid: terminated_buf[:,i].reshape(-1, 1) for i, uid in enumerate(self.possible_agents)}
truncated = terminated
info = {"shared_states": self._shared_obs_buf}
return self._obs_buf, reward, terminated, truncated, info
def reset(self) -> Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]:
"""Reset the environment
:return: Observation, info
        :rtype: tuple of dict of np.ndarray or jax.Array and any other info
"""
if self._reset_once:
obs_buf, shared_obs_buf, _ = self._env.reset()
obs_buf = _torch2jax(obs_buf, self._jax)
shared_obs_buf = _torch2jax(shared_obs_buf, self._jax)
self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)}
self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)}
self._reset_once = False
return self._obs_buf, {"shared_states": self._shared_obs_buf}
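# --- Usage sketch (editor's addition, hedged) ---
# Bi-DexHands environments are torch-native; this wrapper converts per-agent
# data to/from JAX. Note that ``truncated`` simply mirrors ``terminated`` because
# the underlying environment does not distinguish the two signals.
#
# env = BiDexHandsWrapper(env)  # env created via the bidexhands utilities
# observations, infos = env.reset()
# actions = {uid: jax.numpy.zeros((env.num_envs,) + env.action_spaces[uid].shape)
#            for uid in env.agents}
# observations, rewards, terminated, truncated, infos = env.step(actions)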
| 4,383 | Python | 37.45614 | 122 | 0.635866 |
Toni-SM/skrl/skrl/envs/wrappers/jax/isaacgym_envs.py | from typing import Any, Tuple, Union
import jax
import jax.dlpack as jax_dlpack
import numpy as np
try:
    import torch
    import torch.utils.dlpack as torch_dlpack
except ImportError:
    pass  # PyTorch is an optional import here; the DLPack helpers below will raise if it is missing
from skrl import logger
from skrl.envs.wrappers.jax.base import Wrapper
# ML frameworks conversion utilities
# jaxlib.xla_extension.XlaRuntimeError: INVALID_ARGUMENT: DLPack tensor is on GPU, but no GPU backend was provided.
_CPU = jax.devices()[0].device_kind.lower() == "cpu"
if _CPU:
logger.warning("IsaacGymEnvs runs on GPU, but there is no GPU backend for JAX. JAX operations will run on CPU.")
def _jax2torch(array, device, from_jax=True):
if from_jax:
return torch_dlpack.from_dlpack(jax_dlpack.to_dlpack(array)).to(device=device)
return torch.tensor(array, device=device)
def _torch2jax(tensor, to_jax=True):
if to_jax:
return jax_dlpack.from_dlpack(torch_dlpack.to_dlpack(tensor.contiguous().cpu() if _CPU else tensor.contiguous()))
return tensor.cpu().numpy()
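# Editor's note (hedged): when JAX exposes only a CPU backend (``_CPU`` above),
# GPU tensors are copied to host memory before the DLPack handoff, so the
# conversion is no longer zero-copy:
#
# t = torch.ones(4, device="cuda:0")
# a = _torch2jax(t)  # lands on the JAX CPU device when _CPU is True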
class IsaacGymPreview2Wrapper(Wrapper):
def __init__(self, env: Any) -> None:
"""Isaac Gym environment (preview 2) wrapper
:param env: The environment to wrap
        :type env: Any supported Isaac Gym (preview 2) environment
"""
super().__init__(env)
self._reset_once = True
self._obs_buf = None
def step(self, actions: Union[np.ndarray, jax.Array]) -> \
Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array],
Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: np.ndarray or jax.Array
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of np.ndarray or jax.Array and any other info
"""
actions = _jax2torch(actions, self._env.device, self._jax)
with torch.no_grad():
self._obs_buf, reward, terminated, info = self._env.step(actions)
terminated = terminated.to(dtype=torch.int8)
truncated = info["time_outs"].to(dtype=torch.int8) if "time_outs" in info else torch.zeros_like(terminated)
return _torch2jax(self._obs_buf, self._jax), \
_torch2jax(reward.view(-1, 1), self._jax), \
_torch2jax(terminated.view(-1, 1), self._jax), \
_torch2jax(truncated.view(-1, 1), self._jax), \
info
def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]:
"""Reset the environment
:return: Observation, info
:rtype: np.ndarray or jax.Array and any other info
"""
if self._reset_once:
self._obs_buf = self._env.reset()
self._reset_once = False
return _torch2jax(self._obs_buf, self._jax), {}
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
pass
def close(self) -> None:
"""Close the environment
"""
pass
class IsaacGymPreview3Wrapper(Wrapper):
def __init__(self, env: Any) -> None:
"""Isaac Gym environment (preview 3) wrapper
:param env: The environment to wrap
        :type env: Any supported Isaac Gym (preview 3) environment
"""
super().__init__(env)
self._reset_once = True
self._obs_dict = None
def step(self, actions: Union[np.ndarray, jax.Array]) ->\
Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array],
Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]:
"""Perform a step in the environment
:param actions: The actions to perform
:type actions: np.ndarray or jax.Array
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of np.ndarray or jax.Array and any other info
"""
actions = _jax2torch(actions, self._env.device, self._jax)
with torch.no_grad():
self._obs_dict, reward, terminated, info = self._env.step(actions)
terminated = terminated.to(dtype=torch.int8)
truncated = info["time_outs"].to(dtype=torch.int8) if "time_outs" in info else torch.zeros_like(terminated)
return _torch2jax(self._obs_dict["obs"], self._jax), \
_torch2jax(reward.view(-1, 1), self._jax), \
_torch2jax(terminated.view(-1, 1), self._jax), \
_torch2jax(truncated.view(-1, 1), self._jax), \
info
def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]:
"""Reset the environment
:return: Observation, info
:rtype: np.ndarray or jax.Array and any other info
"""
if self._reset_once:
self._obs_dict = self._env.reset()
self._reset_once = False
return _torch2jax(self._obs_dict["obs"], self._jax), {}
def render(self, *args, **kwargs) -> None:
"""Render the environment
"""
pass
def close(self) -> None:
"""Close the environment
"""
pass
| 5,142 | Python | 33.059602 | 121 | 0.608129 |
Toni-SM/skrl/skrl/agents/torch/base.py | from typing import Any, Mapping, Optional, Tuple, Union
import collections
import copy
import datetime
import os
import gym
import gymnasium
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from skrl import logger
from skrl.memories.torch import Memory
from skrl.models.torch import Model
class Agent:
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None) -> None:
"""Base class that represent a RL agent
:param models: Models used by the agent
:type models: dictionary of skrl.models.torch.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
self.models = models
self.observation_space = observation_space
self.action_space = action_space
self.cfg = cfg if cfg is not None else {}
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device)
if type(memory) is list:
self.memory = memory[0]
self.secondary_memories = memory[1:]
else:
self.memory = memory
self.secondary_memories = []
# convert the models to their respective device
for model in self.models.values():
if model is not None:
model.to(model.device)
self.tracking_data = collections.defaultdict(list)
self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000)
self._track_rewards = collections.deque(maxlen=100)
self._track_timesteps = collections.deque(maxlen=100)
self._cumulative_rewards = None
self._cumulative_timesteps = None
self.training = True
# checkpoint
self.checkpoint_modules = {}
self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000)
self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False)
self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": False, "modules": {}}
# experiment directory
directory = self.cfg.get("experiment", {}).get("directory", "")
experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "")
if not directory:
directory = os.path.join(os.getcwd(), "runs")
if not experiment_name:
experiment_name = "{}_{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), self.__class__.__name__)
self.experiment_dir = os.path.join(directory, experiment_name)
def __str__(self) -> str:
"""Generate a representation of the agent as string
:return: Representation of the agent as string
:rtype: str
"""
string = f"Agent: {repr(self)}"
for k, v in self.cfg.items():
if type(v) is dict:
string += f"\n |-- {k}"
for k1, v1 in v.items():
string += f"\n | |-- {k1}: {v1}"
else:
string += f"\n |-- {k}: {v}"
return string
def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any:
"""Empty preprocess method
This method is defined because PyTorch multiprocessing can't pickle lambdas
:param _input: Input to preprocess
:type _input: Any
:return: Preprocessed input
:rtype: Any
"""
return _input
def _get_internal_value(self, _module: Any) -> Any:
"""Get internal module/variable state/value
:param _module: Module or variable
:type _module: Any
:return: Module/variable state/value
:rtype: Any
"""
return _module.state_dict() if hasattr(_module, "state_dict") else _module
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
This method should be called before the agent is used.
        It will initialize the TensorBoard writer (and optionally Weights & Biases) and create the checkpoints directory
:param trainer_cfg: Trainer configuration
:type trainer_cfg: dict, optional
"""
# setup Weights & Biases
if self.cfg.get("experiment", {}).get("wandb", False):
# save experiment config
trainer_cfg = trainer_cfg if trainer_cfg is not None else {}
try:
models_cfg = {k: v.net._modules for (k, v) in self.models.items()}
except AttributeError:
models_cfg = {k: v._modules for (k, v) in self.models.items()}
            config = {**self.cfg, **trainer_cfg, **models_cfg}
# set default values
wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {}))
wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1])
wandb_kwargs.setdefault("sync_tensorboard", True)
wandb_kwargs.setdefault("config", {})
wandb_kwargs["config"].update(config)
# init Weights & Biases
import wandb
wandb.init(**wandb_kwargs)
# main entry to log data for consumption and visualization by TensorBoard
if self.write_interval > 0:
self.writer = SummaryWriter(log_dir=self.experiment_dir)
if self.checkpoint_interval > 0:
os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True)
def track_data(self, tag: str, value: float) -> None:
"""Track data to TensorBoard
Currently only scalar data are supported
:param tag: Data identifier (e.g. 'Loss / policy loss')
:type tag: str
:param value: Value to track
:type value: float
"""
self.tracking_data[tag].append(value)
def write_tracking_data(self, timestep: int, timesteps: int) -> None:
"""Write tracking data to TensorBoard
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
for k, v in self.tracking_data.items():
if k.endswith("(min)"):
self.writer.add_scalar(k, np.min(v), timestep)
elif k.endswith("(max)"):
self.writer.add_scalar(k, np.max(v), timestep)
else:
self.writer.add_scalar(k, np.mean(v), timestep)
# reset data containers for next iteration
self._track_rewards.clear()
self._track_timesteps.clear()
self.tracking_data.clear()
def write_checkpoint(self, timestep: int, timesteps: int) -> None:
"""Write checkpoint (modules) to disk
The checkpoints are saved in the directory 'checkpoints' in the experiment directory.
The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time.
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"))
# separated modules
if self.checkpoint_store_separately:
for name, module in self.checkpoint_modules.items():
torch.save(self._get_internal_value(module),
os.path.join(self.experiment_dir, "checkpoints", f"{name}_{tag}.pt"))
# whole agent
else:
modules = {}
for name, module in self.checkpoint_modules.items():
modules[name] = self._get_internal_value(module)
torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pt"))
# best modules
if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]:
# separated modules
if self.checkpoint_store_separately:
for name, module in self.checkpoint_modules.items():
torch.save(self.checkpoint_best_modules["modules"][name],
os.path.join(self.experiment_dir, "checkpoints", f"best_{name}.pt"))
# whole agent
else:
modules = {}
for name, module in self.checkpoint_modules.items():
modules[name] = self.checkpoint_best_modules["modules"][name]
torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", "best_agent.pt"))
self.checkpoint_best_modules["saved"] = True
def act(self,
states: torch.Tensor,
timestep: int,
timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:raises NotImplementedError: The method is not implemented by the inheriting classes
:return: Actions
:rtype: torch.Tensor
"""
raise NotImplementedError
def record_transition(self,
states: torch.Tensor,
actions: torch.Tensor,
rewards: torch.Tensor,
next_states: torch.Tensor,
terminated: torch.Tensor,
truncated: torch.Tensor,
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory (to be implemented by the inheriting classes)
Inheriting classes must call this method to record episode information (rewards, timesteps, etc.).
        In addition to recording environment transitions (such as states, rewards, etc.), agent information can be recorded.
:param states: Observations/states of the environment used to make the decision
:type states: torch.Tensor
:param actions: Actions taken by the agent
:type actions: torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: torch.Tensor
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
if self.write_interval > 0:
# compute the cumulative sum of the rewards and timesteps
if self._cumulative_rewards is None:
self._cumulative_rewards = torch.zeros_like(rewards, dtype=torch.float32)
self._cumulative_timesteps = torch.zeros_like(rewards, dtype=torch.int32)
self._cumulative_rewards.add_(rewards)
self._cumulative_timesteps.add_(1)
# check ended episodes
finished_episodes = (terminated + truncated).nonzero(as_tuple=False)
if finished_episodes.numel():
# storage cumulative rewards and timesteps
self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist())
self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist())
# reset the cumulative rewards and timesteps
self._cumulative_rewards[finished_episodes] = 0
self._cumulative_timesteps[finished_episodes] = 0
# record data
self.tracking_data["Reward / Instantaneous reward (max)"].append(torch.max(rewards).item())
self.tracking_data["Reward / Instantaneous reward (min)"].append(torch.min(rewards).item())
self.tracking_data["Reward / Instantaneous reward (mean)"].append(torch.mean(rewards).item())
if len(self._track_rewards):
track_rewards = np.array(self._track_rewards)
track_timesteps = np.array(self._track_timesteps)
self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards))
self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards))
self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards))
self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps))
self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps))
self.tracking_data["Episode / Total timesteps (mean)"].append(np.mean(track_timesteps))
def set_mode(self, mode: str) -> None:
"""Set the model mode (training or evaluation)
:param mode: Mode: 'train' for training or 'eval' for evaluation
:type mode: str
"""
for model in self.models.values():
if model is not None:
model.set_mode(mode)
def set_running_mode(self, mode: str) -> None:
"""Set the current running mode (training or evaluation)
This method sets the value of the ``training`` property (boolean).
This property can be used to know if the agent is running in training or evaluation mode.
:param mode: Mode: 'train' for training or 'eval' for evaluation
:type mode: str
"""
self.training = mode == "train"
def save(self, path: str) -> None:
"""Save the agent to the specified path
:param path: Path to save the model to
:type path: str
"""
modules = {}
for name, module in self.checkpoint_modules.items():
modules[name] = self._get_internal_value(module)
torch.save(modules, path)
def load(self, path: str) -> None:
"""Load the model from the specified path
The final storage device is determined by the constructor of the model
:param path: Path to load the model from
:type path: str
"""
modules = torch.load(path, map_location=self.device)
if type(modules) is dict:
for name, data in modules.items():
module = self.checkpoint_modules.get(name, None)
if module is not None:
if hasattr(module, "load_state_dict"):
module.load_state_dict(data)
if hasattr(module, "eval"):
module.eval()
else:
raise NotImplementedError
else:
logger.warning(f"Cannot load the {name} module. The agent doesn't have such an instance")
def migrate(self,
path: str,
name_map: Mapping[str, Mapping[str, str]] = {},
auto_mapping: bool = True,
verbose: bool = False) -> bool:
"""Migrate the specified extrernal checkpoint to the current agent
The final storage device is determined by the constructor of the agent.
Only files generated by the *rl_games* library are supported at the moment
For ambiguous models (where 2 or more parameters, for source or current model, have equal shape)
it is necessary to define the ``name_map``, at least for those parameters, to perform the migration successfully
:param path: Path to the external checkpoint to migrate from
:type path: str
:param name_map: Name map to use for the migration (default: ``{}``).
Keys are the current parameter names and values are the external parameter names
:type name_map: Mapping[str, Mapping[str, str]], optional
:param auto_mapping: Automatically map the external state dict to the current state dict (default: ``True``)
:type auto_mapping: bool, optional
:param verbose: Show model names and migration (default: ``False``)
:type verbose: bool, optional
:raises ValueError: If the correct file type cannot be identified from the ``path`` parameter
:return: True if the migration was successful, False otherwise.
Migration is successful if all parameters of the current model are found in the external model
:rtype: bool
Example::
# migrate a rl_games checkpoint with ambiguous state_dict
>>> agent.migrate(path="./runs/Cartpole/nn/Cartpole.pth", verbose=False)
[skrl:WARNING] Ambiguous match for net.0.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias]
[skrl:WARNING] Ambiguous match for net.2.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias]
[skrl:WARNING] Ambiguous match for net.4.weight <- [a2c_network.value.weight, a2c_network.mu.weight]
[skrl:WARNING] Ambiguous match for net.4.bias <- [a2c_network.value.bias, a2c_network.mu.bias]
[skrl:WARNING] Multiple use of a2c_network.actor_mlp.0.bias -> [net.0.bias, net.2.bias]
[skrl:WARNING] Multiple use of a2c_network.actor_mlp.2.bias -> [net.0.bias, net.2.bias]
[skrl:WARNING] Ambiguous match for net.0.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias]
[skrl:WARNING] Ambiguous match for net.2.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias]
[skrl:WARNING] Ambiguous match for net.4.weight <- [a2c_network.value.weight, a2c_network.mu.weight]
[skrl:WARNING] Ambiguous match for net.4.bias <- [a2c_network.value.bias, a2c_network.mu.bias]
[skrl:WARNING] Multiple use of a2c_network.actor_mlp.0.bias -> [net.0.bias, net.2.bias]
[skrl:WARNING] Multiple use of a2c_network.actor_mlp.2.bias -> [net.0.bias, net.2.bias]
False
>>> name_map = {"policy": {"net.0.bias": "a2c_network.actor_mlp.0.bias",
... "net.2.bias": "a2c_network.actor_mlp.2.bias",
... "net.4.weight": "a2c_network.mu.weight",
... "net.4.bias": "a2c_network.mu.bias"},
... "value": {"net.0.bias": "a2c_network.actor_mlp.0.bias",
... "net.2.bias": "a2c_network.actor_mlp.2.bias",
... "net.4.weight": "a2c_network.value.weight",
... "net.4.bias": "a2c_network.value.bias"}}
>>> model.migrate(path="./runs/Cartpole/nn/Cartpole.pth", name_map=name_map, verbose=True)
[skrl:INFO] Modules
[skrl:INFO] |-- current
[skrl:INFO] | |-- policy (Policy)
[skrl:INFO] | | |-- log_std_parameter : [1]
[skrl:INFO] | | |-- net.0.weight : [32, 4]
[skrl:INFO] | | |-- net.0.bias : [32]
[skrl:INFO] | | |-- net.2.weight : [32, 32]
[skrl:INFO] | | |-- net.2.bias : [32]
[skrl:INFO] | | |-- net.4.weight : [1, 32]
[skrl:INFO] | | |-- net.4.bias : [1]
[skrl:INFO] | |-- value (Value)
[skrl:INFO] | | |-- net.0.weight : [32, 4]
[skrl:INFO] | | |-- net.0.bias : [32]
[skrl:INFO] | | |-- net.2.weight : [32, 32]
[skrl:INFO] | | |-- net.2.bias : [32]
[skrl:INFO] | | |-- net.4.weight : [1, 32]
[skrl:INFO] | | |-- net.4.bias : [1]
[skrl:INFO] | |-- optimizer (Adam)
[skrl:INFO] | | |-- state (dict)
[skrl:INFO] | | |-- param_groups (list)
[skrl:INFO] | |-- state_preprocessor (RunningStandardScaler)
[skrl:INFO] | | |-- running_mean : [4]
[skrl:INFO] | | |-- running_variance : [4]
[skrl:INFO] | | |-- current_count : []
[skrl:INFO] | |-- value_preprocessor (RunningStandardScaler)
[skrl:INFO] | | |-- running_mean : [1]
[skrl:INFO] | | |-- running_variance : [1]
[skrl:INFO] | | |-- current_count : []
[skrl:INFO] |-- source
[skrl:INFO] | |-- model (OrderedDict)
[skrl:INFO] | | |-- value_mean_std.running_mean : [1]
[skrl:INFO] | | |-- value_mean_std.running_var : [1]
[skrl:INFO] | | |-- value_mean_std.count : []
[skrl:INFO] | | |-- running_mean_std.running_mean : [4]
[skrl:INFO] | | |-- running_mean_std.running_var : [4]
[skrl:INFO] | | |-- running_mean_std.count : []
[skrl:INFO] | | |-- a2c_network.sigma : [1]
[skrl:INFO] | | |-- a2c_network.actor_mlp.0.weight : [32, 4]
[skrl:INFO] | | |-- a2c_network.actor_mlp.0.bias : [32]
[skrl:INFO] | | |-- a2c_network.actor_mlp.2.weight : [32, 32]
[skrl:INFO] | | |-- a2c_network.actor_mlp.2.bias : [32]
[skrl:INFO] | | |-- a2c_network.value.weight : [1, 32]
[skrl:INFO] | | |-- a2c_network.value.bias : [1]
[skrl:INFO] | | |-- a2c_network.mu.weight : [1, 32]
[skrl:INFO] | | |-- a2c_network.mu.bias : [1]
[skrl:INFO] | |-- epoch (int)
[skrl:INFO] | |-- optimizer (dict)
[skrl:INFO] | |-- frame (int)
[skrl:INFO] | |-- last_mean_rewards (float32)
[skrl:INFO] | |-- env_state (NoneType)
[skrl:INFO] Migration
[skrl:INFO] Model: policy (Policy)
[skrl:INFO] Models
[skrl:INFO] |-- current: 7 items
[skrl:INFO] | |-- log_std_parameter : [1]
[skrl:INFO] | |-- net.0.weight : [32, 4]
[skrl:INFO] | |-- net.0.bias : [32]
[skrl:INFO] | |-- net.2.weight : [32, 32]
[skrl:INFO] | |-- net.2.bias : [32]
[skrl:INFO] | |-- net.4.weight : [1, 32]
[skrl:INFO] | |-- net.4.bias : [1]
[skrl:INFO] |-- source: 9 items
[skrl:INFO] | |-- a2c_network.sigma : [1]
[skrl:INFO] | |-- a2c_network.actor_mlp.0.weight : [32, 4]
[skrl:INFO] | |-- a2c_network.actor_mlp.0.bias : [32]
[skrl:INFO] | |-- a2c_network.actor_mlp.2.weight : [32, 32]
[skrl:INFO] | |-- a2c_network.actor_mlp.2.bias : [32]
[skrl:INFO] | |-- a2c_network.value.weight : [1, 32]
[skrl:INFO] | |-- a2c_network.value.bias : [1]
[skrl:INFO] | |-- a2c_network.mu.weight : [1, 32]
[skrl:INFO] | |-- a2c_network.mu.bias : [1]
[skrl:INFO] Migration
[skrl:INFO] |-- auto: log_std_parameter <- a2c_network.sigma
[skrl:INFO] |-- auto: net.0.weight <- a2c_network.actor_mlp.0.weight
[skrl:INFO] |-- map: net.0.bias <- a2c_network.actor_mlp.0.bias
[skrl:INFO] |-- auto: net.2.weight <- a2c_network.actor_mlp.2.weight
[skrl:INFO] |-- map: net.2.bias <- a2c_network.actor_mlp.2.bias
[skrl:INFO] |-- map: net.4.weight <- a2c_network.mu.weight
[skrl:INFO] |-- map: net.4.bias <- a2c_network.mu.bias
[skrl:INFO] Model: value (Value)
[skrl:INFO] Models
[skrl:INFO] |-- current: 6 items
[skrl:INFO] | |-- net.0.weight : [32, 4]
[skrl:INFO] | |-- net.0.bias : [32]
[skrl:INFO] | |-- net.2.weight : [32, 32]
[skrl:INFO] | |-- net.2.bias : [32]
[skrl:INFO] | |-- net.4.weight : [1, 32]
[skrl:INFO] | |-- net.4.bias : [1]
[skrl:INFO] |-- source: 9 items
[skrl:INFO] | |-- a2c_network.sigma : [1]
[skrl:INFO] | |-- a2c_network.actor_mlp.0.weight : [32, 4]
[skrl:INFO] | |-- a2c_network.actor_mlp.0.bias : [32]
[skrl:INFO] | |-- a2c_network.actor_mlp.2.weight : [32, 32]
[skrl:INFO] | |-- a2c_network.actor_mlp.2.bias : [32]
[skrl:INFO] | |-- a2c_network.value.weight : [1, 32]
[skrl:INFO] | |-- a2c_network.value.bias : [1]
[skrl:INFO] | |-- a2c_network.mu.weight : [1, 32]
[skrl:INFO] | |-- a2c_network.mu.bias : [1]
[skrl:INFO] Migration
[skrl:INFO] |-- auto: net.0.weight <- a2c_network.actor_mlp.0.weight
[skrl:INFO] |-- map: net.0.bias <- a2c_network.actor_mlp.0.bias
[skrl:INFO] |-- auto: net.2.weight <- a2c_network.actor_mlp.2.weight
[skrl:INFO] |-- map: net.2.bias <- a2c_network.actor_mlp.2.bias
[skrl:INFO] |-- map: net.4.weight <- a2c_network.value.weight
[skrl:INFO] |-- map: net.4.bias <- a2c_network.value.bias
True
"""
# load state_dict from path
if path is not None:
# rl_games checkpoint
if path.endswith(".pt") or path.endswith(".pth"):
checkpoint = torch.load(path, map_location=self.device)
else:
raise ValueError("Cannot identify file type")
# show modules
if verbose:
logger.info("Modules")
logger.info(" |-- current")
for name, module in self.checkpoint_modules.items():
logger.info(f" | |-- {name} ({type(module).__name__})")
if hasattr(module, "state_dict"):
for k, v in module.state_dict().items():
if hasattr(v, "shape"):
logger.info(f" | | |-- {k} : {list(v.shape)}")
else:
logger.info(f" | | |-- {k} ({type(v).__name__})")
logger.info(" |-- source")
for name, module in checkpoint.items():
logger.info(f" | |-- {name} ({type(module).__name__})")
if name == "model":
for k, v in module.items():
logger.info(f" | | |-- {k} : {list(v.shape)}")
else:
if hasattr(module, "state_dict"):
for k, v in module.state_dict().items():
if hasattr(v, "shape"):
logger.info(f" | | |-- {k} : {list(v.shape)}")
else:
logger.info(f" | | |-- {k} ({type(v).__name__})")
logger.info("Migration")
if "optimizer" in self.checkpoint_modules:
# loaded state dict contains a parameter group that doesn't match the size of optimizer's group
# self.checkpoint_modules["optimizer"].load_state_dict(checkpoint["optimizer"])
pass
# state_preprocessor
if "state_preprocessor" in self.checkpoint_modules:
if "running_mean_std.running_mean" in checkpoint["model"]:
state_dict = copy.deepcopy(self.checkpoint_modules["state_preprocessor"].state_dict())
state_dict["running_mean"] = checkpoint["model"]["running_mean_std.running_mean"]
state_dict["running_variance"] = checkpoint["model"]["running_mean_std.running_var"]
state_dict["current_count"] = checkpoint["model"]["running_mean_std.count"]
self.checkpoint_modules["state_preprocessor"].load_state_dict(state_dict)
del checkpoint["model"]["running_mean_std.running_mean"]
del checkpoint["model"]["running_mean_std.running_var"]
del checkpoint["model"]["running_mean_std.count"]
# value_preprocessor
if "value_preprocessor" in self.checkpoint_modules:
if "value_mean_std.running_mean" in checkpoint["model"]:
state_dict = copy.deepcopy(self.checkpoint_modules["value_preprocessor"].state_dict())
state_dict["running_mean"] = checkpoint["model"]["value_mean_std.running_mean"]
state_dict["running_variance"] = checkpoint["model"]["value_mean_std.running_var"]
state_dict["current_count"] = checkpoint["model"]["value_mean_std.count"]
self.checkpoint_modules["value_preprocessor"].load_state_dict(state_dict)
del checkpoint["model"]["value_mean_std.running_mean"]
del checkpoint["model"]["value_mean_std.running_var"]
del checkpoint["model"]["value_mean_std.count"]
# TODO: AMP state preprocessor
# model
status = True
for name, module in self.checkpoint_modules.items():
            if name not in ["state_preprocessor", "value_preprocessor", "optimizer"] and hasattr(module, "migrate"):
if verbose:
logger.info(f"Model: {name} ({type(module).__name__})")
status *= module.migrate(state_dict=checkpoint["model"],
name_map=name_map.get(name, {}),
auto_mapping=auto_mapping,
verbose=verbose)
self.set_mode("eval")
return bool(status)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
timestep += 1
# update best models and write checkpoints
if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval:
# update best models
reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31))
if reward > self.checkpoint_best_modules["reward"]:
self.checkpoint_best_modules["timestep"] = timestep
self.checkpoint_best_modules["reward"] = reward
self.checkpoint_best_modules["saved"] = False
self.checkpoint_best_modules["modules"] = {k: copy.deepcopy(self._get_internal_value(v)) for k, v in self.checkpoint_modules.items()}
# write checkpoints
self.write_checkpoint(timestep, timesteps)
# write to tensorboard
if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval:
self.write_tracking_data(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:raises NotImplementedError: The method is not implemented by the inheriting classes
"""
raise NotImplementedError
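# --- Subclass sketch (editor's addition, hedged) ---
# A concrete agent needs, at minimum, ``act`` and ``_update``; ``record_transition``
# is typically extended (calling super()) to also store the transition in memory.
# The "policy" model key below is illustrative, not required by this base class.
#
# class MyAgent(Agent):
#     def act(self, states, timestep, timesteps):
#         return self.models["policy"].act({"states": states}, role="policy")
#
#     def _update(self, timestep, timesteps):
#         ...  # sample a batch from self.memory and optimize the models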
| 33,314 | Python | 49.097744 | 149 | 0.555082 |
Toni-SM/skrl/skrl/agents/torch/__init__.py | from skrl.agents.torch.base import Agent
| 41 | Python | 19.99999 | 40 | 0.829268 |
Toni-SM/skrl/skrl/agents/torch/trpo/__init__.py | from skrl.agents.torch.trpo.trpo import TRPO, TRPO_DEFAULT_CONFIG
from skrl.agents.torch.trpo.trpo_rnn import TRPO_RNN
| 119 | Python | 38.999987 | 65 | 0.815126 |
Toni-SM/skrl/skrl/agents/torch/trpo/trpo.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import gym
import gymnasium
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.convert_parameters import parameters_to_vector, vector_to_parameters
from skrl.agents.torch import Agent
from skrl.memories.torch import Memory
from skrl.models.torch import Model
# [start-config-dict-torch]
TRPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"value_learning_rate": 1e-3, # value learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"value_loss_scale": 1.0, # value loss scaling factor
"damping": 0.1, # damping coefficient for computing the Hessian-vector product
"max_kl_divergence": 0.01, # maximum KL divergence between old and new policy
"conjugate_gradient_steps": 10, # maximum number of iterations for the conjugate gradient algorithm
"max_backtrack_steps": 10, # maximum number of backtracking steps during line search
"accept_ratio": 0.5, # accept ratio for the line search loss improvement
"step_fraction": 1.0, # fraction of the step size for the line search
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-torch]
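# Configuration sketch (editor's addition, hedged): override selected defaults;
# unspecified keys keep the values above because the agent deep-copies this dict
# before applying the user configuration. `models`, `memory` and `env` below are
# assumed to be created beforehand.
#
# cfg = TRPO_DEFAULT_CONFIG.copy()
# cfg["rollouts"] = 32
# cfg["max_kl_divergence"] = 0.005
# agent = TRPO(models=models, memory=memory, cfg=cfg,
#              observation_space=env.observation_space,
#              action_space=env.action_space, device=env.device)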
class TRPO(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None) -> None:
"""Trust Region Policy Optimization (TRPO)
https://arxiv.org/abs/1502.05477
:param models: Models used by the agent
:type models: dictionary of skrl.models.torch.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
_cfg = copy.deepcopy(TRPO_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.value = self.models.get("value", None)
self.backup_policy = copy.deepcopy(self.policy)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["value"] = self.value
# configuration
self._learning_epochs = self.cfg["learning_epochs"]
self._mini_batches = self.cfg["mini_batches"]
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._value_loss_scale = self.cfg["value_loss_scale"]
self._max_kl_divergence = self.cfg["max_kl_divergence"]
self._damping = self.cfg["damping"]
self._conjugate_gradient_steps = self.cfg["conjugate_gradient_steps"]
self._max_backtrack_steps = self.cfg["max_backtrack_steps"]
self._accept_ratio = self.cfg["accept_ratio"]
self._step_fraction = self.cfg["step_fraction"]
self._value_learning_rate = self.cfg["value_learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._value_preprocessor = self.cfg["value_preprocessor"]
self._discount_factor = self.cfg["discount_factor"]
self._lambda = self.cfg["lambda"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"]
# set up optimizer and learning rate scheduler
if self.policy is not None and self.value is not None:
self.value_optimizer = torch.optim.Adam(self.value.parameters(), lr=self._value_learning_rate)
if self._learning_rate_scheduler is not None:
self.value_scheduler = self._learning_rate_scheduler(self.value_optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["value_optimizer"] = self.value_optimizer
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
if self._value_preprocessor:
self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"])
self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor
else:
self._value_preprocessor = self._empty_preprocessor
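
    # Hedged configuration sketch: the preprocessor entries expect a class (not an
    # instance) plus its constructor kwargs; skrl's RunningStandardScaler is a
    # common choice. ``env`` and ``device`` are assumptions for illustration:
    #
    #     from skrl.resources.preprocessors.torch import RunningStandardScaler
    #
    #     cfg["state_preprocessor"] = RunningStandardScaler
    #     cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
    #     cfg["value_preprocessor"] = RunningStandardScaler
    #     cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
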
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool)
self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32)
self.memory.create_tensor(name="values", size=1, dtype=torch.float32)
self.memory.create_tensor(name="returns", size=1, dtype=torch.float32)
self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32)
self._tensors_names_policy = ["states", "actions", "log_prob", "advantages"]
self._tensors_names_value = ["states", "returns"]
# create temporary variables needed for storage and computation
self._current_log_prob = None
self._current_next_states = None
    def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> Tuple[torch.Tensor, torch.Tensor, Mapping[str, Any]]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
        :return: Actions, log probabilities and model outputs
        :rtype: tuple of torch.Tensor
"""
# sample random actions
# TODO: fix for stochasticity
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample stochastic actions
actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy")
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: torch.Tensor,
actions: torch.Tensor,
rewards: torch.Tensor,
next_states: torch.Tensor,
terminated: torch.Tensor,
truncated: torch.Tensor,
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: torch.Tensor
:param actions: Actions taken by the agent
:type actions: torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: torch.Tensor
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
self._current_next_states = next_states
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
# compute values
values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value")
values = self._value_preprocessor(values, inverse=True)
            # time-limit (truncation) bootstrapping
if self._time_limit_bootstrap:
rewards += self._discount_factor * values * truncated
            # store the transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
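
    # Note on the time-limit bootstrap above: when an episode is cut short by a time
    # limit (truncated) rather than genuinely terminated, the missing future return is
    # approximated by the value estimate, i.e. r <- r + gamma * V(s). A minimal numeric
    # sketch (the numbers are assumptions): with r = 1.0, gamma = 0.99 and V(s) = 10.0,
    # a truncated step stores r = 1.0 + 0.99 * 10.0 = 10.9.
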
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
def compute_gae(rewards: torch.Tensor,
dones: torch.Tensor,
values: torch.Tensor,
next_values: torch.Tensor,
discount_factor: float = 0.99,
                        lambda_coefficient: float = 0.95) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: torch.Tensor
:param dones: Signals to indicate that episodes have ended
:type dones: torch.Tensor
:param values: Values obtained by the agent
:type values: torch.Tensor
:param next_values: Next values obtained by the agent
:type next_values: torch.Tensor
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
            :return: Returns and Generalized Advantage Estimations (GAE)
            :rtype: tuple of torch.Tensor
"""
advantage = 0
advantages = torch.zeros_like(rewards)
not_dones = dones.logical_not()
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
                next_values = values[i + 1] if i < memory_size - 1 else next_values  # bootstrap with the given last values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
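
        # The loop above is the standard GAE recursion folded into one line:
        #     delta_t = r_t + gamma * V(s_{t+1}) - V(s_t)      (TD error)
        #     A_t     = delta_t + gamma * lambda * A_{t+1}
        # with both gamma terms masked by ``not_dones`` so the recursion restarts at
        # episode boundaries; returns are then recovered as R_t = A_t + V(s_t).
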
def surrogate_loss(policy: Model,
states: torch.Tensor,
actions: torch.Tensor,
log_prob: torch.Tensor,
advantages: torch.Tensor) -> torch.Tensor:
"""Compute the surrogate objective (policy loss)
:param policy: Policy
:type policy: Model
:param states: States
:type states: torch.Tensor
:param actions: Actions
:type actions: torch.Tensor
:param log_prob: Log probability
:type log_prob: torch.Tensor
:param advantages: Advantages
:type advantages: torch.Tensor
:return: Surrogate loss
:rtype: torch.Tensor
"""
_, new_log_prob, _ = policy.act({"states": states, "taken_actions": actions}, role="policy")
return (advantages * torch.exp(new_log_prob - log_prob.detach())).mean()
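
        # The surrogate objective is the importance-sampled policy objective
        #     L(theta) = E[ pi_theta(a|s) / pi_old(a|s) * A ]
        # with the probability ratio computed in log space for numerical stability:
        # exp(log pi_theta(a|s) - log pi_old(a|s)).
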
def conjugate_gradient(policy: Model,
states: torch.Tensor,
b: torch.Tensor,
                               num_iterations: int = 10,
residual_tolerance: float = 1e-10) -> torch.Tensor:
"""Conjugate gradient algorithm to solve Ax = b using the iterative method
https://en.wikipedia.org/wiki/Conjugate_gradient_method#As_an_iterative_method
:param policy: Policy
:type policy: Model
:param states: States
:type states: torch.Tensor
:param b: Vector b
:type b: torch.Tensor
            :param num_iterations: Number of iterations (default: ``10``)
            :type num_iterations: int, optional
:param residual_tolerance: Residual tolerance (default: ``1e-10``)
:type residual_tolerance: float, optional
:return: Conjugate vector
:rtype: torch.Tensor
"""
x = torch.zeros_like(b)
r = b.clone()
p = b.clone()
rr_old = torch.dot(r, r)
for _ in range(num_iterations):
hv = fisher_vector_product(policy, states, p, damping=self._damping)
alpha = rr_old / torch.dot(p, hv)
x += alpha * p
r -= alpha * hv
rr_new = torch.dot(r, r)
if rr_new < residual_tolerance:
break
p = r + rr_new / rr_old * p
rr_old = rr_new
return x
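
        # Minimal standalone sanity check of the same CG iteration (an illustrative
        # assumption, using an explicit SPD matrix in place of Fisher-vector products):
        #
        #     A = torch.tensor([[4.0, 1.0], [1.0, 3.0]])
        #     b = torch.tensor([1.0, 2.0])
        #     x, r, p = torch.zeros_like(b), b.clone(), b.clone()
        #     rr_old = torch.dot(r, r)
        #     for _ in range(10):
        #         hv = A @ p
        #         alpha = rr_old / torch.dot(p, hv)
        #         x += alpha * p
        #         r -= alpha * hv
        #         rr_new = torch.dot(r, r)
        #         if rr_new < 1e-10:
        #             break
        #         p = r + rr_new / rr_old * p
        #         rr_old = rr_new
        #     # x converges to A^-1 b ~= [0.0909, 0.6364]
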
def fisher_vector_product(policy: Model,
states: torch.Tensor,
vector: torch.Tensor,
damping: float = 0.1) -> torch.Tensor:
"""Compute the Fisher vector product (direct method)
https://www.telesens.co/2018/06/09/efficiently-computing-the-fisher-vector-product-in-trpo/
:param policy: Policy
:type policy: Model
:param states: States
:type states: torch.Tensor
:param vector: Vector
:type vector: torch.Tensor
:param damping: Damping (default: ``0.1``)
:type damping: float, optional
:return: Hessian vector product
:rtype: torch.Tensor
"""
kl = kl_divergence(policy, policy, states)
kl_gradient = torch.autograd.grad(kl, policy.parameters(), create_graph=True)
flat_kl_gradient = torch.cat([gradient.view(-1) for gradient in kl_gradient])
hessian_vector_gradient = torch.autograd.grad((flat_kl_gradient * vector).sum(), policy.parameters())
flat_hessian_vector_gradient = torch.cat([gradient.contiguous().view(-1) for gradient in hessian_vector_gradient])
return flat_hessian_vector_gradient + damping * vector
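
        # The computation above avoids materializing the Hessian H of the KL: since
        # H v = grad((grad KL)^T v), one extra backward pass through the flattened
        # KL gradient yields the product directly, and the damping term, i.e.
        # (H + damping * I) v, keeps the conjugate gradient solve well-conditioned.
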
def kl_divergence(policy_1: Model, policy_2: Model, states: torch.Tensor) -> torch.Tensor:
"""Compute the KL divergence between two distributions
https://en.wikipedia.org/wiki/Normal_distribution#Other_properties
:param policy_1: First policy
:type policy_1: Model
:param policy_2: Second policy
:type policy_2: Model
:param states: States
:type states: torch.Tensor
:return: KL divergence
:rtype: torch.Tensor
"""
mu_1 = policy_1.act({"states": states}, role="policy")[2]["mean_actions"]
logstd_1 = policy_1.get_log_std(role="policy")
mu_1, logstd_1 = mu_1.detach(), logstd_1.detach()
mu_2 = policy_2.act({"states": states}, role="policy")[2]["mean_actions"]
logstd_2 = policy_2.get_log_std(role="policy")
            kl = logstd_2 - logstd_1 + 0.5 * (torch.square(logstd_1.exp()) + torch.square(mu_1 - mu_2)) \
                / torch.square(logstd_2.exp()) - 0.5
return torch.sum(kl, dim=-1).mean()
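
        # For diagonal Gaussians this is the closed-form KL (see the link above),
        # summed over action dimensions and averaged over states:
        #     KL(p1 || p2) = log(sigma_2 / sigma_1)
        #                    + (sigma_1^2 + (mu_1 - mu_2)^2) / (2 * sigma_2^2) - 1/2
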
# compute returns and advantages
with torch.no_grad():
self.value.train(False)
last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float())}, role="value")
self.value.train(True)
last_values = self._value_preprocessor(last_values, inverse=True)
values = self.memory.get_tensor_by_name("values")
returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True))
self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True))
self.memory.set_tensor_by_name("advantages", advantages)
# sample all from memory
sampled_states, sampled_actions, sampled_log_prob, sampled_advantages \
= self.memory.sample_all(names=self._tensors_names_policy, mini_batches=1)[0]
sampled_states = self._state_preprocessor(sampled_states, train=True)
# compute policy loss gradient
policy_loss = surrogate_loss(self.policy, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages)
policy_loss_gradient = torch.autograd.grad(policy_loss, self.policy.parameters())
flat_policy_loss_gradient = torch.cat([gradient.view(-1) for gradient in policy_loss_gradient])
# compute the search direction using the conjugate gradient algorithm
search_direction = conjugate_gradient(self.policy, sampled_states, flat_policy_loss_gradient.data,
num_iterations=self._conjugate_gradient_steps)
# compute step size and full step
xHx = (search_direction * fisher_vector_product(self.policy, sampled_states, search_direction, self._damping)) \
.sum(0, keepdim=True)
step_size = torch.sqrt(2 * self._max_kl_divergence / xHx)[0]
full_step = step_size * search_direction
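        # Step-size derivation: the trust region constrains the local quadratic model
        # of the KL, 0.5 * beta^2 * s^T H s <= max_kl, so the largest admissible scale
        # is beta = sqrt(2 * max_kl / (s^T H s)) applied to the CG direction s.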
# backtracking line search
restore_policy_flag = True
self.backup_policy.update_parameters(self.policy)
params = parameters_to_vector(self.policy.parameters())
expected_improvement = (flat_policy_loss_gradient * full_step).sum(0, keepdim=True)
for alpha in [self._step_fraction * 0.5 ** i for i in range(self._max_backtrack_steps)]:
new_params = params + alpha * full_step
vector_to_parameters(new_params, self.policy.parameters())
            step_expected_improvement = expected_improvement * alpha  # expected improvement at this step size
            kl = kl_divergence(self.backup_policy, self.policy, sampled_states)
            loss = surrogate_loss(self.policy, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages)
            if kl < self._max_kl_divergence and (loss - policy_loss) / step_expected_improvement > self._accept_ratio:
restore_policy_flag = False
break
if restore_policy_flag:
self.policy.update_parameters(self.backup_policy)
# sample mini-batches from memory
sampled_batches = self.memory.sample_all(names=self._tensors_names_value, mini_batches=self._mini_batches)
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs):
# mini-batches loop
for sampled_states, sampled_returns in sampled_batches:
sampled_states = self._state_preprocessor(sampled_states, train=not epoch)
# compute value loss
predicted_values, _, _ = self.value.act({"states": sampled_states}, role="value")
value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values)
# optimization step (value)
self.value_optimizer.zero_grad()
value_loss.backward()
if self._grad_norm_clip > 0:
nn.utils.clip_grad_norm_(self.value.parameters(), self._grad_norm_clip)
self.value_optimizer.step()
# update cumulative losses
cumulative_value_loss += value_loss.item()
# update learning rate
if self._learning_rate_scheduler:
self.value_scheduler.step()
# record data
self.track_data("Loss / Policy loss", policy_loss.item())
self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Value learning rate", self.value_scheduler.get_last_lr()[0])