file_path
stringlengths 22
162
| content
stringlengths 19
501k
| size
int64 19
501k
| lang
stringclasses 1
value | avg_line_length
float64 6.33
100
| max_line_length
int64 18
935
| alphanum_fraction
float64 0.34
0.93
|
---|---|---|---|---|---|---|
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/helpers/test_projection.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import os
import math
import asyncio
from time import time
import carb
import numpy as np
import omni.kit.test
from omni.kit.viewport.utility import get_active_viewport
from pxr import Sdf, UsdGeom, Vt
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.syntheticdata as syn
from ..utils import add_semantics
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
# Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class TestProjection(omni.kit.test.AsyncTestCase):
    """Tests for projecting 3D bounding-box corners into image space (and the
    inverse image->world mapping) under the pinhole and fisheye-polynomial
    (F-Theta) camera models."""

    # Before running each test
    async def setUp(self):
        """Create a stage with one semantically labelled cube and a camera
        carrying F-Theta calibration attributes; enable the 3D bbox sensor."""
        await omni.usd.get_context().new_stage_async()
        # Setup viewport
        self.viewport = get_active_viewport()
        self.stage = omni.usd.get_context().get_stage()
        prim = self.stage.DefinePrim("/World", "Xform")
        self.stage.SetDefaultPrim(prim)
        cube = self.stage.DefinePrim("/World/Cube", "Cube")
        add_semantics(cube, "cube")
        usd_camera = UsdGeom.Camera.Define(self.stage, "/World/Camera")
        usd_camera.AddTranslateOp()
        self.camera = usd_camera.GetPrim()
        # Projection model plus F-Theta calibration: image size, principal
        # point (Cx/Cy), max FOV and polynomial coefficients A..E.
        # NOTE(review): these appear to be a fixed test calibration — confirm
        # against the projection helpers' expected attribute semantics.
        self.camera.CreateAttribute("cameraProjectionType", Sdf.ValueTypeNames.Token).Set(Vt.Token("pinhole"))
        self.camera.CreateAttribute("fthetaWidth", Sdf.ValueTypeNames.Float).Set(960)
        self.camera.CreateAttribute("fthetaHeight", Sdf.ValueTypeNames.Float).Set(604)
        self.camera.CreateAttribute("fthetaCx", Sdf.ValueTypeNames.Float).Set(460)
        self.camera.CreateAttribute("fthetaCy", Sdf.ValueTypeNames.Float).Set(340)
        self.camera.CreateAttribute("fthetaMaxFov", Sdf.ValueTypeNames.Float).Set(200.0)
        self.camera.CreateAttribute("fthetaPolyA", Sdf.ValueTypeNames.Float).Set(0.0)
        self.camera.CreateAttribute("fthetaPolyB", Sdf.ValueTypeNames.Float).Set(0.0059)
        self.camera.CreateAttribute("fthetaPolyC", Sdf.ValueTypeNames.Float).Set(0.0)
        self.camera.CreateAttribute("fthetaPolyD", Sdf.ValueTypeNames.Float).Set(0.0)
        self.camera.CreateAttribute("fthetaPolyE", Sdf.ValueTypeNames.Float).Set(0.0)
        self.viewport.camera_path = self.camera.GetPath()
        syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.BoundingBox3D])
        await syn.sensors.next_sensor_data_async(self.viewport, True)

    async def test_pinhole(self):
        """ Test pinhole projection
        """
        self.camera.GetAttribute("xformOp:translate").Set((0.0, 0.0, 9.0))
        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport,True)
        # Get 3D bbox
        bbox3d = syn.sensors.get_bounding_box_3d(self.viewport, return_corners=True, parsed=True)
        # Project corners
        corners = bbox3d["corners"]
        projected = syn.helpers.world_to_image(corners.reshape(-1, 3), self.viewport).reshape(-1, 8, 3)
        # GT — normalized image coordinates plus a depth-like third component.
        # Confirmed visually to be correct
        GT = [
            [
                [0.26139346, 0.9241894, 0.9000009],
                [0.73860654, 0.9241894, 0.9000009],
                [0.26139346, 0.0758106, 0.9000009],
                [0.73860654, 0.0758106, 0.9000009],
                [0.20174183, 1.03023675, 0.87500088],
                [0.79825817, 1.03023675, 0.87500088],
                [0.20174183, -0.03023675, 0.87500088],
                [0.79825817, -0.03023675, 0.87500088],
            ]
        ]
        # Validate
        assert np.allclose(GT, projected)

    async def test_fisheye_polynomial(self):
        """ Test fisheye polynomial projection (F-Theta)
        """
        self.camera.GetAttribute("xformOp:translate").Set((0.0, 0.0, 3.0))
        self.camera.GetAttribute("cameraProjectionType").Set(Vt.Token("fisheyePolynomial"))
        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        # Get 3D bbox
        bbox3d = syn.sensors.get_bounding_box_3d(self.viewport, return_corners=True, parsed=True)
        # Project corners
        corners = bbox3d["corners"]
        projected = syn.helpers.world_to_image(corners.reshape(-1, 3), self.viewport).reshape(-1, 8, 3)
        # GT
        # Confirmed visually to be correct
        GT = [
            [
                [0.43674065, 0.6457944, 0.0],
                [0.52159268, 0.6457944, 0.0],
                [0.43674065, 0.49494634, 0.0],
                [0.52159268, 0.49494634, 0.0],
                [0.40232877, 0.70697108, 0.0],
                [0.55600456, 0.70697108, 0.0],
                [0.40232877, 0.43376967, 0.0],
                [0.55600456, 0.43376967, 0.0],
            ]
        ]
        # Validate
        assert np.allclose(GT, projected)
        # Run the operation in reverse: scale normalized coords to pixels.
        # NOTE: proj_i2w is a view into `projected`, so the in-place scaling
        # below also mutates `projected` (safe — GT was already checked).
        view_params = syn.helpers.get_view_params(self.viewport)
        proj_i2w = projected[0, :, :2]
        proj_i2w[..., 0] *= view_params["width"]
        proj_i2w[..., 1] *= view_params["height"]
        origin, directions = syn.helpers.image_to_world(proj_i2w, view_params)
        # Unit direction vectors from the camera origin to each GT corner.
        gt_corner_directions = corners[0] - origin
        gt_corner_directions /= np.linalg.norm(gt_corner_directions, axis=1, keepdims=True)
        assert np.allclose(gt_corner_directions, directions)
        # FOR VISUAL DEBUGGING: drop a small sphere along each ray.
        self.camera.GetAttribute("clippingRange").Set((0.1, 1000000))
        for i, d in enumerate(directions):
            s = self.stage.DefinePrim(f"/World/pt{i}", "Sphere")
            UsdGeom.Xformable(s).AddTranslateOp().Set(tuple((d + origin).tolist()))
            s.GetAttribute("radius").Set(0.03)
        await syn.sensors.next_sensor_data_async(self.viewport,True)

    async def test_fisheye_polynomial_edge(self):
        """ Test fisheye polynomial projection (F-Theta) at edge of FOV
        """
        self.camera.GetAttribute("xformOp:translate").Set((4.0, 0.0, 0.5))
        self.camera.GetAttribute("cameraProjectionType").Set(Vt.Token("fisheyePolynomial"))
        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        # Get 3D bbox
        bbox3d = syn.sensors.get_bounding_box_3d(self.viewport, return_corners=True, parsed=True)
        # Project corners
        corners = bbox3d["corners"]
        projected = syn.helpers.world_to_image(corners.reshape(-1, 3), self.viewport).reshape(-1, 8, 3)
        # GT
        # Confirmed visually to be correct
        GT = [
            [
                [0.25675408, 0.6494504, 0.0],
                [0.2902532, 0.68231909, 0.0],
                [0.25675408, 0.49129034, 0.0],
                [0.2902532, 0.45842165, 0.0],
                [0.19030016, 0.67307846, 0.0],
                [0.18980286, 0.74184522, 0.0],
                [0.19030016, 0.46766228, 0.0],
                [0.18980286, 0.39889552, 0.0],
            ]
        ]
        # Validate
        assert np.allclose(GT, projected)
        # Run the operation in reverse (see test_fisheye_polynomial).
        view_params = syn.helpers.get_view_params(self.viewport)
        proj_i2w = projected[0, :, :2]
        proj_i2w[..., 0] *= view_params["width"]
        proj_i2w[..., 1] *= view_params["height"]
        origin, directions = syn.helpers.image_to_world(proj_i2w, view_params)
        gt_corner_directions = corners[0] - origin
        gt_corner_directions /= np.linalg.norm(gt_corner_directions, axis=1, keepdims=True)
        assert np.allclose(gt_corner_directions, directions)
        # FOR VISUAL DEBUGGING
        # NOTE(review): unlike test_fisheye_polynomial the debug spheres get no
        # explicit radius here — presumably intentional (debug-only); confirm.
        self.camera.GetAttribute("clippingRange").Set((0.1, 1000000))
        for i, d in enumerate(directions):
            s = self.stage.DefinePrim(f"/World/pt{i}", "Sphere")
            UsdGeom.Xformable(s).AddTranslateOp().Set(tuple((d + origin).tolist()))
        await syn.sensors.next_sensor_data_async(self.viewport,True)

    # After running each test
    async def tearDown(self):
        pass
| 8,149 | Python | 40.370558 | 141 | 0.618972 |
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/helpers/test_instance_mapping.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import os
import math
import asyncio
from time import time
import carb
import numpy as np
import omni.kit.test
from omni.kit.viewport.utility import get_active_viewport
from pxr import UsdPhysics
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.syntheticdata as syn
from ..utils import add_semantics
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
# Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class TestHelpersInstanceMappings(omni.kit.test.AsyncTestCase):
    """Tests for the instance-mapping helpers."""

    # Before running each test
    async def setUp(self):
        """Grab the active viewport and start from a clean stage with /World."""
        self.viewport = get_active_viewport()
        await omni.usd.get_context().new_stage_async()
        self.stage = omni.usd.get_context().get_stage()
        world = self.stage.DefinePrim("/World", "Xform")
        self.stage.SetDefaultPrim(world)

    async def test_non_semantic_schemas(self):
        """A prim carrying both a semantics schema and an unrelated applied
        schema (RigidBodyAPI) must still be reported correctly."""
        cone = self.stage.DefinePrim("/World/Cone", "Cone")
        # Semantics schema
        add_semantics(cone, "Je ne suis pas un cone.")
        # Non-semantics schema on the same prim
        UsdPhysics.RigidBodyAPI.Apply(cone)
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        # Fetch and validate the instance mappings
        mappings = syn.helpers.get_instance_mappings()
        cone_mapping = mappings[0]
        assert cone_mapping["uniqueId"] == 1
        assert cone_mapping["name"] == "/World/Cone"
        assert cone_mapping["semanticId"] == 1
        assert cone_mapping["semanticLabel"] == "Je ne suis pas un cone."

    # After running each test
    async def tearDown(self):
        pass
| 2,054 | Python | 31.619047 | 141 | 0.686465 |
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/helpers/test_bboxes.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import os
import math
import asyncio
from time import time
import carb
import numpy as np
import omni.kit.test
from omni.kit.viewport.utility import get_active_viewport
from pxr import Sdf, UsdGeom, Vt
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.syntheticdata as syn
from ..utils import add_semantics
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
# Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class TestBBoxes(omni.kit.test.AsyncTestCase):
    """Tests for bbox/occlusion reduction and sensor-merging helpers.

    The stage holds one semantically labelled cube and one unlabelled cube, so
    the reduction helpers are expected to drop the unlabelled prim's data.
    """

    # Before running each test
    async def setUp(self):
        """Build the two-cube stage and enable all bbox/occlusion sensors."""
        # FIX: new_stage_async() was previously called twice back-to-back;
        # one fresh stage is sufficient.
        await omni.usd.get_context().new_stage_async()
        # Setup viewport
        self.viewport = get_active_viewport()
        self.stage = omni.usd.get_context().get_stage()
        prim = self.stage.DefinePrim("/World", "Xform")
        self.stage.SetDefaultPrim(prim)
        marked_cube = self.stage.DefinePrim("/World/MarkedCube0", "Cube")
        add_semantics(marked_cube, "cube")
        marked_cube.GetAttribute("size").Set(100)
        UsdGeom.XformCommonAPI(marked_cube).SetTranslate((3, 3, 0))
        # The unmarked cube has no semantics — reduction helpers must drop it.
        unmarked_cube = self.stage.DefinePrim("/World/UnmarkedCube", "Cube")
        unmarked_cube.GetAttribute("size").Set(100)
        UsdGeom.XformCommonAPI(unmarked_cube).SetTranslate((3, 3, -100))
        await omni.kit.app.get_app().next_update_async()
        syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.BoundingBox2DLoose])
        syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.BoundingBox2DTight])
        syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.BoundingBox3D])
        syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.Occlusion])

    async def test_reduce_bboxes_3d(self):
        """Verify that reduce_bboxes_3d removes a cube without a semantic label"""
        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        # Get 3D bbox
        bbox = syn.sensors.get_bounding_box_3d(self.viewport, return_corners=True)
        assert np.allclose(bbox["z_min"], [-50, -50])
        # Transform of unmarked cube should be included in pre-reduced bbox but not included in reduced bbox
        UNMARKED_CUBE_GT = [[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [3.0, 3.0, -100.0, 1.0]]]
        assert np.allclose(bbox["transform"][0], UNMARKED_CUBE_GT) or np.allclose(
            bbox["transform"][1], UNMARKED_CUBE_GT
        )
        instance_mappings = syn.helpers.get_instance_mappings()
        bbox_reduced = syn.helpers.reduce_bboxes_3d(bbox, instance_mappings)
        assert np.allclose(bbox_reduced["z_min"], [-50])
        assert np.allclose(
            bbox_reduced["transform"],
            [[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [3.0, 3.0, 0.0, 1.0]]],
        )

    async def test_reduce_occlusion(self):
        """Verify that reduce_occlusion removes a cube without a semantic label"""
        # Add an extra labelled cube so reduction keeps two of three entries
        cube = self.stage.DefinePrim("/World/MarkedCube1", "Cube")
        add_semantics(cube, "cube")
        cube.GetAttribute("size").Set(100)
        UsdGeom.XformCommonAPI(cube).SetTranslate((3, -10, 0))
        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        # Get occlusion
        occlusion = syn.sensors.get_occlusion(self.viewport)
        occlusion_ratios = np.sort(occlusion["occlusionRatio"])
        assert np.allclose(occlusion_ratios, [0.0327, 0.38059998, 0.8886], atol=0.05)
        instance_mappings = syn.helpers.get_instance_mappings()
        reduced_occlusion = syn.helpers.reduce_occlusion(occlusion, instance_mappings)
        reduced_occlusion_ratios = np.sort(reduced_occlusion["occlusionRatio"])
        assert np.allclose(reduced_occlusion_ratios, [0.0327, 0.8886], atol=0.05)

    async def test_merge_sensors(self):
        """Verify that merge_sensors merges the data correctly"""
        # Render one frame
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        # Get bounding boxes and merge
        bounding_box_2d_tight = syn.sensors.get_bounding_box_2d_tight(self.viewport)
        bounding_box_2d_loose = syn.sensors.get_bounding_box_2d_loose(self.viewport)
        bounding_box_3d = syn.sensors.get_bounding_box_3d(self.viewport, parsed=True)
        merged_data = syn.helpers.merge_sensors(bounding_box_2d_tight, bounding_box_2d_loose, bounding_box_3d)
        for suffix, data_source in [
            ("_bbox2d_tight", bounding_box_2d_tight),
            ("_bbox2d_loose", bounding_box_2d_loose),
            ("_bbox3d", bounding_box_3d),
        ]:
            suffix_present = False
            for key in merged_data.dtype.fields:
                if key.endswith(suffix):
                    # BUGFIX: the un-merged source arrays use the un-suffixed
                    # field name; previously `sub_key` was computed but unused
                    # and `data_source` was indexed with the suffixed key.
                    sub_key = key[: -len(suffix)]
                    assert merged_data[key] == data_source[sub_key]
                    suffix_present = True
            assert suffix_present

    # After running each test
    async def tearDown(self):
        pass
| 5,491 | Python | 43.290322 | 141 | 0.65489 |
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/visualize/test_warp_post_vis.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import os
import carb
from pxr import Gf, UsdGeom, UsdLux, Sdf
import unittest
import omni.kit.test
from omni.syntheticdata import SyntheticData, SyntheticDataStage
from ..utils import add_semantics
class TestWarpPostVisualization(omni.kit.test.AsyncTestCase):
    """Exercises a warp-based post-visualization SDG node graph: two labelled
    capsules are rendered and a display node consumes the warp node output."""

    def __init__(self, methodName: str) -> None:
        super().__init__(methodName=methodName)

    async def setUp(self):
        """Build the test scene, register the warp visualization node templates
        and activate the display template on the viewport's render product."""
        # Setup the scene
        await omni.usd.get_context().new_stage_async()
        stage = omni.usd.get_context().get_stage()
        world_prim = UsdGeom.Xform.Define(stage, "/World")
        UsdGeom.Xformable(world_prim).AddTranslateOp().Set((0, 0, 0))
        UsdGeom.Xformable(world_prim).AddRotateXYZOp().Set((0, 0, 0))

        capsule0_prim = stage.DefinePrim("/World/Capsule0", "Capsule")
        add_semantics(capsule0_prim, "capsule_0")
        UsdGeom.Xformable(capsule0_prim).AddTranslateOp().Set((100, 0, 0))
        UsdGeom.Xformable(capsule0_prim).AddScaleOp().Set((30, 30, 30))
        UsdGeom.Xformable(capsule0_prim).AddRotateXYZOp().Set((-90, 0, 0))
        capsule0_prim.GetAttribute("primvars:displayColor").Set([(0.3, 1, 0)])

        capsule1_prim = stage.DefinePrim("/World/Capsule1", "Capsule")
        # BUGFIX: "capsule_1" was previously applied to capsule0_prim (copy-paste),
        # double-labelling Capsule0 and leaving Capsule1 without semantics.
        add_semantics(capsule1_prim, "capsule_1")
        UsdGeom.Xformable(capsule1_prim).AddTranslateOp().Set((-100, 0, 0))
        UsdGeom.Xformable(capsule1_prim).AddScaleOp().Set((30, 30, 30))
        UsdGeom.Xformable(capsule1_prim).AddRotateXYZOp().Set((-90, 0, 0))
        capsule1_prim.GetAttribute("primvars:displayColor").Set([(0, 1, 0.3)])

        spherelight = UsdLux.SphereLight.Define(stage, "/SphereLight")
        spherelight.GetIntensityAttr().Set(30000)
        spherelight.GetRadiusAttr().Set(30)

        # Setup viewports / renderproduct
        vp_iface = omni.kit.viewport_legacy.get_viewport_interface()
        viewport = vp_iface.get_viewport_window()
        render_product_path = viewport.get_render_product_path()

        # SyntheticData singleton interface
        sdg_iface = SyntheticData.Get()
        if not sdg_iface.is_node_template_registered("TestWarpPostVisualization"):
            sdg_iface.register_node_template(
                SyntheticData.NodeTemplate(
                    SyntheticDataStage.ON_DEMAND,  # node template stage
                    "omni.syntheticdata.SdTestWarpPostVisulation",  # node template type
                    # node template connections
                    [
                        SyntheticData.NodeConnectionTemplate("LdrColorSDExportRawArray"),
                    ]),
                template_name="TestWarpPostVisualization"  # node template name
            )
        if not sdg_iface.is_node_template_registered("TestWarpPostVisualizationDisplay"):
            sdg_iface.register_node_template(
                SyntheticData.NodeTemplate(
                    SyntheticDataStage.ON_DEMAND,  # node template stage
                    "omni.syntheticdata.SdLinearArrayToTexture",  # node template type
                    # node template connections
                    [
                        SyntheticData.NodeConnectionTemplate("TestWarpPostVisualization"),
                    ]),
                template_name="TestWarpPostVisualizationDisplay"  # node template name
            )
        sdg_iface.activate_node_template("TestWarpPostVisualizationDisplay", 0, [render_product_path])

        self.numLoops = 100

    async def run_loop(self):
        """Tick the app for numLoops frames (plus a few warm-up frames)."""
        # ensuring that the setup is taken into account
        for _ in range(5):
            await omni.kit.app.get_app().next_update_async()
        for _ in range(self.numLoops):
            await omni.kit.app.get_app().next_update_async()

    async def test_display(self):
        """ Test display
        """
        await self.run_loop()

    async def tearDown(self):
        pass
| 4,156 | Python | 40.989899 | 109 | 0.619105 |
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/visualize/test_flattener.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import os
import math
from time import time
import carb
import numpy as np
import omni.kit.test
from omni.kit.viewport.utility import get_active_viewport
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.syntheticdata as syn
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
TIMEOUT = 50
BAKE_ACCURACY_THRESHOLD = 0.9
# segmentation mask testing against inputs of different resolutions
def test_against_golden(semantic_data, golden_semantic_data):
input_dim = semantic_data.shape
golden_dim = golden_semantic_data.shape
correct_count = 0
for y in range(0, input_dim[0]):
for x in range(0, input_dim[1]):
u = x / input_dim[1]
v = y / input_dim[0]
sample_x = math.floor(u * golden_dim[1])
sample_y = math.floor(v * golden_dim[0])
if semantic_data[y, x] == golden_semantic_data[sample_y, sample_x]:
correct_count += 1
return correct_count / (input_dim[0] * input_dim[1])
# Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class TestFlattenerSegmentationBakingVis(omni.kit.test.AsyncTestCase):
    """Checks that the material flattener bakes semantic information into
    virtual textures: the baked segmentation must match a golden image."""

    # Before running each test
    async def setUp(self):
        """Open the benchmark scene and enable the semantic segmentation sensor."""
        np.random.seed(1234)
        # Setup viewport
        self.viewport = get_active_viewport()
        await omni.kit.app.get_app_interface().next_update_async()
        filepath = os.path.join(FILE_DIR, "../data/scenes/OmniUe4-benchmark.usda")
        usd_context = omni.usd.get_context()
        await usd_context.open_stage_async(filepath)
        await omni.kit.app.get_app().next_update_async()
        syn.sensors.enable_sensors(self.viewport, [syn._syntheticdata.SensorType.SemanticSegmentation])

    async def _wait_for_data(self):
        """Block until the flattener reports the captured frame is ready.

        Waits for asset loading, stalls a few frames so samplerFeedback kicks
        off baking work, then waits for baking itself to finish.
        (Removed unused locals `data` and `start` from the original.)
        """
        settings = carb.settings.get_settings()
        # wait until flattener is done loading in assets
        while not settings.get_as_bool("/app/captureFrame/ready"):
            await omni.kit.app.get_app_interface().next_update_async()
        # stall a couple of frames until samplerFeedback kicks off baking work.
        # NOTE: If we don't stall here, then we won't bake at all, because the ready flag will be falsely set
        # since samplerFeedback hasn't seen any tiles yet, so flattener thinks scene is ready for capture.
        for _ in range(20):
            await omni.kit.app.get_app_interface().next_update_async()
        # wait until baking is done
        while not settings.get_as_bool("/app/captureFrame/ready"):
            await omni.kit.app.get_app_interface().next_update_async()

    async def test_baking(self):
        """ Test that flattener correctly bakes semantic information into vtex
        """
        settings = carb.settings.get_settings()
        settings.set("/app/hydraEngine/waitIdle", True)
        # start flattener baking
        settings.set("/rtx/materialflattener/bake", True)
        settings.set("/rtx/materialflattener/rebaking", True)
        await omni.kit.app.get_app_interface().next_update_async()
        await self._wait_for_data()
        await syn.sensors.next_sensor_data_async(self.viewport)
        semantic_data = syn.sensors.get_semantic_segmentation(self.viewport)
        golden_filepath = os.path.join(FILE_DIR, "../data/golden/baked_segmentation.npz")
        golden_semantic_data = np.load(golden_filepath)["array"]
        # FIX: unique_classes was previously computed twice; once is enough.
        unique_classes = np.unique(semantic_data)
        carb.log_warn(f'unique classes = {unique_classes}')
        assert len(unique_classes) == 3
        if len(unique_classes) == 3:
            accuracy = test_against_golden(semantic_data, golden_semantic_data)
            carb.log_warn(f'1st accuracy = {accuracy}')
            # it's possible semantic labels are flipped between road and lanemark,
            # so redo the test if accuracy is strikingly low
            if accuracy < BAKE_ACCURACY_THRESHOLD:
                # Vectorized label swap (was a per-pixel double loop).
                mask_one = semantic_data == unique_classes[1]
                mask_two = semantic_data == unique_classes[2]
                semantic_data[mask_one] = unique_classes[2]
                semantic_data[mask_two] = unique_classes[1]
                accuracy = test_against_golden(semantic_data, golden_semantic_data)
                carb.log_warn(f'2nd accuracy = {accuracy}')
            assert accuracy >= BAKE_ACCURACY_THRESHOLD

    # After running each test
    async def tearDown(self):
        pass
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/visualize/test_semantic_seg.py | # NOTE:
# omni.kit.test - std python's unittest module with additional wrapping to add suport for async/await tests
# For most things refer to unittest docs: https://docs.python.org/3/library/unittest.html
import os
import numpy as np
import omni.kit.test
from omni.kit.viewport.utility import get_active_viewport
from pxr import UsdGeom
# Import extension python module we are testing with absolute import path, as if we are external user (other extension)
import omni.syntheticdata as syn
from ..utils import add_semantics
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
TIMEOUT = 50
# Having a test class derived from omni.kit.test.AsyncTestCase declared on the root of module will make it auto-discoverable by omni.kit.test
class TestSemanticSegVis(omni.kit.test.AsyncTestCase):
    """Tests for parsed semantic-segmentation visualization output."""

    # Before running each test
    async def setUp(self):
        """Start from a fresh stage and enable segmentation sensors.

        FIX: removed an unused local (`stage = ...get_stage()`) — the tests
        fetch the stage themselves when they need it.
        """
        np.random.seed(1234)
        # Setup viewport
        self.viewport = get_active_viewport()
        # Initialize Sensor
        await omni.usd.get_context().new_stage_async()
        await omni.kit.app.get_app().next_update_async()
        syn.sensors.enable_sensors(
            self.viewport,
            [syn._syntheticdata.SensorType.SemanticSegmentation, syn._syntheticdata.SensorType.InstanceSegmentation],
        )

    async def test_parsed_empty(self):
        """ Test semantic segmentation returns zero array with empty scene
        """
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        data = syn.visualize.get_semantic_segmentation(self.viewport, mode="parsed")
        assert np.array_equal(data, np.zeros_like(data).astype(np.uint8))

    async def test_number_of_classes(self):
        """ Test that number of classes in output matches number of classes in scene
        """
        stage = omni.usd.get_context().get_stage()
        cube = stage.DefinePrim("/Cube1", "Cube")
        add_semantics(cube, "cube1")
        UsdGeom.Xformable(cube).AddTranslateOp().Set((0, 10, 0))
        cube = stage.DefinePrim("/Cube2", "Cube")
        add_semantics(cube, "cube2")
        UsdGeom.Xformable(cube).AddTranslateOp().Set((0, -10, 0))
        await syn.sensors.next_sensor_data_async(self.viewport, True)
        data = syn.visualize.get_semantic_segmentation(self.viewport, mode="parsed")
        # Remove background, encoded as (0, 0, 0, 0)
        data_non_bkg = data[data.sum(axis=-1) != 0]
        assert len(np.unique(data_non_bkg, axis=0)) == 2

    # After running each test
    async def tearDown(self):
        pass
| 2,597 | Python | 39.593749 | 141 | 0.676165 |
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/graph/test_graph_manipulation.py | import carb
from pxr import Gf, UsdGeom, UsdLux, Sdf
import omni.hydratexture
import omni.kit.test
from omni.syntheticdata import SyntheticData, SyntheticDataStage
# Test the instance mapping pipeline
class TestGraphManipulation(omni.kit.test.AsyncTestCase):
def __init__(self, methodName: str) -> None:
super().__init__(methodName=methodName)
def render_product_path(self, hydra_texture) -> str:
'''Return a string to the UsdRender.Product used by the texture'''
render_product = hydra_texture.get_render_product_path()
if render_product and (not render_product.startswith('/')):
render_product = '/Render/RenderProduct_' + render_product
return render_product
    # Before running each test
    async def setUp(self):
        """Open a fresh stage, attach the rtx hydra engine, and create one
        1920x1080 hydra texture whose drawable-changed events are counted."""
        self._settings = carb.settings.acquire_settings_interface()
        self._hydra_texture_factory = omni.hydratexture.acquire_hydra_texture_factory_interface()
        self._usd_context_name = ''
        self._usd_context = omni.usd.get_context(self._usd_context_name)
        await self._usd_context.new_stage_async()
        self._stage = omni.usd.get_context().get_stage()
        # renderer
        renderer = "rtx"
        if renderer not in self._usd_context.get_attached_hydra_engine_names():
            omni.usd.add_hydra_engine(renderer, self._usd_context)
        # create the hydra textures
        self._hydra_texture_0 = self._hydra_texture_factory.create_hydra_texture(
            "TEX0",
            1920,
            1080,
            self._usd_context_name,
            hydra_engine_name=renderer,
            is_async=self._settings.get("/app/asyncRendering")
        )
        self._render_product_path_0 = self.render_product_path(self._hydra_texture_0)
        # Count drawable updates so tests can observe rendering progress.
        self._hydra_texture_rendered_counter = 0
        def on_hydra_texture_0(event: carb.events.IEvent):
            self._hydra_texture_rendered_counter += 1
        # Keep a reference to the subscription — dropping it unsubscribes
        # (tearDown sets it to None for exactly that reason).
        self._hydra_texture_rendered_counter_sub = self._hydra_texture_0.get_event_stream().create_subscription_to_push_by_type(
            omni.hydratexture.EVENT_TYPE_DRAWABLE_CHANGED,
            on_hydra_texture_0,
            name='async rendering test drawable update',
        )
async def tearDown(self):
self._hydra_texture_rendered_counter_sub = None
self._hydra_texture_0 = None
self._usd_context.close_stage()
omni.usd.release_all_hydra_engines(self._usd_context)
self._hydra_texture_factory = None
self._settings = None
wait_iterations = 6
for _ in range(wait_iterations):
await omni.kit.app.get_app().next_update_async()
async def test_rendervar_enable(self):
isdg = SyntheticData.Get()
render_var = "BoundingBox3DSD"
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
isdg.enable_rendervar(self._render_product_path_0, render_var, self._stage)
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
isdg.disable_rendervar(self._render_product_path_0, render_var, self._stage)
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
async def test_rendervar_auto_activation(self):
isdg = SyntheticData.Get()
render_var = "BoundingBox3DSD"
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, True, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
isdg.activate_node_template("BoundingBox3DReduction",0, [self._render_product_path_0], {}, self._stage, True)
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(isdg.is_rendervar_used(self._render_product_path_0, render_var))
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, True, self._stage))
isdg.deactivate_node_template("BoundingBox3DReduction",0, [self._render_product_path_0], self._stage, True)
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
async def test_rendervar_manual_activation(self):
isdg = SyntheticData.Get()
render_var = "BoundingBox3DSD"
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
assert(not isdg.is_node_template_activated("BoundingBox3DReduction",self._render_product_path_0,False))
isdg.activate_node_template("BoundingBox3DReduction",0, [self._render_product_path_0], {}, self._stage, False)
assert(isdg.is_node_template_activated("BoundingBox3DReduction",self._render_product_path_0,False))
assert(isdg.is_node_template_activated("BoundingBox3DReduction",self._render_product_path_0,True))
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(isdg.is_rendervar_used(self._render_product_path_0, render_var))
isdg.enable_rendervar(self._render_product_path_0, render_var, self._stage)
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(isdg.is_rendervar_used(self._render_product_path_0, render_var))
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, True, self._stage))
isdg.deactivate_node_template("BoundingBox3DReduction",0, [self._render_product_path_0], self._stage, False)
assert(not isdg.is_node_template_activated("BoundingBox3DReduction",self._render_product_path_0,True))
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
isdg.disable_rendervar(self._render_product_path_0, render_var, self._stage)
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
async def test_rendervar_hybrid_activation(self):
isdg = SyntheticData.Get()
render_var = "BoundingBox3DSD"
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
isdg.activate_node_template("BoundingBox3DReduction",0, [self._render_product_path_0], {}, self._stage, False)
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(isdg.is_rendervar_used(self._render_product_path_0, render_var))
isdg.enable_rendervar(self._render_product_path_0, render_var, self._stage)
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(isdg.is_rendervar_used(self._render_product_path_0, render_var))
isdg.deactivate_node_template("BoundingBox3DReduction",0, [self._render_product_path_0], self._stage, True)
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, True, self._stage))
isdg.disable_rendervar(self._render_product_path_0, render_var, self._stage)
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
async def test_rendervar_initially_activated(self):
# Scenario: the render var is explicitly enabled BEFORE any node template
# activates it; the explicit enable must outlive template activation and
# deactivation.
isdg = SyntheticData.Get()
render_var = "BoundingBox3DSD"
# precondition: render var neither enabled nor used
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
# explicit enable first: enabled but not yet used by any template
isdg.enable_rendervar(self._render_product_path_0, render_var, self._stage)
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
# activate template: var now used; strict enabled-check (third arg True)
# still fails since the enable was external, not template-owned
isdg.activate_node_template("BoundingBox3DReduction",0, [self._render_product_path_0], {}, self._stage, True)
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(isdg.is_rendervar_used(self._render_product_path_0, render_var))
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, True, self._stage))
# deactivation must not clear the externally set enable
isdg.deactivate_node_template("BoundingBox3DReduction",0, [self._render_product_path_0], self._stage, True)
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
# explicit disable returns to the initial state
isdg.disable_rendervar(self._render_product_path_0, render_var, self._stage)
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
async def test_rendervar_multiple_activation(self):
# Scenario: two display templates share the same upstream
# "BoundingBox3DReduction" dependency; the shared render var must stay
# enabled/used while at least one consumer is active and be released only
# when the last consumer is deactivated.
isdg = SyntheticData.Get()
render_var = "BoundingBox3DSD"
# register a duplicate of the display template on first use (idempotent)
if not isdg.is_node_template_registered("BoundingBox3DDisplayPostDuplicate"):
isdg.register_node_template(
SyntheticData.NodeTemplate(
SyntheticDataStage.POST_RENDER,
"omni.syntheticdata.SdPostRenderVarDisplayTexture",
[
SyntheticData.NodeConnectionTemplate("LdrColorSD"),
SyntheticData.NodeConnectionTemplate("Camera3dPositionSD"),
SyntheticData.NodeConnectionTemplate("PostRenderProductCamera"),
SyntheticData.NodeConnectionTemplate("InstanceMappingPost"),
SyntheticData.NodeConnectionTemplate("BoundingBox3DReduction")
],
{
"inputs:renderVar": "LdrColorSD",
"inputs:renderVarDisplay": "BoundingBox3DSDDisplay",
"inputs:mode": "semanticBoundingBox3dMode",
"inputs:parameters": [0.0, 5.0, 0.027, 0.27]
}
), # node template default attribute values (when differs from the default value specified in the .ogn)
template_name="BoundingBox3DDisplayPostDuplicate" # node template name
)
# precondition: render var neither enabled nor used
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, False, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
# first consumer: activates the reduction dependency implicitly
# (non-strict check True, strict check False)
isdg.activate_node_template("BoundingBox3DDisplayPost",0, [self._render_product_path_0], {}, self._stage, True)
assert(not isdg.is_node_template_activated("BoundingBox3DReduction",self._render_product_path_0,True))
assert(isdg.is_node_template_activated("BoundingBox3DReduction",self._render_product_path_0,False))
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, True, self._stage))
assert(isdg.is_rendervar_used(self._render_product_path_0, render_var))
# second consumer activated, first deactivated: the shared dependency and
# render var must remain alive
isdg.activate_node_template("BoundingBox3DDisplayPostDuplicate",0, [self._render_product_path_0], {}, self._stage, True)
isdg.deactivate_node_template("BoundingBox3DDisplayPost",0, [self._render_product_path_0], self._stage, True)
assert(isdg.is_rendervar_enabled(self._render_product_path_0, render_var, True, self._stage))
assert(isdg.is_rendervar_used(self._render_product_path_0, render_var))
assert(not isdg.is_node_template_activated("BoundingBox3DReduction",self._render_product_path_0,True))
assert(isdg.is_node_template_activated("BoundingBox3DReduction",self._render_product_path_0,False))
# last consumer deactivated: dependency and render var fully released
isdg.deactivate_node_template("BoundingBox3DDisplayPostDuplicate",0, [self._render_product_path_0], self._stage, True)
assert(not isdg.is_node_template_activated("BoundingBox3DReduction",self._render_product_path_0,False))
assert(not isdg.is_rendervar_enabled(self._render_product_path_0, render_var, True, self._stage))
assert(not isdg.is_rendervar_used(self._render_product_path_0, render_var))
| 12,821 | Python | 63.757575 | 128 | 0.675922 |
eliabntt/GRADE-RR/isaac_internals/kit/extscore/omni.syntheticdata/omni/syntheticdata/tests/data/golden/view_np_image.py | import os
import sys
import matplotlib.pyplot as plt
import numpy as np
# Utility script: display the "array" entry of a .npz/.npy file given as the
# first command-line argument.
image = np.load(sys.argv[1])["array"]
# print the array shape for quick sanity checking
print(image.shape)
# optional (disabled) re-save and min/max normalization steps
# np.savez_compressed(f"{os.path.splitext(sys.argv[1])[0]}.npz", array=image)
# image = (image - image.min()) / image.ptp()
plt.imshow(image)
plt.show()
| 283 | Python | 22.666665 | 77 | 0.70318 |
zhehuazhou/ai-cps-robotics-benchmark/Evaluation/manipulator_eval.py | from eval_model.skrl_oige_model import skrl_oige_model
from eval_monitor.stl_dense_offline import stl_dense_offline_monitor
from eval_optimizer.optimizer import Optimizer
import os
if __name__ == "__main__":
# Entry point: falsification-style evaluation of a trained manipulation
# policy (model under test = DRL agent + OIGE simulation environment)
# against an STL specification, driven by an optimizer.
# Config inputs
agent_type = "PPO" # TRPO, PPO
omniisaacgymenvs_path = os.path.realpath(
os.path.join(os.path.realpath(__file__), "../../Gym_Envs")
)
# checkpoint path is derived from the task folder and agent type
agent_path = (
omniisaacgymenvs_path
+ "/Final_Policy/BallBalancing/BallBalancing_skrl_"
+ agent_type
+ "/checkpoints/best_agent.pt"
)
# Task choice: PointReaching, PegInHole, DoorOpen,
# BallBalancing, BallPushing, BallCatching
# CubeStacking, ClothPlacing
task_name = "FrankaBallBalancing"
simulation_max_steps = 300
num_envs = 1
# optimization strategies to run (only "random" here)
opt_types = ["random"]
global_budget = 1
local_budget = 100
# Load model under test (drl agent + oige env)
is_action_noise = True
test_model = skrl_oige_model(
agent_path=agent_path,
agent_type=agent_type,
task_name=task_name,
num_envs=num_envs,
timesteps=simulation_max_steps,
is_action_noise= is_action_noise
)
for opt_type in opt_types:
# Load STL monitor based on task
monitor = stl_dense_offline_monitor(task_name=task_name, agent_type=agent_type)
# global search
for i in range(global_budget):
# print("Global trial: " + str(i))
# Create optimizer
optimizer = Optimizer(
task_name,
test_model,
monitor,
opt_type=opt_type,
budget_size=local_budget,
)
# local search
results = optimizer.optimize()
print(results)
# close simulation environment
test_model.close_env()
| 1,848 | Python | 26.191176 | 87 | 0.593074 |
zhehuazhou/ai-cps-robotics-benchmark/Evaluation/eval_monitor/stl_dense_offline.py | from rtamt import STLDenseTimeSpecification
from typing import Optional
import sys
class stl_dense_offline_monitor(object):
    """STL dense-time offline monitor based on rtamt.

    Builds the Signal Temporal Logic (STL) specification corresponding to a
    given manipulation task and evaluates the robustness of recorded traces
    against it.

    task_name: the name of the task (default: "FrankaBallPushing")
    agent_type: type of DRL agent (PPO, DDPG, TRPO); stored for reference only
    oige_path: path to the OIGE environment (currently unused here)
    """

    def __init__(
        self,
        task_name: Optional[str] = None,
        agent_type: Optional[str] = None,
        oige_path: Optional[str] = None,
    ):
        if task_name is not None:
            self.task_name = task_name
        else:
            self.task_name = "FrankaBallPushing"

        self.agent_type = agent_type

        # build and parse the task-specific specification immediately
        self.generate_spec()

    # generate specification based on task name
    def generate_spec(self):
        """Create and parse the STL specification for ``self.task_name``.

        Raises:
            ValueError: if the task name is unknown.
            SystemExit: if the specification fails to parse.
        """
        # BUGFIX: local import so the module object is in scope for the
        # ``except rtamt.STLParseException`` clause below — the file only
        # imports ``STLDenseTimeSpecification``, so referencing ``rtamt``
        # raised a NameError whenever a parse error actually occurred.
        import rtamt

        # Initialization
        self.spec = STLDenseTimeSpecification()
        self.spec.name = "STL Dense-time Offline Monitor"

        ###############################################
        # Specification according to task
        # BUGFIX: string comparisons use ``==`` — the original used ``is``,
        # which relies on CPython string interning and is not guaranteed.

        # Ball Pushing
        if self.task_name == "FrankaBallPushing":
            self.spec.declare_var("distance_ball_hole", "float")
            self.spec.spec = "eventually[1:299](distance_ball_hole <= 0.3) "

        # Ball Balancing
        elif self.task_name == "FrankaBallBalancing":
            self.spec.declare_var("distance_ball_tool", "float")
            self.spec.spec = "always[50:200]( distance_ball_tool <= 0.25)"

        # Ball Catching
        elif self.task_name == "FrankaBallCatching":
            self.spec.declare_var("distance_ball_tool", "float")
            self.spec.spec = "always[50:299]( distance_ball_tool <= 0.1)"

        # Cube Stacking
        elif self.task_name == "FrankaCubeStacking":
            self.spec.declare_var("distance_cube", "float")
            self.spec.declare_var("z_cube_distance", "float")
            self.spec.spec = (
                "eventually[1:299]((distance_cube<= 0.024) and (z_cube_distance>0) )"
            )

        # Door Open
        elif self.task_name == "FrankaDoorOpen":
            self.spec.declare_var("yaw_door", "float")
            self.spec.spec = "eventually[1:299]( yaw_door >= 20)"

        # Peg In Hole
        elif self.task_name == "FrankaPegInHole":
            self.spec.declare_var("distance_tool_hole", "float")
            self.spec.spec = "always[250:299]( distance_tool_hole <= 0.1)"

        # Point Reaching
        elif self.task_name == "FrankaPointReaching":
            self.spec.declare_var("distance_finger_target", "float")
            self.spec.spec = "always[50:299]( distance_finger_target <= 0.12)"  # fixed

        # Cloth Placing
        elif self.task_name == "FrankaClothPlacing":
            self.spec.declare_var("distance_cloth_target", "float")
            self.spec.declare_var("cloth_height", "float")
            self.spec.spec = "eventually[1:299]( (distance_cloth_target <= 0.25))"  # and (cloth_height > 0.1) )"

        else:
            raise ValueError("Task name unknown for defining the specification")

        ################################################
        # Load specification
        try:
            self.spec.parse()
        except rtamt.STLParseException as err:
            print("STL Parse Exception: {}".format(err))
            sys.exit()

    # Compute the robustness given trace
    def compute_robustness(self, trace):
        """Evaluate the STL robustness of ``trace``.

        ``trace`` is either an indexed [time, value] list, or — for the
        multi-signal tasks (CubeStacking, ClothPlacing) — a dict of such
        lists keyed by signal name (as produced by the model's merge_trace).
        Returns the robustness value(s) computed by rtamt.
        """
        if self.task_name == "FrankaBallPushing":
            robustness = self.spec.evaluate(["distance_ball_hole", trace])
        elif self.task_name == "FrankaBallBalancing":
            robustness = self.spec.evaluate(["distance_ball_tool", trace])
        elif self.task_name == "FrankaBallCatching":
            robustness = self.spec.evaluate(["distance_ball_tool", trace])
        elif self.task_name == "FrankaCubeStacking":
            distance_cube = trace["distance_cube"]
            z_cube_distance = trace["z_cube_distance"]
            robustness = self.spec.evaluate(
                ["distance_cube", distance_cube], ["z_cube_distance", z_cube_distance]
            )
        elif self.task_name == "FrankaDoorOpen":
            robustness = self.spec.evaluate(["yaw_door", trace])
        elif self.task_name == "FrankaPegInHole":
            robustness = self.spec.evaluate(["distance_tool_hole", trace])
        elif self.task_name == "FrankaPointReaching":
            robustness = self.spec.evaluate(["distance_finger_target", trace])
        elif self.task_name == "FrankaClothPlacing":
            distance_cloth_target = trace["distance_cloth_target"]
            cloth_height = trace["cloth_height"]
            # cloth_height is extracted but not currently part of the spec
            robustness = self.spec.evaluate(
                ["distance_cloth_target", distance_cloth_target]  # , ["cloth_height", cloth_height]
            )
        else:
            raise ValueError("Task name unknown for defining the specification")

        return robustness
| 5,263 | Python | 30.710843 | 112 | 0.580847 |
zhehuazhou/ai-cps-robotics-benchmark/Evaluation/eval_model/skrl_oige_model.py | import os
import torch
from typing import Optional
from .load_oige import load_oige_test_env
from .agent.PPO_agent import create_skrl_ppo_agent
from .agent.TRPO_agent import create_skrl_trpo_agent
from skrl.envs.torch import wrap_env
class skrl_oige_model(object):
    """Testing environment model based on SKRL and Omniverse Isaac Gym Environments (OIGE)

    agent_path: the path to the agent parameters (checkpoint)
    oige_path: path to the OIGE environment
    agent_type: type of DRL agent (PPO, DDPG, TRPO)
    task_name: the name of the task
    timesteps: maximum number of simulation steps per episode
    num_envs: the number of parallel running environments (only 1 supported)
    headless: run the simulation without rendering
    is_action_noise: inject noise into the agent's actions

    BUGFIX: all string comparisons in this class use ``==`` instead of the
    original ``is``, which relied on CPython string interning and is not a
    reliable equality test.
    """

    def __init__(
        self,
        agent_path: str,
        oige_path: Optional[str] = None,
        agent_type: Optional[str] = None,
        task_name: Optional[str] = None,
        timesteps: Optional[int] = 10000,
        num_envs: Optional[int] = 1,
        headless: Optional[bool] = False,
        is_action_noise: Optional[bool] = False,
    ):
        # setup: default to the Gym_Envs folder next to this package
        if oige_path is not None:
            self.oige_path = oige_path
        else:
            self.oige_path = os.path.realpath(
                os.path.join(os.path.realpath(__file__), "../../../Gym_Envs")
            )

        if agent_type is not None:
            self.agent_type = agent_type
        else:
            self.agent_type = "PPO"

        if task_name is not None:
            self.task_name = task_name
        else:
            self.task_name = "FrankaBallPushing"

        self.agent_path = agent_path
        self.timesteps = timesteps
        self.headless = headless

        # Load OIGE env with skrl wrapper
        self.num_envs = num_envs  # for testing, we use only 1 env for now
        env = load_oige_test_env(
            task_name=self.task_name,
            omniisaacgymenvs_path=self.oige_path,
            num_envs=self.num_envs,
        )
        self.env = wrap_env(env)
        self.env._env.set_as_test()

        # if action noise is required
        if is_action_noise:
            self.env._env.set_action_noise()

        # Load agent
        if self.agent_type == "PPO":
            self.agent = create_skrl_ppo_agent(self.env, self.agent_path)
        elif self.agent_type == "TRPO":
            self.agent = create_skrl_trpo_agent(self.env, self.agent_path)
        else:
            raise ValueError("Agent type unknown.")

        # Initialize agent in evaluation mode
        self.agent.init()
        if self.num_envs == 1:
            self.agent.set_running_mode("eval")
        else:
            raise ValueError("Currently only one environment (agent) is supported")

    # close env
    def close_env(self):
        """Shut down the simulation environment."""
        self.env.close()

    # Compute the trace w.r.t a given initial condition
    def compute_trace(self, initial_value):
        """Run one episode from ``initial_value`` and return the stacked
        observation trace (one row per simulation step)."""
        # set initial configuration
        self.env._env.set_initial_test_value(initial_value)

        # reset env
        states, infos = self.env.reset()

        # initialize trace
        trace = states

        # simulation loop
        for timestep in range(self.timesteps):
            # compute actions
            with torch.no_grad():
                actions = self.agent.act(
                    states, timestep=timestep, timesteps=self.timesteps
                )[0]

            # step the environments
            next_states, rewards, terminated, truncated, infos = self.env.step(actions)

            # render scene
            if not self.headless:
                self.env.render()

            # record trace
            states.copy_(next_states)
            trace = torch.vstack([trace, states])

            # terminate simulation
            with torch.no_grad():
                if terminated.any() or truncated.any():
                    break

        return trace

    # Merge trace based on the task type
    def merge_trace(self, trace):
        """Extract the task-relevant signal(s) from the raw observation trace
        and return it as an indexed [time, value] list (or a dict of such
        lists for multi-signal tasks), ready for the STL monitor.

        NOTE(review): the observation column slices below are assumed to
        match each task's observation layout — verify against the task
        definitions in Gym_Envs.
        """
        if self.task_name == "FrankaBallPushing":
            # Ball-hole distance (3D)
            ball_hole_distance = trace[:, 24:27].detach().cpu()
            ball_hole_distance = torch.norm(ball_hole_distance, p=2, dim=-1)
            ball_Z_pos = trace[:, 29].detach().cpu()

            # create index
            trace_length = list(ball_hole_distance.size())[0]
            times = torch.linspace(1, trace_length, steps=trace_length)

            # convert to list for computing robustness
            indexed_trace = torch.vstack((times, ball_hole_distance))
            indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist()

        elif self.task_name == "FrankaBallBalancing":
            # Ball-tool distance (2D)
            ball_tool_distance = trace[:, 21:23].detach().cpu()
            ball_tool_distance = torch.norm(ball_tool_distance, p=2, dim=-1)

            # create index
            trace_length = list(ball_tool_distance.size())[0]
            times = torch.linspace(1, trace_length, steps=trace_length)

            # convert to list for computing robustness
            indexed_trace = torch.vstack((times, ball_tool_distance))
            indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist()

        elif self.task_name == "FrankaBallCatching":
            # Ball-tool distance (2D)
            ball_tool_distance = trace[:, 21:23].detach().cpu()
            ball_tool_distance = torch.norm(ball_tool_distance, p=2, dim=-1)

            # create index
            trace_length = list(ball_tool_distance.size())[0]
            times = torch.linspace(1, trace_length, steps=trace_length)

            # convert to list for computing robustness
            indexed_trace = torch.vstack((times, ball_tool_distance))
            indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist()

        elif self.task_name == "FrankaCubeStacking":
            # Cube planar distance
            cube_distance = trace[:, 25:27].detach().cpu()
            cube_distance = torch.norm(cube_distance, p=2, dim=-1)

            # Cube height difference
            cube_height_distance = trace[:, 27].detach().cpu()

            # create index
            trace_length = list(cube_distance.size())[0]
            times = torch.linspace(1, trace_length, steps=trace_length)

            # convert to list for computing robustness
            indexed_cube_distance = torch.vstack((times, cube_distance))
            indexed_cube_distance = torch.transpose(
                indexed_cube_distance, 0, 1
            ).tolist()

            indexed_cube_height_distance = torch.vstack((times, cube_height_distance))
            indexed_cube_height_distance = torch.transpose(
                indexed_cube_height_distance, 0, 1
            ).tolist()

            indexed_trace = {
                "distance_cube": indexed_cube_distance,
                "z_cube_distance": indexed_cube_height_distance,
            }

        elif self.task_name == "FrankaDoorOpen":
            # Door handle yaw, recovered from its quaternion (degrees)
            handle_rot = trace[:, 21:25].detach().cpu()
            handle_yaw = torch.atan2(
                2.0
                * (
                    handle_rot[:, 0] * handle_rot[:, 3]
                    + handle_rot[:, 1] * handle_rot[:, 2]
                ),
                1.0
                - 2.0
                * (
                    handle_rot[:, 2] * handle_rot[:, 2]
                    + handle_rot[:, 3] * handle_rot[:, 3]
                ),
            )
            handle_yaw = torch.rad2deg(handle_yaw)

            # create index
            trace_length = list(handle_yaw.size())[0]
            times = torch.linspace(1, trace_length, steps=trace_length)

            # convert to list for computing robustness
            indexed_trace = torch.vstack((times, handle_yaw))
            indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist()

        elif self.task_name == "FrankaPegInHole":
            # Tool-hole distance (2D)
            tool_hole_distance = trace[:, 25:27].detach().cpu()
            tool_hole_distance = torch.norm(tool_hole_distance, p=2, dim=-1)

            # create index
            trace_length = list(tool_hole_distance.size())[0]
            times = torch.linspace(1, trace_length, steps=trace_length)

            # convert to list for computing robustness
            indexed_trace = torch.vstack((times, tool_hole_distance))
            indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist()

        elif self.task_name == "FrankaPointReaching":
            # Finger-target distance (3D)
            finger_target_distance = trace[:, 24:27].detach().cpu()
            finger_target_distance = torch.norm(finger_target_distance, p=2, dim=-1)

            # create index
            trace_length = list(finger_target_distance.size())[0]
            times = torch.linspace(1, trace_length, steps=trace_length)

            # convert to list for computing robustness
            indexed_trace = torch.vstack((times, finger_target_distance))
            indexed_trace = torch.transpose(indexed_trace, 0, 1).tolist()

        elif self.task_name == "FrankaClothPlacing":
            # Cloth-target distance (3D)
            cloth_target_distance = trace[:, 21:24].detach().cpu()
            cloth_target_distance = torch.norm(cloth_target_distance, p=2, dim=-1)

            # Cloth height
            cloth_height = trace[:, 20].detach().cpu()

            # create index
            trace_length = list(cloth_target_distance.size())[0]
            times = torch.linspace(1, trace_length, steps=trace_length)

            # convert to list for computing robustness
            indexed_distance_cloth_target = torch.vstack((times, cloth_target_distance))
            indexed_distance_cloth_target = torch.transpose(
                indexed_distance_cloth_target, 0, 1
            ).tolist()

            indexed_cloth_height = torch.vstack((times, cloth_height))
            indexed_cloth_height = torch.transpose(
                indexed_cloth_height, 0, 1
            ).tolist()

            indexed_trace = {
                "distance_cloth_target": indexed_distance_cloth_target,
                "cloth_height": indexed_cloth_height,
            }

        else:
            raise ValueError("Task name unknown for merging the trace")

        return indexed_trace
| 10,291 | Python | 33.653199 | 90 | 0.563502 |
zhehuazhou/ai-cps-robotics-benchmark/Evaluation/eval_model/load_oige.py | """
This is a copy from SKRL's implementation of loading oige environment,
with modifications for generating testing oige environment
"""
import sys
import os
from contextlib import contextmanager
def _omegaconf_to_dict(config) -> dict:
    """Recursively convert an OmegaConf config into a plain dict.

    :param config: The OmegaConf config
    :type config: OmegaConf.Config

    :return: The config as dict
    :rtype: dict
    """
    # imported lazily so this module can be imported without omegaconf
    from omegaconf import DictConfig

    converted = {}
    for key, value in config.items():
        if isinstance(value, DictConfig):
            converted[key] = _omegaconf_to_dict(value)
        else:
            converted[key] = value
    return converted
def _print_cfg(d, indent=0) -> None:
    """Pretty-print a (possibly nested) configuration dictionary.

    :param d: The dictionary to print
    :type d: dict
    :param indent: The indentation level (default: 0)
    :type indent: int, optional
    """
    for key, value in d.items():
        # recurse into nested dicts with one extra indentation level
        if isinstance(value, dict):
            _print_cfg(value, indent + 1)
            continue
        print(' | ' * indent + f" |-- {key}: {value}")
def load_oige_test_env(task_name: str = "",
omniisaacgymenvs_path: str = "",
num_envs: int = 1,
show_cfg: bool = True,
timeout: int = 30):
"""Load an Omniverse Isaac Gym environment, this is a slight modification of SKRL's implementation
:param task_name: The name of the task (default: "").
If not specified, the task name is taken from the command line argument (``task=TASK_NAME``).
Command line argument has priority over function parameter if both are specified
:type task_name: str, optional
:param omniisaacgymenvs_path: The path to the ``omniisaacgymenvs`` directory (default: "").
If empty, the path will obtained from omniisaacgymenvs package metadata
:type omniisaacgymenvs_path: str, optional
:param show_cfg: Whether to print the configuration (default: True)
:type show_cfg: bool, optional
:param timeout: Seconds to wait for data when queue is empty in multi-threaded environment (default: 30)
:type timeout: int, optional
:raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments
:raises RuntimeError: The omniisaacgymenvs package is not installed or the path is wrong
:return: Omniverse Isaac Gym environment
:rtype: omni.isaac.gym.vec_env.vec_env_base.VecEnvBase or omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT
"""
# NOTE(review): ``timeout`` is not used in this single-threaded variant, and
# ``TaskStopException`` is imported but unused — presumably kept for parity
# with SKRL's original multi-threaded implementation.
import torch
from hydra.types import RunMode
from hydra._internal.hydra import Hydra
from hydra._internal.utils import create_automatic_config_search_path, get_args_parser
from omegaconf import OmegaConf
from omni.isaac.gym.vec_env import VecEnvBase, TaskStopException
import omniisaacgymenvs
# hydra reads the task/num_envs overrides from sys.argv, so inject them here
# (this mutates global state for the rest of the process)
sys.argv.append("task={}".format(task_name))
sys.argv.append("num_envs={}".format(num_envs))
# get omniisaacgymenvs path from omniisaacgymenvs package metadata
if omniisaacgymenvs_path == "":
if not hasattr(omniisaacgymenvs, "__path__"):
raise RuntimeError("omniisaacgymenvs package is not installed")
omniisaacgymenvs_path = list(omniisaacgymenvs.__path__)[0]
config_path = os.path.join(omniisaacgymenvs_path, "cfg")
# set omegaconf resolvers
OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower())
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
OmegaConf.register_new_resolver('if', lambda condition, a, b: a if condition else b)
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg)
# get hydra config without use @hydra.main
config_file = "config"
args = get_args_parser().parse_args()
search_path = create_automatic_config_search_path(config_file, None, config_path)
hydra_object = Hydra.create_main_hydra2(task_name='load_omniisaacgymenv', config_search_path=search_path)
config = hydra_object.compose_config(config_file, args.overrides, run_mode=RunMode.RUN)
# flatten the hydra config into the plain-dict layout OIGE's task init expects
cfg = {}
cfg["task"] = _omegaconf_to_dict(config.task)
cfg["task_name"] = config.task_name
cfg["experiment"] = config.experiment
cfg["num_envs"] = config.num_envs
cfg["seed"] = config.seed
cfg["torch_deterministic"] = config.torch_deterministic
cfg["max_iterations"] = config.max_iterations
cfg["physics_engine"] = config.physics_engine
cfg["pipeline"] = config.pipeline
cfg["sim_device"] = config.sim_device
cfg["device_id"] = config.device_id
cfg["rl_device"] = config.rl_device
cfg["num_threads"] = config.num_threads
cfg["solver_type"] = config.solver_type
cfg["test"] = config.test
cfg["checkpoint"] = config.checkpoint
cfg["headless"] = config.headless
# print config
if show_cfg:
print("\nOmniverse Isaac Gym environment ({})".format(config.task.name))
_print_cfg(cfg)
# internal classes
# wrapper adding test-specific hooks (set_as_test / action noise / initial
# test value) on top of the base vectorized env
class _OmniIsaacGymVecEnv(VecEnvBase):
def step(self, actions):
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone()
self._task.pre_physics_step(actions)
for _ in range(self._task.control_frequency_inv):
self._world.step(render=self._render)
self.sim_frame_count += 1
observations, rewards, dones, info = self._task.post_physics_step()
return {"obs": torch.clamp(observations, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()}, \
rewards.to(self._task.rl_device).clone(), dones.to(self._task.rl_device).clone(), info.copy()
def set_as_test(self):
self._task.set_as_test()
def set_action_noise(self):
self._task.set_action_noise()
def set_initial_test_value(self, value):
self._task.set_initial_test_value(value)
def reset(self):
self._task.reset()
actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device)
return self.step(actions)[0]
# load environment
sys.path.append(omniisaacgymenvs_path)
from utils.task_util import initialize_task
env = _OmniIsaacGymVecEnv(headless=config.headless)
task = initialize_task(cfg, env, init_sim=True)
return env
zhehuazhou/ai-cps-robotics-benchmark/Evaluation/eval_model/agent/TRPO_agent.py | """
Create PPO agent based on SKRL implementation
"""
import torch.nn as nn
import torch
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
# Define the models (stochastic and deterministic models) for the agent using mixins.
# - Policy: takes as input the environment's observation/state and returns an action
# - Value: takes the state as input and provides a value to guide the policy
class Policy_2_Layers(GaussianMixin, Model):
    """Gaussian policy with two hidden layers (512 -> 256 -> actions)."""

    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)

        # Attribute names ("net", "log_std_parameter") are kept unchanged so
        # checkpoint state-dict keys still match.
        layers = [
            nn.Linear(self.num_observations, 512),
            nn.ELU(),
            nn.Linear(512, 256),
            nn.ELU(),
            nn.Linear(256, self.num_actions),
        ]
        self.net = nn.Sequential(*layers)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        # mean from the MLP, state-independent log-std, no extra outputs
        mean_actions = self.net(inputs["states"])
        return mean_actions, self.log_std_parameter, {}
class Policy_3_Layers(GaussianMixin, Model):
    """Gaussian policy with three hidden layers (512 -> 256 -> 128 -> actions)."""

    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)

        # Attribute names ("net", "log_std_parameter") are kept unchanged so
        # checkpoint state-dict keys still match.
        layers = [
            nn.Linear(self.num_observations, 512),
            nn.ELU(),
            nn.Linear(512, 256),
            nn.ELU(),
            nn.Linear(256, 128),
            nn.ELU(),
            nn.Linear(128, self.num_actions),
        ]
        self.net = nn.Sequential(*layers)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        # mean from the MLP, state-independent log-std, no extra outputs
        mean_actions = self.net(inputs["states"])
        return mean_actions, self.log_std_parameter, {}
class Value_2_Layers(DeterministicMixin, Model):
    """State-value network with two hidden layers (512 -> 256 -> 1)."""

    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        # Attribute name "net" is kept unchanged for checkpoint compatibility.
        layers = [
            nn.Linear(self.num_observations, 512),
            nn.ELU(),
            nn.Linear(512, 256),
            nn.ELU(),
            nn.Linear(256, 1),
        ]
        self.net = nn.Sequential(*layers)

    def compute(self, inputs, role):
        # scalar value estimate, no extra outputs
        value = self.net(inputs["states"])
        return value, {}
class Value_3_Layers(DeterministicMixin, Model):
    """State-value network with three hidden layers (512 -> 256 -> 128 -> 1)."""

    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        # Attribute name "net" is kept unchanged for checkpoint compatibility.
        layers = [
            nn.Linear(self.num_observations, 512),
            nn.ELU(),
            nn.Linear(512, 256),
            nn.ELU(),
            nn.Linear(256, 128),
            nn.ELU(),
            nn.Linear(128, 1),
        ]
        self.net = nn.Sequential(*layers)

    def compute(self, inputs, role):
        # scalar value estimate, no extra outputs
        value = self.net(inputs["states"])
        return value, {}
# Create SKRL PPO agent
# Create SKRL TRPO agent
def create_skrl_trpo_agent(env, agent_path):
    """Build a TRPO agent matching ``env`` and load its checkpoint.

    A 2-hidden-layer architecture is tried first; if loading the checkpoint
    fails (architecture mismatch), the 3-hidden-layer variant is used instead.

    env: wrapped SKRL environment (provides observation/action spaces and device)
    agent_path: path to the checkpoint file
    Returns the loaded TRPO agent.
    """
    device = env.device

    models_trpo_2_layer = {}
    models_trpo_2_layer["policy"] = Policy_2_Layers(env.observation_space, env.action_space, device)
    models_trpo_2_layer["value"] = Value_2_Layers(env.observation_space, env.action_space, device)

    models_trpo_3_layer = {}
    models_trpo_3_layer["policy"] = Policy_3_Layers(env.observation_space, env.action_space, device)
    models_trpo_3_layer["value"] = Value_3_Layers(env.observation_space, env.action_space, device)

    # Configs
    cfg_trpo = TRPO_DEFAULT_CONFIG.copy()
    cfg_trpo["state_preprocessor"] = RunningStandardScaler
    cfg_trpo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
    cfg_trpo["value_preprocessor"] = RunningStandardScaler
    cfg_trpo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
    # no log to TensorBoard and write checkpoints
    cfg_trpo["experiment"]["write_interval"] = 0
    cfg_trpo["experiment"]["checkpoint_interval"] = 0

    try:
        # Initialize and load agent with 2 layers
        agent = TRPO(models=models_trpo_2_layer,
                     memory=None,
                     cfg=cfg_trpo,
                     observation_space=env.observation_space,
                     action_space=env.action_space,
                     device=device)
        agent.load(agent_path)
    except Exception:
        # BUGFIX: the original used a bare ``except:``, which also swallows
        # KeyboardInterrupt/SystemExit. Catching Exception keeps the
        # architecture fallback (torch raises RuntimeError/KeyError on a
        # state-dict mismatch) while letting process-control exceptions
        # propagate.
        # Initialize and load agent with 3 layers
        agent = TRPO(models=models_trpo_3_layer,
                     memory=None,
                     cfg=cfg_trpo,
                     observation_space=env.observation_space,
                     action_space=env.action_space,
                     device=device)
        agent.load(agent_path)

    return agent
| 5,370 | Python | 40 | 100 | 0.581006 |
zhehuazhou/ai-cps-robotics-benchmark/Evaluation/eval_model/agent/PPO_agent.py | """
Create PPO agent based on SKRL implementation
"""
import torch.nn as nn
import torch
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
# Define the shared model (stochastic and deterministic models) for the agent using mixins.
# Define the shared model (stochastic and deterministic models) for the agent using mixins.
class Shared(GaussianMixin, DeterministicMixin, Model):
    """Shared actor-critic model: one common trunk with separate policy-mean
    and value heads, dispatched by the requested role."""

    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
        DeterministicMixin.__init__(self, clip_actions)

        # Attribute names must stay unchanged for checkpoint compatibility.
        trunk = [
            nn.Linear(self.num_observations, 512),
            nn.ELU(),
            nn.Linear(512, 256),
            nn.ELU(),
            nn.Linear(256, 128),
            nn.ELU(),
        ]
        self.net = nn.Sequential(*trunk)
        self.mean_layer = nn.Linear(128, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
        self.value_layer = nn.Linear(128, 1)

    def act(self, inputs, role):
        # route to the mixin that implements the requested role
        if role == "policy":
            return GaussianMixin.act(self, inputs, role)
        if role == "value":
            return DeterministicMixin.act(self, inputs, role)

    def compute(self, inputs, role):
        # shared trunk feeds either the policy-mean head or the value head
        if role == "policy":
            return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {}
        if role == "value":
            return self.value_layer(self.net(inputs["states"])), {}
# Create SKRL PPO agent
# Create SKRL PPO agent
def create_skrl_ppo_agent(env, agent_path):
    """Build a PPO agent with a shared actor-critic model and load its
    checkpoint.

    env: wrapped SKRL environment (provides observation/action spaces and device)
    agent_path: path to the checkpoint file
    Returns the loaded PPO agent.
    """
    device = env.device

    # policy and value share the same model instance
    shared_model = Shared(env.observation_space, env.action_space, device)
    models_ppo = {"policy": shared_model, "value": shared_model}

    # Configs
    cfg_ppo = PPO_DEFAULT_CONFIG.copy()
    cfg_ppo["state_preprocessor"] = RunningStandardScaler
    cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
    cfg_ppo["value_preprocessor"] = RunningStandardScaler
    cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
    # disable TensorBoard logging and checkpoint writing
    cfg_ppo["experiment"]["write_interval"] = 0
    cfg_ppo["experiment"]["checkpoint_interval"] = 0

    # Initialize and load agent
    agent = PPO(
        models=models_ppo,
        memory=None,
        cfg=cfg_ppo,
        observation_space=env.observation_space,
        action_space=env.action_space,
        device=device,
    )
    agent.load(agent_path)

    return agent
| 2,889 | Python | 36.051282 | 101 | 0.622361 |
zhehuazhou/ai-cps-robotics-benchmark/Evaluation/eval_optimizer/optimizer.py | from typing import Optional
import sys
import numpy as np
import torch
import time
from scipy.optimize import minimize
from scipy.optimize import dual_annealing
class Optimizer(object):
"""Optimizer class for testing
task_name: the task name of environment
test_model: the model under test
monitor: the monitor for the STL specification
"""
def __init__(
self,
task_name,
test_model,
monitor,
opt_type: Optional[str] = "random",
budget_size: Optional[int] = 1000,
):
"""Store the search configuration and reset falsification bookkeeping.

task_name: the task name of the environment under test
test_model: the model under test (DRL agent + simulation env)
monitor: the STL monitor used to score traces
opt_type: optimization strategy identifier (default "random")
budget_size: number of local-search evaluations allowed
"""
self.task_name = task_name
self.test_model = test_model
self.monitor = monitor
self.opt_type = opt_type
self.budget_size = budget_size
# falsification bookkeeping: success flag, wall-clock start, and counters
self.fal_succ = False
self.start_time = time.time()
self.fal_time = 0
self.fal_sim = 0
# best (lowest) robustness seen so far; initialized to a large sentinel
self.worst_rob = 1000
# generate initial values based on the task type
def generate_initial(self):
if self.task_name is "FrankaBallPushing":
# ball inside an area x:[-0.1,0.1], y:[-0.1,0.1]
value_1 = np.random.rand(1) * (0.1 + 0.1) - 0.1
value_2 = np.random.rand(1) * (0.1 + 0.1) - 0.1
initial_value = np.hstack((value_1, value_2))
elif self.task_name is "FrankaBallBalancing":
# ball inside an area x:[-0.15,0.15], y:[-0.15,0.15]
value_1 = np.random.rand(1) * (0.15 + 0.15) - 0.15
value_2 = np.random.rand(1) * (0.15 + 0.15) - 0.15
initial_value = np.hstack((value_1, value_2))
elif self.task_name is "FrankaBallCatching":
# ball inside an area x:[-0.1,0.1], y:[-0.1,0.1]
# ball velociry: vx: [1.0,1.5], vy: [0.0,0.2]
value_1 = np.random.rand(1) * (0.05 + 0.05) - 0.05
value_2 = np.random.rand(1) * (0.05 + 0.05) - 0.05
value_3 = np.random.rand(1) * (1.0 - 1.0) + 1.0
value_4 = np.random.rand(1) * (0.0 + 0.0) + 0.0
initial_value = np.hstack((value_1, value_2, value_3, value_4))
elif self.task_name is "FrankaCubeStacking":
# target cube inside an area x:[-0.2,0.2], y:[-0.2,0.2]
value_1 = np.random.rand(1) * (0.2 + 0.2) - 0.2
value_2 = np.random.rand(1) * (0.2 + 0.2) - 0.2
initial_value = np.hstack((value_1, value_2))
elif self.task_name is "FrankaDoorOpen":
# target inside an area x:[-0.1,0.1], y:[-0.4,0.4]
value_1 = np.random.rand(1) * (0.005 + 0.005) - 0.005
value_2 = np.random.rand(1) * (0.025 + 0.025) - 0.025
initial_value = np.hstack((value_1, value_2))
elif self.task_name is "FrankaPegInHole":
# target inside an area x:[-0.2,0.2], y:[-0.2,0.2]
value_1 = np.random.rand(1) * (0.1 + 0.1) - 0.1
value_2 = np.random.rand(1) * (0.1 + 0.1) - 0.1
initial_value = np.hstack((value_1, value_2))
elif self.task_name is "FrankaPointReaching":
# target inside an area x:[-0.2,0.2], y:[-0.4,0.4], z:[-0.2,0.2]
value_1 = np.random.rand(1) * (0.2 + 0.2) - 0.2
value_2 = np.random.rand(1) * (0.4 + 0.4) - 0.4
value_3 = np.random.rand(1) * (0.2 + 0.2) - 0.2
initial_value = np.hstack((value_1, value_2, value_3))
elif self.task_name is "FrankaClothPlacing":
# target inside an area x:[-0.1,0.2], y:[-0.35,0.35]
value_1 = np.random.rand(1) * (0.2 + 0.1) - 0.1
value_2 = np.random.rand(1) * (0.35 + 0.35) - 0.35
initial_value = np.hstack((value_1, value_2))
else:
raise ValueError("Task name unknown for generating the initial values")
return initial_value
# Generate one function (input: initial values, output: robustness) for testing algorithms
def robustness_function(self, initial_value):
# print("Initial Value:", initial_value)
# Get trace
trace = self.test_model.compute_trace(initial_value)
indexed_trace = self.test_model.merge_trace(trace)
# compute robustness
rob_sequence = self.monitor.compute_robustness(indexed_trace)
rob_sequence = np.array(rob_sequence)
# RTAMT is for monitoring, so for eventually, the robustness computed from the current timepoint to the end
# workaround to compute the maximum
if (
self.task_name is "FrankaBallPushing"
or self.task_name is "FrankaCubeStacking"
or self.task_name is "FrankaDoorOpen"
or self.task_name is "FrankaPegInHole"
or self.task_name is "FrankaClothPlacing"
):
min_rob = np.max(rob_sequence[:, 1])
else:
min_rob = np.min(rob_sequence[:, 1])
# print("Min Robustness:", min_rob)
if min_rob < self.worst_rob:
self.worst_rob = min_rob
if min_rob < 0 and self.fal_succ == False:
self.fal_succ = True
self.fal_time = time.time() - self.start_time
elif self.fal_succ == False:
self.fal_sim += 1
return min_rob, rob_sequence, indexed_trace
# optimization based on the optimizer type
def optimize(self):
if self.opt_type is "random":
results = self.optimize_random()
return results
else:
raise ValueError("Optimizer type undefined!")
# Random optimization
def optimize_random(self):
success_count = 0 # num success trail/ num total trail
dangerous_rate = list() # num dangerous steps/ num total trail w.r.t each trail
completion_time = list() # the step that indicates the task is completed
# Random optimizer
for i in range(self.budget_size):
print("trail ",i)
# random initial value
initial_value = self.generate_initial()
# compute robustness and its sequence
min_rob, rob_sequence, indexed_trace = self.robustness_function(initial_value)
# compute dangerous_rate, completion_time w.r.t tasks
if self.task_name == "FrankaCubeStacking":
# info extraction
cube_dist = np.array(indexed_trace["distance_cube"])[:,1]
cube_z_dist = np.array(indexed_trace["z_cube_distance"])[:,1]
# dangerous rate:
cube_too_far = cube_dist >= 0.35
cube_fall_ground = cube_z_dist < 0.02
dangerous_rate.append(np.sum(np.logical_or(cube_too_far, cube_fall_ground))/len(cube_dist))
# completation step
if_complete = (np.logical_and(cube_dist<=0.024, cube_z_dist>0))
complete_Step = np.where(if_complete == True)[0]
if len(complete_Step) > 0:
completion_time.append(complete_Step[0])
elif self.task_name == "FrankaDoorOpen":
handle_yaw = np.array(indexed_trace)[:,1]
# dangerous rate:
dangerous_rate.append(np.sum(handle_yaw<0.1)/len(handle_yaw))
# completation step
if_complete = (handle_yaw>=20)
complete_Step = np.where(if_complete == True)[0]
if len(complete_Step) > 0:
completion_time.append(complete_Step[0])
elif self.task_name == "FrankaPegInHole":
tool_hole_distance = np.array(indexed_trace)[:,1]
# dangerous rate:
dangerous_rate.append(np.sum(tool_hole_distance>0.37)/len(tool_hole_distance))
# completation step
if_complete = (tool_hole_distance<=0.1)
complete_Step = np.where(if_complete == True)[0]
if len(complete_Step) > 0:
completion_time.append(complete_Step[0])
elif self.task_name == "FrankaBallCatching":
ball_tool_distance = np.array(indexed_trace)[:,1]
# dangerous rate:
dangerous_rate.append(np.sum(ball_tool_distance>0.2)/len(ball_tool_distance))
# completation step
if_complete = (ball_tool_distance<=0.1)
complete_interval = np.zeros(len(if_complete)-5)
# spec satisified holds within a 3-step interval
for i in range(0, int(len(if_complete)-5)):
complete_interval[i] = np.all(if_complete[i:i+5])
complete_Step = np.where(complete_interval == True)[0]
if len(complete_Step) > 0:
completion_time.append(complete_Step[0])
elif self.task_name == "FrankaBallBalancing":
ball_tool_distance = np.array(indexed_trace)[:,1]
# dangerous rate:
dangerous_rate.append(np.sum(ball_tool_distance>0.2)/len(ball_tool_distance))
# completation step
if_complete = (ball_tool_distance<=0.1)
complete_interval = np.zeros(len(if_complete)-5)
# spec satisified holds within a 3-step interval
for i in range(0, int(len(if_complete)-5)):
complete_interval[i] = np.all(if_complete[i:i+5])
complete_Step = np.where(complete_interval == True)[0]
if len(complete_Step) > 0:
completion_time.append(complete_Step[0])
elif self.task_name == "FrankaBallPushing":
ball_hole_distance = np.array(indexed_trace)[:,1]
# dangerous rate:
dangerous_rate.append(np.sum(ball_hole_distance>0.5)/len(ball_hole_distance))
# completation step
if_complete = (ball_hole_distance<=0.3)
complete_interval = np.zeros(len(if_complete)-5)
# spec satisified holds within a 3-step interval
for i in range(0, int(len(if_complete)-5)):
complete_interval[i] = np.all(if_complete[i:i+5])
complete_Step = np.where(complete_interval == True)[0]
if len(complete_Step) > 0:
completion_time.append(complete_Step[0])
elif self.task_name == "FrankaPointReaching":
finger_target_distance = np.array(indexed_trace)[:,1]
# dangerous rate:
dangerous_rate.append(np.sum(finger_target_distance>=0.6)/len(finger_target_distance))
# completation step
if_complete = (finger_target_distance<=0.12)
complete_Step = np.where(if_complete == True)[0]
if len(complete_Step) > 0:
completion_time.append(complete_Step[0])
elif self.task_name == "FrankaClothPlacing":
# info extraction
cloth_target_dist = np.array(indexed_trace["distance_cloth_target"])[:,1]
cloth_z_pos = np.array(indexed_trace["cloth_height"])[:,1]
# dangerous rate:
cloth_too_far = cloth_target_dist >= 0.3
cloth_fall_ground = cloth_z_pos < 0.1
dangerous_rate.append(np.sum(np.logical_or(cloth_too_far, cloth_fall_ground))/len(cloth_target_dist))
# completation step
if_complete = cloth_target_dist<=0.25
complete_Step = np.where(if_complete == True)[0]
if len(complete_Step) > 0:
completion_time.append(complete_Step[0])
# print(indexed_trace)
else:
print("Invalid Task")
break
# perform evaluation:
# success rate
if min_rob > 0:
success_count += 1
# dangerous behavior: change the STL specification and use rob_sequence?
# completion time: check first satisfication in index_trace?
# if i == 0:
# break
if len(dangerous_rate) == 0:
dangerous_rate = 0
results = {"success_count": success_count/self.budget_size,
"dangerous_rate": np.mean(dangerous_rate),
"completion_time": np.mean(completion_time)}
return results | 12,314 | Python | 40.05 | 117 | 0.54231 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/skrl_train_SAC.py | import torch
import os
import torch.nn as nn
import torch.nn.functional as F
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.envs.torch import load_omniverse_isaacgym_env
from skrl.utils import set_seed
# set the seed for reproducibility
set_seed(42)
# SAC models built from skrl mixins:
# - Policy (stochastic actor): observation -> Gaussian action distribution
# - Critic: (state, action) -> scalar Q-value to guide the policy
class Policy(GaussianMixin, Model):
    """Gaussian actor: an ELU MLP producing one distribution mean per action."""

    def __init__(self, observation_space, action_space, device, clip_actions=True,
                 clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)

        # 512 -> 256 ELU trunk followed by an unbounded mean head.
        layers = []
        in_features = self.num_observations
        for width in (512, 256):
            layers += [nn.Linear(in_features, width), nn.ELU()]
            in_features = width
        layers.append(nn.Linear(in_features, self.num_actions))
        self.net = nn.Sequential(*layers)

        # State-independent log standard deviation, one entry per action.
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        """Return (action means, log-std, empty outputs dict)."""
        return self.net(inputs["states"]), self.log_std_parameter, {}
class Critic(DeterministicMixin, Model):
    """Q-function: maps a concatenated (state, action) pair to a scalar value."""

    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        in_features = self.num_observations + self.num_actions
        self.net = nn.Sequential(
            nn.Linear(in_features, 512), nn.ELU(),
            nn.Linear(512, 256), nn.ELU(),
            nn.Linear(256, 1),
        )

    def compute(self, inputs, role):
        """Concatenate state and action along dim 1; return (Q-value, empty dict)."""
        state_action = torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)
        return self.net(state_action), {}
# Load and wrap the Omniverse Isaac Gym environment
omniisaacgymenvs_path = os.path.realpath(os.path.join(os.path.realpath(__file__), ".."))
# NOTE(review): sibling trainers use task_name="FrankaBallCatching" — confirm "FrankaCatching" is intended.
env = load_omniverse_isaacgym_env(task_name="FrankaCatching", omniisaacgymenvs_path=omniisaacgymenvs_path)
env = wrap_env(env)

device = env.device

# Replay buffer sampled with replacement (off-policy SAC).
memory = RandomMemory(memory_size=128, num_envs=env.num_envs, device=device, replacement=True)

# Instantiate the agent's models (function approximators).
# SAC requires 5 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#spaces-and-models
models_sac = {}
models_sac["policy"] = Policy(env.observation_space, env.action_space, device)
models_sac["critic_1"] = Critic(env.observation_space, env.action_space, device)
models_sac["critic_2"] = Critic(env.observation_space, env.action_space, device)
models_sac["target_critic_1"] = Critic(env.observation_space, env.action_space, device)
models_sac["target_critic_2"] = Critic(env.observation_space, env.action_space, device)

# Initialize the models' parameters (weights and biases) using a Gaussian distribution
for model in models_sac.values():
    model.init_parameters(method_name="normal_", mean=0.0, std=0.1)

# Agent configuration, overriding some library defaults:
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#configuration-and-hyperparameters
cfg_sac = SAC_DEFAULT_CONFIG.copy()
cfg_sac["gradient_steps"] = 1
cfg_sac["batch_size"] = 128
cfg_sac["random_timesteps"] = 10
cfg_sac["learning_starts"] = 0
# BUG FIX: the four entries below previously used ":" (a no-op annotated
# expression statement) instead of "=", so the values were never stored and
# the library defaults silently applied.
cfg_sac["actor_learning_rate"] = 5e-4    # actor learning rate
cfg_sac["critic_learning_rate"] = 5e-3   # critic learning rate
cfg_sac["learn_entropy"] = True
cfg_sac["entropy_learning_rate"] = 1e-3  # entropy learning rate
cfg_sac["initial_entropy_value"] = 0.2   # initial entropy value
# logging to TensorBoard and write checkpoints each 100 and 1000 timesteps respectively
cfg_sac["experiment"]["write_interval"] = 100
cfg_sac["experiment"]["checkpoint_interval"] = 1000

agent = SAC(models=models_sac,
            memory=memory,
            cfg=cfg_sac,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)

# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 320000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)

# start training
trainer.train()
| 5,185 | Python | 44.095652 | 108 | 0.688717 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/skrl_train_TD3.py | import torch
import os
import torch.nn as nn
import torch.nn.functional as F
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.envs.torch import load_omniverse_isaacgym_env
from skrl.utils import set_seed
from skrl.resources.noises.torch import GaussianNoise
# set the seed for reproducibility
set_seed(42)
# TD3 models built from skrl mixins:
# - Actor (policy): observation -> deterministic action
# - Critic: (state, action) -> scalar Q-value to guide the policy
class DeterministicActor(DeterministicMixin, Model):
    """Deterministic actor: ELU MLP whose output is squashed into [-1, 1] by tanh."""

    def __init__(self, observation_space, action_space, device, clip_actions=True):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(
            nn.Linear(self.num_observations, 512), nn.ELU(),
            nn.Linear(512, 256), nn.ELU(),
            nn.Linear(256, self.num_actions), nn.Tanh(),
        )

    def compute(self, inputs, role):
        """Return (action, empty outputs dict) for the given observations."""
        return self.net(inputs["states"]), {}
class DeterministicCritic(DeterministicMixin, Model):
    """Q-function: 400/300 ReLU MLP mapping a (state, action) pair to a scalar."""

    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        in_features = self.num_observations + self.num_actions
        self.net = nn.Sequential(
            nn.Linear(in_features, 400), nn.ReLU(),
            nn.Linear(400, 300), nn.ReLU(),
            nn.Linear(300, 1),
        )

    def compute(self, inputs, role):
        """Concatenate state and action along dim 1; return (Q-value, empty dict)."""
        state_action = torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)
        return self.net(state_action), {}
# Load and wrap the Omniverse Isaac Gym environment
omniisaacgymenvs_path = os.path.realpath( os.path.join(os.path.realpath(__file__), "..") )
env = load_omniverse_isaacgym_env(task_name="FrankaBallCatching", omniisaacgymenvs_path = omniisaacgymenvs_path)
env = wrap_env(env)
device = env.device
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=2500, num_envs=env.num_envs, device=device)
# Instantiate the agent's models (function approximators).
# TD3 requires 6 models (actor, critics, and their targets), visit its
# documentation for more details:
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#spaces-and-models
models = {}
models["policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models["critic_1"] = DeterministicCritic(env.observation_space, env.action_space, device)
models["critic_2"] = DeterministicCritic(env.observation_space, env.action_space, device)
models["target_critic_1"] = DeterministicCritic(env.observation_space, env.action_space, device)
models["target_critic_2"] = DeterministicCritic(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#configuration-and-hyperparameters
cfg_td3 = TD3_DEFAULT_CONFIG.copy()
cfg_td3["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device)
cfg_td3["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device)
cfg_td3["smooth_regularization_clip"] = 0.5
cfg_td3["batch_size"] = 16
cfg_td3["random_timesteps"] = 0
cfg_td3["learning_starts"] = 0
# logging to TensorBoard and write checkpoints each 100 and 1000 timesteps respectively
cfg_td3["experiment"]["write_interval"] = 100
cfg_td3["experiment"]["checkpoint_interval"] = 1000
agent = TD3(models=models,
            memory=memory,
            cfg=cfg_td3,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 320000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 5,119 | Python | 42.760683 | 112 | 0.70209 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/skrl_train_DDPG.py | import torch
import os
import torch.nn as nn
import torch.nn.functional as F
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.envs.torch import load_omniverse_isaacgym_env
from skrl.utils import set_seed
# set the seed for reproducibility
set_seed(42)
# DDPG models built from skrl mixins:
# - Actor (policy): observation -> deterministic action
# - Critic: (state, action) -> scalar Q-value to guide the policy
class DeterministicActor(DeterministicMixin, Model):
    """Deterministic actor: ELU MLP whose output is squashed into [-1, 1] by tanh."""

    def __init__(self, observation_space, action_space, device, clip_actions=True):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(
            nn.Linear(self.num_observations, 512), nn.ELU(),
            nn.Linear(512, 256), nn.ELU(),
            nn.Linear(256, self.num_actions), nn.Tanh(),
        )

    def compute(self, inputs, role):
        """Return (action, empty outputs dict) for the given observations."""
        return self.net(inputs["states"]), {}
class DeterministicCritic(DeterministicMixin, Model):
    """Q-function: 512/256 ELU MLP mapping a (state, action) pair to a scalar."""

    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        in_features = self.num_observations + self.num_actions
        self.net = nn.Sequential(
            nn.Linear(in_features, 512), nn.ELU(),
            nn.Linear(512, 256), nn.ELU(),
            nn.Linear(256, 1),
        )

    def compute(self, inputs, role):
        """Concatenate state and action along dim 1; return (Q-value, empty dict)."""
        state_action = torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)
        return self.net(state_action), {}
# Load and wrap the Omniverse Isaac Gym environment
# NOTE(review): sibling trainers use task_name="FrankaBallCatching" — confirm "FrankaCatching" is intended.
omniisaacgymenvs_path = os.path.realpath( os.path.join(os.path.realpath(__file__), "..") )
env = load_omniverse_isaacgym_env(task_name="FrankaCatching", omniisaacgymenvs_path = omniisaacgymenvs_path)
env = wrap_env(env)
device = env.device
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device, replacement=False)
# Instantiate the agent's models (function approximators).
# DDPG requires 4 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#spaces-and-models
models_ddpg = {}
models_ddpg["policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models_ddpg["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models_ddpg["critic"] = DeterministicCritic(env.observation_space, env.action_space, device)
models_ddpg["target_critic"] = DeterministicCritic(env.observation_space, env.action_space, device)
# Initialize the models' parameters (weights and biases) using a Gaussian distribution
for model in models_ddpg.values():
    model.init_parameters(method_name="normal_", mean=0.0, std=0.5)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#configuration-and-hyperparameters
cfg_ddpg = DDPG_DEFAULT_CONFIG.copy()
# cfg_ddpg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device)
cfg_ddpg["gradient_steps"] = 1 # gradient steps
cfg_ddpg["batch_size"] = 32 # training batch size
cfg_ddpg["polyak"] = 0.005 # soft update hyperparameter (tau)
cfg_ddpg["discount_factor"] = 0.99 # discount factor (gamma)
cfg_ddpg["random_timesteps"] = 0 # random exploration steps
cfg_ddpg["learning_starts"] = 0 # learning starts after this many steps
cfg_ddpg["actor_learning_rate"] = 1e-3
cfg_ddpg["critic_learning_rate"] = 5e-3
# cfg_ddpg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
# logging to TensorBoard and write checkpoints each 100 and 1000 timesteps respectively
cfg_ddpg["experiment"]["write_interval"] = 100
cfg_ddpg["experiment"]["checkpoint_interval"] = 1000
# cfg_ddpg["experiment"]["experiment_name"] = ""
agent = DDPG(models=models_ddpg,
            memory=memory,
            cfg=cfg_ddpg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 320000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 5,728 | Python | 44.110236 | 159 | 0.686627 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/skrl_train_PPO.py | import torch
import os
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.envs.torch import load_omniverse_isaacgym_env
from skrl.utils import set_seed
# set the seed for reproducibility
set_seed(42)
# Shared policy/value model for PPO, combining skrl's Gaussian and deterministic mixins.
class Shared(GaussianMixin, DeterministicMixin, Model):
    """Shared PPO network.

    One ELU MLP trunk feeds two heads: a Gaussian policy head (mean plus a
    learned state-independent log-std) and a scalar value head.
    """

    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
        DeterministicMixin.__init__(self, clip_actions)

        # Shared trunk: 512 -> 256 -> 128 with ELU activations.
        self.net = nn.Sequential(
            nn.Linear(self.num_observations, 512), nn.ELU(),
            nn.Linear(512, 256), nn.ELU(),
            nn.Linear(256, 128), nn.ELU(),
        )
        self.mean_layer = nn.Linear(128, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
        self.value_layer = nn.Linear(128, 1)

    def act(self, inputs, role):
        """Dispatch to the Gaussian head for "policy" and the deterministic head for "value"."""
        if role == "policy":
            return GaussianMixin.act(self, inputs, role)
        elif role == "value":
            return DeterministicMixin.act(self, inputs, role)

    def compute(self, inputs, role):
        """Run the shared trunk and the head selected by `role`."""
        features = self.net(inputs["states"])
        if role == "policy":
            return self.mean_layer(features), self.log_std_parameter, {}
        elif role == "value":
            return self.value_layer(features), {}
# Load and wrap the Omniverse Isaac Gym environment
omniisaacgymenvs_path = os.path.realpath( os.path.join(os.path.realpath(__file__), "..") )
env = load_omniverse_isaacgym_env(task_name="FrankaPegInHole", omniisaacgymenvs_path = omniisaacgymenvs_path)
env = wrap_env(env)
device = env.device
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=32, num_envs=env.num_envs, device=device)
# Instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Shared(env.observation_space, env.action_space, device)
models_ppo["value"] = models_ppo["policy"]  # same instance: shared model
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["rollouts"] = 32  # memory_size
cfg_ppo["learning_epochs"] = 16
cfg_ppo["mini_batches"] = 8  # 16 * 8192 / 32768
cfg_ppo["discount_factor"] = 0.99
cfg_ppo["lambda"] = 0.95
cfg_ppo["learning_rate"] = 5e-4
cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.02}
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["grad_norm_clip"] = 1.0
cfg_ppo["ratio_clip"] = 0.2
cfg_ppo["value_clip"] = 0.2
cfg_ppo["clip_predicted_values"] = True
cfg_ppo["entropy_loss_scale"] = 0.0
cfg_ppo["value_loss_scale"] = 2.0
cfg_ppo["kl_threshold"] = 0
# cfg_ppo["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg_ppo["value_preprocessor"] = RunningStandardScaler
cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints each 100 and 1000 timesteps respectively
cfg_ppo["experiment"]["write_interval"] = 100
cfg_ppo["experiment"]["checkpoint_interval"] = 1000
agent = PPO(models=models_ppo,
            memory=memory,
            cfg=cfg_ppo,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 1000000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 4,809 | Python | 39.762712 | 109 | 0.688917 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/skrl_train_TRPO.py | import torch
import os
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.envs.torch import load_omniverse_isaacgym_env
from skrl.utils import set_seed
# set the seed for reproducibility
set_seed(42)
# TRPO models built from skrl mixins:
# - Policy: observation -> Gaussian action distribution
# - Value: observation -> scalar state value to guide the policy
class Policy(GaussianMixin, Model):
    """Gaussian policy: three-hidden-layer ELU MLP producing action means."""

    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)

        self.net = nn.Sequential(
            nn.Linear(self.num_observations, 512), nn.ELU(),
            nn.Linear(512, 256), nn.ELU(),
            nn.Linear(256, 128), nn.ELU(),
            nn.Linear(128, self.num_actions),
        )
        # State-independent log standard deviation, one entry per action.
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        """Return (action means, log-std, empty outputs dict)."""
        return self.net(inputs["states"]), self.log_std_parameter, {}
class Value(DeterministicMixin, Model):
    """State-value function: three-hidden-layer ELU MLP producing a scalar."""

    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(
            nn.Linear(self.num_observations, 512), nn.ELU(),
            nn.Linear(512, 256), nn.ELU(),
            nn.Linear(256, 128), nn.ELU(),
            nn.Linear(128, 1),
        )

    def compute(self, inputs, role):
        """Return (state value, empty outputs dict) for the given observations."""
        return self.net(inputs["states"]), {}
# Load and wrap the Omniverse Isaac Gym environment
omniisaacgymenvs_path = os.path.realpath( os.path.join(os.path.realpath(__file__), "..") )
env = load_omniverse_isaacgym_env(task_name="FrankaPegInHole", omniisaacgymenvs_path = omniisaacgymenvs_path)
env = wrap_env(env)
device = env.device
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=32, num_envs=env.num_envs, device=device)
# Instantiate the agent's models (function approximators).
# TRPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.trpo.html#spaces-and-models
models_trpo = {}
models_trpo["policy"] = Policy(env.observation_space, env.action_space, device)
models_trpo["value"] = Value(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.trpo.html#configuration-and-hyperparameters
cfg_trpo = TRPO_DEFAULT_CONFIG.copy()
cfg_trpo["rollouts"] = 32  # memory_size
cfg_trpo["learning_epochs"] = 16
cfg_trpo["mini_batches"] = 8
cfg_trpo["discount_factor"] = 0.99
cfg_trpo["lambda"] = 0.95
cfg_trpo["learning_rate"] = 5e-4
cfg_trpo["grad_norm_clip"] = 1.0
cfg_trpo["value_loss_scale"] = 2.0
cfg_trpo["state_preprocessor"] = RunningStandardScaler
cfg_trpo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg_trpo["value_preprocessor"] = RunningStandardScaler
cfg_trpo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints each 100 and 1000 timesteps respectively
cfg_trpo["experiment"]["write_interval"] = 100
cfg_trpo["experiment"]["checkpoint_interval"] = 1000
agent = TRPO(models=models_trpo,
             memory=memory,
             cfg=cfg_trpo,
             observation_space=env.observation_space,
             action_space=env.action_space,
             device=device)
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 320000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 5,310 | Python | 41.150793 | 109 | 0.632203 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Ball_Catching.py | from omniisaacgymenvs.tasks.base.rl_task import RLTask
from Models.Franka.Franka import Franka
from Models.Franka.Franka_view import FrankaView
from Models.ball_catching.tool import Tool
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.cloner import Cloner
import numpy as np
import torch
import math
from pxr import Gf, Usd, UsdGeom
class FrankaBallCatchingTask(RLTask):
    """RL task in which a Franka arm holding a tool must catch a flying ball.

    Observation (27): scaled joint positions (9), scaled joint velocities (9),
    ball position (3), ball-to-tool-center vector (3), ball linear velocity (3).
    Action (9): joint position-target deltas; the two finger joints are
    overridden every step so the tool stays grasped in the hand.
    """
    def __init__(
        self,
        name,
        sim_config,
        env,
        offset=None
    ) -> None:
        # Cache configuration handles and read task hyperparameters.
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config
        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._max_episode_length = self._task_cfg["env"]["episodeLength"]
        self.action_scale = self._task_cfg["env"]["actionScale"]
        self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
        self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
        self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
        self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
        self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
        self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
        self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
        self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
        self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
        self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
        self.distX_offset = 0.04
        self.dt = 1/60.
        self._num_observations = 27
        self._num_actions = 9
        # Flags for falsification/testing mode (see set_as_test / set_action_noise)
        self.is_test = False
        self.initial_test_value = None
        self.is_action_noise = False
        RLTask.__init__(self, name, env)
        # Extra info for TensorBoard
        self.extras = {}
        torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
        self.episode_sums = {"ball_vel": torch_zeros(), "ball_tool_dist": torch_zeros()}
        return

    def set_up_scene(self, scene) -> None:
        """Create the template env (Franka + tool + ball), clone it, and register views."""
        # Franka
        franka_translation = torch.tensor([0.3, 0.0, 0.0])
        self.get_franka(franka_translation)
        self.get_tool()
        self.get_ball()
        # Here the env is cloned (cannot clone particle systems right now)
        super().set_up_scene(scene)
        # Add Franka
        self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
        # Add tool
        self._tool = RigidPrimView(prim_paths_expr="/World/envs/.*/tool/tool/tool_mesh", name="tool_view", reset_xform_properties=False)
        self._tool_center = RigidPrimView(prim_paths_expr="/World/envs/.*/tool/tool/center_cube", name="tool_center_view", reset_xform_properties=False)
        # Add ball
        self._ball = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view", reset_xform_properties=False)
        scene.add(self._frankas)
        scene.add(self._frankas._hands)
        scene.add(self._frankas._lfingers)
        scene.add(self._frankas._rfingers)
        scene.add(self._tool)
        scene.add(self._tool_center)
        scene.add(self._ball)
        self.init_data()
        return

    def get_franka(self, translation):
        """Spawn the Franka robot in the template env and apply its sim settings."""
        franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation)
        self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka"))

    def get_tool(self):
        """Spawn the catching tool in the template env and apply its sim settings."""
        tool = Tool(prim_path=self.default_zero_env_path + "/tool", name="tool")
        self._sim_config.apply_articulation_settings("tool", get_prim_at_path(tool.prim_path), self._sim_config.parse_actor_config("tool"))

    def get_ball(self):
        """Spawn the ball to be caught (small, light dynamic sphere)."""
        ball = DynamicSphere(
                name = 'ball',
                position=[-0.8,0,1.5],
                orientation=[1,0,0,0],
                prim_path=self.default_zero_env_path + "/ball",
                radius=0.01,
                color=np.array([1, 0, 0]),
                density = 100,
                mass = 0.001
            )

    # Set as testing mode
    def set_as_test(self):
        self.is_test = True

    # Set action noise
    def set_action_noise(self):
        self.is_action_noise = True

    # Set initial test values for testing mode
    def set_initial_test_value(self, value):
        # for ball catching: initial x,y positions and initial x,y velocities of the ball
        self.initial_test_value = value

    def init_data(self) -> None:
        """Precompute grasp poses, default joint targets and per-env buffers."""
        def get_env_local_pose(env_pos, xformable, device):
            """Compute pose in env-local coordinates"""
            world_transform = xformable.ComputeLocalToWorldTransform(0)
            world_pos = world_transform.ExtractTranslation()
            world_quat = world_transform.ExtractRotationQuat()
            px = world_pos[0] - env_pos[0]
            py = world_pos[1] - env_pos[1]
            pz = world_pos[2] - env_pos[2]
            qx = world_quat.imaginary[0]
            qy = world_quat.imaginary[1]
            qz = world_quat.imaginary[2]
            qw = world_quat.real
            return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
        stage = get_current_stage()
        hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device)
        lfinger_pose = get_env_local_pose(
            self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device
        )
        rfinger_pose = get_env_local_pose(
            self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device
        )
        # finger pos: midpoint of the two fingertips, orientation of the left finger
        finger_pose = torch.zeros(7, device=self._device)
        finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
        finger_pose[3:7] = lfinger_pose[3:7]
        hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3]))
        # franka grasp local pose
        grasp_pose_axis = 1
        franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3])
        franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
        self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
        self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
        self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
        self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
        self.franka_default_dof_pos = torch.tensor(
            [0.0, -0.872, 0.0, -2.0, 0.0, 2.618, 0.785, 0.04, 0.04], device=self._device
        )
        self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)

    def get_observations(self) -> dict:
        """Assemble the 27-dim observation buffer for every env."""
        # Franka
        hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
        franka_dof_pos = self._frankas.get_joint_positions(clone=False)
        franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
        self.franka_dof_pos = franka_dof_pos
        self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
        # fixed: right-finger pose was previously read from the LEFT finger view
        self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
        # joint positions scaled to [-1, 1]
        dof_pos_scaled = (
            2.0
            * (franka_dof_pos - self.franka_dof_lower_limits)
            / (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
            - 1.0
        )
        # ball
        self.ball_pos, self.ball_rot = self._ball.get_world_poses(clone=False)
        ball_vel = self._ball.get_velocities() # ball velocity
        ball_linvels = ball_vel[:, 0:3] # ball linear velocity
        # tool
        tool_pos, tool_rot = self._tool_center.get_world_poses(clone=False) # tool position
        to_target = tool_pos - self.ball_pos # ball to tool dist
        self.obs_buf = torch.cat(
            (
                dof_pos_scaled,
                franka_dof_vel * self.dof_vel_scale,
                self.ball_pos,
                to_target,
                ball_linvels,
            ),
            dim=-1,
        )
        observations = {
            self._frankas.name: {
                "obs_buf": self.obs_buf
            }
        }
        return observations

    def pre_physics_step(self, actions) -> None:
        """Apply the policy actions as joint position targets (fingers pinned)."""
        if not self._env._world.is_playing():
            return
        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)
        self.actions = actions.clone().to(self._device)
        # if action noise
        if self.is_action_noise is True:
            # Gaussian white noise, std 0.5 (variance 0.25)
            self.actions = self.actions + (0.5)*torch.randn_like(self.actions)
        targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
        self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
        env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
        # fix the finger movement so that the tool will always be grasped in hand
        self.franka_dof_targets[:,7] = self.franka_default_dof_pos[7]
        self.franka_dof_targets[:,8] = self.franka_default_dof_pos[8]
        self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)

    def reset_idx(self, env_ids):
        """Reset the given envs: robot to default pose, tool to default, ball randomized
        (training) or set from ``initial_test_value`` (testing)."""
        indices = env_ids.to(dtype=torch.int32)
        num_indices = len(indices)
        # reset franka
        pos = torch.clamp(
            self.franka_default_dof_pos.unsqueeze(0),
            #+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
            self.franka_dof_lower_limits,
            self.franka_dof_upper_limits,
        )
        dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_pos[:, :] = pos
        self.franka_dof_targets[env_ids, :] = pos
        self.franka_dof_pos[env_ids, :] = pos
        self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
        self._frankas.set_joint_positions(dof_pos, indices=indices)
        self._frankas.set_joint_velocities(dof_vel, indices=indices)
        # reset tool
        self._tool.set_world_poses(self.default_tool_pos[env_ids], self.default_tool_rot[env_ids], indices = indices)
        self._tool.set_velocities(self.default_tool_velocity[env_ids], indices = indices)
        if not self.is_test:
            # reset ball position within an area: x [-0.05, 0.05], y [-0.05, 0.05]
            self.new_ball_pos = self.default_ball_pos.clone().detach()
            self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + (0.05 + 0.05) * torch.rand(self._num_envs, device=self._device) -0.05
            self.new_ball_pos[:,1] = self.default_ball_pos[:,1] + (0.05 + 0.05) * torch.rand(self._num_envs, device=self._device) -0.05
            # reset ball velocity: default_ball_vel = [2.2 0.1, 0.0]
            # NOTE(review): the ranges below are degenerate (x fixed at 1.0, y at 0.0)
            # although the comments suggest x in [1.0, 1.5] and y in [0.0, 0.2]
            self.new_ball_vel = self.default_ball_velocity.clone().detach()
            self.new_ball_vel[:,0] = (1.0 - 1.0) * torch.rand(self._num_envs, device=self._device) + 1.0
            self.new_ball_vel[:,1] = (0.0 - 0.0) * torch.rand(self._num_envs, device=self._device) + 0.0
            # reset ball
            self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices)
            self._ball.set_velocities(self.new_ball_vel[env_ids], indices = indices)
        else:
            # testing mode: offsets/velocities come from the externally supplied value
            self.new_ball_pos = self.default_ball_pos.clone().detach()
            self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + self.initial_test_value[0]
            self.new_ball_pos[:,1] = self.default_ball_pos[:,1] + self.initial_test_value[1]
            self.new_ball_vel = self.default_ball_velocity.clone().detach()
            self.new_ball_vel[:,0] = self.initial_test_value[2]
            self.new_ball_vel[:,1] = self.initial_test_value[3]
            # reset ball
            self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices)
            self._ball.set_velocities(self.new_ball_vel[env_ids], indices = indices)
        # bookkeeping
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0

    def post_reset(self):
        """Cache limits/defaults after the first reset and randomize all envs."""
        # Franka
        self.num_franka_dofs = self._frankas.num_dof
        self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
        dof_limits = self._frankas.get_dof_limits()
        self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
        self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
        self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
        self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
        self.franka_dof_targets = torch.zeros(
            (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
        )
        # tool
        self.default_tool_pos, self.default_tool_rot = self._tool.get_world_poses()
        self.default_tool_velocity = self._tool.get_velocities()
        # ball
        self.default_ball_pos, self.default_ball_rot = self._ball.get_world_poses()
        self.default_ball_velocity = self._ball.get_velocities()
        # change default velocities so the ball flies toward the tool
        self.default_ball_velocity[:,0] = 2.2
        self.default_ball_velocity[:,1] = 0.1
        self.default_ball_velocity[:,2] = 0.0
        self._ball.set_velocities(self.default_ball_velocity)
        # randomize all envs
        indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
        self.reset_idx(indices)

    def calculate_metrics(self) -> None:
        """Compute the per-env reward and accumulate TensorBoard statistics."""
        # variables for reward
        ball_pos = self.ball_pos # ball pos
        ball_vel = self._ball.get_velocities() # ball velocity
        tool_pos, tool_rot = self._tool_center.get_world_poses() # tool center pos and rot
        ball_linvels = ball_vel[:, 0:3] # ball linear velocity
        # 1st reward: ball to tool center distance
        ball_center_dist = torch.norm(tool_pos - ball_pos, p=2, dim=-1)
        # fixed: XY-plane distance previously sliced [:,0:3], duplicating the 3D distance above
        ball_center_XY_dist = torch.norm(tool_pos[:,0:2] - ball_pos[:,0:2], p=2, dim=-1)
        center_dist_reward = 1.0/(1.0+ball_center_dist*100)
        # 2nd reward: ball is unmoved
        norm_ball_linvel = torch.norm(ball_linvels, p=2, dim=-1)
        ball_vel_reward = 1.0/(1.0+norm_ball_linvel*100)
        # 3rd reward: rotation not too much
        rot_diff = torch.norm(tool_rot - self.default_tool_rot, p=2, dim=-1)
        tool_rot_reward = 1.0/(1.0+rot_diff)
        # action penalty
        action_penalty = torch.sum(self.actions[:,0:7] ** 2, dim=-1)
        action_penalty = 1 - 1.0 / (1.0 + action_penalty)
        # liveness_reward: ball hovering above the tool center in the XY plane
        liveness_reward = torch.where(ball_center_XY_dist<0.03, torch.ones_like(center_dist_reward), torch.zeros_like(center_dist_reward))
        # final cumulative reward
        final_reward = 1.0*center_dist_reward + 1.0*ball_vel_reward + 0.0*tool_rot_reward + 0.5*liveness_reward - 0.01*action_penalty
        self.rew_buf[:] = final_reward
        # log additional info
        self.episode_sums["ball_vel"] += norm_ball_linvel
        self.episode_sums["ball_tool_dist"] += ball_center_dist

    def is_done(self) -> None:
        """Flag envs for reset: ball on the ground, or episode length exceeded."""
        if not self.is_test:
            ball_pos = self.ball_pos # ball pos
            tool_pos, tool_rot = self._tool_center.get_world_poses() # tool center pos and rot
            ball_center_dist = torch.norm(tool_pos - ball_pos, p=2, dim=-1)
            # 1st reset: if ball falls from tool
            # self.reset_buf = torch.where(ball_center_dist > 5.0, torch.ones_like(self.reset_buf), self.reset_buf)
            # 2nd reset: if ball falls to the ground
            self.reset_buf = torch.where(self.ball_pos[:,2] < 0.02, torch.ones_like(self.reset_buf), self.reset_buf)
            # 3rd reset if max length reached
            self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
        else:
            self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
| 17,673 | Python | 41.898058 | 152 | 0.606971 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Cube_Stacking.py | from omniisaacgymenvs.tasks.base.rl_task import RLTask
from Models.Franka.Franka import Franka
from Models.Franka.Franka_view import FrankaView
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.cloner import Cloner
import numpy as np
import torch
import math
from pxr import Gf, Usd, UsdGeom
class FrankaCubeStackingTask(RLTask):
    """RL task in which a Franka arm stacks a red cube on top of a green target cube.

    Observation (28): scaled joint positions (9), scaled joint velocities (9),
    cube position (3), cube rotation quaternion (4), cube-to-target vector (3).
    Action (9): joint position-target deltas; the gripper joints are overridden
    each step (closed while carrying, opened when the stack condition is met).
    """
    def __init__(
        self,
        name,
        sim_config,
        env,
        offset=None
    ) -> None:
        # Cache configuration handles and read task hyperparameters.
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config
        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._max_episode_length = self._task_cfg["env"]["episodeLength"]
        self.action_scale = self._task_cfg["env"]["actionScale"]
        self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
        self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
        self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
        self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
        self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
        self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
        self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
        self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
        self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
        self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
        self.distX_offset = 0.04
        self.dt = 1/60.
        self._num_observations = 28
        self._num_actions = 9
        # Flags for falsification/testing mode (see set_as_test / set_action_noise)
        self.is_test = False
        self.initial_test_value = None
        self.is_action_noise = False
        RLTask.__init__(self, name, env)
        # Extra info for TensorBoard
        self.extras = {}
        torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
        self.episode_sums = {"cube_cube_dist": torch_zeros(), "finger_to_cube_dist": torch_zeros(), "is_stacked": torch_zeros(), "success_rate": torch_zeros()}
        return

    def set_up_scene(self, scene) -> None:
        """Create the template env (Franka + cube + target cube), clone it, and register views."""
        # Franka
        franka_translation = torch.tensor([0.3, 0.0, 0.0])
        self.get_franka(franka_translation)
        self.get_cube()
        self.get_target_cube()
        # Here the env is cloned
        super().set_up_scene(scene)
        # Add Franka
        self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
        # Add cube
        self._cube = RigidPrimView(prim_paths_expr="/World/envs/.*/cube", name="cube_view", reset_xform_properties=False)
        # Add location_ball
        self._target_cube = RigidPrimView(prim_paths_expr="/World/envs/.*/target_cube", name="target_cube_view", reset_xform_properties=False)
        scene.add(self._frankas)
        scene.add(self._frankas._hands)
        scene.add(self._frankas._lfingers)
        scene.add(self._frankas._rfingers)
        scene.add(self._cube)
        scene.add(self._target_cube)
        self.init_data()
        return

    def get_franka(self, translation):
        """Spawn the Franka robot in the template env and apply its sim settings."""
        franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation)
        self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka"))

    def get_cube(self):
        """Spawn the cube to be stacked (red, starts in the gripper region)."""
        cube = DynamicCuboid(
                name = 'cube',
                position=[-0.04, 0.0, 0.91],
                orientation=[1,0,0,0],
                size=0.05,
                prim_path=self.default_zero_env_path + "/cube",
                color=np.array([1, 0, 0]),
                density = 100
            )

    def get_target_cube(self):
        """Spawn the stationary target cube (green, on the table)."""
        target_cube = DynamicCuboid(
                name = 'target_cube',
                position=[-0.3, 0.1, 0.025],
                orientation=[1, 0, 0, 0],
                prim_path=self.default_zero_env_path + "/target_cube",
                size=0.05,
                color=np.array([0, 1, 0]),
                density = 100
            )

    # Set as testing mode
    def set_as_test(self):
        self.is_test = True

    # Set action noise
    def set_action_noise(self):
        self.is_action_noise = True

    # Set initial test values for testing mode
    def set_initial_test_value(self, value):
        # for cube stacking: initial x,y offsets of the target cube
        self.initial_test_value = value

    def init_data(self) -> None:
        """Precompute grasp poses, default joint targets and per-env buffers."""
        def get_env_local_pose(env_pos, xformable, device):
            """Compute pose in env-local coordinates"""
            world_transform = xformable.ComputeLocalToWorldTransform(0)
            world_pos = world_transform.ExtractTranslation()
            world_quat = world_transform.ExtractRotationQuat()
            px = world_pos[0] - env_pos[0]
            py = world_pos[1] - env_pos[1]
            pz = world_pos[2] - env_pos[2]
            qx = world_quat.imaginary[0]
            qy = world_quat.imaginary[1]
            qz = world_quat.imaginary[2]
            qw = world_quat.real
            return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
        stage = get_current_stage()
        hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device)
        lfinger_pose = get_env_local_pose(
            self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device
        )
        rfinger_pose = get_env_local_pose(
            self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device
        )
        # finger pos: midpoint of the two fingertips, orientation of the left finger
        finger_pose = torch.zeros(7, device=self._device)
        finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
        finger_pose[3:7] = lfinger_pose[3:7]
        hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3]))
        # franka grasp local pose
        grasp_pose_axis = 1
        franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3])
        franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
        self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
        self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
        self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
        self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
        self.franka_default_dof_pos = torch.tensor(
            [0.0, -0.872, 0.0, -2.0, 0.0, 2.618, 0.785, 0.025, 0.025], device=self._device
        )
        self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)

    def get_observations(self) -> dict:
        """Assemble the 28-dim observation buffer for every env."""
        # Franka
        hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
        franka_dof_pos = self._frankas.get_joint_positions(clone=False)
        franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
        self.franka_dof_pos = franka_dof_pos
        self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
        # fixed: right-finger pose was previously read from the LEFT finger view
        self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
        # joint positions scaled to [-1, 1]
        dof_pos_scaled = (
            2.0
            * (franka_dof_pos - self.franka_dof_lower_limits)
            / (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
            - 1.0
        )
        # cube
        cube_pos, cube_rot = self._cube.get_world_poses(clone=False)
        # target cube
        tar_cube_pos, tar_cube_rot = self._target_cube.get_world_poses(clone=False) # tool position
        to_target = cube_pos - tar_cube_pos
        self.obs_buf = torch.cat(
            (
                dof_pos_scaled,
                franka_dof_vel * self.dof_vel_scale,
                cube_pos,
                cube_rot,
                to_target,
            ),
            dim=-1,
        )
        observations = {
            self._frankas.name: {
                "obs_buf": self.obs_buf
            }
        }
        return observations

    def pre_physics_step(self, actions) -> None:
        """Apply policy actions as joint targets; open the gripper once the cube is stacked."""
        if not self._env._world.is_playing():
            return
        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)
        self.actions = actions.clone().to(self._device)
        # if action noise
        if self.is_action_noise is True:
            # Gaussian white noise, std 0.5 (variance 0.25)
            self.actions = self.actions + (0.5)*torch.randn_like(self.actions)
        targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
        self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
        env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
        # release cube when it is close to and above the target cube
        cube_pos, cube_rot = self._cube.get_world_poses(clone=False) # cube
        tar_cube_pos, tar_cube_rot = self._target_cube.get_world_poses(clone=False) # target pos
        target_pos = tar_cube_pos.clone().detach()
        target_pos[:,2] = target_pos[:,2] + 0.025
        # NOTE(review): distance is measured to tar_cube_pos, not the z-offset target_pos
        # computed above - confirm this asymmetry is intended
        target_dist = torch.norm(cube_pos - tar_cube_pos, p=2, dim=-1)
        self.release_condition = torch.logical_and(target_dist<0.08, cube_pos[:,2] >= target_pos[:,2])
        # gripper joints: open (0.08) on release, otherwise keep pinched (0.005)
        self.franka_dof_targets[:,7] = torch.where(self.release_condition, 0.08, 0.005)
        self.franka_dof_targets[:,8] = torch.where(self.release_condition, 0.08, 0.005)
        self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)

    def reset_idx(self, env_ids):
        """Reset the given envs: robot to default pose, cube to default, target cube
        randomized (training) or set from ``initial_test_value`` (testing)."""
        indices = env_ids.to(dtype=torch.int32)
        num_indices = len(indices)
        # reset franka
        pos = torch.clamp(
            self.franka_default_dof_pos.unsqueeze(0),
            #+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
            self.franka_dof_lower_limits,
            self.franka_dof_upper_limits,
        )
        dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_pos[:, :] = pos
        self.franka_dof_targets[env_ids, :] = pos
        self.franka_dof_pos[env_ids, :] = pos
        self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
        self._frankas.set_joint_positions(dof_pos, indices=indices)
        self._frankas.set_joint_velocities(dof_vel, indices=indices)
        # reset cube
        self._cube.set_world_poses(self.default_cube_pos[env_ids], self.default_cube_rot[env_ids], indices = indices)
        self._cube.set_velocities(self.default_cube_velocity[env_ids], indices = indices)
        if not self.is_test:
            # reset target cube position within an area: x [-0.2, 0.2], y [-0.2, 0.2]
            self.new_cube_pos = self.default_target_cube_pos.clone().detach()
            self.new_cube_pos[:,0] = self.default_target_cube_pos[:,0] + (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2
            self.new_cube_pos[:,1] = self.default_target_cube_pos[:,1] + (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2
            self._target_cube.set_world_poses(self.new_cube_pos[env_ids], self.default_target_cube_rot[env_ids], indices = indices)
            self._target_cube.set_velocities(self.default_target_cube_velocity[env_ids], indices = indices)
        # if is test mode
        else:
            self.new_cube_pos = self.default_target_cube_pos.clone().detach()
            self.new_cube_pos[:,0] = self.default_target_cube_pos[:,0] + self.initial_test_value[0]
            self.new_cube_pos[:,1] = self.default_target_cube_pos[:,1] + self.initial_test_value[1]
            self._target_cube.set_world_poses(self.new_cube_pos[env_ids], self.default_target_cube_rot[env_ids], indices = indices)
            self._target_cube.set_velocities(self.default_target_cube_velocity[env_ids], indices = indices)
        # bookkeeping
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0
        # fill extras: per-episode means for TensorBoard, then clear accumulators
        self.extras["episode"] = {}
        for key in self.episode_sums.keys():
            if key == "success_rate":
                self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids])
            else:
                self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
            self.episode_sums[key][env_ids] = 0

    def post_reset(self):
        """Cache limits/defaults after the first reset and randomize all envs."""
        # Franka
        self.num_franka_dofs = self._frankas.num_dof
        self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
        dof_limits = self._frankas.get_dof_limits()
        self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
        self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
        self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
        self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
        self.franka_dof_targets = torch.zeros(
            (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
        )
        # Cube
        self.default_cube_pos, self.default_cube_rot = self._cube.get_world_poses()
        self.default_cube_velocity = self._cube.get_velocities()
        # Target cube
        self.default_target_cube_pos, self.default_target_cube_rot = self._target_cube.get_world_poses()
        self.default_target_cube_velocity = self._target_cube.get_velocities()
        # randomize all envs
        indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
        self.reset_idx(indices)

    def calculate_metrics(self) -> None:
        """Compute the per-env reward and accumulate TensorBoard statistics."""
        # reward info
        joint_positions = self.franka_dof_pos
        cube_pos, cube_rot = self._cube.get_world_poses(clone=False) # cube
        cube_vel = self._cube.get_velocities()
        cube_vel = cube_vel[:,0:3]
        cube_vel_norm = torch.norm(cube_vel, p=2, dim=-1)
        tar_cube_pos, tar_cube_rot = self._target_cube.get_world_poses(clone=False) # target pos
        target_pos = tar_cube_pos.clone().detach()
        target_pos[:,2] = target_pos[:,2] + 0.02
        lfinger_pos, lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False) # franka finger
        rfinger_pos, rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
        finger_pos = (lfinger_pos + rfinger_pos)/2
        # 1st reward: cube to target distance
        cube_targe_dist = torch.norm(target_pos - cube_pos, p=2, dim=-1)
        cube_tar_dist_reward = 1.0/(1.0+cube_targe_dist)
        cube_targe_XY_dist = torch.norm(target_pos[:,0:2] - cube_pos[:,0:2] , p=2, dim=-1)
        cube_tar_XY_dist_reward = 1.0/(1.0+cube_targe_XY_dist**2)
        # 2nd reward: if cube is stacked (close and above target) with gripper clear
        # and the cube at rest, the task is complete
        finger_to_cube_dist = torch.norm(finger_pos - cube_pos, p=2, dim=-1)
        is_stacked = torch.where(torch.logical_and(cube_targe_dist<0.05, cube_pos[:,2]>=target_pos[:,2]),
                            torch.ones_like(cube_tar_dist_reward), torch.zeros_like(cube_tar_dist_reward))
        self.is_complete = torch.where(torch.logical_and(finger_to_cube_dist>0.05, torch.logical_and(cube_vel_norm<0.05, is_stacked==1)),
                            torch.ones_like(cube_tar_dist_reward), torch.zeros_like(cube_tar_dist_reward))
        # 3rd reward: finger to cube distance (inverted once stacked, to encourage retreat)
        finger_cube_dist_reward = 1.0/(1.0+finger_to_cube_dist)
        finger_cube_dist_reward = torch.where(is_stacked==1, 1-finger_cube_dist_reward, finger_cube_dist_reward)
        # 4th reward: finger closeness reward
        finger_close_reward = (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8])
        finger_close_reward = torch.where(is_stacked !=1, finger_close_reward, -finger_close_reward)
        # 5th reward: cube velocity reward
        cube_vel_reward = 1.0/(1.0+cube_vel_norm)
        # fall indicator: 1 if cube is on the ground, else 0
        # (fixed: else-branch previously returned cube_tar_dist_reward, so non-fallen
        # envs were penalized by -0.5 * a reward value instead of 0)
        self.is_fall = torch.where(cube_pos[:,2]<0.05, torch.ones_like(cube_tar_dist_reward), torch.zeros_like(cube_tar_dist_reward))
        # final reward
        final_reward = 2*cube_tar_dist_reward + 0.0*finger_cube_dist_reward + 0.0*finger_close_reward + 0.0*cube_vel_reward \
                        + 10*self.is_complete - 0.5*self.is_fall + 0.0*is_stacked + 0.0*cube_tar_XY_dist_reward
        final_reward = torch.where(cube_targe_dist<0.2, final_reward+2.0*cube_tar_XY_dist_reward, final_reward)
        self.rew_buf[:] = final_reward
        self.episode_sums["success_rate"] += self.is_complete
        self.episode_sums["cube_cube_dist"] += cube_targe_dist
        self.episode_sums["finger_to_cube_dist"] += finger_to_cube_dist
        self.episode_sums["is_stacked"] += is_stacked

    def is_done(self) -> None:
        """Flag envs for reset: cube drifted too far from target, or episode length exceeded."""
        if not self.is_test:
            cube_pos, cube_rot = self._cube.get_world_poses(clone=False)
            tar_cube_pos, tar_cube_rot = self._target_cube.get_world_poses(clone=False)
            # reset if cube is too far away from the target cube (XY plane)
            cube_target_XY_dist = torch.norm(tar_cube_pos[:,0:2] - cube_pos[:,0:2] , p=2, dim=-1)
            self.reset_buf = torch.where(cube_target_XY_dist > 0.8, torch.ones_like(self.reset_buf), self.reset_buf)
            # reset if max length reached
            self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
        else:
            self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
| 19,860 | Python | 43.53139 | 159 | 0.611329 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Peg_In_Hole.py | from omniisaacgymenvs.tasks.base.rl_task import RLTask
from Models.Franka.Franka import Franka
from Models.Franka.Franka_view import FrankaView
from Models.peg_in_hole.table import Table
from Models.peg_in_hole.tool import Tool
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.cloner import Cloner
import numpy as np
import torch
import math
from pxr import Usd, UsdGeom
class FrankaPegInHoleTask(RLTask):
def __init__(
    self,
    name,
    sim_config,
    env,
    offset=None
) -> None:
    """Franka peg-in-hole RL task.

    Args:
        name: task name used by the RL framework.
        sim_config: configuration wrapper exposing ``config`` and
            ``task_config`` dictionaries.
        env: owning vectorized environment.
        offset: optional env offset (unused; kept for the RLTask API).
    """
    self._sim_config = sim_config
    self._cfg = sim_config.config
    self._task_cfg = sim_config.task_config

    # Environment layout / episode settings.
    self._num_envs = self._task_cfg["env"]["numEnvs"]
    self._env_spacing = self._task_cfg["env"]["envSpacing"]
    self._max_episode_length = self._task_cfg["env"]["episodeLength"]

    # Action scaling, reset noise, and reward coefficients from the task config.
    self.action_scale = self._task_cfg["env"]["actionScale"]
    self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
    self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
    self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
    self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
    self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
    self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
    self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
    self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
    self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
    self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
    self.location_ball_radius = self._task_cfg["env"]["locationBallRadius"]
    self.location_ball_initial_position = self._task_cfg["env"]["locationBallPosition"]
    self.location_ball_initial_orientation = self._task_cfg["env"]["locationBallInitialOrientation"]

    self.distX_offset = 0.04
    self.dt = 1/60.  # physics step used when integrating actions

    # Observation: 9 scaled joint pos + 9 scaled joint vel + tool pos (3)
    # + tool quat (4) + tool-to-hole error (3) = 28.
    self._num_observations = 28
    self._num_actions = 9

    # Flags for testing / falsification mode.
    self.is_test = False
    self.initial_test_value = None
    self.is_action_noise = False

    RLTask.__init__(self, name, env)

    # Extra info for TensorBoard.
    self.extras = {}

    def _torch_zeros():
        # Per-env float accumulator (PEP 8: prefer a def over a named lambda).
        return torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)

    self.episode_sums = {
        key: _torch_zeros()
        for key in (
            "tool_hole_XY_dist", "tool_hole_Z_dist", "tool_hole_dist",
            "tool_rot_error", "peg_rate", "norm_finger_vel", "rewards",
        )
    }
def set_up_scene(self, scene) -> None:
    """Spawn env_0 assets, clone them to all envs, then register the
    articulation / rigid-prim views with the scene.

    Order matters: the *View objects must be created after
    ``super().set_up_scene`` has cloned env_0 so their regex paths match
    every environment.
    """
    # Franka is placed 0.5 m along +X so the table sits in front of it.
    franka_translation = torch.tensor([0.5, 0.0, 0.0])
    self.get_franka(franka_translation)
    self.get_table()
    self.get_tool()
    super().set_up_scene(scene)
    # Add Franka
    self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
    # Add Table
    self._table = RigidPrimView(prim_paths_expr="/World/envs/.*/table/table/table_mesh", name="table_view", reset_xform_properties=False)
    # Add Tool
    self._tool = RigidPrimView(prim_paths_expr="/World/envs/.*/tool/tool/tool", name="tool_view", reset_xform_properties=False)
    # Add location_ball (marker prim at the hole location on the table)
    self._location_ball = RigidPrimView(prim_paths_expr="/World/envs/.*/table/table/location_ball", name="location_ball_view", reset_xform_properties=False)
    scene.add(self._frankas)
    scene.add(self._frankas._hands)
    scene.add(self._frankas._lfingers)
    scene.add(self._frankas._rfingers)
    scene.add(self._table)
    scene.add(self._tool)
    scene.add(self._location_ball)
    self.init_data()
    return
def get_franka(self, translation):
    """Spawn the Franka robot in env_0 and apply its actor config."""
    prim_path = self.default_zero_env_path + "/franka"
    robot = Franka(prim_path=prim_path, name="franka", translation=translation)
    actor_cfg = self._sim_config.parse_actor_config("franka")
    self._sim_config.apply_articulation_settings("franka", get_prim_at_path(robot.prim_path), actor_cfg)
def get_table(self):
    """Spawn the table (with the hole) in env_0 and apply its actor config."""
    prim_path = self.default_zero_env_path + "/table"
    table_model = Table(prim_path=prim_path, name="table")
    actor_cfg = self._sim_config.parse_actor_config("table")
    self._sim_config.apply_articulation_settings("table", get_prim_at_path(table_model.prim_path), actor_cfg)
def get_tool(self):
    """Spawn the peg tool in env_0 and apply its actor config."""
    prim_path = self.default_zero_env_path + "/tool"
    tool_model = Tool(prim_path=prim_path, name="tool")
    actor_cfg = self._sim_config.parse_actor_config("tool")
    self._sim_config.apply_articulation_settings("tool", get_prim_at_path(tool_model.prim_path), actor_cfg)
# Set as testing mode
def set_as_test(self):
    # Switch the task into evaluation/falsification mode
    # (reset_idx then uses initial_test_value instead of randomization).
    self.is_test = True
# Set action noise
def set_action_noise(self):
    # Enable additive Gaussian noise on actions in pre_physics_step.
    self.is_action_noise = True
# Set initial test values for testing mode
def set_initial_test_value(self, value):
    # For peg-in-hole: value[0], value[1] are the X/Y offsets applied to
    # the table position at reset (see reset_idx).
    self.initial_test_value = value
def init_data(self) -> None:
    """Precompute static tensors used every step: the gripper's local grasp
    pose, the reference (vertical) tool rotation, default joint positions,
    and the action buffer."""
    def get_env_local_pose(env_pos, xformable, device):
        """Compute pose in env-local coordinates"""
        world_transform = xformable.ComputeLocalToWorldTransform(0)
        world_pos = world_transform.ExtractTranslation()
        world_quat = world_transform.ExtractRotationQuat()
        px = world_pos[0] - env_pos[0]
        py = world_pos[1] - env_pos[1]
        pz = world_pos[2] - env_pos[2]
        qx = world_quat.imaginary[0]
        qy = world_quat.imaginary[1]
        qz = world_quat.imaginary[2]
        qw = world_quat.real
        # Returned layout is (px, py, pz, qw, qx, qy, qz) — quaternion w-first.
        return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
    stage = get_current_stage()
    # Hand and finger poses are read from env_0 only; all clones are identical.
    hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device)
    lfinger_pose = get_env_local_pose(
        self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device
    )
    rfinger_pose = get_env_local_pose(
        self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device
    )
    # Grasp point: midway between the fingers, oriented like the left finger.
    finger_pose = torch.zeros(7, device=self._device)
    finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
    finger_pose[3:7] = lfinger_pose[3:7]
    hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3]))
    grasp_pose_axis = 1  # NOTE(review): unused here
    # Grasp pose expressed in the hand frame, shifted 4 cm along the finger axis.
    franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3])
    franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
    self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
    self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
    # tool reference rotation (tool held vertically), quaternion w-first
    self.tool_ref_rot = torch.tensor([0.5, 0.5, 0.5, 0.5], device=self._device)
    # default franka pos: for initially grasping the tool
    self.franka_default_dof_pos = torch.tensor(
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.015, 0.015], device=self._device
    )
    self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
def get_observations(self) -> dict:
    """Build the 28-D observation: scaled joint positions (9), scaled joint
    velocities (9), tool world position (3), tool quaternion (4), and the
    tool-to-hole position error (3).

    Returns:
        dict mapping the Franka view name to its observation buffer.
    """
    hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
    franka_dof_pos = self._frankas.get_joint_positions(clone=False)
    franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
    self.franka_dof_pos = franka_dof_pos
    self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
    # BUG FIX: right-finger pose was previously read from the *left* finger
    # view (copy-paste error); use the right-finger view.
    self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
    # Tool and hole (location ball) poses.
    self.tool_pos, self.tool_rot = self._tool.get_world_poses(clone=False)
    hole_pos, hole_rot = self._location_ball.get_world_poses()
    to_target = self.tool_pos - hole_pos
    # Normalize joint positions to [-1, 1] over the joint limits.
    dof_pos_scaled = (
        2.0
        * (franka_dof_pos - self.franka_dof_lower_limits)
        / (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
        - 1.0
    )
    self.obs_buf = torch.cat(
        (
            dof_pos_scaled,
            franka_dof_vel * self.dof_vel_scale,
            self.tool_pos,
            self.tool_rot,
            to_target,
        ),
        dim=-1,
    )
    observations = {
        self._frankas.name: {
            "obs_buf": self.obs_buf
        }
    }
    return observations
def pre_physics_step(self, actions) -> None:
    """Apply policy actions as joint position targets and auto-release the
    gripper once the tool is close enough to the hole.

    Args:
        actions: (num_envs, 9) action tensor from the policy.
    """
    if not self._env._world.is_playing():
        return
    # Reset any environments flagged in the previous step.
    reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
    if len(reset_env_ids) > 0:
        self.reset_idx(reset_env_ids)
    self.actions = actions.clone().to(self._device)
    # Optional additive Gaussian action noise (std 0.5; the old comment
    # claiming "0.01 variance" did not match the code).
    if self.is_action_noise is True:
        self.actions = self.actions + (0.5)*torch.randn_like(self.actions)
    # Integrate actions into joint position targets and clamp to the limits.
    targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
    self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
    env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
    # Gripper control: the fingers (dofs 7, 8) are not policy-controlled.
    # They stay closed (0.015) so the tool remains grasped, and open (0.1)
    # once the tool is within 2.4 cm of the height-corrected hole center.
    hole_pos, hole_rot = self._location_ball.get_world_poses()
    tool_pos, tool_rot = self._tool.get_world_poses()
    hole_pos[:,2] = 0.39  # fix the hole reference height
    tool_hole_dist = torch.norm(tool_pos - hole_pos, p=2, dim=-1)
    self.release_condition = tool_hole_dist<=0.024
    self.franka_dof_targets[:,7] = torch.where(self.release_condition, 0.1, 0.015)
    self.franka_dof_targets[:,8] = torch.where(self.release_condition, 0.1, 0.015)
    # set franka target joint position
    self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids):
    """Reset the given environments: restore the Franka grasp posture,
    restore the tool pose, re-place the table (randomized in training,
    fixed from ``initial_test_value`` in test mode), and flush episode
    statistics into ``self.extras``.

    Args:
        env_ids: 1-D tensor of environment indices to reset.
    """
    indices = env_ids.to(dtype=torch.int32)
    num_indices = len(indices)
    # reset franka (due to initial grasping, cannot randomize)
    pos = torch.clamp(
        self.franka_default_dof_pos.unsqueeze(0),
        # + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
        self.franka_dof_lower_limits,
        self.franka_dof_upper_limits,
    )
    dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
    dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
    dof_pos[:, :] = pos
    self.franka_dof_targets[env_ids, :] = pos
    self.franka_dof_pos[env_ids, :] = pos
    self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
    self._frankas.set_joint_positions(dof_pos, indices=indices)
    self._frankas.set_joint_velocities(dof_vel, indices=indices)
    # reset tool to its captured default pose and zero velocity
    self._tool.set_world_poses(self.default_tool_pos[env_ids], self.default_tool_rot[env_ids], indices = indices)
    self._tool.set_velocities(self.default_tool_velocity[env_ids], indices = indices)
    if not self.is_test:
        # reset table
        # reset positions: x: [-0.2,0.2], y:[-0.2,0.2]
        random_x = (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2
        random_y = (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2
        self.new_table_pos = self.default_table_pos.clone().detach()
        self.new_table_pos[:,0] = self.default_table_pos[:,0] + random_x
        self.new_table_pos[:,1] = self.default_table_pos[:,1] + random_y
        self._table.set_world_poses(self.new_table_pos[env_ids], self.default_table_rot[env_ids], indices = indices)
        self._table.set_velocities(self.default_table_velocity[env_ids], indices = indices)
    else:
        # test mode: place the table at a fixed offset supplied by
        # set_initial_test_value (value[0] = X offset, value[1] = Y offset)
        self.new_table_pos = self.default_table_pos.clone().detach()
        self.new_table_pos[:,0] = self.default_table_pos[:,0] + self.initial_test_value[0]
        self.new_table_pos[:,1] = self.default_table_pos[:,1] + self.initial_test_value[1]
        self._table.set_world_poses(self.new_table_pos[env_ids], self.default_table_rot[env_ids], indices = indices)
        self._table.set_velocities(self.default_table_velocity[env_ids], indices = indices)
    # NOTE(review): clears the release latch for ALL envs (shape (1, num_envs)),
    # not only the ones being reset — confirm this is intended.
    self.is_released = torch.zeros((1,self._num_envs), device=self._device)
    # bookkeeping
    self.reset_buf[env_ids] = 0
    self.progress_buf[env_ids] = 0
    # fill extras: per-episode means for TensorBoard; peg_rate is a success
    # indicator so it is not divided by episode length
    self.extras["episode"] = {}
    for key in self.episode_sums.keys():
        if key == "peg_rate":
            self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids])
        else:
            self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
        self.episode_sums[key][env_ids] = 0
def post_reset(self):
    """One-time initialization after the world is built: cache joint limits,
    capture default tool/table poses, then reset every environment."""
    self.num_franka_dofs = self._frankas.num_dof
    self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
    dof_limits = self._frankas.get_dof_limits()
    self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
    self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
    self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
    # Slow down the gripper dofs relative to the arm.
    self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
    self.franka_dof_targets = torch.zeros(
        (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
    )
    # tool: capture spawn pose/velocity as the reset defaults
    self.default_tool_pos, self.default_tool_rot = self._tool.get_world_poses()
    self.default_tool_velocity = self._tool.get_velocities()
    # table: capture spawn pose/velocity as the reset defaults
    self.default_table_pos, self.default_table_rot = self._table.get_world_poses()
    self.default_table_velocity = self._table.get_velocities()
    # randomize all envs
    indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
    self.reset_idx(indices)
def calculate_metrics(self) -> None:
    """Compute the shaped peg-in-hole reward into ``self.rew_buf`` and
    accumulate per-episode statistics.

    Reward terms: XY alignment of tool over hole, roll/pitch alignment with
    the vertical reference rotation, Z descent toward the hole, minus an
    action-magnitude penalty; plus several threshold bonuses/penalties and a
    large completion bonus when the tool reaches the in-hole target pose.
    """
    # Envoroonment parameters:
    # table height: 0.4
    # hole depth: 0.05
    # hole radius: 0.01
    # tool at surface: Z = 0.43
    # tool pegged in hole: Z = 0.38
    # tool_pos to tool bottom: Z = 0.03
    # tool body length: 0.06
    # tool cap length: 0.01
    # tool vertical orient: [0.5, 0.5, 0.5, 0.5]
    num_envs = self._num_envs
    tool_pos, tool_rot = self._tool.get_world_poses(clone=False)
    hole_pos, hole_rot = self._location_ball.get_world_poses(clone=False)
    hole_pos[:,2] = 0.38 # fix hole pos (bottom of the hole)
    hole_surf_pos = hole_pos.clone().detach()
    hole_surf_pos[:,2] = hole_surf_pos[:,2]  # NOTE(review): no-op; surface ref equals hole height
    # target pose: tool seated in the hole (1 cm above hole bottom)
    hole_target_pos = hole_pos.clone().detach()
    hole_target_pos[:,2] = 0.39
    tool_ref_rot= self.tool_ref_rot  # reference vertical tool rotation
    lfinger_pos, lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
    rfinger_pos, rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
    # mid-finger pose (grasp point approximation)
    finger_rot = (lfinger_rot + rfinger_rot)/2
    finger_pos = (lfinger_pos + rfinger_pos)/2
    finger_rot_ref = torch.tensor([0.0325, -0.3824, 0.9233, -0.0135], device=self._device)
    # finger velocity (mean of the two finger linear velocities)
    lfinger_vel = self._frankas._lfingers.get_velocities()
    rfinger_vel = self._frankas._rfingers.get_velocities()
    finger_vel = (lfinger_vel[:,0:3]+rfinger_vel[:,0:3])/2
    norm_finger_vel = torch.norm(finger_vel, p=2, dim=-1)
    # direction vectors: tool body axis rotated by reference / current quaternion
    ref_vector = torch.zeros([num_envs,3], device=self._device)
    ref_vector[:,0] = 2*(tool_ref_rot[0]*tool_ref_rot[2] - tool_ref_rot[3]*tool_ref_rot[1])
    ref_vector[:,1] = 2*(tool_ref_rot[1]*tool_ref_rot[2] + tool_ref_rot[3]*tool_ref_rot[0])
    ref_vector[:,2] = 1 - 2*(tool_ref_rot[0]*tool_ref_rot[0] + tool_ref_rot[1]*tool_ref_rot[1])
    tool_vector = torch.zeros([num_envs,3], device=self._device)
    tool_vector[:,0] = 2*(tool_rot[:,0]*tool_rot[:,2] - tool_rot[:,3]*tool_rot[:,1])
    tool_vector[:,1] = 2*(tool_rot[:,1]*tool_rot[:,2] + tool_rot[:,3]*tool_rot[:,0])
    tool_vector[:,2] = 1 - 2*(tool_rot[:,0]*tool_rot[:,0] + tool_rot[:,1]*tool_rot[:,1])
    # quaternion -> Euler angles for tool and reference
    tool_roll = torch.atan2(2.0*(tool_rot[:,0]*tool_rot[:,1] + tool_rot[:,2]*tool_rot[:,3]), 1.0-2.0*(tool_rot[:,2]*tool_rot[:,2]+tool_rot[:,1]*tool_rot[:,1]))
    tool_yaw= torch.atan2(2.0*(tool_rot[:,3]*tool_rot[:,2] + tool_rot[:,0]*tool_rot[:,1]), 1.0-2.0*(tool_rot[:,1]*tool_rot[:,1]+tool_rot[:,2]*tool_rot[:,2]))  # NOTE(review): unused
    tool_pitch = torch.asin(2.0*(tool_rot[:,0]*tool_rot[:,2] - tool_rot[:,1]*tool_rot[:,3]))
    tool_ref_roll = torch.atan2(2.0*(tool_ref_rot[0]*tool_ref_rot[1] + tool_ref_rot[2]*tool_ref_rot[3]), 1.0-2.0*(tool_ref_rot[2]*tool_ref_rot[2]+tool_ref_rot[1]*tool_ref_rot[1]))
    tool_ref_yaw = torch.atan2(2.0*(tool_ref_rot[3]*tool_ref_rot[2] + tool_ref_rot[0]*tool_ref_rot[1]), 1.0-2.0*(tool_ref_rot[1]*tool_ref_rot[1]+tool_ref_rot[2]*tool_ref_rot[2]))
    tool_ref_pitch = torch.asin(2.0*(tool_ref_rot[0]*tool_ref_rot[2] - tool_ref_rot[1]*tool_ref_rot[3]))
    tool_roll_error = torch.abs(tool_roll - tool_ref_roll)
    tool_pitch_error = torch.abs(tool_pitch - tool_ref_pitch)
    # orientation reward: 2 when perfectly vertical, decays with roll/pitch error
    tool_roll_pitch_reward = 1 - torch.tanh(2*tool_roll_error) + 1 - torch.tanh(2*tool_pitch_error)
    # 1st reward: tool XY position over the hole
    tool_hole_dist = torch.norm(tool_pos - hole_pos, p=2, dim=-1)
    tool_hole_XY_dist = torch.norm(tool_pos[:,0:2] - hole_pos[:,0:2], p=2, dim=-1)
    tool_XY_pos_reward = 1 - torch.tanh(5*tool_hole_XY_dist)
    tool_hole_surf_dist = torch.norm(tool_pos - hole_surf_pos, p=2, dim=-1)
    tool_surf_pos_reward = 1 - torch.tanh(8*tool_hole_surf_dist)  # NOTE(review): unused in final reward
    # 2nd reward: tool rotation (direction-vector error)
    tool_rot_error = torch.norm(tool_vector - ref_vector, p=2, dim=-1)
    tool_rot_reward = 1 - torch.tanh(3*tool_rot_error)  # NOTE(review): unused in final reward
    self.rot_error = tool_roll_error + tool_pitch_error
    # 3rd reward: pegging in (descent) when tool is above the hole
    tool_hole_Z_dist = torch.abs(tool_pos[:,2] - hole_pos[:,2])
    tool_pegging_reward = 1 - torch.tanh(6*tool_hole_Z_dist)
    # 4th reward: tool-hole XYZ position
    tool_hole_dist = torch.norm(tool_pos - hole_pos, p=2, dim=-1)
    tool_target_dist = torch.norm(tool_pos - hole_target_pos, p=2, dim=-1)
    tool_pos_reward = 1 - torch.tanh(5*tool_hole_dist)  # NOTE(review): unused in final reward
    finger_rot_error = torch.norm(finger_rot - finger_rot_ref, p=2, dim=-1)
    finger_rot_reward = 1.0 / (1.0 + (finger_rot_error) ** 2)  # NOTE(review): unused in final reward
    finger_XY_pos_dist = torch.norm(finger_pos[:,0:2] - hole_pos[:,0:2], p=2, dim=-1)
    finger_pos_reward = 1 - torch.tanh(5*finger_XY_pos_dist)  # NOTE(review): unused in final reward
    # 1st penalty: action magnitude (arm joints only)
    action_penalty = torch.sum(self.actions[:,0:7] ** 2, dim=-1)
    action_penalty = 1 - 1.0 / (1.0 + action_penalty)
    finger_vel_penalty = torch.tanh(20*torch.abs(norm_finger_vel-0.1))  # NOTE(review): unused in final reward
    # final cumulative reward: weighted shaping terms ...
    final_reward = 3.5*tool_XY_pos_reward + 1.48*tool_roll_pitch_reward- 0.001*action_penalty + 2.0*tool_pegging_reward
    # ... plus threshold bonuses/penalties
    final_reward = torch.where((self.rot_error)<0.08, final_reward+0.5, final_reward)
    final_reward = torch.where((self.rot_error)>0.2, final_reward-1, final_reward)
    final_reward = torch.where(tool_hole_Z_dist>0.15, final_reward-1, final_reward)
    final_reward = torch.where(tool_hole_Z_dist<0.05, final_reward+0.1, final_reward)
    final_reward = torch.where(tool_hole_XY_dist<0.05, final_reward+0.5, final_reward)
    final_reward = torch.where(tool_hole_XY_dist>0.1, final_reward-10, final_reward)
    final_reward = torch.where(norm_finger_vel>0.15, final_reward-1, final_reward)
    final_reward = torch.where(tool_target_dist<0.01, final_reward+100, final_reward) # task complete
    # guard against NaNs propagating into the reward buffer
    final_reward = torch.where(torch.isnan(final_reward), torch.zeros_like(final_reward), final_reward)
    # trigger to determine if job is done (tool seated at target pose)
    self.is_pegged = torch.where(tool_target_dist<0.01, torch.ones_like(final_reward), torch.zeros_like(final_reward))
    self.rew_buf[:] = final_reward
    # accumulate per-episode statistics (flushed to extras in reset_idx)
    self.episode_sums["tool_hole_XY_dist"] += tool_hole_XY_dist
    self.episode_sums["tool_hole_Z_dist"] += tool_hole_Z_dist
    self.episode_sums["tool_hole_dist"] += tool_hole_dist
    self.episode_sums["tool_rot_error"] += tool_roll_error+tool_pitch_error
    self.episode_sums["peg_rate"] += self.is_pegged
    self.episode_sums["norm_finger_vel"] += norm_finger_vel
    self.episode_sums["rewards"] += final_reward
def is_done(self) -> None:
    """Flag environments for reset by writing 1 into ``self.reset_buf``.

    Both training and test mode currently reset only on episode timeout;
    the original code duplicated the identical timeout reset in each branch
    (all other reset conditions were commented out), so it is applied once.
    """
    self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
| 30,444 | Python | 49.405629 | 195 | 0.612633 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Door_Open.py | from omniisaacgymenvs.tasks.base.rl_task import RLTask
from Models.Franka.Franka import Franka
from Models.Franka.Franka_view import FrankaView
from Models.door_open.door import Door
from Models.door_open.door_view import DoorView
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.cloner import Cloner
import numpy as np
import torch
import math
from pxr import Usd, UsdGeom
class FrankaDoorOpenTask(RLTask):
def __init__(
    self,
    name,
    sim_config,
    env,
    offset=None
) -> None:
    """Franka door-opening RL task.

    Args:
        name: task name used by the RL framework.
        sim_config: configuration wrapper exposing ``config`` and
            ``task_config`` dictionaries.
        env: owning vectorized environment.
        offset: optional env offset (unused; kept for the RLTask API).
    """
    self._sim_config = sim_config
    self._cfg = sim_config.config
    self._task_cfg = sim_config.task_config

    # Environment layout / episode settings.
    self._num_envs = self._task_cfg["env"]["numEnvs"]
    self._env_spacing = self._task_cfg["env"]["envSpacing"]
    self._max_episode_length = self._task_cfg["env"]["episodeLength"]

    # Action scaling, reset noise, and reward coefficients from the task config.
    self.action_scale = self._task_cfg["env"]["actionScale"]
    self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
    self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
    self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
    self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
    self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
    self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
    self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
    self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
    self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
    self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]

    self.distX_offset = 0.04
    self.dt = 1/60.  # physics step used when integrating actions

    self._num_observations = 28
    self._num_actions = 9

    # Flags for testing / falsification mode.
    self.is_test = False
    self.initial_test_value = None
    self.is_action_noise = False

    RLTask.__init__(self, name, env)

    # Extra info for TensorBoard.
    self.extras = {}

    def _torch_zeros():
        # Per-env float accumulator (PEP 8: prefer a def over a named lambda).
        return torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)

    self.episode_sums = {
        key: _torch_zeros()
        for key in (
            "door_yaw_deg", "grasp_handle_dist", "handle_yaw_deg",
            "handle_pos_error", "open_rate", "rewards", "handle_yaw_error",
        )
    }
def set_up_scene(self, scene) -> None:
    """Spawn env_0 assets (Franka + door), clone them to all envs, and
    register the articulation views with the scene.

    The view objects must be created after ``super().set_up_scene`` has
    cloned env_0 so their regex prim paths match every environment.
    """
    # Franka is placed 0.5 m along +X so the door sits in front of it.
    franka_translation = torch.tensor([0.5, 0.0, 0.0])
    self.get_franka(franka_translation)
    self.get_door()
    super().set_up_scene(scene)
    # Add Franka
    self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
    # Add door
    self._door = DoorView(prim_paths_expr="/World/envs/.*/door/door", name="door_view")
    scene.add(self._frankas)
    scene.add(self._frankas._hands)
    scene.add(self._frankas._lfingers)
    scene.add(self._frankas._rfingers)
    scene.add(self._door)
    scene.add(self._door._handle)
    self.init_data()
    return
def get_franka(self, translation):
    """Spawn the Franka robot in env_0 and apply its actor config."""
    prim_path = self.default_zero_env_path + "/franka"
    robot = Franka(prim_path=prim_path, name="franka", translation=translation)
    actor_cfg = self._sim_config.parse_actor_config("franka")
    self._sim_config.apply_articulation_settings("franka", get_prim_at_path(robot.prim_path), actor_cfg)
def get_door(self):
    """Spawn the door articulation in env_0 and apply its actor config."""
    prim_path = self.default_zero_env_path + "/door"
    door_model = Door(prim_path=prim_path, name="door")
    actor_cfg = self._sim_config.parse_actor_config("door")
    self._sim_config.apply_articulation_settings("door", get_prim_at_path(door_model.prim_path), actor_cfg)
# Set as testing mode
def set_as_test(self):
    # Switch the task into evaluation/falsification mode.
    self.is_test = True
# Set action noise
def set_action_noise(self):
    # Enable additive Gaussian noise on actions during stepping.
    self.is_action_noise = True
# Set initial test values for testing mode
def set_initial_test_value(self, value):
    # Initial test configuration for the door task; consumed at reset.
    # NOTE(review): usage not visible here — presumably the initial door
    # pose/opening; confirm against this class's reset_idx.
    self.initial_test_value = value
def init_data(self) -> None:
    """Precompute static tensors used every step: the gripper's local grasp
    pose, the handle's door-local pose, axis vectors for alignment rewards,
    default joint positions, and the action buffer."""
    def get_env_local_pose(env_pos, xformable, device):
        """Compute pose in env-local coordinates"""
        world_transform = xformable.ComputeLocalToWorldTransform(0)
        world_pos = world_transform.ExtractTranslation()
        world_quat = world_transform.ExtractRotationQuat()
        px = world_pos[0] - env_pos[0]
        py = world_pos[1] - env_pos[1]
        pz = world_pos[2] - env_pos[2]
        qx = world_quat.imaginary[0]
        qy = world_quat.imaginary[1]
        qz = world_quat.imaginary[2]
        qw = world_quat.real
        # Returned layout is (px, py, pz, qw, qx, qy, qz) — quaternion w-first.
        return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
    stage = get_current_stage()
    # Hand and finger poses are read from env_0 only; all clones are identical.
    hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device)
    lfinger_pose = get_env_local_pose(
        self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device
    )
    rfinger_pose = get_env_local_pose(
        self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device
    )
    # finger pos: midway between the fingers, oriented like the left finger
    finger_pose = torch.zeros(7, device=self._device)
    finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
    finger_pose[3:7] = lfinger_pose[3:7]
    hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3]))
    # franka grasp local pose (hand frame), shifted 4 cm along the finger axis
    grasp_pose_axis = 1  # NOTE(review): unused here
    franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3])
    franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
    self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
    self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
    # XXX assume to be the local pos of the handle (hard-coded; verify against the door asset)
    door_local_handle_pose = torch.tensor([-0.1, -0.23, 0.81, 1.0, 0.0, 0.0, 0.0], device=self._device)
    self.door_local_handle_pos = door_local_handle_pose[0:3].repeat((self._num_envs, 1))
    self.door_local_handle_rot = door_local_handle_pose[3:7].repeat((self._num_envs, 1))
    # axis vectors for gripper/door alignment
    self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
    self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
    self.door_inward_axis = torch.tensor([-1, 0, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
    self.door_up_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
    # default arm posture facing the door, gripper slightly open
    self.franka_default_dof_pos = torch.tensor(
        [1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self._device
    )
    self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
def get_observations(self) -> dict:
    """Assemble the per-env observation vector for the door-opening task.

    Returns a dict keyed by the Franka view name containing ``obs_buf``:
    normalized joint positions, scaled joint velocities, handle pose and
    the grasp-to-handle position error.
    """
    hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
    self.door_pos, self.door_rot = self._door.get_world_poses(clone=False)
    franka_dof_pos = self._frankas.get_joint_positions(clone=False)
    franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
    self.franka_dof_pos = franka_dof_pos
    self.door_dof_pos = self._door.get_joint_positions(clone=False)
    # NOTE(review): attribute name kept as-is ("door_dor_vel") in case other
    # code reads it, but it looks like a typo for "door_dof_vel".
    self.door_dor_vel = self._door.get_joint_velocities(clone=False)
    # World-frame grasp frame (Franka) and handle frame (door).
    (
        self.franka_grasp_rot,
        self.franka_grasp_pos,
        self.door_handle_rot,
        self.door_handle_pos,
    ) = self.compute_grasp_transforms(
        hand_rot,
        hand_pos,
        self.franka_local_grasp_rot,
        self.franka_local_grasp_pos,
        self.door_rot,
        self.door_pos,
        self.door_local_handle_rot,
        self.door_local_handle_pos,
    )
    self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
    # Bug fix: the right-finger pose was previously read from the *left*
    # finger view, so both finger poses were always identical.
    self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
    # Handle pose; shift Y to compensate for the handle frame offset
    # (assumes a fixed -0.3 m error -- TODO confirm against the asset).
    self.handle_pos, self.handle_rot = self._door._handle.get_world_poses(clone=False)
    self.handle_pos[:, 1] = self.handle_pos[:, 1] - 0.3
    # Position error: from the Franka grasp frame to the door handle.
    grasp_handle_pos_error = self.handle_pos - self.franka_grasp_pos
    # Joint positions normalized to [-1, 1].
    dof_pos_scaled = (
        2.0
        * (franka_dof_pos - self.franka_dof_lower_limits)
        / (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
        - 1.0
    )
    self.obs_buf = torch.cat(
        (
            dof_pos_scaled,
            franka_dof_vel * self.dof_vel_scale,
            self.handle_pos,
            self.handle_rot,
            grasp_handle_pos_error,
        ),
        dim=-1,
    )
    return {self._frankas.name: {"obs_buf": self.obs_buf}}
def compute_grasp_transforms(
    self,
    hand_rot,
    hand_pos,
    franka_local_grasp_rot,
    franka_local_grasp_pos,
    door_rot,
    door_pos,
    door_local_handle_rot,
    door_local_handle_pos,
):
    """Compose local offsets into world-frame poses.

    Returns ``(grasp_rot, grasp_pos, handle_rot, handle_pos)``: the Franka
    grasp frame composed from the hand pose and the local grasp offset, and
    the door-handle frame composed from the door pose and the local handle
    offset.
    """
    grasp_rot, grasp_pos = tf_combine(
        hand_rot, hand_pos, franka_local_grasp_rot, franka_local_grasp_pos
    )
    handle_rot, handle_pos = tf_combine(
        door_rot, door_pos, door_local_handle_rot, door_local_handle_pos
    )
    return grasp_rot, grasp_pos, handle_rot, handle_pos
def pre_physics_step(self, actions) -> None:
    """Apply one step of policy actions as clamped joint position targets.

    Environments flagged in ``reset_buf`` are re-initialized first.
    """
    if not self._env._world.is_playing():
        return
    pending = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
    if len(pending) > 0:
        self.reset_idx(pending)
    self.actions = actions.clone().to(self._device)
    if self.is_action_noise is True:
        # Additive zero-mean Gaussian noise with 0.5 standard deviation.
        self.actions = self.actions + 0.5 * torch.randn_like(self.actions)
    # Integrate the (scaled) actions into the current targets, clamped to limits.
    step = self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
    self.franka_dof_targets[:] = tensor_clamp(
        self.franka_dof_targets + step, self.franka_dof_lower_limits, self.franka_dof_upper_limits
    )
    all_envs = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
    self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=all_envs)
def reset_idx(self, env_ids):
    """Reset the selected envs: Franka joints, door pose/joints, buffers, logs."""
    indices = env_ids.to(dtype=torch.int32)
    num_resets = len(indices)
    # Franka: default configuration plus uniform jitter, clamped to limits.
    jittered = torch.clamp(
        self.franka_default_dof_pos.unsqueeze(0)
        + 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
        self.franka_dof_lower_limits,
        self.franka_dof_upper_limits,
    )
    dof_pos = torch.zeros((num_resets, self._frankas.num_dof), device=self._device)
    dof_vel = torch.zeros((num_resets, self._frankas.num_dof), device=self._device)
    dof_pos[:, :] = jittered
    self.franka_dof_targets[env_ids, :] = jittered
    self.franka_dof_pos[env_ids, :] = jittered
    self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
    self._frankas.set_joint_positions(dof_pos, indices=indices)
    self._frankas.set_joint_velocities(dof_vel, indices=indices)
    # Door base pose: randomized during training, fixed offset during testing.
    self.new_door_pos = self.default_door_pos.clone().detach()
    if not self.is_test:
        # x offset in [-0.05, 0.05], y offset in [-0.1, 0.1].
        self.new_door_pos[:, 0] = self.default_door_pos[:, 0] + (0.05 + 0.05) * torch.rand(self._num_envs, device=self._device) - 0.05
        self.new_door_pos[:, 1] = self.default_door_pos[:, 1] + (0.1 + 0.1) * torch.rand(self._num_envs, device=self._device) - 0.1
    else:
        self.new_door_pos[:, 0] = self.default_door_pos[:, 0] + self.initial_test_value[0]
        self.new_door_pos[:, 1] = self.default_door_pos[:, 1] + self.initial_test_value[1]
    self._door.set_world_poses(self.new_door_pos[env_ids], self.default_door_rot[env_ids], indices=indices)
    # Door joint (single DOF): fully closed, zero velocity.
    door_pos = torch.zeros((num_resets, 1), device=self._device)
    door_vel = torch.zeros((num_resets, 1), device=self._device)
    self._door.set_joint_positions(door_pos, indices=indices)
    self._door.set_joint_velocities(door_vel, indices=indices)
    self._door.set_joint_position_targets(self.door_dof_targets[env_ids], indices=indices)
    # Bookkeeping.
    self.reset_buf[env_ids] = 0
    self.progress_buf[env_ids] = 0
    # Per-episode logging: open_rate is a plain mean, others per-step means.
    self.extras["episode"] = {}
    for key in self.episode_sums.keys():
        if key == "open_rate":
            self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids])
        else:
            self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
        self.episode_sums[key][env_ids] = 0.0
def post_reset(self):
    """One-time setup after the sim starts: cache limits, then reset all envs."""
    # Franka joint bookkeeping.
    self.num_franka_dofs = self._frankas.num_dof
    self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
    limits = self._frankas.get_dof_limits()
    self.franka_dof_lower_limits = limits[0, :, 0].to(device=self._device)
    self.franka_dof_upper_limits = limits[0, :, 1].to(device=self._device)
    self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
    # Gripper joints move slower than the arm joints.
    self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
    self.franka_dof_targets = torch.zeros(
        (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
    )
    # Door joint target buffer and default base pose.
    self.door_dof_targets = torch.zeros(
        (self._num_envs, 1), dtype=torch.float, device=self._device
    )
    self.default_door_pos, self.default_door_rot = self._door.get_world_poses()
    # Randomize every environment once.
    self.reset_idx(torch.arange(self._num_envs, dtype=torch.int64, device=self._device))
def calculate_metrics(self) -> None:
"""Compute the door-opening reward and accumulate per-episode statistics.

Reward mixes grasp-distance shaping, gripper/handle axis alignment,
finger placement around the handle and an opening-angle term, minus an
action penalty; NaNs are zeroed defensively at the end.
"""
# info extraction
# env
num_envs = self._num_envs
# Franka
joint_positions = self.franka_dof_pos
gripper_forward_axis = self.gripper_forward_axis
gripper_up_axis = self.gripper_up_axis
franka_grasp_pos, franka_grasp_rot = self.franka_grasp_pos, self.franka_grasp_rot
franka_lfinger_pos, franka_lfinger_rot = self.franka_lfinger_pos, self.franka_lfinger_rot
franka_rfinger_pos, franka_rfinger_rot = self.franka_rfinger_pos, self.franka_rfinger_rot
actions = self.actions
finger_pos = (franka_lfinger_pos + franka_rfinger_pos)/2
# NOTE(review): copy-paste bug -- this averages finger *positions*, not
# rotations; finger_rot is never used below, so it is harmless dead code.
finger_rot = (franka_lfinger_pos + franka_rfinger_pos)/2
# door
door_inward_axis = self.door_inward_axis
door_up_axis = self.door_up_axis
door_dof_pos = self.door_dof_pos
door_pos, door_rot = self.door_pos, self.door_rot
# handle
handle_pos, handle_rot = self.handle_pos, self.handle_rot
# handle_pos[:,1] = handle_pos[:,1] - 0.3 # fix hand-point y-axis error
handle_local_pos, handle_local_rot = self._door._handle.get_local_poses()
# preprocessing
# distance from grasp to handle
grasp_handle_dist = torch.norm(finger_pos - handle_pos, p=2, dim=-1)
# distance of each finger to the handle along Z-axis
lfinger_Z_dist = torch.abs(franka_lfinger_pos[:, 2] - handle_pos[:, 2])
rfinger_Z_dist = torch.abs(franka_rfinger_pos[:, 2] - handle_pos[:, 2])
# how far the door has been opened out
# quaternions to euler angles
# Yaw extraction assumes (w, x, y, z) quaternion ordering -- TODO confirm.
door_yaw = torch.atan2(2.0*(door_rot[:,0]*door_rot[:,3] + door_rot[:,1]*door_rot[:,2]), 1.0-2.0*(door_rot[:,2]*door_rot[:,2]+door_rot[:,3]*door_rot[:,3]))
handle_yaw = torch.atan2(2.0*(handle_rot[:,0]*handle_rot[:,3] + handle_rot[:,1]*handle_rot[:,2]), 1.0-2.0*(handle_rot[:,2]*handle_rot[:,2]+handle_rot[:,3]*handle_rot[:,3]))
# Target opening angle: 60 degrees.
door_ref_yaw = torch.deg2rad(torch.tensor([60], device=self._device))
door_yaw_error = torch.abs(door_ref_yaw - handle_yaw)
self.door_yaw_error = door_yaw_error.clone().detach()
# handle destination if opened
handle_ref_pos = handle_pos.clone().detach()
# target_open_deg = door_ref_yaw*torch.ones((num_envs,1), device=self._device) # open the door by 60 degrees
# target_open_rad = math.radians(60)
handle_ref_pos[:,0] = handle_ref_pos[:,0]*torch.cos(door_ref_yaw) + handle_ref_pos[:,1]*torch.sin(door_ref_yaw)
# NOTE(review): in-place aliasing bug -- [:,0] was already overwritten on the
# previous line, so this row uses the *rotated* x instead of the original x;
# the resulting handle_pos_error is not a true rigid rotation of the handle.
handle_ref_pos[:,1] = -handle_ref_pos[:,0]*torch.sin(door_ref_yaw) + handle_ref_pos[:,1]*torch.cos(door_ref_yaw)
self.handle_pos_error = torch.norm(handle_ref_pos - handle_pos, p=2, dim=-1)
# gripper direction alignment
axis1 = tf_vector(franka_grasp_rot, gripper_forward_axis)
axis2 = tf_vector(handle_rot, door_inward_axis)
axis3 = tf_vector(franka_grasp_rot, gripper_up_axis)
axis4 = tf_vector(handle_rot, door_up_axis)
dot1 = torch.bmm(axis1.view(num_envs, 1, 3), axis2.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of forward axis for gripper
dot2 = torch.bmm(axis3.view(num_envs, 1, 3), axis4.view(num_envs, 3, 1)).squeeze(-1).squeeze(-1) # alignment of up axis for gripper
# reward functions
# 1st rewards: distance from hand to the drawer
grasp_dist_reward = 1.0 / (1.0 + grasp_handle_dist ** 2)
grasp_dist_reward *= grasp_dist_reward
grasp_dist_reward = torch.where(grasp_handle_dist <= 0.02, grasp_dist_reward * 2, grasp_dist_reward)
# 2nd reward for matching the orientation of the hand to the drawer (fingers wrapped)
rot_reward = 0.5 * (torch.sign(dot1) * dot1 ** 2 + torch.sign(dot2) * dot2 ** 2)
# 3rd reward: bonus if left finger is above the drawer handle and right below
around_handle_reward = torch.zeros_like(rot_reward)
around_handle_reward = torch.where(self.franka_lfinger_pos[:, 2] > handle_pos[:, 2],
torch.where(self.franka_rfinger_pos[:, 2] < handle_pos[:, 2],
around_handle_reward + 0.5, around_handle_reward), around_handle_reward)
# 4th reward: distance of each finger from the handle
finger_dist_reward = torch.zeros_like(rot_reward)
finger_dist_reward = torch.where(franka_lfinger_pos[:, 2] > handle_pos[:, 2],
torch.where(franka_rfinger_pos[:, 2] < handle_pos[:, 2],
(0.04 - lfinger_Z_dist) + (0.04 - rfinger_Z_dist), finger_dist_reward), finger_dist_reward)
# 5th reward: finger closeness
finger_close_reward = torch.zeros_like(rot_reward)
finger_close_reward = torch.where(grasp_handle_dist <=0.03, (0.04 - joint_positions[:, 7]) + (0.04 - joint_positions[:, 8]), finger_close_reward)
# 6th reward: how far the door has been opened out
# instead of using rotation, may use pos as reference
# NOTE(review): the raw handle_yaw term grows the reward linearly with the
# opening angle even without the around-handle gate -- confirm intended.
open_reward = (1.0 / (1.0 + door_yaw_error ** 2)) * around_handle_reward + handle_yaw
# open_reward = (1.0 / (1.0 + self.handle_pos_error)) * around_handle_reward
# 1st penalty
action_penalty = torch.sum(actions ** 2, dim=-1)
final_reward = 2.0 * grasp_dist_reward + 0.5 * rot_reward + 10.0 * around_handle_reward + 70.0 * open_reward + \
100.0 * finger_dist_reward+ 10.0 * finger_close_reward - 0.01 * action_penalty
# bonus for opening door properly
final_reward = torch.where(door_yaw_error < 0.7, final_reward + 0.5, final_reward)
final_reward = torch.where(door_yaw_error < 0.5, final_reward + around_handle_reward, final_reward)
final_reward = torch.where(door_yaw_error < 0.2, final_reward + (2.0 * around_handle_reward), final_reward)
# in case: Nan value occur
final_reward = torch.where(torch.isnan(final_reward), torch.zeros_like(final_reward), final_reward)
self.rew_buf[:] = final_reward
# self.rew_buf[:] = torch.rand(self._num_envs)
# if the door is opened to ref position -> task complete
self.is_opened = torch.where(torch.rad2deg(handle_yaw)>=70, torch.ones_like(final_reward), torch.zeros_like(final_reward))
# Accumulate per-episode statistics (averaged/logged in reset_idx).
self.episode_sums["door_yaw_deg"] += torch.rad2deg(door_yaw)
self.episode_sums["handle_yaw_deg"] += torch.rad2deg(handle_yaw)
self.episode_sums["handle_pos_error"] += self.handle_pos_error
self.episode_sums["handle_yaw_error"] += door_yaw_error
self.episode_sums["grasp_handle_dist"] += grasp_handle_dist
self.episode_sums["open_rate"] += self.is_opened
self.episode_sums["rewards"] += final_reward
# print("handle_pos", handle_pos)
# print("handle_rot", handle_rot)
# print("door_pos", door_pos)
# print("door_rot", door_rot)
# print("handle_local_pos", handle_local_pos)
# print("handle_local_rot", handle_local_rot)
# print("grasp_handle_dist", grasp_handle_dist)
# print("door_yaw", door_yaw)
def is_done(self) -> None:
    """Flag environments whose episode reached the time limit.

    Train and test currently share the same (time-limit only) termination
    rule; success-based early resets are disabled.
    """
    timed_out = self.progress_buf >= self._max_episode_length - 1
    self.reset_buf = torch.where(timed_out, torch.ones_like(self.reset_buf), self.reset_buf)
| 23,580 | Python | 45.510848 | 180 | 0.611196 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Ball_Pushing.py | from omniisaacgymenvs.tasks.base.rl_task import RLTask
from Models.Franka.Franka import Franka
from Models.Franka.Franka_view import FrankaView
from Models.ball_pushing.table import Table
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.cloner import Cloner
import numpy as np
import torch
import math
from pxr import Usd, UsdGeom
class FrankaBallPushingTask(RLTask):
# RL task: a Franka arm must push a ball across a table into a hole.
def __init__(
self,
name,
sim_config,
env,
offset=None
) -> None:
"""Read task/env config, set observation/action sizes and logging buffers."""
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
# Env layout and episode length.
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
# Scaling factors and reward weights taken from the task config.
self.action_scale = self._task_cfg["env"]["actionScale"]
self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
# Ball spawn parameters.
self.ball_radius = self._task_cfg["env"]["ballRadius"]
self.ball_initial_position = self._task_cfg["env"]["ballInitialPosition"]
self.ball_initial_orientation = self._task_cfg["env"]["ballInitialOrientation"]
# self.ball_initial_position[0] = (0.1 + 0.1) * np.random.rand(1) -0.1
# self.ball_initial_position[1] = (0.2 + 0.2) * np.random.rand(1) -0.2
# initial_x = (0.1 + 0.1) * torch.rand(self._num_envs) -0.1
# initial_y = (0.2 + 0.2) * torch.rand(self._num_envs) -0.2
self.distX_offset = 0.04
# Physics step (assumes a 60 Hz sim -- TODO confirm against sim config).
self.dt = 1/60.
self._num_observations = 30
self._num_actions = 9
# Flags toggled by the test harness (see set_as_test / set_action_noise).
self.is_test = False
self.initial_test_value = None
self.is_action_noise = False
RLTask.__init__(self, name, env)
# Extra info for TensorBoard
self.extras = {}
torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
self.episode_sums = {"success_rate": torch_zeros(), "ball_hole_XY_dist": torch_zeros()}
return
def set_up_scene(self, scene) -> None:
"""Create the template prims, clone envs, and register views with the scene."""
franka_translation = torch.tensor([0.6, 0.0, 0.0])
self.get_franka(franka_translation)
self.get_table()
self.get_ball()
# Clone env_0 into all environments.
super().set_up_scene(scene)
self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
# Add ball
self._ball = RigidPrimView(prim_paths_expr="/World/envs/.*/ball", name="ball_view", reset_xform_properties=False)
# Add location_ball
# NOTE(review): "location_ball" is a marker prim inside the table asset,
# presumably marking the hole position -- confirm against the USD asset.
self._location_ball = RigidPrimView(prim_paths_expr="/World/envs/.*/table/table/location_ball", name="location_ball_view", reset_xform_properties=False)
scene.add(self._frankas)
scene.add(self._frankas._hands)
scene.add(self._frankas._lfingers)
scene.add(self._frankas._rfingers)
scene.add(self._ball)
scene.add(self._location_ball)
self.init_data()
return
def get_franka(self, translation):
    """Spawn a Franka arm in the template env and apply its actor config."""
    robot = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation=translation)
    cfg = self._sim_config.parse_actor_config("franka")
    self._sim_config.apply_articulation_settings("franka", get_prim_at_path(robot.prim_path), cfg)
def get_table(self):
    """Spawn the table asset in the template env and apply its actor config."""
    table = Table(prim_path=self.default_zero_env_path + "/table", name="table")
    cfg = self._sim_config.parse_actor_config("table")
    self._sim_config.apply_articulation_settings("table", get_prim_at_path(table.prim_path), cfg)
def get_ball(self):
    """Create the dynamic red sphere the arm pushes, and apply its config."""
    ball = DynamicSphere(
        name="ball",
        prim_path=self.default_zero_env_path + "/ball",
        position=self.ball_initial_position,
        orientation=self.ball_initial_orientation,
        radius=self.ball_radius,
        color=np.array([1, 0, 0]),
        density=100,
    )
    cfg = self._sim_config.parse_actor_config("ball")
    self._sim_config.apply_articulation_settings("ball", get_prim_at_path(ball.prim_path), cfg)
# --- test-harness configuration helpers ---

def set_as_test(self):
    """Switch the task into deterministic testing mode."""
    self.is_test = True

def set_action_noise(self):
    """Enable additive Gaussian noise on the policy actions."""
    self.is_action_noise = True

def set_initial_test_value(self, value):
    """Store the initial ball (x, y) offsets used while testing."""
    self.initial_test_value = value
def init_data(self) -> None:
"""Cache local grasp frames, gripper axes and the default joint pose."""
def get_env_local_pose(env_pos, xformable, device):
"""Compute pose in env-local coordinates"""
world_transform = xformable.ComputeLocalToWorldTransform(0)
world_pos = world_transform.ExtractTranslation()
world_quat = world_transform.ExtractRotationQuat()
px = world_pos[0] - env_pos[0]
py = world_pos[1] - env_pos[1]
pz = world_pos[2] - env_pos[2]
qx = world_quat.imaginary[0]
qy = world_quat.imaginary[1]
qz = world_quat.imaginary[2]
qw = world_quat.real
# Pose layout: (px, py, pz, qw, qx, qy, qz) -- translation then wxyz quaternion.
return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
stage = get_current_stage()
# Poses of the hand link and both fingertips in env_0's local frame.
hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device)
lfinger_pose = get_env_local_pose(
self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device
)
rfinger_pose = get_env_local_pose(
self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device
)
# Grasp frame: midpoint between the fingers, left-finger orientation.
finger_pose = torch.zeros(7, device=self._device)
finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
finger_pose[3:7] = lfinger_pose[3:7]
# Express the grasp frame relative to the hand link.
hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3]))
grasp_pose_axis = 1
franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3])
franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
# Gripper axes (per-env copies).
self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
# Default joint configuration applied at every reset.
self.franka_default_dof_pos = torch.tensor(
[1.157, -1.066, -0.155, -2.239, -1.841, 1.003, 0.469, 0.035, 0.035], device=self._device
)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
def get_observations(self) -> dict:
    """Build the 30-D observation: normalized joint positions, scaled joint
    velocities, ball velocity, ball-to-hole vector and ball position.

    Returns a dict keyed by the Franka view name containing ``obs_buf``.
    """
    hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
    franka_dof_pos = self._frankas.get_joint_positions(clone=False)
    franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
    self.franka_dof_pos = franka_dof_pos
    self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
    # Bug fix: the right-finger pose was previously read from the *left*
    # finger view, so both finger poses were always identical.
    self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._rfingers.get_world_poses(clone=False)
    # Ball state.
    self.ball_pos, self.ball_rot = self._ball.get_world_poses(clone=False)
    self.ball_vel = self._ball.get_velocities()
    # Hole marker position and the remaining push vector.
    self.location_ball_pos, self.location_ball_rot = self._location_ball.get_world_poses(clone=False)
    to_target = self.location_ball_pos - self.ball_pos
    # Joint positions normalized to [-1, 1].
    dof_pos_scaled = (
        2.0
        * (franka_dof_pos - self.franka_dof_lower_limits)
        / (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
        - 1.0
    )
    self.obs_buf = torch.cat(
        (
            dof_pos_scaled,
            franka_dof_vel * self.dof_vel_scale,
            self.ball_vel,
            to_target,
            self.ball_pos,
        ),
        dim=-1,
    )
    return {self._frankas.name: {"obs_buf": self.obs_buf}}
def pre_physics_step(self, actions) -> None:
    """Apply one step of policy actions as joint position targets.

    Envs flagged in ``reset_buf`` are re-initialized first.  The gripper
    joints (7, 8) are then forced to a fixed 0.015 opening so the policy
    effectively controls only the arm.
    """
    if not self._env._world.is_playing():
        return
    pending = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
    if len(pending) > 0:
        self.reset_idx(pending)
    self.actions = actions.clone().to(self._device)
    if self.is_action_noise is True:
        # Additive zero-mean Gaussian noise with 0.5 standard deviation.
        self.actions = self.actions + 0.5 * torch.randn_like(self.actions)
    # Integrate the (scaled) actions into the current targets, clamped to limits.
    step = self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
    self.franka_dof_targets[:] = tensor_clamp(
        self.franka_dof_targets + step, self.franka_dof_lower_limits, self.franka_dof_upper_limits
    )
    all_envs = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
    self.franka_dof_targets[:, 7] = 0.015
    self.franka_dof_targets[:, 8] = 0.015
    self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=all_envs)
def reset_idx(self, env_ids):
"""Reset Franka and ball state for the selected envs and log episode stats."""
indices = env_ids.to(dtype=torch.int32)
num_indices = len(indices)
# reset franka
# Default configuration plus uniform jitter, clamped to joint limits.
pos = tensor_clamp(
self.franka_default_dof_pos.unsqueeze(0)
+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
self.franka_dof_lower_limits,
self.franka_dof_upper_limits,
)
dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
dof_pos[:, :] = pos
self.franka_dof_targets[env_ids, :] = pos
self.franka_dof_pos[env_ids, :] = pos
self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
self._frankas.set_joint_positions(dof_pos, indices=indices)
self._frankas.set_joint_velocities(dof_vel, indices=indices)
# reset ball
# if not test, randomize ball initial positions for training
# NOTE(review): random offsets are drawn for *all* envs but only env_ids
# rows are written back -- wasteful but harmless.
if not self.is_test:
# reset ball position: x [-0.1, 0.1], y [-0.1,0.1]
self.new_ball_pos = self.default_ball_pos.clone().detach()
self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + (0.1 + 0.1) * torch.rand(self._num_envs, device=self._device) -0.1
self.new_ball_pos[:,1] = self.default_ball_pos[:,1] + (0.1 + 0.1) * torch.rand(self._num_envs, device=self._device) -0.1
self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices)
self._ball.set_velocities(self.default_ball_velocity[env_ids], indices = indices)
# if is test mode, set the ball to given position (1 environment)
else:
self.new_ball_pos = self.default_ball_pos.clone().detach()
self.new_ball_pos[:,0] = self.default_ball_pos[:,0] + self.initial_test_value[0]
self.new_ball_pos[:,1] = self.default_ball_pos[:,1] +self.initial_test_value[1]
self._ball.set_world_poses(self.new_ball_pos[env_ids], self.default_ball_rot[env_ids], indices = indices)
self._ball.set_velocities(self.default_ball_velocity[env_ids], indices = indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
# fill extras
# Per-episode logging: success_rate is a plain mean, others per-step means.
self.extras["episode"] = {}
for key in self.episode_sums.keys():
if key == "success_rate":
self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids])
else:
self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids])/self._max_episode_length
self.episode_sums[key][env_ids] = 0.
def post_reset(self):
    """Cache joint limits and the default ball pose, then reset every env."""
    self.num_franka_dofs = self._frankas.num_dof
    self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
    limits = self._frankas.get_dof_limits()
    self.franka_dof_lower_limits = limits[0, :, 0].to(device=self._device)
    self.franka_dof_upper_limits = limits[0, :, 1].to(device=self._device)
    self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
    # Gripper joints move slower than the arm joints.
    self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
    self.franka_dof_targets = torch.zeros(
        (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
    )
    # Remember where the ball spawns so resets can restore it.
    self.default_ball_pos, self.default_ball_rot = self._ball.get_world_poses()
    self.default_ball_velocity = self._ball.get_velocities()
    # Randomize every environment once.
    self.reset_idx(torch.arange(self._num_envs, dtype=torch.int64, device=self._device))
def calculate_metrics(self) -> None:
    """Compute per-env rewards for ball pushing and accumulate episode stats.

    Active reward = 10 * XY-distance shaping toward the hole
    + 100 * bonus when the ball drops into the hole; the remaining shaping
    terms are currently weighted 0 and kept only for reward tuning.
    """
    joint_positions = self.franka_dof_pos
    num_envs = self._num_envs
    # Midpoint of the two fingertips.
    # Bug fix: this previously averaged the *left* finger with itself,
    # i.e. (l + l)/2, so the right finger was ignored.
    finger_pos = (self.franka_lfinger_pos + self.franka_rfinger_pos) / 2
    self.finger_pos = finger_pos
    gripper_forward_axis = self.gripper_forward_axis
    gripper_up_axis = self.gripper_up_axis
    ball_init_pos = self.default_ball_pos
    ball_pos = self.ball_pos
    hole_pos = self.location_ball_pos
    # Distances from the ball to the hole (full 3D and tabletop XY).
    ball_hole_dist = torch.norm(hole_pos - ball_pos, p=2, dim=-1)
    ball_hole_XY_dist = torch.norm(hole_pos[:, 0:2] - ball_pos[:, 0:2], p=2, dim=-1)
    # Shaping in [0, 1]: 1 at the hole, -> 0 away from it.
    dist_reward = 1 - torch.tanh(3 * ball_hole_XY_dist)
    # How far the ball moved from its spawn position (XY only).
    ball_to_init_dist = torch.norm(ball_pos[:, 0:2] - ball_init_pos[:, 0:2], p=2, dim=-1)
    self.ball_to_init_dist = ball_to_init_dist
    # Finger-to-ball shaping (currently weight 0).
    finger_ball_dist = torch.norm(finger_pos - ball_pos, p=2, dim=-1)
    finger_ball_reward = 1.0 / (1.0 + finger_ball_dist ** 2)
    # Action regularization mapped into [0, 1] (currently weight 0).
    action_penalty = torch.sum(self.actions ** 2, dim=-1)
    action_penalty = 1 - torch.tanh(action_penalty / 2.5)
    # Penalty while the ball has not moved far enough (currently weight 0).
    ball_unmove_penalty = torch.zeros_like(dist_reward)
    ball_unmove_penalty = torch.where(ball_to_init_dist < 0.3, torch.tanh(15 * (0.3 - ball_to_init_dist)), ball_unmove_penalty)
    # Bonus when the ball drops near the hole; penalty if it falls elsewhere.
    falling_bonus = torch.where(torch.logical_and(ball_hole_XY_dist < 0.1, ball_pos[:, 2] < 0.38), torch.ones_like(dist_reward), torch.zeros_like(dist_reward))
    falling_penalty = torch.zeros_like(dist_reward)
    falling_penalty = torch.where(torch.logical_and(ball_hole_XY_dist > 0.001, ball_pos[:, 2] < 0.38), falling_penalty + 10, falling_penalty)
    # No distance shaping once the ball passes the hole along -X.
    dist_reward = torch.where(ball_pos[:, 0] < hole_pos[:, 0], torch.zeros_like(dist_reward), dist_reward)
    dist_penalty = torch.tanh(3 * ball_hole_XY_dist)
    # Weighted sum; zero-weight terms are retained for reward tuning.
    final_reward = 10.0 * dist_reward - 0.0 * ball_unmove_penalty + 100.0 * falling_bonus - 0.0 * action_penalty \
        - 0.0 * falling_penalty + 0.0 * finger_ball_reward - 0.0 * dist_penalty
    # Task success: ball fell through within 1 cm of the hole center.
    self.is_complete = torch.where(torch.logical_and(ball_hole_XY_dist < 0.01, ball_pos[:, 2] < 0.38), torch.ones_like(final_reward), torch.zeros_like(final_reward))
    self.rew_buf[:] = final_reward
    self.episode_sums["success_rate"] += self.is_complete
    self.episode_sums["ball_hole_XY_dist"] += ball_hole_XY_dist
def is_done(self) -> None:
    """Flag environments for reset.

    Always resets on the episode time limit; during training, additionally
    resets envs whose ball dropped below the table surface.
    """
    if not self.is_test:
        fell = self.ball_pos[:, 2] < 0.1
        self.reset_buf = torch.where(fell, torch.ones_like(self.reset_buf), self.reset_buf)
    timed_out = self.progress_buf >= self._max_episode_length - 1
    self.reset_buf = torch.where(timed_out, torch.ones_like(self.reset_buf), self.reset_buf)
| 20,783 | Python | 45.496644 | 185 | 0.610258 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Point_Reaching.py | from omniisaacgymenvs.tasks.base.rl_task import RLTask
from Models.Franka.Franka import Franka
from Models.Franka.Franka_view import FrankaView
from Models.point_reaching.target_ball import TargetBall
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.cloner import Cloner
import numpy as np
import torch
import math
from pxr import Gf, Usd, UsdGeom
class FrankaPointReachingTask(RLTask):
    """Franka point-reaching task: move the gripper midpoint onto a target ball.

    Observations (27): scaled joint positions (9), scaled joint velocities (9),
    target ball position (3), finger-center position (3), finger-to-target (3).
    Actions (9): joint position-target deltas (gripper joints are overridden in
    pre_physics_step).
    """
    def __init__(
        self,
        name,
        sim_config,
        env,
        offset=None
    ) -> None:
        # simulation / task configuration handles
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config
        # env and episode sizing
        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._max_episode_length = self._task_cfg["env"]["episodeLength"]
        # control / reward scales from the task config (several are unused by this
        # task's reward but kept for config compatibility across Franka tasks)
        self.action_scale = self._task_cfg["env"]["actionScale"]
        self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
        self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
        self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
        self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
        self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
        self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
        self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
        self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
        self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
        self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
        self.distX_offset = 0.04
        self.dt = 1/60.
        # observation / action space sizes
        self._num_observations = 27
        self._num_actions = 9
        # Flags for testing mode and action noise (set via the setters below)
        self.is_test = False
        self.initial_test_value = None
        self.is_action_noise = False
        RLTask.__init__(self, name, env)
        # Extra info for TensorBoard
        self.extras = {}
        torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
        self.episode_sums = {"success_rate": torch_zeros()}
        return
    def set_up_scene(self, scene) -> None:
        """Build the template env (Franka + target ball), clone it, and register views."""
        # Franka
        franka_translation = torch.tensor([0.3, 0.0, 0.0])
        self.get_franka(franka_translation)
        self.get_target_ball()
        # Here the env is cloned
        super().set_up_scene(scene)
        # Add Franka
        self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
        # Add target ball (rigid-prim view over the ball mesh in every env)
        self._target_ball = RigidPrimView(prim_paths_expr="/World/envs/.*/target_ball/target_ball/ball_mesh", name="target_ball_view", reset_xform_properties=False)
        scene.add(self._frankas)
        scene.add(self._frankas._hands)
        scene.add(self._frankas._lfingers)
        scene.add(self._frankas._rfingers)
        scene.add(self._target_ball)
        self.init_data()
        return
def get_franka(self, translation):
franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation)
self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka"))
def get_target_ball(self):
target_ball = TargetBall(prim_path=self.default_zero_env_path + "/target_ball", name="target_ball")
self._sim_config.apply_articulation_settings("target_ball", get_prim_at_path(target_ball.prim_path), self._sim_config.parse_actor_config("target_ball"))
    # Set as testing mode
    def set_as_test(self):
        """Enable test mode: reset_idx then uses the preset initial_test_value
        instead of randomizing the target ball position."""
        self.is_test = True
    # Set action noise
    def set_action_noise(self):
        """Enable additive Gaussian action noise (applied in pre_physics_step, std 0.5)."""
        self.is_action_noise = True
    # Set initial test values for testing mode
    def set_initial_test_value(self, value):
        """Store the preset initial condition used in test mode.

        For point reaching this is the (x, y, z) offset applied to the target
        ball's default position at reset (see reset_idx)."""
        self.initial_test_value = value
    def init_data(self) -> None:
        """Precompute fixed transforms (grasp frame, gripper axes) and default joint pose."""
        def get_env_local_pose(env_pos, xformable, device):
            """Compute pose in env-local coordinates"""
            world_transform = xformable.ComputeLocalToWorldTransform(0)
            world_pos = world_transform.ExtractTranslation()
            world_quat = world_transform.ExtractRotationQuat()
            px = world_pos[0] - env_pos[0]
            py = world_pos[1] - env_pos[1]
            pz = world_pos[2] - env_pos[2]
            qx = world_quat.imaginary[0]
            qy = world_quat.imaginary[1]
            qz = world_quat.imaginary[2]
            qw = world_quat.real
            return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
        stage = get_current_stage()
        # env-local poses of the hand link and both fingers (env_0 is the template)
        hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device)
        lfinger_pose = get_env_local_pose(
            self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device
        )
        rfinger_pose = get_env_local_pose(
            self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device
        )
        # finger pose: midpoint of the two fingertips, orientation taken from the left finger
        finger_pose = torch.zeros(7, device=self._device)
        finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
        finger_pose[3:7] = lfinger_pose[3:7]
        hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3]))
        # franka grasp local pose (finger midpoint expressed in the hand frame)
        grasp_pose_axis = 1
        franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3])
        franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
        self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
        self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
        self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
        self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
        # default joint configuration restored on every reset
        self.franka_default_dof_pos = torch.tensor(
            [0.0, -0.872, 0.0, -2.0, 0.0, 2.618, 0.785, 0.01, 0.01], device=self._device
        )
        self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
def get_observations(self) -> dict:
# Franka
hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
franka_dof_pos = self._frankas.get_joint_positions(clone=False)
franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
self.franka_dof_pos = franka_dof_pos
self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
finger_center = (self.franka_lfinger_pos + self.franka_rfinger_pos)/2
dof_pos_scaled = (
2.0
* (franka_dof_pos - self.franka_dof_lower_limits)
/ (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
- 1.0
)
# target ball
target_ball_pos, target_ball_rot = self._target_ball.get_world_poses(clone=False) # tool position
to_target = finger_center - target_ball_pos
self.obs_buf = torch.cat(
(
dof_pos_scaled,
franka_dof_vel * self.dof_vel_scale,
target_ball_pos,
finger_center,
to_target,
),
dim=-1,
)
observations = {
self._frankas.name: {
"obs_buf": self.obs_buf
}
}
return observations
    def pre_physics_step(self, actions) -> None:
        """Apply policy actions as joint position-target deltas; gripper joints
        are pinned to their default opening."""
        if not self._env._world.is_playing():
            return
        # reset any envs flagged by is_done before applying new actions
        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)
        self.actions = actions.clone().to(self._device)
        # if action noise
        if self.is_action_noise is True:
            # additive zero-mean Gaussian noise with std 0.5 (variance 0.25)
            self.actions = self.actions + (0.5)*torch.randn_like(self.actions)
        # integrate scaled actions into clamped joint position targets
        targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
        self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
        env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
        # override the two gripper joints with their default opening
        self.franka_dof_targets[:,7] = self.franka_default_dof_pos[7]
        self.franka_dof_targets[:,8] = self.franka_default_dof_pos[8]
        self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)
    def reset_idx(self, env_ids):
        """Reset the given envs: restore Franka defaults, re-place the target ball
        (random box in training, preset offset in test mode), and log episode stats."""
        indices = env_ids.to(dtype=torch.int32)
        num_indices = len(indices)
        # reset franka to its default joint configuration (noise line kept disabled)
        pos = torch.clamp(
            self.franka_default_dof_pos.unsqueeze(0),
            #+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
            self.franka_dof_lower_limits,
            self.franka_dof_upper_limits,
        )
        dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_pos[:, :] = pos
        self.franka_dof_targets[env_ids, :] = pos
        self.franka_dof_pos[env_ids, :] = pos
        self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
        self._frankas.set_joint_positions(dof_pos, indices=indices)
        self._frankas.set_joint_velocities(dof_vel, indices=indices)
        if not self.is_test:
            # reset target ball
            # reset target ball position within an area: x [-0.2, 0.2], y [-0.4,0.4], z [-0.2,0.2]
            self.new_cube_pos = self.default_target_ball_pos.clone().detach()
            self.new_cube_pos[:,0] = self.default_target_ball_pos[:,0] + (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2
            self.new_cube_pos[:,1] = self.default_target_ball_pos[:,1] + (0.4 + 0.4) * torch.rand(self._num_envs, device=self._device) -0.4
            self.new_cube_pos[:,2] = self.default_target_ball_pos[:,2] + (0.2 + 0.2) * torch.rand(self._num_envs, device=self._device) -0.2
            self._target_ball.set_world_poses(self.new_cube_pos[env_ids], self.default_target_ball_rot[env_ids], indices = indices)
            self._target_ball.set_velocities(self.default_target_ball_velocity[env_ids], indices = indices)
        # if is test mode: apply the fixed offset provided via set_initial_test_value
        else:
            self.new_cube_pos = self.default_target_ball_pos.clone().detach()
            self.new_cube_pos[:,0] = self.default_target_ball_pos[:,0] + self.initial_test_value[0]
            self.new_cube_pos[:,1] = self.default_target_ball_pos[:,1] + self.initial_test_value[1]
            self.new_cube_pos[:,2] = self.default_target_ball_pos[:,2] + self.initial_test_value[2]
            self._target_ball.set_world_poses(self.new_cube_pos[env_ids], self.default_target_ball_rot[env_ids], indices = indices)
            self._target_ball.set_velocities(self.default_target_ball_velocity[env_ids], indices = indices)
        # bookkeeping
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0
        # fill extras: success_rate is reported as-is, other sums are per-step averages
        self.extras["episode"] = {}
        for key in self.episode_sums.keys():
            if key == "success_rate":
                self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids])
            else:
                self.extras["episode"][key] = torch.mean(self.episode_sums[key][env_ids]) / self._max_episode_length
            self.episode_sums[key][env_ids] = 0
    def post_reset(self):
        """Cache DOF limits/buffers and the ball's default pose, then reset all envs."""
        # Franka
        self.num_franka_dofs = self._frankas.num_dof
        self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
        dof_limits = self._frankas.get_dof_limits()
        self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
        self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
        self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
        # slow the gripper joints relative to the arm joints
        self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
        self.franka_dof_targets = torch.zeros(
            (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
        )
        # Target ball: default pose/velocity used as the reset baseline
        self.default_target_ball_pos, self.default_target_ball_rot = self._target_ball.get_world_poses()
        self.default_target_ball_velocity = self._target_ball.get_velocities()
        # randomize all envs
        indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
        self.reset_idx(indices)
def calculate_metrics(self) -> None:
# Reward info
self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses()
self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses()
finger_center = (self.franka_lfinger_pos + self.franka_rfinger_pos)/2
lfinger_vel = self._frankas._lfingers.get_velocities()
rfinger_vel = self._frankas._lfingers.get_velocities()
finger_vel = (lfinger_vel + rfinger_vel)/2
finger_vel_norm = torch.norm(finger_vel, p=2, dim=-1)
target_ball_pos, target_ball_rot = self._target_ball.get_world_poses()
# distance
ball_center_dist = torch.norm(target_ball_pos - finger_center, p=2, dim=-1)
center_dist_reward = 1.0/(1.0+ball_center_dist)
# velocity
finger_vel_reward = 1.0/(1.0+finger_vel_norm)
# is complete
is_complete = torch.where( torch.logical_and(ball_center_dist<0.03, finger_vel_norm<0.02),
torch.ones_like(finger_vel_norm), torch.zeros_like(finger_vel_norm))
final_reward = 1.0*center_dist_reward + 10.0*is_complete + 0.1*finger_vel_reward
self.rew_buf[:] = final_reward
self.episode_sums["success_rate"] += is_complete
def is_done(self) -> None:
if not self.is_test:
# reset if max length reached
self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
else:
self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
| 15,304 | Python | 42.112676 | 164 | 0.622517 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/Tasks/Franka_Cloth_Placing.py | from omniisaacgymenvs.tasks.base.rl_task import RLTask
from Models.Franka.Franka import Franka
from Models.Franka.Franka_view import FrankaView
from Models.cloth_placing.target_table import TargetTable
from omni.isaac.core.prims import ParticleSystem, ClothPrim, ClothPrimView
from omni.isaac.core.materials import ParticleMaterial
from omni.physx.scripts import physicsUtils, particleUtils, deformableUtils
from omni.isaac.core.objects import DynamicCuboid
from omni.isaac.core.prims import RigidPrim, RigidPrimView
from omni.isaac.core.utils.prims import get_prim_at_path
from omni.isaac.core.utils.stage import get_current_stage, add_reference_to_stage
from omni.isaac.core.utils.torch.transformations import *
from omni.isaac.core.utils.torch.rotations import *
from omni.isaac.cloner import Cloner
import numpy as np
import torch
import math
from pxr import Gf, Usd, UsdGeom
class FrankaClothPlacingTask(RLTask):
    """Franka cloth-placing task: carry a particle-cloth over a target table and
    release it on the marked location (the gripper opens automatically near the
    target, see pre_physics_step).

    Observations (27): scaled joint positions (9), scaled joint velocities (9),
    cloth mean position (3), cloth-to-target vector (3), target position (3).
    Actions (9): joint position-target deltas.
    """
    def __init__(
        self,
        name,
        sim_config,
        env,
        offset=None
    ) -> None:
        # simulation / task configuration handles
        self._sim_config = sim_config
        self._cfg = sim_config.config
        self._task_cfg = sim_config.task_config
        # env and episode sizing
        self._num_envs = self._task_cfg["env"]["numEnvs"]
        self._env_spacing = self._task_cfg["env"]["envSpacing"]
        self._max_episode_length = self._task_cfg["env"]["episodeLength"]
        # control / reward scales from the task config (several are unused by this
        # task's reward but kept for config compatibility across Franka tasks)
        self.action_scale = self._task_cfg["env"]["actionScale"]
        self.start_position_noise = self._task_cfg["env"]["startPositionNoise"]
        self.start_rotation_noise = self._task_cfg["env"]["startRotationNoise"]
        self.dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
        self.dist_reward_scale = self._task_cfg["env"]["distRewardScale"]
        self.rot_reward_scale = self._task_cfg["env"]["rotRewardScale"]
        self.around_handle_reward_scale = self._task_cfg["env"]["aroundHandleRewardScale"]
        self.open_reward_scale = self._task_cfg["env"]["openRewardScale"]
        self.finger_dist_reward_scale = self._task_cfg["env"]["fingerDistRewardScale"]
        self.action_penalty_scale = self._task_cfg["env"]["actionPenaltyScale"]
        self.finger_close_reward_scale = self._task_cfg["env"]["fingerCloseRewardScale"]
        self.distX_offset = 0.04
        self.dt = 1/60.
        # observation / action space sizes
        self._num_observations = 27
        self._num_actions = 9
        # Flags for testing mode and action noise (set via the setters below)
        self.is_test = False
        self.initial_test_value = None
        self.is_action_noise = False
        RLTask.__init__(self, name, env)
        # Extra info for TensorBoard
        self.extras = {}
        torch_zeros = lambda: torch.zeros(self.num_envs, dtype=torch.float, device=self.device, requires_grad=False)
        self.episode_sums = {"center_dist": torch_zeros()}
        return
    def set_up_scene(self, scene) -> None:
        """Build the template env (Franka + target table), clone envs, spawn one
        cloth per env (particle systems cannot be cloned), and register all views."""
        # Franka
        franka_translation = torch.tensor([0.3, 0.0, 0.0])
        self.get_franka(franka_translation)
        self.get_table()
        # Here the env is cloned (cannot clone particle systems right now)
        super().set_up_scene(scene)
        # Add Franka
        self._frankas = FrankaView(prim_paths_expr="/World/envs/.*/franka", name="franka_view")
        # Add table mesh
        self._target_table = RigidPrimView(prim_paths_expr="/World/envs/.*/target_table/target_table/mesh", name="target_table_view", reset_xform_properties=False)
        # Add the placement marker cube on the table
        self._location_cube = RigidPrimView(prim_paths_expr="/World/envs/.*/target_table/target_table/location_cube", name="location_cube_view", reset_xform_properties=False)
        scene.add(self._frankas)
        scene.add(self._frankas._hands)
        scene.add(self._frankas._lfingers)
        scene.add(self._frankas._rfingers)
        scene.add(self._location_cube)
        scene.add(self._target_table)
        # generate cloth near franka (one per env, outside the cloned hierarchy)
        franka_positions = self._frankas.get_world_poses()[0]
        self.initialize_cloth(franka_positions)
        # Create a view to deal with all the cloths
        self._cloths = ClothPrimView(prim_paths_expr="/World/Env*/cloth", name="cloth_view")
        self._scene.add(self._cloths)
        self.init_data()
        return
def get_franka(self, translation):
franka = Franka(prim_path=self.default_zero_env_path + "/franka", name="franka", translation = translation, use_modified_collision = True)
self._sim_config.apply_articulation_settings("franka", get_prim_at_path(franka.prim_path), self._sim_config.parse_actor_config("franka"))
def get_table(self):
target_table = TargetTable(prim_path=self.default_zero_env_path + "/target_table", name="target_table")
self._sim_config.apply_articulation_settings("target_table", get_prim_at_path(target_table.prim_path), self._sim_config.parse_actor_config("target_table"))
    # Set as testing mode
    def set_as_test(self):
        """Enable test mode: reset_idx then uses the preset initial_test_value
        instead of randomizing the target table position."""
        self.is_test = True
    # Set action noise
    def set_action_noise(self):
        """Enable additive Gaussian action noise (applied in pre_physics_step, std 0.5)."""
        self.is_action_noise = True
    # Set initial test values for testing mode
    def set_initial_test_value(self, value):
        """Store the preset initial condition used in test mode.

        For cloth placing this is the (x, y) offset applied to the target table's
        default position at reset (see reset_idx)."""
        self.initial_test_value = value
    def init_data(self) -> None:
        """Precompute fixed transforms (grasp frame, gripper axes) and default joint pose."""
        def get_env_local_pose(env_pos, xformable, device):
            """Compute pose in env-local coordinates"""
            world_transform = xformable.ComputeLocalToWorldTransform(0)
            world_pos = world_transform.ExtractTranslation()
            world_quat = world_transform.ExtractRotationQuat()
            px = world_pos[0] - env_pos[0]
            py = world_pos[1] - env_pos[1]
            pz = world_pos[2] - env_pos[2]
            qx = world_quat.imaginary[0]
            qy = world_quat.imaginary[1]
            qz = world_quat.imaginary[2]
            qw = world_quat.real
            return torch.tensor([px, py, pz, qw, qx, qy, qz], device=device, dtype=torch.float)
        stage = get_current_stage()
        # env-local poses of the hand link and both fingers (env_0 is the template)
        hand_pose = get_env_local_pose(self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_link7")), self._device)
        lfinger_pose = get_env_local_pose(
            self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_leftfinger")), self._device
        )
        rfinger_pose = get_env_local_pose(
            self._env_pos[0], UsdGeom.Xformable(stage.GetPrimAtPath("/World/envs/env_0/franka/panda_rightfinger")), self._device
        )
        # finger pose: midpoint of the two fingertips, orientation taken from the left finger
        finger_pose = torch.zeros(7, device=self._device)
        finger_pose[0:3] = (lfinger_pose[0:3] + rfinger_pose[0:3]) / 2.0
        finger_pose[3:7] = lfinger_pose[3:7]
        hand_pose_inv_rot, hand_pose_inv_pos = (tf_inverse(hand_pose[3:7], hand_pose[0:3]))
        # franka grasp local pose (finger midpoint expressed in the hand frame)
        grasp_pose_axis = 1
        franka_local_grasp_pose_rot, franka_local_pose_pos = tf_combine(hand_pose_inv_rot, hand_pose_inv_pos, finger_pose[3:7], finger_pose[0:3])
        franka_local_pose_pos += torch.tensor([0, 0.04, 0], device=self._device)
        self.franka_local_grasp_pos = franka_local_pose_pos.repeat((self._num_envs, 1))
        self.franka_local_grasp_rot = franka_local_grasp_pose_rot.repeat((self._num_envs, 1))
        self.gripper_forward_axis = torch.tensor([0, 0, 1], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
        self.gripper_up_axis = torch.tensor([0, 1, 0], device=self._device, dtype=torch.float).repeat((self._num_envs, 1))
        # default joint configuration restored on every reset (gripper nearly closed
        # so the cloth starts grasped)
        self.franka_default_dof_pos = torch.tensor(
            [0.0, 0.0, 0.0, -1.0, 0.0, 1.0, 0.5, 0.0001, 0.0001], device=self._device
        )
        self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
def get_observations(self) -> dict:
# Franka
hand_pos, hand_rot = self._frankas._hands.get_world_poses(clone=False)
franka_dof_pos = self._frankas.get_joint_positions(clone=False)
franka_dof_pos = torch.nan_to_num(franka_dof_pos)
franka_dof_vel = self._frankas.get_joint_velocities(clone=False)
franka_dof_vel = torch.nan_to_num(franka_dof_vel)
self.franka_lfinger_pos, self.franka_lfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
self.franka_rfinger_pos, self.franka_rfinger_rot = self._frankas._lfingers.get_world_poses(clone=False)
dof_pos_scaled = (
2.0
* (franka_dof_pos - self.franka_dof_lower_limits)
/ (self.franka_dof_upper_limits - self.franka_dof_lower_limits)
- 1.0
)
# Cloth
self.cloths_pos = self._cloths.get_world_positions(clone=False)
self.cloths_pos = torch.nan_to_num(self.cloths_pos) # shape (M,121,3)
# cloths_pos_flat = torch.flatten(self.cloths_pos, start_dim=1) # shape (M,121,3)
cloth_mean_x = torch.mean(self.cloths_pos[:,:,0], dim=1).reshape(self.num_envs, 1)
cloth_mean_y = torch.mean(self.cloths_pos[:,:,1], dim=1).reshape(self.num_envs, 1)
cloth_mean_z = torch.mean(self.cloths_pos[:,:,2], dim=1).reshape(self.num_envs, 1)
self.cloths_pos_mean = torch.cat((cloth_mean_x, cloth_mean_y, cloth_mean_z),1)
# location cube
self.location_cube_pos, self.location_cube_rot = self._location_cube.get_world_poses(clone=False)
self.location_cube_pos = torch.nan_to_num(self.location_cube_pos)
to_target = self.cloths_pos_mean - self.location_cube_pos
self.obs_buf = torch.cat(
(
dof_pos_scaled,
franka_dof_vel * self.dof_vel_scale,
# cloths_pos_flat,
self.cloths_pos_mean,
to_target,
self.location_cube_pos,
# self.handle_rot,
# self.location_ball_pos
# self.cabinet_dof_pos[:, 3].unsqueeze(-1),
# self.cabinet_dof_vel[:, 3].unsqueeze(-1),
),
dim=-1,
)
observations = {
self._frankas.name: {
"obs_buf": self.obs_buf
}
}
return observations
    def pre_physics_step(self, actions) -> None:
        """Apply policy actions as joint position-target deltas, and open the
        gripper automatically once the cloth hovers over the target and is slow."""
        if not self._env._world.is_playing():
            return
        # reset any envs flagged by is_done before applying new actions
        reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
        if len(reset_env_ids) > 0:
            self.reset_idx(reset_env_ids)
        self.actions = actions.clone().to(self._device)
        # if action noise
        if self.is_action_noise is True:
            # additive zero-mean Gaussian noise with std 0.5 (variance 0.25)
            self.actions = self.actions + (0.5)*torch.randn_like(self.actions)
        # integrate scaled actions into clamped joint position targets
        targets = self.franka_dof_targets + self.franka_dof_speed_scales * self.dt * self.actions * self.action_scale
        self.franka_dof_targets[:] = tensor_clamp(targets, self.franka_dof_lower_limits, self.franka_dof_upper_limits)
        env_ids_int32 = torch.arange(self._frankas.count, dtype=torch.int32, device=self._device)
        # Release condition: cloth mean within 7 cm (XY) of the target, above it,
        # and moving slower than 0.1 -> open both gripper joints to drop the cloth
        location_cube_pos, location_cube_rot = self._location_cube.get_world_poses()
        location_cube_pos = torch.nan_to_num(location_cube_pos)
        cloths_pos = self._cloths.get_world_positions()
        cloths_pos = torch.nan_to_num(cloths_pos)
        cloth_mean_x = torch.mean(cloths_pos[:,:,0], dim=1).reshape(self.num_envs, 1)
        cloth_mean_y = torch.mean(cloths_pos[:,:,1], dim=1).reshape(self.num_envs, 1)
        cloth_mean_z = torch.mean(cloths_pos[:,:,2], dim=1).reshape(self.num_envs, 1)
        cloths_pos_mean = torch.cat((cloth_mean_x, cloth_mean_y, cloth_mean_z),1)
        center_dist = torch.norm(location_cube_pos[:,0:2] - cloths_pos_mean[:,0:2], p=2, dim=-1)
        cloth_vel = self._cloths.get_velocities()
        cloth_vel = torch.nan_to_num(cloth_vel)
        cloth_vel_x = torch.mean(cloth_vel[:,:,0], dim=1).reshape(self.num_envs, 1)
        cloth_vel_y = torch.mean(cloth_vel[:,:,1], dim=1).reshape(self.num_envs, 1)
        cloth_vel_z = torch.mean(cloth_vel[:,:,2], dim=1).reshape(self.num_envs, 1)
        cloths_vel_mean = torch.cat((cloth_vel_x, cloth_vel_y, cloth_vel_z),1)
        vel = torch.norm(cloths_vel_mean, p=2, dim=-1)
        release_condition = torch.logical_and(center_dist<0.07, cloths_pos_mean[:,2] > location_cube_pos[:,2])
        release_condition = torch.logical_and(release_condition, vel<0.1)
        self.franka_dof_targets[:,7] = torch.where(release_condition, 0.15, self.franka_dof_targets[:,7])
        self.franka_dof_targets[:,8] = torch.where(release_condition, 0.15, self.franka_dof_targets[:,8])
        self._frankas.set_joint_position_targets(self.franka_dof_targets, indices=env_ids_int32)
    def reset_idx(self, env_ids):
        """Reset the given envs: restore Franka defaults and the cloth, and re-place
        the target table (random box in training, preset offset in test mode)."""
        indices = env_ids.to(dtype=torch.int32)
        num_indices = len(indices)
        # reset franka to its default joint configuration (noise line kept disabled)
        pos = torch.clamp(
            self.franka_default_dof_pos.unsqueeze(0),
            #+ 0.25 * (torch.rand((len(env_ids), self.num_franka_dofs), device=self._device) - 0.5),
            self.franka_dof_lower_limits,
            self.franka_dof_upper_limits,
        )
        dof_pos = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_vel = torch.zeros((num_indices, self._frankas.num_dof), device=self._device)
        dof_pos[:, :] = pos
        self.franka_dof_targets[env_ids, :] = pos
        self.franka_dof_pos[env_ids, :] = pos
        self._frankas.set_joint_position_targets(self.franka_dof_targets[env_ids], indices=indices)
        self._frankas.set_joint_positions(dof_pos, indices=indices)
        self._frankas.set_joint_velocities(dof_vel, indices=indices)
        # Reset cloth to its initial particle positions with zero velocity
        self._cloths.set_world_positions(self.default_cloth_pos, indices=indices)
        self._cloths.set_velocities(self.default_cloth_vel, indices=indices)
        if not self.is_test:
            # Reset target table
            # reset positions: x: [-0.1,0.2], y:[-0.35,0.35]
            random_x = (0.2 + 0.1) * torch.rand(self._num_envs, device=self._device) - 0.1
            random_y = (0.35 + 0.35) * torch.rand(self._num_envs, device=self._device) - 0.35
            self.new_location_cube_pos = self.default_target_table_pos.clone().detach()
            self.new_location_cube_pos[:,0] = self.default_target_table_pos[:,0] + random_x
            self.new_location_cube_pos[:,1] = self.default_target_table_pos[:,1] + random_y
            self._target_table.set_world_poses(self.new_location_cube_pos[env_ids], self.default_target_table_rot[env_ids], indices = indices)
            self._target_table.set_velocities(self.default_target_table_velocity[env_ids], indices = indices)
        else:
            # test mode: apply the fixed offset provided via set_initial_test_value
            random_x = self.initial_test_value[0]
            random_y = self.initial_test_value[1]
            self.new_location_cube_pos = self.default_target_table_pos.clone().detach()
            self.new_location_cube_pos[:,0] = self.default_target_table_pos[:,0] + random_x
            self.new_location_cube_pos[:,1] = self.default_target_table_pos[:,1] + random_y
            self._target_table.set_world_poses(self.new_location_cube_pos[env_ids], self.default_target_table_rot[env_ids], indices = indices)
            self._target_table.set_velocities(self.default_target_table_velocity[env_ids], indices = indices)
        # bookkeeping
        self.reset_buf[env_ids] = 0
        self.progress_buf[env_ids] = 0
    def post_reset(self):
        """Cache DOF limits/buffers, cloth and table defaults, then reset all envs."""
        # Franka
        self.num_franka_dofs = self._frankas.num_dof
        self.franka_dof_pos = torch.zeros((self.num_envs, self.num_franka_dofs), device=self._device)
        dof_limits = self._frankas.get_dof_limits()
        self.franka_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
        self.franka_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
        self.franka_dof_speed_scales = torch.ones_like(self.franka_dof_lower_limits)
        # slow the gripper joints relative to the arm joints
        self.franka_dof_speed_scales[self._frankas.gripper_indices] = 0.1
        self.franka_dof_targets = torch.zeros(
            (self._num_envs, self.num_franka_dofs), dtype=torch.float, device=self._device
        )
        # Cloth: default particle positions and zero velocities for resets
        self.default_cloth_pos = self._cloths.get_world_positions()
        self.default_cloth_vel = torch.zeros([self._num_envs, self._cloths.max_particles_per_cloth, 3], device=self._device)
        # Target table: default pose/velocity used as the reset baseline
        self.default_target_table_pos, self.default_target_table_rot = self._target_table.get_world_poses()
        self.default_target_table_velocity = self._target_table.get_velocities()
        # randomize all envs
        indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
        self.reset_idx(indices)
    def initialize_cloth(self, franka_positions):
        """Create one particle-cloth (10x10 triangle grid, 121 particles) per env,
        positioned near that env's Franka, with its particle system and material.

        NOTE(review): `initial_positions` is re-allocated on every loop iteration
        and never read after the loop — looks like leftover scaffolding; confirm
        before removing.
        """
        stage = get_current_stage()
        # mesh resolution and size parameters
        dimx = 10
        dimy = 10
        scale = 0.3
        for i in range(self._num_envs):
            # Note here: cannot put into the same envs (env/env_i) due to unknown bugs
            env_path = "/World/Env" + str(i)
            env = UsdGeom.Xform.Define(stage, env_path)
            # set up the geometry
            cloth_path = env.GetPrim().GetPath().AppendChild("cloth")
            plane_mesh = UsdGeom.Mesh.Define(stage, cloth_path)
            tri_points, tri_indices = deformableUtils.create_triangle_mesh_square(dimx=dimx, dimy=dimy, scale=scale)
            initial_positions = torch.zeros((self.num_envs, len(tri_points), 3))
            plane_mesh.GetPointsAttr().Set(tri_points)
            plane_mesh.GetFaceVertexIndicesAttr().Set(tri_indices)
            plane_mesh.GetFaceVertexCountsAttr().Set([3] * (len(tri_indices) // 3))
            # initial locations of the cloth: offset from this env's Franka base
            franka_positions_np = franka_positions.detach().to('cpu').numpy()
            init_loc = Gf.Vec3f(float(franka_positions_np[i][0] - 0.5), float(franka_positions_np[i][1] ), float(franka_positions_np[i][2] + 0.65))
            physicsUtils.setup_transform_as_scale_orient_translate(plane_mesh)
            physicsUtils.set_or_add_translate_op(plane_mesh, init_loc)
            physicsUtils.set_or_add_orient_op(plane_mesh, Gf.Rotation(Gf.Vec3d([1, 0, 0]), 90).GetQuat())
            initial_positions[i] = torch.tensor(init_loc) + torch.tensor(plane_mesh.GetPointsAttr().Get())
            particle_system_path = env.GetPrim().GetPath().AppendChild("particleSystem")
            particle_material_path = env.GetPrim().GetPath().AppendChild("particleMaterial")
            particle_material = ParticleMaterial(
                prim_path=particle_material_path, drag=0.1, lift=0.3, friction=10.0
            )
            # parameters for the properties of the cloth
            # radius = 0.005
            radius = 0.5 * (scale / dimx) # size rest offset according to plane resolution and width so that particles are just touching at rest
            restOffset = radius
            contactOffset = restOffset * 1.5
            particle_system = ParticleSystem(
                prim_path=particle_system_path,
                simulation_owner=self._env._world.get_physics_context().prim_path,
                rest_offset=restOffset,
                contact_offset=contactOffset,
                solid_rest_offset=restOffset,
                fluid_rest_offset=restOffset,
                particle_contact_offset=contactOffset,
            )
            # note that no particle material is applied to the particle system at this point.
            # this can be done manually via self.particle_system.apply_particle_material(self.particle_material)
            # or to pass the material to the clothPrim which binds it internally to the particle system
            stretch_stiffness = 100000.0
            bend_stiffness = 100.0
            shear_stiffness = 100.0
            spring_damping = 0.1
            particle_mass = 0.005
            cloth = ClothPrim(
                name="clothPrim" + str(i),
                prim_path=str(cloth_path),
                particle_system=particle_system,
                particle_material=particle_material,
                stretch_stiffness=stretch_stiffness,
                bend_stiffness=bend_stiffness,
                shear_stiffness=shear_stiffness,
                spring_damping=spring_damping,
                particle_mass = particle_mass,
                self_collision=True,
                self_collision_filter=True,
            )
            self._scene.add(cloth)
    def calculate_metrics(self) -> None:
        """Compute per-env reward for cloth placing.

        final_reward = 7*1/(1+center_dist) + 10*success + 1/(1+rot_dist)
                       + live_bonus + 1/(1+cloth_speed) + 1/(1+finger_speed),
        where success requires center_dist < 0.05 and cloth speed < 0.1.
        Also accumulates center_dist into episode_sums["center_dist"].
        """
        # distance from cloth mean position to the placement target
        # center_dist = torch.norm(self.location_cube_pos - self.cloths_pos_mean, p=2, dim=-1)
        location_cube_pos = self.location_cube_pos
        center_dist = torch.norm(location_cube_pos - self.cloths_pos_mean, p=2, dim=-1)
        center_dist_reward = 1.0/(1.0+center_dist)
        # finger reward
        # franka_lfinger_pos = torch.nan_to_num(self.franka_lfinger_pos)
        # franka_rfinger_pos = torch.nan_to_num(self.franka_rfinger_pos)
        # finger_center = (franka_lfinger_pos + franka_rfinger_pos)/2
        # target = self.location_cube_pos
        # target[:,2] = target[:,2] + 0.3
        # finger_dist = torch.norm(finger_center - target, p=2, dim=-1)
        # finger_dist_reward = 1.0/(1.0+finger_dist)
        # reward slow fingers (mean of both finger velocities)
        lfinger_vel = torch.nan_to_num(self._frankas._lfingers.get_velocities())
        rfinger_vel = torch.nan_to_num(self._frankas._rfingers.get_velocities())
        finger_vel = (lfinger_vel + rfinger_vel)/2
        finger_vel_norm = torch.norm(finger_vel, p=2, dim=-1)
        finger_vel_reward = 1.0/(1.0+finger_vel_norm)
        # finger rotation: keep the mean finger quaternion near the fixed target
        franka_lfinger_rot = torch.nan_to_num(self.franka_lfinger_rot)
        franka_rfinger_rot = torch.nan_to_num(self.franka_rfinger_rot)
        mean_rot = (franka_lfinger_rot + franka_rfinger_rot)/2
        rot_target = torch.zeros_like(franka_lfinger_rot)
        rot_target[:,2] = 1
        rot_distance = torch.norm(mean_rot - rot_target, p=2, dim=-1)
        rot_distance_reward = 1.0/(1.0+rot_distance)
        # cloth velocities: reward a settled (slow) cloth
        cloth_vel = self._cloths.get_velocities()
        cloth_vel = torch.nan_to_num(cloth_vel)
        cloth_vel_x = torch.mean(cloth_vel[:,:,0], dim=1).reshape(self.num_envs, 1)
        cloth_vel_y = torch.mean(cloth_vel[:,:,1], dim=1).reshape(self.num_envs, 1)
        cloth_vel_z = torch.mean(cloth_vel[:,:,2], dim=1).reshape(self.num_envs, 1)
        cloths_vel_mean = torch.cat((cloth_vel_x, cloth_vel_y, cloth_vel_z),1)
        vel = torch.norm(cloths_vel_mean, p=2, dim=-1)
        vel_reward = 1.0/(1.0+vel)
        # stay alive: bonus while the cloth has not fallen below z = 0.3
        live_reward = torch.where(self.cloths_pos_mean[:,2] > 0.3, torch.ones_like(self.cloths_pos_mean[:,2]), torch.zeros_like(self.cloths_pos_mean[:,2]))
        # franka velocities
        # franka_dof_vel = self._frankas.get_joint_velocities()
        # franka_dof_vel = torch.nan_to_num(franka_dof_vel)
        # dof_vel_mean = torch.norm(franka_dof_vel, p=2, dim=-1)
        # dof_vel_reward = 1.0/(1.0+dof_vel_mean)
        # is complete: cloth centered on the target and (nearly) at rest
        is_complete = torch.where(torch.logical_and(center_dist < 0.05, vel<0.1), torch.ones_like(center_dist), torch.zeros_like(center_dist))
        # if torch.any(torch.isnan(self.cloths_pos_mean)):
        #     print("NAN", self.cloths_pos_mean)
        # x_dist = torch.abs(self.location_cube_pos[:,0] - self.cloths_pos_mean[:,0])
        # x_dist_reward = 1.0/(1.0+x_dist)
        # y_dist = torch.abs(self.location_cube_pos[:,1] - self.cloths_pos_mean[:,1])
        # y_dist_reward = 1.0/(1.0+y_dist)
        # z_dist = torch.abs(self.location_cube_pos[:,2] - self.cloths_pos_mean[:,2])
        # z_dist_reward = 1.0/(1.0+z_dist)
        final_reward = 7.0*center_dist_reward + 10.0*is_complete + 1.0*rot_distance_reward + 1.0*live_reward \
                        + 1.0*vel_reward + 1.0*finger_vel_reward
        # TO BE IMPLEMENTED
        self.rew_buf[:] = final_reward
        # log additional info
        self.episode_sums["center_dist"] += center_dist
        # self.episode_sums["y_dist"] += y_dist
        # self.episode_sums["z_dist"] += z_dist
def is_done(self) -> None:
    """Flag environments for reset in ``self.reset_buf``.

    During training, an environment is reset when the cloth's mean height
    drops below z = 0.1, when the Franka joint positions or velocities
    contain NaNs, or when the maximum episode length is reached. In test
    mode only the episode-length criterion applies.
    """
    if not self.is_test:
        # Reset if the cloth has fallen to the ground
        cloth_height = self.cloths_pos_mean[:, 2]
        self.reset_buf = torch.where(cloth_height < 0.1, torch.ones_like(self.reset_buf), self.reset_buf)
        # Reset on numerical faults (NaN) in the Franka joint positions/velocities
        for joint_state in (self._frankas.get_joint_positions(), self._frankas.get_joint_velocities()):
            is_fault = torch.any(torch.isnan(joint_state), 1)
            self.reset_buf = torch.where(is_fault, torch.ones_like(self.reset_buf), self.reset_buf)
    # Reset when the maximum episode length is reached (applies in both train and test mode)
    self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
| 25,478 | Python | 45.750459 | 174 | 0.618455 |
zhehuazhou/ai-cps-robotics-benchmark/Gym_Envs/utils/task_util.py |
def initialize_task(config, env, init_sim=True):
    """Build the task named in @config, attach it to @env, and return it."""
    from Tasks.Franka_Ball_Balancing import FrankaBallBalancingTask
    from Tasks.Franka_Ball_Catching import FrankaBallCatchingTask
    from Tasks.Franka_Ball_Pushing import FrankaBallPushingTask
    from Tasks.Franka_Cloth_Placing import FrankaClothPlacingTask
    from Tasks.Franka_Cube_Stacking import FrankaCubeStackingTask
    from Tasks.Franka_Door_Open import FrankaDoorOpenTask
    from Tasks.Franka_Peg_In_Hole import FrankaPegInHoleTask
    from Tasks.Franka_Point_Reaching import FrankaPointReachingTask

    # Task-name string -> task class lookup table
    task_map = {
        "FrankaDoorOpen": FrankaDoorOpenTask,
        "FrankaBallPushing": FrankaBallPushingTask,
        "FrankaBallBalancing": FrankaBallBalancingTask,
        "FrankaBallCatching": FrankaBallCatchingTask,
        "FrankaPegInHole": FrankaPegInHoleTask,
        "FrankaClothPlacing": FrankaClothPlacingTask,
        "FrankaCubeStacking": FrankaCubeStackingTask,
        "FrankaPointReaching": FrankaPointReachingTask,
    }

    from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig

    sim_config = SimConfig(config)
    task_name = sim_config.config["task_name"]
    task = task_map[task_name](name=task_name, sim_config=sim_config, env=env)
    env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=init_sim)
    return task
| 1,433 | Python | 36.736841 | 107 | 0.742498 |
zhehuazhou/ai-cps-robotics-benchmark/Falsification_Tool/optimizer/optimizer.py | from typing import Optional
import sys
import numpy as np
import torch
import time
from scipy.optimize import minimize
from scipy.optimize import dual_annealing
class Optimizer(object):
    """Falsification optimizer for robotic-manipulation testing.

    Searches over environment initial conditions to minimize the STL
    robustness of the resulting trace, i.e. tries to falsify the spec.

    Args:
        task_name: the task name of the environment under test.
        test_model: the model under test (provides ``compute_trace`` /
            ``merge_trace``).
        monitor: the monitor for the STL specification (provides
            ``compute_robustness``).
        opt_type: type of the optimizer: "random", "NelderMead" or
            "DualAnnealing".
        budget_size: simulation budget (number of robustness evaluations).
    """

    # Per-task search-space bounds passed to the scipy optimizers.
    _TASK_BOUNDS = {
        "FrankaBallPushing": ((-0.1, 0.1), (-0.1, 0.1)),
        "FrankaBallBalancing": ((-0.15, 0.15), (-0.15, 0.15)),
        # previously considered: ((-0.1, 0.1), (-0.2, 0.2), (1.0, 3.0), (-1.0, 1.0))
        "FrankaBallCatching": ((-0.05, 0.05), (-0.05, 0.05), (1.0, 1.001), (0.0, 0.001)),
        "FrankaCubeStacking": ((-0.2, 0.2), (-0.2, 0.2)),
        "FrankaDoorOpen": ((-0.025, 0.025), (-0.05, 0.05)),
        "FrankaPegInHole": ((-0.1, 0.1), (-0.1, 0.1)),
        "FrankaPointReaching": ((-0.2, 0.2), (-0.4, 0.4), (-0.2, 0.2)),
        "FrankaClothPlacing": ((-0.1, 0.2), (-0.35, 0.35)),
    }

    # Per-task sampling ranges for generate_initial().
    # NOTE(review): for some tasks (e.g. FrankaDoorOpen, FrankaBallCatching)
    # these intentionally differ from _TASK_BOUNDS -- kept as in the original.
    _TASK_INITIAL_RANGES = {
        "FrankaBallPushing": ((-0.1, 0.1), (-0.1, 0.1)),
        "FrankaBallBalancing": ((-0.15, 0.15), (-0.15, 0.15)),
        # position x/y plus initial velocity vx (fixed 1.0) and vy (fixed 0.0)
        "FrankaBallCatching": ((-0.05, 0.05), (-0.05, 0.05), (1.0, 1.0), (0.0, 0.0)),
        "FrankaCubeStacking": ((-0.2, 0.2), (-0.2, 0.2)),
        "FrankaDoorOpen": ((-0.005, 0.005), (-0.025, 0.025)),
        "FrankaPegInHole": ((-0.1, 0.1), (-0.1, 0.1)),
        "FrankaPointReaching": ((-0.2, 0.2), (-0.4, 0.4), (-0.2, 0.2)),
        "FrankaClothPlacing": ((-0.1, 0.2), (-0.35, 0.35)),
    }

    # RTAMT monitors report "eventually" robustness from each timepoint to the
    # end, so for these tasks the trace robustness is aggregated with max
    # instead of min (workaround kept from the original implementation).
    _MAX_ROB_TASKS = frozenset({
        "FrankaBallPushing",
        "FrankaCubeStacking",
        "FrankaDoorOpen",
        "FrankaPegInHole",
        "FrankaClothPlacing",
    })

    def __init__(
        self,
        task_name,
        test_model,
        monitor,
        opt_type: Optional[str] = "random",
        budget_size: Optional[int] = 1000,
    ):
        self.task_name = task_name
        self.test_model = test_model
        self.monitor = monitor
        self.opt_type = opt_type
        self.budget_size = budget_size

        # Falsification bookkeeping, updated by robustness_function()
        self.fal_succ = False            # whether a falsifying input was found
        self.start_time = time.time()    # wall-clock start of the search
        self.fal_time = 0                # time until first falsification (or total time)
        self.fal_sim = 0                 # simulations executed before falsification
        self.worst_rob = 1000            # lowest robustness observed so far

        # Initial-value bounds for the chosen task.
        # BUGFIX: comparisons previously used `is` on string literals, which
        # checks identity rather than equality; replaced with dict lookup.
        try:
            self.bnds = self._TASK_BOUNDS[task_name]
        except KeyError:
            raise ValueError("Task name unknown for generating the initial values")

    def generate_initial(self):
        """Sample a random initial condition for the configured task.

        Returns:
            np.ndarray: 1-D array with one uniformly-sampled value per
            dimension of the task's initial-condition space.

        Raises:
            ValueError: if the task name is unknown.
        """
        try:
            ranges = self._TASK_INITIAL_RANGES[self.task_name]
        except KeyError:
            raise ValueError("Task name unknown for generating the initial values")
        return np.array([np.random.uniform(low, high) for low, high in ranges])

    def robustness_function(self, initial_value):
        """Simulate from @initial_value and return the trace robustness.

        Also updates the falsification bookkeeping (worst robustness seen,
        time to first falsification, and simulation count).

        Args:
            initial_value (np.ndarray): initial condition to simulate from.

        Returns:
            float: aggregated robustness of the resulting trace (negative
            means the specification was falsified).
        """
        # Simulate and monitor the trace
        trace = self.test_model.compute_trace(initial_value)
        indexed_trace = self.test_model.merge_trace(trace)
        rob_sequence = np.array(self.monitor.compute_robustness(indexed_trace))

        # Aggregate per-timepoint robustness (see _MAX_ROB_TASKS comment)
        if self.task_name in self._MAX_ROB_TASKS:
            min_rob = np.max(rob_sequence[:, 1])
        else:
            min_rob = np.min(rob_sequence[:, 1])

        if min_rob < self.worst_rob:
            self.worst_rob = min_rob

        # Record the first falsification; otherwise count the simulation
        if min_rob < 0 and not self.fal_succ:
            self.fal_succ = True
            self.fal_time = time.time() - self.start_time
        elif not self.fal_succ:
            self.fal_sim += 1

        return min_rob

    def optimize(self):
        """Run the configured optimizer.

        Returns:
            list: ``[fal_succ, fal_time, fal_sim, worst_rob]``.

        Raises:
            ValueError: if ``opt_type`` is not a known optimizer.
        """
        dispatch = {
            "random": self.optimize_random,
            "NelderMead": self.optimize_nelder_mead,
            "DualAnnealing": self.optimize_dual_annealing,
        }
        try:
            run = dispatch[self.opt_type]
        except KeyError:
            raise ValueError("Optimizer type undefined!")
        return run()

    def optimize_random(self):
        """Uniform random search over initial conditions."""
        for i in range(self.budget_size):
            initial_value = self.generate_initial()
            min_rob = self.robustness_function(initial_value)

            # Track the worst robustness (first sample always initializes it)
            if i == 0 or min_rob < self.worst_rob:
                self.worst_rob = min_rob

            if min_rob < 0:
                # NOTE(review): the original counted the very first simulation
                # only when it already falsified; kept for result comparability.
                if i == 0:
                    self.fal_sim = 1
                break

        return self._finish()

    def optimize_nelder_mead(self):
        """Local search via scipy's Nelder-Mead from a random starting point."""
        minimize(
            self.robustness_function,
            self.generate_initial(),
            method="Nelder-Mead",
            bounds=self.bnds,
            options={"maxfev": self.budget_size, "disp": True},
        )
        return self._finish()

    def optimize_dual_annealing(self):
        """Global search via scipy's dual annealing."""
        dual_annealing(
            self.robustness_function,
            bounds=self.bnds,
            maxfun=self.budget_size,  # local search number
        )
        return self._finish()

    def _finish(self):
        """Finalize timing bookkeeping and pack the result list."""
        if not self.fal_succ:
            # No falsification found: report the total search time instead
            self.fal_time = time.time() - self.start_time
        return [self.fal_succ, self.fal_time, self.fal_sim, self.worst_rob]
| 9,690 | Python | 30.77377 | 115 | 0.540248 |
StanfordVL/OmniGibson/setup.py | # read the contents of your README file
from os import path
from setuptools import find_packages, setup
# Absolute path of the directory containing this setup.py, so the README can
# be located regardless of the current working directory.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
    lines = f.readlines()
    # remove images from README
    lines = [x for x in lines if ".png" not in x]
    long_description = "".join(lines)
setup(
    name="omnigibson",
    version="1.0.0",
    author="Stanford University",
    long_description_content_type="text/markdown",
    long_description=long_description,
    url="https://github.com/StanfordVL/OmniGibson",
    zip_safe=False,
    packages=find_packages(),
    # Runtime dependencies, pinned with compatible-release (~=) specifiers
    install_requires=[
        "gym~=0.26.2",
        "numpy~=1.23.5",
        "scipy~=1.10.1",
        "GitPython~=3.1.40",
        "transforms3d~=0.4.1",
        "networkx~=3.2.1",
        "PyYAML~=6.0.1",
        "addict~=2.4.0",
        "ipython~=8.20.0",
        "future~=0.18.3",
        "trimesh~=4.0.8",
        "h5py~=3.10.0",
        "cryptography~=41.0.7",
        "bddl~=3.5.0",
        "opencv-python~=4.8.1",
        "nest_asyncio~=1.5.8",
        "imageio~=2.33.1",
        "imageio-ffmpeg~=0.4.9",
        "termcolor~=2.4.0",
        "progressbar~=2.5",
        "pymeshlab~=2022.2",
        "click~=8.1.3",
        "aenum~=3.1.15",
        "rtree~=1.2.0",
    ],
    tests_require=[],
    python_requires=">=3",
    # Ship the default global config file with the package
    package_data={"": ["omnigibson/global_config.yaml"]},
    include_package_data=True,
) # yapf: disable
| 1,483 | Python | 26.481481 | 73 | 0.559676 |
StanfordVL/OmniGibson/scripts/download_datasets.py | """
Helper script to download OmniGibson dataset and assets.
"""
import os
os.environ["OMNIGIBSON_NO_OMNIVERSE"] = "1"
from omnigibson.macros import gm
from omnigibson.utils.asset_utils import download_og_dataset, download_assets
import click
def main():
    """Interactively download the OmniGibson dataset and assets if missing.

    Prompts the user for confirmation before downloading, since the combined
    download is large (~27.5GB). Skips any component that already exists at
    its configured path.
    """
    # Only execute if the dataset path or asset path does not exist
    dataset_exists, assets_exist = os.path.exists(gm.DATASET_PATH), os.path.exists(gm.ASSET_PATH)
    if dataset_exists and assets_exist:
        return

    # Ask user which dataset to install
    # BUGFIX: messages previously referenced a nonexistent "DATA_PATH" macro
    # and the wrong script name ("download_dataset.py").
    print("OmniGibson will now install data under the following locations:")
    print(f"    dataset (~25GB): {gm.DATASET_PATH}")
    print(f"    assets (~2.5GB): {gm.ASSET_PATH}")
    print("If you want to install data under a different path, please change the DATASET_PATH and ASSET_PATH variables in omnigibson/macros.py and rerun scripts/download_datasets.py.")
    if click.confirm("Do you want to continue?"):
        # Only download if the dataset path doesn't exist
        if not dataset_exists:
            print("Downloading dataset...")
            download_og_dataset()
        # Only download if the asset path doesn't exist
        if not assets_exist:
            print("Downloading assets...")
            download_assets()
        print("\nOmniGibson setup completed!\n")
    else:
        print("You chose not to install dataset for now. You can install it later by running python scripts/download_datasets.py.")


if __name__ == "__main__":
    main()
| 1,533 | Python | 38.333332 | 169 | 0.64775 |
StanfordVL/OmniGibson/omnigibson/simulator.py | from collections import defaultdict
import itertools
import contextlib
import logging
import os
import shutil
import socket
from pathlib import Path
import atexit
import signal
from contextlib import nullcontext
import numpy as np
import json
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm, create_module_macros
from omnigibson.utils.constants import LightingMode
from omnigibson.utils.config_utils import NumpyEncoder
from omnigibson.utils.python_utils import clear as clear_pu, create_object_from_init_info, Serializable
from omnigibson.utils.sim_utils import meets_minimum_isaac_version
from omnigibson.utils.usd_utils import clear as clear_uu, FlatcacheAPI, RigidContactAPI, PoseAPI
from omnigibson.utils.ui_utils import (CameraMover, disclaimer, create_module_logger, suppress_omni_log,
print_icon, print_logo, logo_small)
from omnigibson.scenes import Scene
from omnigibson.objects.object_base import BaseObject
from omnigibson.objects.stateful_object import StatefulObject
from omnigibson.object_states.contact_subscribed_state_mixin import ContactSubscribedStateMixin
from omnigibson.object_states.joint_break_subscribed_state_mixin import JointBreakSubscribedStateMixin
from omnigibson.object_states.factory import get_states_by_dependency_order
from omnigibson.object_states.update_state_mixin import UpdateStateMixin, GlobalUpdateStateMixin
from omnigibson.prims.material_prim import MaterialPrim
from omnigibson.sensors.vision_sensor import VisionSensor
from omnigibson.systems.macro_particle_system import MacroPhysicalParticleSystem
from omnigibson.transition_rules import TransitionRuleAPI
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Default viewer-camera pose — presumably applied when the viewer camera is
# (re)created; TODO confirm against the Simulator class below.
m.DEFAULT_VIEWER_CAMERA_POS = (-0.201028, -2.72566 , 1.0654)
m.DEFAULT_VIEWER_CAMERA_QUAT = (0.68196617, -0.00155408, -0.00166678, 0.73138017)
# Far-away position objects are teleported to before deletion; works around a
# GPU-dynamics crash when removing objects that are in contact (see remove_object)
m.OBJECT_GRAVEYARD_POS = (100.0, 100.0, 100.0)
# Helper functions for starting omnigibson
def print_save_usd_warning(_):
    """Replacement for Omni's save-prim action: warn that USD export is disabled."""
    log.warning("Exporting individual USDs has been disabled in OG due to copyrights.")
def _launch_app():
    """Launch and configure the Isaac Sim ``SimulationApp`` for OmniGibson.

    Copies the OmniGibson kit experience file into the Isaac Sim apps
    directory, starts the app (optionally headless and/or remote-streaming),
    suppresses noisy Omni logging channels, hides unneeded UI windows, and
    registers cleanup / SIGINT handlers.

    Returns:
        SimulationApp: the running Isaac Sim application instance.

    Raises:
        ValueError: if the kit file cannot be copied, or if
            ``gm.REMOTE_STREAMING`` is set to an unknown mode.
    """
    log.info(f"{'-' * 5} Starting {logo_small()}. This will take 10-30 seconds... {'-' * 5}")
    # If multi_gpu is used, og.sim.render() will cause a segfault when called during on_contact callbacks,
    # e.g. when an attachment joint is being created due to contacts (create_joint calls og.sim.render() internally).
    gpu_id = None if gm.GPU_ID is None else int(gm.GPU_ID)
    config_kwargs = {"headless": gm.HEADLESS or bool(gm.REMOTE_STREAMING), "multi_gpu": False}
    if gpu_id is not None:
        config_kwargs["active_gpu"] = gpu_id
        config_kwargs["physics_gpu"] = gpu_id
    # Omni's logging is super annoying and overly verbose, so suppress it by modifying the logging levels
    if not gm.DEBUG:
        import sys
        from numba.core.errors import NumbaPerformanceWarning
        import warnings
        # TODO: Find a more elegant way to prune omni logging
        # sys.argv.append("--/log/level=warning")
        # sys.argv.append("--/log/fileLogLevel=warning")
        # sys.argv.append("--/log/outputStreamLevel=error")
        warnings.simplefilter("ignore", category=NumbaPerformanceWarning)
    # Copy the OmniGibson kit file to the Isaac Sim apps directory. This is necessary because the Isaac Sim app
    # expects the extensions to be reachable in the parent directory of the kit file. We copy on every launch to
    # ensure that the kit file is always up to date.
    assert "EXP_PATH" in os.environ, "The EXP_PATH variable is not set. Are you in an Isaac Sim installed environment?"
    kit_file = Path(__file__).parent / "omnigibson.kit"
    kit_file_target = Path(os.environ["EXP_PATH"]) / "omnigibson.kit"
    try:
        shutil.copy(kit_file, kit_file_target)
    except Exception as e:
        # BUGFIX: the original `raise e from ValueError(...)` had the exception
        # chaining reversed (it re-raised the copy error with the ValueError as
        # its cause). Raise the descriptive error, chained from the original.
        raise ValueError("Failed to copy omnigibson.kit to Isaac Sim apps directory.") from e
    launch_context = nullcontext if gm.DEBUG else suppress_omni_log
    with launch_context(None):
        app = lazy.omni.isaac.kit.SimulationApp(config_kwargs, experience=str(kit_file_target.resolve(strict=True)))
    assert meets_minimum_isaac_version("2023.1.1"), "This version of OmniGibson supports Isaac Sim 2023.1.1 and above. Please update Isaac Sim."
    # Omni overrides the global logger to be DEBUG, which is very annoying, so we re-override it to the default WARN
    # TODO: Remove this once omniverse fixes it
    logging.getLogger().setLevel(logging.WARNING)
    # Enable additional extensions we need
    lazy.omni.isaac.core.utils.extensions.enable_extension("omni.flowusd")
    lazy.omni.isaac.core.utils.extensions.enable_extension("omni.particle.system.bundle")
    # Additional import for windows
    if os.name == "nt":
        lazy.omni.isaac.core.utils.extensions.enable_extension("omni.kit.window.viewport")
    # Default Livestream settings
    if gm.REMOTE_STREAMING:
        app.set_setting("/app/window/drawMouse", True)
        app.set_setting("/app/livestream/proto", "ws")
        app.set_setting("/app/livestream/websocket/framerate_limit", 120)
        app.set_setting("/ngx/enabled", False)
        # Find our IP address
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        ip = s.getsockname()[0]
        s.close()
        # Note: Only one livestream extension can be enabled at a time
        if gm.REMOTE_STREAMING == "native":
            # Enable Native Livestream extension
            # Default App: Streaming Client from the Omniverse Launcher
            lazy.omni.isaac.core.utils.extensions.enable_extension("omni.kit.livestream.native")
            print(f"Now streaming on {ip} via Omniverse Streaming Client")
        elif gm.REMOTE_STREAMING == "webrtc":
            # Enable WebRTC Livestream extension
            app.set_setting("/exts/omni.services.transport.server.http/port", gm.HTTP_PORT)
            app.set_setting("/app/livestream/port", gm.WEBRTC_PORT)
            lazy.omni.isaac.core.utils.extensions.enable_extension("omni.services.streamclient.webrtc")
            print(f"Now streaming on: http://{ip}:{gm.HTTP_PORT}/streaming/webrtc-client?server={ip}")
        else:
            raise ValueError(f"Invalid REMOTE_STREAMING option {gm.REMOTE_STREAMING}. Must be one of None, native, webrtc.")
    # If we're headless, suppress all warnings about GLFW
    if gm.HEADLESS:
        og_log = lazy.omni.log.get_log()
        og_log.set_channel_enabled("carb.windowing-glfw.plugin", False, lazy.omni.log.SettingBehavior.OVERRIDE)
    # Globally suppress certain logging modules (unless we're in debug mode) since they produce spurious warnings
    if not gm.DEBUG:
        og_log = lazy.omni.log.get_log()
        for channel in ["omni.hydra.scene_delegate.plugin", "omni.kit.manipulator.prim.model"]:
            og_log.set_channel_enabled(channel, False, lazy.omni.log.SettingBehavior.OVERRIDE)
    # Possibly hide windows if in debug mode
    hide_window_names = []
    if not gm.RENDER_VIEWER_CAMERA:
        hide_window_names.append("Viewport")
    if gm.GUI_VIEWPORT_ONLY:
        hide_window_names.extend(["Console", "Main ToolBar", "Stage", "Layer", "Property", "Render Settings", "Content",
                                  "Flow", "Semantics Schema Editor"])
    for name in hide_window_names:
        window = lazy.omni.ui.Workspace.get_window(name)
        if window is not None:
            window.visible = False
    app.update()
    lazy.omni.kit.widget.stage.context_menu.ContextMenu.save_prim = print_save_usd_warning
    # TODO: Automated cleanup in callback doesn't work for some reason. Need to investigate.
    shutdown_stream = lazy.omni.kit.app.get_app().get_shutdown_event_stream()
    # NOTE(review): `sub` holds the subscription handle — presumably it must stay
    # referenced for the callback to remain registered; confirm its lifetime.
    sub = shutdown_stream.create_subscription_to_pop(og.cleanup, name="og_cleanup", order=0)
    # Loading Isaac Sim disables Ctrl+C, so we need to re-enable it
    signal.signal(signal.SIGINT, og.shutdown_handler)
    return app
def launch_simulator(*args, **kwargs):
if not og.app:
og.app = _launch_app()
class Simulator(lazy.omni.isaac.core.simulation_context.SimulationContext, Serializable):
"""
Simulator class for directly interfacing with the physx physics engine.
NOTE: This is a monolithic class.
All created Simulator() instances will reference the same underlying Simulator object
Args:
gravity (float): gravity on z direction.
physics_dt (float): dt between physics steps. Defaults to 1.0 / 120.0.
rendering_dt (float): dt between rendering steps. Note: rendering means rendering a frame of the current
application and not only rendering a frame to the viewports/ cameras. So UI elements of Isaac Sim will
be refreshed with this dt as well if running non-headless. Defaults to 1.0 / 30.0.
stage_units_in_meters (float): The metric units of assets. This will affect gravity value..etc.
Defaults to 0.01.
viewer_width (int): width of the camera image, in pixels
viewer_height (int): height of the camera image, in pixels
device (None or str): specifies the device to be used if running on the gpu with torch backend
"""
_world_initialized = False
def __init__(
self,
gravity=9.81,
physics_dt=1.0 / 120.0,
rendering_dt=1.0 / 30.0,
stage_units_in_meters=1.0,
viewer_width=gm.DEFAULT_VIEWER_WIDTH,
viewer_height=gm.DEFAULT_VIEWER_HEIGHT,
device=None,
):
# Store vars needed for initialization
self.gravity = gravity
self._viewer_camera = None
self._camera_mover = None
# Run super init
super().__init__(
physics_dt=physics_dt,
rendering_dt=rendering_dt,
stage_units_in_meters=stage_units_in_meters,
device=device,
)
if self._world_initialized:
return
Simulator._world_initialized = True
# Store other references to variables that will be initialized later
self._scene = None
self._physx_interface = None
self._physx_simulation_interface = None
self._physx_scene_query_interface = None
self._contact_callback = None
self._simulation_event_callback = None
# List of objects that need to be initialized during whenever the next sim step occurs
self._objects_to_initialize = []
self._objects_require_contact_callback = False
self._objects_require_joint_break_callback = False
# Maps callback name to callback
self._callbacks_on_play = dict()
self._callbacks_on_stop = dict()
self._callbacks_on_import_obj = dict()
self._callbacks_on_remove_obj = dict()
# Mapping from link IDs assigned from omni to the object that they reference
self._link_id_to_objects = dict()
# Set of categories that can be grasped by assisted grasping
self.object_state_types = get_states_by_dependency_order()
self.object_state_types_requiring_update = \
[state for state in self.object_state_types if (issubclass(state, UpdateStateMixin) or issubclass(state, GlobalUpdateStateMixin))]
self.object_state_types_on_contact = \
{state for state in self.object_state_types if issubclass(state, ContactSubscribedStateMixin)}
self.object_state_types_on_joint_break = \
{state for state in self.object_state_types if issubclass(state, JointBreakSubscribedStateMixin)}
# Auto-load the dummy stage
self.clear()
# Set the viewer dimensions
if gm.RENDER_VIEWER_CAMERA:
self.viewer_width = viewer_width
self.viewer_height = viewer_height
# Toggle simulator state once so that downstream omni features can be used without bugs
# e.g.: particle sampling, which for some reason requires sim.play() to be called at least once
self.play()
self.stop()
# Update the physics settings
# This needs to be done now, after an initial step + stop for some reason if we want to use GPU
# dynamics, otherwise we get very strange behavior, e.g., PhysX complains about invalid transforms
# and crashes
self._set_physics_engine_settings()
def __new__(
cls,
gravity=9.81,
physics_dt=1.0 / 120.0,
rendering_dt=1.0 / 30.0,
stage_units_in_meters=1.0,
viewer_width=gm.DEFAULT_VIEWER_WIDTH,
viewer_height=gm.DEFAULT_VIEWER_HEIGHT,
device_idx=0,
):
# Overwrite since we have different kwargs
if Simulator._instance is None:
Simulator._instance = object.__new__(cls)
else:
lazy.carb.log_info("Simulator is defined already, returning the previously defined one")
return Simulator._instance
def _set_viewer_camera(self, prim_path="/World/viewer_camera", viewport_name="Viewport"):
"""
Creates a camera prim dedicated for this viewer at @prim_path if it doesn't exist,
and sets this camera as the active camera for the viewer
Args:
prim_path (str): Path to check for / create the viewer camera
viewport_name (str): Name of the viewport this camera should attach to. Default is "Viewport", which is
the default viewport's name in Isaac Sim
"""
self._viewer_camera = VisionSensor(
prim_path=prim_path,
name=prim_path.split("/")[-1], # Assume name is the lowest-level name in the prim_path
modalities="rgb",
image_height=self.viewer_height,
image_width=self.viewer_width,
viewport_name=viewport_name,
)
if not self._viewer_camera.loaded:
self._viewer_camera.load()
# We update its clipping range and focal length so we get a good FOV and so that it doesn't clip
# nearby objects (default min is 1 m)
self._viewer_camera.clipping_range = [0.001, 10000000.0]
self._viewer_camera.focal_length = 17.0
# Initialize the sensor
self._viewer_camera.initialize()
# Also need to potentially update our camera mover if it already exists
if self._camera_mover is not None:
self._camera_mover.set_cam(cam=self._viewer_camera)
def _set_physics_engine_settings(self):
"""
Set the physics engine with specified settings
"""
assert self.is_stopped(), f"Cannot set simulator physics settings while simulation is playing!"
self._physics_context.set_gravity(value=-self.gravity)
# Also make sure we don't invert the collision group filter settings so that different collision groups by
# default collide with each other, and modify settings for speed optimization
self._physics_context.set_invert_collision_group_filter(False)
self._physics_context.enable_ccd(gm.ENABLE_CCD)
self._physics_context.enable_fabric(gm.ENABLE_FLATCACHE)
# Enable GPU dynamics based on whether we need omni particles feature
if gm.USE_GPU_DYNAMICS:
self._physics_context.enable_gpu_dynamics(True)
self._physics_context.set_broadphase_type("GPU")
else:
self._physics_context.enable_gpu_dynamics(False)
self._physics_context.set_broadphase_type("MBP")
# Set GPU Pairs capacity and other GPU settings
self._physics_context.set_gpu_found_lost_pairs_capacity(gm.GPU_PAIRS_CAPACITY)
self._physics_context.set_gpu_found_lost_aggregate_pairs_capacity(gm.GPU_AGGR_PAIRS_CAPACITY)
self._physics_context.set_gpu_total_aggregate_pairs_capacity(gm.GPU_AGGR_PAIRS_CAPACITY)
self._physics_context.set_gpu_max_particle_contacts(gm.GPU_MAX_PARTICLE_CONTACTS)
self._physics_context.set_gpu_max_rigid_contact_count(gm.GPU_MAX_RIGID_CONTACT_COUNT)
self._physics_context.set_gpu_max_rigid_patch_count(gm.GPU_MAX_RIGID_PATCH_COUNT)
def _set_renderer_settings(self):
if gm.ENABLE_HQ_RENDERING:
lazy.carb.settings.get_settings().set_bool("/rtx/reflections/enabled", True)
lazy.carb.settings.get_settings().set_bool("/rtx/indirectDiffuse/enabled", True)
lazy.carb.settings.get_settings().set_int("/rtx/post/dlss/execMode", 3) # "Auto"
lazy.carb.settings.get_settings().set_bool("/rtx/ambientOcclusion/enabled", True)
lazy.carb.settings.get_settings().set_bool("/rtx/directLighting/sampledLighting/enabled", False)
else:
lazy.carb.settings.get_settings().set_bool("/rtx/reflections/enabled", False)
lazy.carb.settings.get_settings().set_bool("/rtx/indirectDiffuse/enabled", False)
lazy.carb.settings.get_settings().set_int("/rtx/post/dlss/execMode", 0) # "Performance"
lazy.carb.settings.get_settings().set_bool("/rtx/ambientOcclusion/enabled", False)
lazy.carb.settings.get_settings().set_bool("/rtx/directLighting/sampledLighting/enabled", True)
lazy.carb.settings.get_settings().set_int("/rtx/raytracing/showLights", 1)
lazy.carb.settings.get_settings().set_float("/rtx/sceneDb/ambientLightIntensity", 0.1)
@property
def viewer_visibility(self):
"""
Returns:
bool: Whether the viewer is visible or not
"""
return self._viewer_camera.viewer_visibility
@viewer_visibility.setter
def viewer_visibility(self, visible):
"""
Sets whether the viewer should be visible or not in the Omni UI
Args:
visible (bool): Whether the viewer should be visible or not
"""
self._viewer_camera.viewer_visibility = visible
@property
def viewer_height(self):
"""
Returns:
int: viewer height of this sensor, in pixels
"""
# If the viewer camera hasn't been created yet, utilize the default width
return gm.DEFAULT_VIEWER_HEIGHT if self._viewer_camera is None else self._viewer_camera.image_height
@viewer_height.setter
def viewer_height(self, height):
"""
Sets the viewer height @height for this sensor
Args:
height (int): viewer height, in pixels
"""
self._viewer_camera.image_height = height
@property
def viewer_width(self):
"""
Returns:
int: viewer width of this sensor, in pixels
"""
# If the viewer camera hasn't been created yet, utilize the default height
return gm.DEFAULT_VIEWER_WIDTH if self._viewer_camera is None else self._viewer_camera.image_width
@viewer_width.setter
def viewer_width(self, width):
"""
Sets the viewer width @width for this sensor
Args:
width (int): viewer width, in pixels
"""
self._viewer_camera.image_width = width
def set_lighting_mode(self, mode):
"""
Sets the active lighting mode in the current simulator. Valid options are one of LightingMode
Args:
mode (LightingMode): Lighting mode to set
"""
lazy.omni.kit.commands.execute("SetLightingMenuModeCommand", lighting_mode=mode)
def enable_viewer_camera_teleoperation(self):
"""
Enables keyboard control of the active viewer camera for this simulation
"""
assert gm.RENDER_VIEWER_CAMERA, "Viewer camera must be enabled to enable teleoperation!"
self._camera_mover = CameraMover(cam=self._viewer_camera)
self._camera_mover.print_info()
return self._camera_mover
def import_scene(self, scene):
"""
Import a scene into the simulator. A scene could be a synthetic one or a realistic Gibson Environment.
Args:
scene (Scene): a scene object to load
"""
assert self.is_stopped(), "Simulator must be stopped while importing a scene!"
assert isinstance(scene, Scene), "import_scene can only be called with Scene"
# Clear the existing scene if any
self.clear()
# Initialize all global updatable object states
for state in self.object_state_types_requiring_update:
if issubclass(state, GlobalUpdateStateMixin):
state.global_initialize()
self._scene = scene
self._scene.load()
# Make sure simulator is not running, then start it so that we can initialize the scene
assert self.is_stopped(), "Simulator must be stopped after importing a scene!"
self.play()
# Initialize the scene
self._scene.initialize()
# Need to one more step for particle systems to work
self.step()
self.stop()
log.info("Imported scene.")
def initialize_object_on_next_sim_step(self, obj):
"""
Initializes the object upon the next simulation step
Args:
obj (BasePrim): Object to initialize as soon as a new sim step is called
"""
self._objects_to_initialize.append(obj)
def import_object(self, obj, register=True):
"""
Import an object into the simulator.
Args:
obj (BaseObject): an object to load
register (bool): whether to register this object internally in the scene registry
"""
assert isinstance(obj, BaseObject), "import_object can only be called with BaseObject"
# Make sure scene is loaded -- objects should not be loaded unless we have a reference to a scene
assert self.scene is not None, "import_object needs to be called after import_scene"
# Load the object in omniverse by adding it to the scene
self.scene.add_object(obj, register=register, _is_call_from_simulator=True)
# Run any callbacks
for callback in self._callbacks_on_import_obj.values():
callback(obj)
# Cache the mapping from link IDs to object
for link in obj.links.values():
self._link_id_to_objects[lazy.pxr.PhysicsSchemaTools.sdfPathToInt(link.prim_path)] = obj
# Lastly, additionally add this object automatically to be initialized as soon as another simulator step occurs
self.initialize_object_on_next_sim_step(obj=obj)
    def remove_object(self, obj):
        """
        Remove one or a list of non-robot object from the simulator.
        Args:
            obj (BaseObject or Iterable[BaseObject]): one or a list of non-robot objects to remove
        """
        objs = [obj] if isinstance(obj, BaseObject) else obj
        if self.is_playing():
            state = self.dump_state()
            # Omniverse has a strange bug where if GPU dynamics is on and the object to remove is in contact
            # with another object (in some specific configuration only, not always), the simulator crashes. Therefore,
            # we first move the object to a safe location, then remove it.
            pos = list(m.OBJECT_GRAVEYARD_POS)
            for ob in objs:
                ob.set_position_orientation(pos, [0, 0, 0, 1])
                # Offset each subsequent object so graveyarded objects do not overlap each other
                pos[0] += max(ob.aabb_extent)
            # One physics timestep will elapse
            self.step_physics()
        for ob in objs:
            self._remove_object(ob)
        if self.is_playing():
            # Update all handles that are now broken because objects have changed
            self.update_handles()
            # Load the state back
            self.load_state(state)
        # Refresh all current rules
        TransitionRuleAPI.prune_active_rules()
def _remove_object(self, obj):
"""
Remove a non-robot object from the simulator. Should not be called directly by the user.
Args:
obj (BaseObject): a non-robot object to remove
"""
# Run any callbacks
for callback in self._callbacks_on_remove_obj.values():
callback(obj)
# pop all link ids
for link in obj.links.values():
self._link_id_to_objects.pop(lazy.pxr.PhysicsSchemaTools.sdfPathToInt(link.prim_path))
# If it was queued up to be initialized, remove it from the queue as well
for i, initialize_obj in enumerate(self._objects_to_initialize):
if obj.name == initialize_obj.name:
self._objects_to_initialize.pop(i)
break
self._scene.remove_object(obj)
    def remove_prim(self, prim):
        """
        Remove a prim from the simulator.
        Args:
            prim (BasePrim): a prim to remove
        """
        # Deleting a prim still referenced by a tensor view makes omni log the following (expected) error,
        # so we silence that channel while deleting:
        # [omni.physx.tensors.plugin] prim '[prim_path]' was deleted while being used by a shape in a tensor view
        # class. The physics.tensors simulationView was invalidated.
        with suppress_omni_log(channels=["omni.physx.tensors.plugin"]):
            # Remove prim
            prim.remove()
        # Update all handles that are now broken because prims have changed
        self.update_handles()
    def _reset_variables(self):
        """
        Reset internal variables when a new stage is loaded
        """
        # NOTE: Intentionally a no-op hook here; overridden behavior (if any) lives in the parent class.
    def render(self):
        """
        Render the current stage, then mark the cached pose API as valid.
        """
        super().render()
        # During rendering, the Fabric API is updated, so we can mark it as clean
        PoseAPI.mark_valid()
    def update_handles(self):
        """
        Refresh the physics simulation view and all object / particle-system handles.
        Must be re-run whenever the set of prims in the stage changes while sim is playing,
        since previously-acquired handles become stale.
        """
        # Handles are only relevant when physx is running
        if not self.is_playing():
            return
        # First, refresh the physics sim view
        self._physics_sim_view = lazy.omni.physics.tensors.create_simulation_view(self.backend)
        self._physics_sim_view.set_subspace_roots("/")
        # Then update the handles for all objects
        if self.scene is not None and self.scene.initialized:
            for obj in self.scene.objects:
                # Only need to update if object is already initialized as well
                if obj.initialized:
                    obj.update_handles()
            for system in self.scene.systems:
                if issubclass(system, MacroPhysicalParticleSystem):
                    system.refresh_particles_view()
        # Finally update any unified views
        RigidContactAPI.initialize_view()
    def _non_physics_step(self):
        """
        Complete any non-physics steps such as state updates.
        This drains the pending-initialization queue, updates particle systems, steps object
        states, updates object visuals, and steps transition rules (the latter three only when
        the corresponding global macros are enabled).
        """
        # If we don't have a valid scene, immediately return
        if self._scene is None:
            return
        # Update omni
        self._omni_update_step()
        # If we're playing we, also run additional logic
        if self.is_playing():
            # Check to see if any objects should be initialized (only done IF we're playing)
            n_objects_to_initialize = len(self._objects_to_initialize)
            if n_objects_to_initialize > 0 and self.is_playing():
                # We iterate through the objects to initialize
                # Note that we don't explicitly do for obj in self._objects_to_initialize because additional objects
                # may be added mid-iteration!!
                # For this same reason, after we finish the loop, we keep any objects that are yet to be initialized
                # First call zero-physics step update, so that handles are properly propagated
                og.sim.pi.update_simulation(elapsedStep=0, currentTime=og.sim.current_time)
                for i in range(n_objects_to_initialize):
                    obj = self._objects_to_initialize[i]
                    obj.initialize()
                    # Track whether we now need contact / joint-break callbacks for any object state
                    if len(obj.states.keys() & self.object_state_types_on_contact) > 0:
                        self._objects_require_contact_callback = True
                    if len(obj.states.keys() & self.object_state_types_on_joint_break) > 0:
                        self._objects_require_joint_break_callback = True
                self._objects_to_initialize = self._objects_to_initialize[n_objects_to_initialize:]
                # Re-initialize the physics view because the number of objects has changed
                self.update_handles()
                # Also refresh the transition rules that are currently active
                TransitionRuleAPI.refresh_all_rules()
            # Update any system-related state
            for system in self.scene.systems:
                system.update()
            # Propagate states if the feature is enabled
            if gm.ENABLE_OBJECT_STATES:
                # Step the object states in global topological order (if the scene exists)
                for state_type in self.object_state_types_requiring_update:
                    if issubclass(state_type, GlobalUpdateStateMixin):
                        state_type.global_update()
                    if issubclass(state_type, UpdateStateMixin):
                        for obj in self.scene.get_objects_with_state(state_type):
                            # Update the state (object should already be initialized since
                            # this step will only occur after objects are initialized and sim
                            # is playing
                            obj.states[state_type].update()
                for obj in self.scene.objects:
                    # Only update visuals for objects that have been initialized so far
                    if isinstance(obj, StatefulObject) and obj.initialized:
                        obj.update_visuals()
            # Possibly run transition rule step
            if gm.ENABLE_TRANSITION_RULES:
                TransitionRuleAPI.step()
    def _omni_update_step(self):
        """
        Step any omni-related things
        """
        # Clear the bounding box and contact caches so that they get updated during the next time they're called
        RigidContactAPI.clear()
    def play(self):
        """
        Start the physics simulation. No-op if already playing. When transitioning from a
        stopped state, also refreshes robot controller modes and transition rules, runs one
        non-physics step, and invokes all registered on-play callbacks.
        """
        if not self.is_playing():
            # Track whether we're starting the simulator fresh -- i.e.: whether we were stopped previously
            was_stopped = self.is_stopped()
            # Run super first
            # We suppress warnings from omni.usd because it complains about values set in the native USD
            # These warnings occur because the native USD file has some type mismatch in the `scale` property,
            # where the property expects a double but for whatever reason the USD interprets its values as floats
            # We suppress omni.physicsschema.plugin when kinematic_only objects are placed with scale ~1.0, to suppress
            # the following error:
            # [omni.physicsschema.plugin] ScaleOrientation is not supported for rigid bodies, prim path: [...] You may
            # ignore this if the scale is close to uniform.
            # We also need to suppress the following error when flat cache is used:
            # [omni.physx.plugin] Transformation change on non-root links is not supported.
            channels = ["omni.usd", "omni.physicsschema.plugin"]
            if gm.ENABLE_FLATCACHE:
                channels.append("omni.physx.plugin")
            with suppress_omni_log(channels=channels):
                super().play()
            # Take a render step -- this is needed so that certain (unknown, maybe omni internal state?) is populated
            # correctly.
            self.render()
            # Update all object handles, unless this is a play during initialization
            if og.sim is not None:
                self.update_handles()
            if was_stopped:
                # We need to update controller mode because kp and kd were set to the original (incorrect) values when
                # sim was stopped. We need to reset them to default_kp and default_kd defined in ControllableObject.
                # We also need to take an additional sim step to make sure simulator is functioning properly.
                # We need to do this because for some reason omniverse exhibits strange behavior if we do certain
                # operations immediately after playing; e.g.: syncing USD poses when flatcache is enabled
                if self.scene is not None and self.scene.initialized:
                    for robot in self.scene.robots:
                        if robot.initialized:
                            robot.update_controller_mode()
                    # Also refresh any transition rules that became stale while sim was stopped
                    TransitionRuleAPI.refresh_all_rules()
            # Additionally run non physics things
            self._non_physics_step()
        # Run all callbacks
        for callback in self._callbacks_on_play.values():
            callback()
    def pause(self):
        """
        Pause the physics simulation. No-op if already paused.
        """
        if not self.is_paused():
            super().pause()
    def stop(self):
        """
        Stop the physics simulation, reset flatcache (if enabled), and invoke all registered
        on-stop callbacks. No-op on the stop itself if already stopped, but callbacks always run.
        """
        if not self.is_stopped():
            super().stop()
        # If we're using flatcache, we also need to reset its API
        if gm.ENABLE_FLATCACHE:
            FlatcacheAPI.reset()
        # Run all callbacks
        for callback in self._callbacks_on_stop.values():
            callback()
@property
def n_physics_timesteps_per_render(self):
"""
Number of physics timesteps per rendering timestep. rendering_dt has to be a multiple of physics_dt.
Returns:
int: Discrete number of physics timesteps to take per step
"""
n_physics_timesteps_per_render = self.get_rendering_dt() / self.get_physics_dt()
assert n_physics_timesteps_per_render.is_integer(), "render_timestep must be a multiple of physics_timestep"
return int(n_physics_timesteps_per_render)
def step(self, render=True):
"""
Step the simulation at self.render_timestep
Args:
render (bool): Whether rendering should occur or not
"""
# If we have imported any objects within the last timestep, we render the app once, since otherwise calling
# step() may not step physics
if len(self._objects_to_initialize) > 0:
self.render()
if render:
super().step(render=True)
else:
for i in range(self.n_physics_timesteps_per_render):
super().step(render=False)
# Additionally run non physics things
self._non_physics_step()
# TODO (eric): After stage changes (e.g. pose, texture change), it will take two super().step(render=True) for
# the result to propagate to the rendering. We could have called super().render() here but it will introduce
# a big performance regression.
    def step_physics(self):
        """
        Step the physics a single step.
        """
        self._physics_context._step(current_time=self.current_time)
        self._omni_update_step()
        # A physics-only step does not refresh Fabric transforms, so cached poses are now stale
        PoseAPI.invalidate()
    def _on_contact(self, contact_headers, contact_data):
        """
        This callback will be invoked after every PHYSICS step if there is any contact.
        For each of the pair of objects in each contact, we invoke the on_contact function for each of its states
        that subclass ContactSubscribedStateMixin. These states update based on contact events.
        """
        if gm.ENABLE_OBJECT_STATES and self._objects_require_contact_callback:
            # Group contact headers by the (sorted) pair of objects involved, so each pair's
            # states receive all of that pair's headers in a single on_contact call
            headers = defaultdict(list)
            for contact_header in contact_headers:
                actor0_obj = self._link_id_to_objects.get(contact_header.actor0, None)
                actor1_obj = self._link_id_to_objects.get(contact_header.actor1, None)
                # If any of the objects cannot be found, skip
                if actor0_obj is None or actor1_obj is None:
                    continue
                # If any of the objects is not initialized, skip
                if not actor0_obj.initialized or not actor1_obj.initialized:
                    continue
                # If any of the objects is not stateful, skip
                if not isinstance(actor0_obj, StatefulObject) or not isinstance(actor1_obj, StatefulObject):
                    continue
                # If any of the objects doesn't have states that require on_contact callbacks, skip
                if len(actor0_obj.states.keys() & self.object_state_types_on_contact) == 0 or len(actor1_obj.states.keys() & self.object_state_types_on_contact) == 0:
                    continue
                # Sort by uuid so (A, B) and (B, A) contacts share one key
                headers[tuple(sorted((actor0_obj, actor1_obj), key=lambda x: x.uuid))].append(contact_header)
            for (actor0_obj, actor1_obj) in headers:
                # Fire the callback in both directions for each pair
                for obj0, obj1 in [(actor0_obj, actor1_obj), (actor1_obj, actor0_obj)]:
                    for state_type in self.object_state_types_on_contact:
                        if state_type in obj0.states:
                            obj0.states[state_type].on_contact(obj1, headers[(actor0_obj, actor1_obj)], contact_data)
    def _on_simulation_event(self, event):
        """
        This callback will be invoked if there is any simulation event. Currently it only processes JOINT_BREAK event.
        """
        if gm.ENABLE_OBJECT_STATES:
            if event.type == int(lazy.omni.physx.bindings._physx.SimulationEvent.JOINT_BREAK) and self._objects_require_joint_break_callback:
                joint_path = str(lazy.pxr.PhysicsSchemaTools.decodeSdfPath(event.payload["jointPath"][0], event.payload["jointPath"][1]))
                obj = None
                # TODO: recursively try to find the parent object of this joint
                # Walk prim-path prefixes from the root down until one resolves to a registered object
                tokens = joint_path.split("/")
                for i in range(2, len(tokens) + 1):
                    obj = self._scene.object_registry("prim_path", "/".join(tokens[:i]))
                    if obj is not None:
                        break
                # Only stateful, initialized objects with joint-break-subscribed states get the callback
                if obj is None or not obj.initialized or not isinstance(obj, StatefulObject):
                    return
                if len(obj.states.keys() & self.object_state_types_on_joint_break) == 0:
                    return
                for state_type in self.object_state_types_on_joint_break:
                    if state_type in obj.states:
                        obj.states[state_type].on_joint_break(joint_path)
def is_paused(self):
"""
Returns:
bool: True if the simulator is paused, otherwise False
"""
return not (self.is_stopped() or self.is_playing())
@contextlib.contextmanager
def stopped(self):
"""
A context scope for making sure the simulator is stopped during execution within this scope.
Upon leaving the scope, the prior simulator state is restored.
"""
# Infer what state we're currently in, then stop, yield, and then restore the original state
sim_is_playing, sim_is_paused = self.is_playing(), self.is_paused()
if sim_is_playing or sim_is_paused:
self.stop()
yield
if sim_is_playing: self.play()
elif sim_is_paused: self.pause()
@contextlib.contextmanager
def playing(self):
"""
A context scope for making sure the simulator is playing during execution within this scope.
Upon leaving the scope, the prior simulator state is restored.
"""
# Infer what state we're currently in, then stop, yield, and then restore the original state
sim_is_stopped, sim_is_paused = self.is_stopped(), self.is_paused()
if sim_is_stopped or sim_is_paused:
self.play()
yield
if sim_is_stopped: self.stop()
elif sim_is_paused: self.pause()
@contextlib.contextmanager
def paused(self):
"""
A context scope for making sure the simulator is paused during execution within this scope.
Upon leaving the scope, the prior simulator state is restored.
"""
# Infer what state we're currently in, then stop, yield, and then restore the original state
sim_is_stopped, sim_is_playing = self.is_stopped(), self.is_playing()
if sim_is_stopped or sim_is_playing:
self.pause()
yield
if sim_is_stopped: self.stop()
elif sim_is_playing: self.play()
@contextlib.contextmanager
def slowed(self, dt):
"""
A context scope for making the simulator simulation dt slowed, e.g.: for taking micro-steps for propagating
instantaneous kinematics with minimal impact on physics propagation.
NOTE: This will set both the physics dt and rendering dt to the same value during this scope.
Upon leaving the scope, the prior simulator state is restored.
"""
# Set dt, yield, then restore the original dt
physics_dt, rendering_dt = self.get_physics_dt(), self.get_rendering_dt()
self.set_simulation_dt(physics_dt=dt, rendering_dt=dt)
yield
self.set_simulation_dt(physics_dt=physics_dt, rendering_dt=rendering_dt)
def add_callback_on_play(self, name, callback):
"""
Adds a function @callback, referenced by @name, to be executed every time sim.play() is called
Args:
name (str): Name of the callback
callback (function): Callback function. Function signature is expected to be:
def callback() --> None
"""
self._callbacks_on_play[name] = callback
def add_callback_on_stop(self, name, callback):
"""
Adds a function @callback, referenced by @name, to be executed every time sim.stop() is called
Args:
name (str): Name of the callback
callback (function): Callback function. Function signature is expected to be:
def callback() --> None
"""
self._callbacks_on_stop[name] = callback
def add_callback_on_import_obj(self, name, callback):
"""
Adds a function @callback, referenced by @name, to be executed every time sim.import_object() is called
Args:
name (str): Name of the callback
callback (function): Callback function. Function signature is expected to be:
def callback(obj: BaseObject) --> None
"""
self._callbacks_on_import_obj[name] = callback
def add_callback_on_remove_obj(self, name, callback):
"""
Adds a function @callback, referenced by @name, to be executed every time sim.remove_object() is called
Args:
name (str): Name of the callback
callback (function): Callback function. Function signature is expected to be:
def callback(obj: BaseObject) --> None
"""
self._callbacks_on_remove_obj[name] = callback
    def remove_callback_on_play(self, name):
        """
        Remove play callback whose reference is @name. No-op if @name is not registered.
        Args:
            name (str): Name of the callback
        """
        self._callbacks_on_play.pop(name, None)
    def remove_callback_on_stop(self, name):
        """
        Remove stop callback whose reference is @name. No-op if @name is not registered.
        Args:
            name (str): Name of the callback
        """
        self._callbacks_on_stop.pop(name, None)
    def remove_callback_on_import_obj(self, name):
        """
        Remove object-import callback whose reference is @name. No-op if @name is not registered.
        Args:
            name (str): Name of the callback
        """
        self._callbacks_on_import_obj.pop(name, None)
    def remove_callback_on_remove_obj(self, name):
        """
        Remove object-removal callback whose reference is @name. No-op if @name is not registered.
        Args:
            name (str): Name of the callback
        """
        self._callbacks_on_remove_obj.pop(name, None)
    @classmethod
    def clear_instance(cls):
        """
        Clear the singleton SimulationContext instance and reset the world-initialized flag
        """
        lazy.omni.isaac.core.simulation_context.SimulationContext.clear_instance()
        Simulator._world_initialized = None
        return
    def __del__(self):
        """
        Tear down the underlying SimulationContext and reset the world-initialized flag
        """
        lazy.omni.isaac.core.simulation_context.SimulationContext.__del__(self)
        Simulator._world_initialized = None
        return
    @property
    def pi(self):
        """
        Returns:
            PhysX: Physx Interface (pi) for controlling low-level physx engine
        """
        # Cached in _init_stage from lazy.omni.physx.get_physx_interface()
        return self._physx_interface
    @property
    def psi(self):
        """
        Returns:
            IPhysxSimulation: Physx Simulation Interface (psi) for controlling low-level physx simulation
        """
        # Cached in _init_stage from lazy.omni.physx.get_physx_simulation_interface()
        return self._physx_simulation_interface
    @property
    def psqi(self):
        """
        Returns:
            PhysXSceneQuery: Physx Scene Query Interface (psqi) for running low-level scene queries
        """
        # Cached in _init_stage from lazy.omni.physx.get_physx_scene_query_interface()
        return self._physx_scene_query_interface
    @property
    def scene(self):
        """
        Returns:
            None or Scene: Scene currently loaded in this simulator. If no scene is loaded, returns None
        """
        # Set by import_scene(); reset to None by clear()
        return self._scene
    @property
    def viewer_camera(self):
        """
        Returns:
            VisionSensor: Active camera sensor corresponding to the active viewport window instance shown in the omni UI
        """
        # Set in _init_stage (when gm.RENDER_VIEWER_CAMERA is enabled); reset to None by clear()
        return self._viewer_camera
    @property
    def camera_mover(self):
        """
        Returns:
            None or CameraMover: If enabled, the teleoperation interface for controlling the active viewer camera
        """
        return self._camera_mover
    @property
    def world_prim(self):
        """
        Returns:
            Usd.Prim: Prim at /World
        """
        # Fetched fresh on every access so the handle stays valid across stage reloads
        return lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path="/World")
    def clear(self) -> None:
        """
        Clears the stage leaving the PhysicsScene only if under /World.
        Stops physics, tears down the scene, sensors, materials, and transition rules, resets
        all internal bookkeeping, and finally opens a fresh (dummy) stage.
        """
        # Stop the physics
        self.stop()
        # Clear any pre-existing scene if it exists
        if self._scene is not None:
            self.scene.clear()
        self._scene = None
        # Clear all vision sensors and remove viewer camera reference and camera mover reference
        VisionSensor.clear()
        self._viewer_camera = None
        if self._camera_mover is not None:
            self._camera_mover.clear()
            self._camera_mover = None
        # Clear all global update states
        for state in self.object_state_types_requiring_update:
            if issubclass(state, GlobalUpdateStateMixin):
                state.global_clear()
        # Clear all materials
        MaterialPrim.clear()
        # Clear all transition rules
        TransitionRuleAPI.clear()
        # Clear uniquely named items and other internal states
        clear_pu()
        clear_uu()
        # Reset all per-stage bookkeeping (pending inits, callback registries, link-id cache)
        self._objects_to_initialize = []
        self._objects_require_contact_callback = False
        self._objects_require_joint_break_callback = False
        self._link_id_to_objects = dict()
        self._callbacks_on_play = dict()
        self._callbacks_on_stop = dict()
        self._callbacks_on_import_obj = dict()
        self._callbacks_on_remove_obj = dict()
        # Load dummy stage, but don't clear sim to prevent circular loops
        self._open_new_stage()
def write_metadata(self, key, data):
"""
Writes metadata @data to the current global metadata dict using key @key
Args:
key (str): Keyword entry in the global metadata dictionary to use
data (dict): Data to write to @key in the global metadata dictionary
"""
self.world_prim.SetCustomDataByKey(key, data)
def get_metadata(self, key):
"""
Grabs metadata from the current global metadata dict using key @key
Args:
key (str): Keyword entry in the global metadata dictionary to use
"""
return self.world_prim.GetCustomDataByKey(key)
    def restore(self, json_path):
        """
        Restore a simulation environment from @json_path.
        Args:
            json_path (str): Full path of JSON file to load, which contains information
                to recreate a scene.
        """
        if not json_path.endswith(".json"):
            log.error(f"You have to define the full json_path to load from. Got: {json_path}")
            return
        # Load the info from the json
        with open(json_path, "r") as f:
            scene_info = json.load(f)
        init_info = scene_info["init_info"]
        state = scene_info["state"]
        # Override the init info with our json path
        init_info["args"]["scene_file"] = json_path
        # Also make sure we have any additional modifications necessary from the specific scene
        og.REGISTERED_SCENES[init_info["class_name"]].modify_init_info_for_restoring(init_info=init_info)
        # Recreate and import the saved scene (sim must be stopped for import_scene)
        og.sim.stop()
        recreated_scene = create_object_from_init_info(init_info)
        self.import_scene(scene=recreated_scene)
        # Start the simulation and restore the dynamic state of the scene and then pause again
        self.play()
        self.load_state(state, serialized=False)
        log.info("The saved simulation environment loaded.")
        return
def save(self, json_path):
"""
Saves the current simulation environment to @json_path.
Args:
json_path (str): Full path of JSON file to save (should end with .json), which contains information
to recreate the current scene.
"""
# Make sure the sim is not stopped, since we need to grab joint states
assert not self.is_stopped(), "Simulator cannot be stopped when saving to USD!"
# Make sure there are no objects in the initialization queue, if not, terminate early and notify user
# Also run other sanity checks before saving
if len(self._objects_to_initialize) > 0:
log.error("There are still objects to initialize! Please take one additional sim step and then save.")
return
if not self.scene:
log.warning("Scene has not been loaded. Nothing to save.")
return
if not json_path.endswith(".json"):
log.error(f"You have to define the full json_path to save the scene to. Got: {json_path}")
return
# Update scene info
self.scene.update_objects_info()
# Dump saved current state and also scene init info
scene_info = {
"metadata": self.world_prim.GetCustomData(),
"state": self.scene.dump_state(serialized=False),
"init_info": self.scene.get_init_info(),
"objects_info": self.scene.get_objects_info(),
}
# Write this to the json file
Path(os.path.dirname(json_path)).mkdir(parents=True, exist_ok=True)
with open(json_path, "w+") as f:
json.dump(scene_info, f, cls=NumpyEncoder, indent=4)
log.info("The current simulation environment saved.")
def _open_new_stage(self):
"""
Opens a new stage
"""
# Stop the physics if we're playing
if not self.is_stopped():
log.warning("Stopping simulation in order to open new stage.")
self.stop()
# Store physics dt and rendering dt to reuse later
# Note that the stage may have been deleted previously; if so, we use the default values
# of 1/120, 1/30
try:
physics_dt = self.get_physics_dt()
except:
print("WARNING: Invalid or non-existent physics scene found. Setting physics dt to 1/120.")
physics_dt = 1 / 120.
rendering_dt = self.get_rendering_dt()
# Open new stage -- suppressing warning that we're opening a new stage
with suppress_omni_log(None):
lazy.omni.isaac.core.utils.stage.create_new_stage()
# Clear physics context
self._physics_context = None
self._physx_fabric_interface = None
# Create world prim
self.stage.DefinePrim("/World", "Xform")
self._init_stage(physics_dt=physics_dt, rendering_dt=rendering_dt)
def _load_stage(self, usd_path):
"""
Open the stage specified by USD file at @usd_path
Args:
usd_path (str): Absolute filepath to USD stage that should be loaded
"""
# Stop the physics if we're playing
if not self.is_stopped():
log.warning("Stopping simulation in order to load stage.")
self.stop()
# Store physics dt and rendering dt to reuse later
# Note that the stage may have been deleted previously; if so, we use the default values
# of 1/120, 1/30
try:
physics_dt = self.get_physics_dt()
except:
print("WARNING: Invalid or non-existent physics scene found. Setting physics dt to 1/120.")
physics_dt = 1/120.
rendering_dt = self.get_rendering_dt()
# Open new stage -- suppressing warning that we're opening a new stage
with suppress_omni_log(None):
lazy.omni.isaac.core.utils.stage.open_stage(usd_path=usd_path)
self._init_stage(physics_dt=physics_dt, rendering_dt=rendering_dt)
    def _init_stage(
        self,
        physics_dt=None,
        rendering_dt=None,
        stage_units_in_meters=None,
        physics_prim_path="/physicsScene",
        sim_params=None,
        set_defaults=True,
        backend="numpy",
        device=None,
    ):
        """
        Initialize the freshly-opened stage: caches physx interfaces, applies engine / renderer
        settings, hooks up stage / contact / simulation-event callbacks, and sets up the viewer
        camera (if enabled). All arguments are forwarded to the parent class's _init_stage.
        """
        # Run super first
        super()._init_stage(
            physics_dt=physics_dt,
            rendering_dt=rendering_dt,
            stage_units_in_meters=stage_units_in_meters,
            physics_prim_path=physics_prim_path,
            sim_params=sim_params,
            set_defaults=set_defaults,
            backend=backend,
            device=device,
        )
        # Update internal vars
        self._physx_interface = lazy.omni.physx.get_physx_interface()
        self._physx_simulation_interface = lazy.omni.physx.get_physx_simulation_interface()
        self._physx_scene_query_interface = lazy.omni.physx.get_physx_scene_query_interface()
        # Update internal settings
        self._set_physics_engine_settings()
        self._set_renderer_settings()
        # Update internal callbacks
        self._setup_default_callback_fns()
        self._stage_open_callback = (
            lazy.omni.usd.get_context().get_stage_event_stream().create_subscription_to_pop(self._stage_open_callback_fn)
        )
        # Subscribe to contact reports and simulation events (e.g. joint breaks)
        self._contact_callback = self._physics_context._physx_sim_interface.subscribe_contact_report_events(self._on_contact)
        self._simulation_event_callback = self._physx_interface.get_simulation_event_stream_v2().create_subscription_to_pop(self._on_simulation_event)
        # Set the lighting mode to be stage by default
        self.set_lighting_mode(mode=LightingMode.STAGE)
        # Set the viewer camera, and then set its default pose
        if gm.RENDER_VIEWER_CAMERA:
            self._set_viewer_camera()
            self.viewer_camera.set_position_orientation(
                position=np.array(m.DEFAULT_VIEWER_CAMERA_POS),
                orientation=np.array(m.DEFAULT_VIEWER_CAMERA_QUAT),
            )
    def close(self):
        """
        Shuts down the OmniGibson application
        """
        self._app.shutdown()
    @property
    def stage_id(self):
        """
        Returns:
            int: ID of the current active stage
        """
        # Looked up through the USD stage cache rather than stored, so it survives stage reloads
        return lazy.pxr.UsdUtils.StageCache.Get().GetId(self.stage).ToLongInt()
    @property
    def device(self):
        """
        Returns:
            device (None or str): Device used in simulation backend
        """
        return self._device
    @device.setter
    def device(self, device):
        """
        Sets the device used for sim backend
        Args:
            device (None or str): Device to set for the simulation backend
        """
        self._device = device
        if self._device is not None and "cuda" in self._device:
            # Resolve a generic "cuda" request to the specific device index physx is configured to use
            device_id = self._settings.get_as_int("/physics/cudaDevice")
            self._device = f"cuda:{device_id}"
    @property
    def state_size(self):
        """
        Returns:
            int: Size of the simulator's serialized state (delegated to the loaded scene)
        """
        # Total state size is the state size of our scene
        return self._scene.state_size
    def _dump_state(self):
        """
        Dump the simulator's (non-serialized) state, delegated entirely to the loaded scene
        """
        # Default state is from the scene
        return self._scene.dump_state(serialized=False)
    def _load_state(self, state):
        """
        Load a (non-serialized) state dict, delegated entirely to the loaded scene
        """
        # Default state is from the scene
        self._scene.load_state(state=state, serialized=False)
def load_state(self, state, serialized=False):
# We need to make sure the simulator is playing since joint states only get updated when playing
assert self.is_playing()
# Run super
super().load_state(state=state, serialized=serialized)
# Highlight that at the current step, the non-kinematic states are potentially inaccurate because a sim
# step is needed to propagate specific states in physics backend
# TODO: This should be resolved in a future omniverse release!
disclaimer("Attempting to load simulator state.\n"
"Currently, omniverse does not support exclusively stepping kinematics, so we cannot update some "
"of our object states relying on updated kinematics until a simulator step is taken!\n"
"Object states such as OnTop, Inside, etc. relying on relative spatial information will inaccurate"
"until a single sim step is taken.\n"
"This should be resolved by the next NVIDIA Isaac Sim release.")
    def _serialize(self, state):
        """
        Serialize a state dict into a flat representation, delegated entirely to the loaded scene
        """
        # Default state is from the scene
        return self._scene.serialize(state=state)
    def _deserialize(self, state):
        """
        Deserialize a flat state representation, delegated to the loaded scene.
        Returns a (state_dict, n_entries_consumed) tuple.
        """
        # Default state is from the scene
        return self._scene.deserialize(state=state), self._scene.state_size
if not og.sim:
og.sim = Simulator(*args, **kwargs)
print()
print_icon()
print_logo()
print()
log.info(f"{'-' * 10} Welcome to {logo_small()}! {'-' * 10}")
return og.sim | 62,251 | Python | 43.370634 | 170 | 0.585661 |
StanfordVL/OmniGibson/omnigibson/transition_rules.py | import operator
from abc import ABCMeta, abstractmethod
from collections import namedtuple, defaultdict
import numpy as np
import json
from copy import copy
import itertools
import os
from collections import defaultdict
import networkx as nx
import omnigibson as og
from omnigibson.macros import gm, create_module_macros
from omnigibson.systems import get_system, is_system_active, PhysicalParticleSystem, VisualParticleSystem, REGISTERED_SYSTEMS
from omnigibson.objects.dataset_object import DatasetObject
from omnigibson.object_states import *
from omnigibson.object_states.factory import get_system_states
from omnigibson.object_states.object_state_base import AbsoluteObjectState, RelativeObjectState
from omnigibson.utils.asset_utils import get_all_object_category_models
from omnigibson.utils.constants import PrimType
from omnigibson.utils.python_utils import Registerable, classproperty, subclass_factory
from omnigibson.utils.registry_utils import Registry
import omnigibson.utils.transform_utils as T
from omnigibson.utils.ui_utils import disclaimer, create_module_logger
from omnigibson.utils.usd_utils import RigidContactAPI
from omnigibson.utils.bddl_utils import translate_bddl_recipe_to_og_recipe, translate_bddl_washer_rule_to_og_washer_rule
import bddl
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Default melting temperature
m.MELTING_TEMPERATURE = 100.0
# Default "trash" system if an invalid mixing rule transition occurs
m.DEFAULT_GARBAGE_SYSTEM = "sludge"
# Tuple of attributes of objects created in transitions.
# `states` field is dict mapping object state class to arguments to pass to setter for that class
_attrs_fields = ["category", "model", "name", "scale", "obj", "pos", "orn", "bb_pos", "bb_orn", "states", "callback"]
# states: dict mapping object state class to the args to pass to that state's setter for @obj
# callback: function: signature callback(obj) -> None to execute after states are set, if any
ObjectAttrs = namedtuple(
    "ObjectAttrs", _attrs_fields, defaults=(None,) * len(_attrs_fields))
# Tuple of lists of objects to be added or removed returned from transitions, if not None
TransitionResults = namedtuple(
    "TransitionResults", ["add", "remove"], defaults=(None, None))
# Mapping from transition rule json files to rule class names
_JSON_FILES_TO_RULES = {
    "heat_cook.json": ["CookingObjectRule", "CookingSystemRule"],
    "mixing_stick.json": ["MixingToolRule"],
    "single_toggleable_machine.json": ["ToggleableMachineRule"],
    "substance_cooking.json": ["CookingPhysicalParticleRule"],
    "substance_watercooking.json": ["CookingPhysicalParticleRule"],
    "washer.json": ["WasherRule"],
}
# Global dicts that will contain mappings
REGISTERED_RULES = dict()
class TransitionRuleAPI:
    """
    Monolithic class containing methods to check and execute arbitrary discrete state transitions within the simulator.

    All state is class-level: the set of currently-active rules and a queue of per-object
    initialization info to apply on the next step() call.
    """
    # Set of active rules, i.e. rules whose candidate filters are all satisfied by at least
    # one object currently in the scene
    ACTIVE_RULES = set()
    # Maps BaseObject instances to dictionary with the following keys:
    # "states": None or dict mapping object states to arguments to set for that state when the object is initialized
    # "callback": None or function to execute when the object is initialized
    _INIT_INFO = dict()
    @classmethod
    def get_rule_candidates(cls, rule, objects):
        """
        Computes valid input object candidates for transition rule @rule, if any exist

        Args:
            rule (BaseTransitionRule): Transition rule whose candidates should be computed
            objects (list of BaseObject): List of objects that will be used to compute object candidates

        Returns:
            None or dict: None if no valid candidates are found, otherwise mapping from filter key to list of object
                instances that satisfy that filter
        """
        obj_candidates = rule.get_object_candidates(objects=objects)
        # A rule is only viable when EVERY one of its filters matched at least one object
        n_filters_satisfied = sum(len(candidates) > 0 for candidates in obj_candidates.values())
        # Return object candidates if all filters are met, otherwise return None
        return obj_candidates if n_filters_satisfied == len(rule.candidate_filters) else None
    @classmethod
    def prune_active_rules(cls):
        """
        Prunes the active transition rules, removing any whose filter requirements are not satisfied by all current
        objects on the scene. Useful when the current object set changes, e.g.: an object is removed from the simulator
        """
        # Need explicit tuple to iterate over because refresh_rules mutates the ACTIVE_RULES set in place
        cls.refresh_rules(rules=tuple(cls.ACTIVE_RULES))
    @classmethod
    def refresh_all_rules(cls):
        """
        Refreshes all registered rules given the current set of objects in the scene
        """
        global RULES_REGISTRY
        # Clear all active rules
        cls.ACTIVE_RULES = set()
        # Refresh all registered rules
        cls.refresh_rules(rules=RULES_REGISTRY.objects)
    @classmethod
    def refresh_rules(cls, rules):
        """
        Refreshes the specified transition rules @rules based on current set of objects in the simulator.
        This will prune any pre-existing rules in cls.ACTIVE_RULES if no valid candidates are found, or add / update
        the entry if valid candidates are found

        Args:
            rules (list of BaseTransitionRule): List of transition rules whose candidate lists should be refreshed
        """
        objects = og.sim.scene.objects
        for rule in rules:
            # Check if rule is still valid, if so, update its entry
            object_candidates = cls.get_rule_candidates(rule=rule, objects=objects)
            # Update candidates if valid, otherwise pop the entry if it exists in cls.ACTIVE_RULES
            if object_candidates is not None:
                # We have a valid rule which should be active, so grab and initialize all of its conditions
                # NOTE: The rule may ALREADY exist in ACTIVE_RULES, but we still need to refresh its candidates because
                # the relevant candidate set / information for the rule + its conditions may have changed given the
                # new set of objects
                rule.refresh(object_candidates=object_candidates)
                cls.ACTIVE_RULES.add(rule)
            elif rule in cls.ACTIVE_RULES:
                cls.ACTIVE_RULES.remove(rule)
    @classmethod
    def step(cls):
        """
        Steps all active transition rules, checking if any are satisfied, and if so, executing their transition
        """
        # First apply any transition object init states from before, and then clear the dictionary
        for obj, info in cls._INIT_INFO.items():
            if info["states"] is not None:
                for state, args in info["states"].items():
                    obj.states[state].set_value(*args)
            if info["callback"] is not None:
                info["callback"](obj)
        cls._INIT_INFO = dict()
        # Iterate over all active rules and process the rule for every valid object candidate combination
        # Cast to list before iterating since ACTIVE_RULES may get updated mid-iteration
        added_obj_attrs = []
        removed_objs = []
        for rule in tuple(cls.ACTIVE_RULES):
            output = rule.step()
            # Store objects to be added / removed if we have a valid output
            if output is not None:
                added_obj_attrs += output.add
                removed_objs += output.remove
        cls.execute_transition(added_obj_attrs=added_obj_attrs, removed_objs=removed_objs)
    @classmethod
    def execute_transition(cls, added_obj_attrs, removed_objs):
        """
        Executes the transition for the given added and removed objects.

        Args:
            added_obj_attrs (list of ObjectAttrs): Attribute bundles for objects to add to the scene
            removed_objs (list of BaseObject): Objects to remove from the scene
        """
        # Process all transition results
        if len(removed_objs) > 0:
            # First remove pre-existing objects
            og.sim.remove_object(removed_objs)
        # Then add new objects
        if len(added_obj_attrs) > 0:
            # NOTE(review): `state` is captured but never used below -- presumably a leftover
            # from a dump/load pattern; confirm whether a matching og.sim.load_state(state) is missing
            state = og.sim.dump_state()
            for added_obj_attr in added_obj_attrs:
                new_obj = added_obj_attr.obj
                og.sim.import_object(new_obj)
                # By default, added_obj_attr is populated with all Nones -- so these will all be pass-through operations
                # unless pos / orn (or, conversely, bb_pos / bb_orn) is specified
                if added_obj_attr.pos is not None or added_obj_attr.orn is not None:
                    new_obj.set_position_orientation(position=added_obj_attr.pos, orientation=added_obj_attr.orn)
                elif isinstance(new_obj, DatasetObject) and \
                        (added_obj_attr.bb_pos is not None or added_obj_attr.bb_orn is not None):
                    new_obj.set_bbox_center_position_orientation(position=added_obj_attr.bb_pos,
                                                                 orientation=added_obj_attr.bb_orn)
                else:
                    raise ValueError("Expected at least one of pos, orn, bb_pos, or bb_orn to be specified in ObjectAttrs!")
                # Additionally record any requested states if specified to be updated during the next transition step
                if added_obj_attr.states is not None or added_obj_attr.callback is not None:
                    cls._INIT_INFO[new_obj] = {
                        "states": added_obj_attr.states,
                        "callback": added_obj_attr.callback,
                    }
    @classmethod
    def clear(cls):
        """
        Clears any internal state when the simulator is restarted (e.g.: when a new stage is opened)
        """
        global RULES_REGISTRY
        # Clear internal dictionaries
        cls.ACTIVE_RULES = set()
        cls._INIT_INFO = dict()
class ObjectCandidateFilter(metaclass=ABCMeta):
    """
    Abstract predicate over scene objects, used to pre-select which objects are valid
    candidates for a transition rule's condition checks.

    NOTE: Implementations should test STATIC object properties only -- i.e. properties
    that never change at runtime once the object has been imported.
    """
    @abstractmethod
    def __call__(self, obj):
        """Return True if @obj satisfies this filter, False otherwise."""
        return False
class CategoryFilter(ObjectCandidateFilter):
    """Candidate filter that keeps only objects belonging to a fixed category."""

    def __init__(self, category):
        # Category string that candidate objects must match exactly
        self.category = category

    def __call__(self, obj):
        # An object passes iff its category string equals ours
        return self.category == obj.category
class AbilityFilter(ObjectCandidateFilter):
    """Candidate filter that keeps only objects exposing a given ability."""

    def __init__(self, ability):
        # Ability key to look up in the object's ability mapping
        self.ability = ability

    def __call__(self, obj):
        # Membership test against the object's ability dict
        return self.ability in obj._abilities
class NameFilter(ObjectCandidateFilter):
    """Candidate filter that keeps objects whose name contains a given substring."""

    def __init__(self, name):
        # Substring that must appear somewhere in the candidate object's name
        self.name = name

    def __call__(self, obj):
        # Substring containment, not exact equality
        return self.name in obj.name
class NotFilter(ObjectCandidateFilter):
    """Candidate filter that inverts the verdict of a wrapped filter."""

    def __init__(self, f):
        # Wrapped filter whose result gets negated
        self.f = f

    def __call__(self, obj):
        # Pass exactly when the wrapped filter rejects the object
        verdict = self.f(obj)
        return not verdict
class OrFilter(ObjectCandidateFilter):
    """Candidate filter passing objects accepted by at least one sub-filter."""

    def __init__(self, filters):
        # Sub-filters combined disjunctively
        self.filters = filters

    def __call__(self, obj):
        # Short-circuit on the first sub-filter that accepts the object
        for sub in self.filters:
            if sub(obj):
                return True
        return False
class AndFilter(ObjectCandidateFilter):
    """Candidate filter passing objects accepted by every sub-filter."""

    def __init__(self, filters):
        # Sub-filters combined conjunctively
        self.filters = filters

    def __call__(self, obj):
        # Short-circuit on the first sub-filter that rejects the object
        for sub in self.filters:
            if not sub(obj):
                return False
        return True
class RuleCondition:
    """
    Dynamic filtering step applied to a transition rule's object candidates.

    Unlike ObjectCandidateFilter, conditions may depend on DYNAMIC object properties --
    values that can change from one simulation step to the next.
    """
    def refresh(self, object_candidates):
        """
        Hook for rebuilding any internal state tied to @object_candidates.

        Args:
            object_candidates (dict): Maps filter name to valid object(s) that satisfy that filter
        """
        # Default implementation caches nothing
        pass

    @abstractmethod
    def __call__(self, object_candidates):
        """
        Prune @object_candidates in place and report whether any candidates remain.

        Args:
            object_candidates (dict): Maps filter name to valid object(s) that satisfy that filter

        Returns:
            bool: True if at least one valid candidate survives the pruning
        """
        # Default is False
        return False

    @property
    def modifies_filter_names(self):
        """
        Returns:
            set: Filter name(s) whose candidate lists this condition may prune in place
        """
        raise NotImplementedError
class TouchingAnyCondition(RuleCondition):
    """
    Rule condition that prunes object candidates from @filter_1_name, only keeping any that are touching any object
    from @filter_2_name.

    Supports two evaluation paths: an optimized batch lookup against the global contact
    impulse matrix, and a fallback per-object ContactBodies check (used when any involved
    object is kinematic-only or cloth, which the impulse matrix does not cover).
    """
    def __init__(self, filter_1_name, filter_2_name):
        """
        Args:
            filter_1_name (str): Name of the filter whose object candidates will be pruned based on whether or not
                they are touching any object from @filter_2_name
            filter_2_name (str): Name of the filter whose object candidates will be used to prune the candidates from
                @filter_1_name
        """
        self._filter_1_name = filter_1_name
        self._filter_2_name = filter_2_name
        # Will be filled in during self.initialize
        # Maps object to the list of rigid body idxs in the global contact matrix corresponding to filter 1
        self._filter_1_idxs = None
        # If optimized, filter_2_idxs will be used, otherwise filter_2_bodies will be used!
        # Maps object to the list of rigid body idxs in the global contact matrix corresponding to filter 2
        self._filter_2_idxs = None
        # Maps object to set of rigid bodies corresponding to filter 2
        self._filter_2_bodies = None
        # Flag whether optimized call can be used
        self._optimized = None
    def refresh(self, object_candidates):
        # Check whether we can use optimized computation or not -- this is determined by whether or not any objects
        # in our collision set are kinematic only
        self._optimized = not np.any([obj.kinematic_only or obj.prim_type == PrimType.CLOTH
                                     for f in (self._filter_1_name, self._filter_2_name) for obj in object_candidates[f]])
        if self._optimized:
            # Register idx mappings
            # Filter 1 objects index ROWS of the contact matrix, filter 2 objects index COLUMNS
            self._filter_1_idxs = {obj: [RigidContactAPI.get_body_row_idx(link.prim_path) for link in obj.links.values()]
                                   for obj in object_candidates[self._filter_1_name]}
            self._filter_2_idxs = {obj: [RigidContactAPI.get_body_col_idx(link.prim_path) for link in obj.links.values()]
                                   for obj in object_candidates[self._filter_2_name]}
        else:
            # Register body mappings
            self._filter_2_bodies = {obj: set(obj.links.values()) for obj in object_candidates[self._filter_2_name]}
    def __call__(self, object_candidates):
        # Keep any object that has non-zero impulses between itself and any of the @filter_2_name's objects
        objs = []
        if self._optimized:
            # Get all impulses
            impulses = RigidContactAPI.get_all_impulses()
            # Column indices of every filter-2 link, checked against each filter-1 object's rows
            idxs_to_check = np.concatenate([self._filter_2_idxs[obj] for obj in object_candidates[self._filter_2_name]])
            # Batch check for each object
            for obj in object_candidates[self._filter_1_name]:
                if np.any(impulses[self._filter_1_idxs[obj]][:, idxs_to_check]):
                    objs.append(obj)
        else:
            # Manually check contact
            filter_2_bodies = set.union(*(self._filter_2_bodies[obj] for obj in object_candidates[self._filter_2_name]))
            for obj in object_candidates[self._filter_1_name]:
                # Any overlap between this object's contacting bodies and filter-2 bodies counts as touching
                if len(obj.states[ContactBodies].get_value().intersection(filter_2_bodies)) > 0:
                    objs.append(obj)
        # Update candidates
        object_candidates[self._filter_1_name] = objs
        # If objs is empty, return False, otherwise, True
        return len(objs) > 0
    @property
    def modifies_filter_names(self):
        # Only modifies values from filter 1
        return {self._filter_1_name}
class StateCondition(RuleCondition):
    """
    Condition that keeps only those candidates from @filter_name whose object state
    @state compares successfully against a reference value @val.
    """
    def __init__(
        self,
        filter_name,
        state,
        val,
        op=operator.eq,
    ):
        """
        Args:
            filter_name (str): Name of the filter whose object candidates will be pruned based on whether or not
                the state @state's value is equal to @val
            state (BaseObjectState): Object state whose value should be queried as a rule condition
            val (any): The value @state should be in order for this condition to be satisfied
            op (function): Binary operator to apply between @state's getter and @val. Default is operator.eq,
                which does state.get_value() == val.
                Expected signature:
                    def op(state_getter, val) --> bool
        """
        self._filter_name = filter_name
        self._state = state
        self._val = val
        self._op = op

    def __call__(self, object_candidates):
        # Retain only candidates whose queried state value satisfies the comparison
        survivors = []
        for candidate in object_candidates[self._filter_name]:
            if self._op(candidate.states[self._state].get_value(), self._val):
                survivors.append(candidate)
        object_candidates[self._filter_name] = survivors
        # Satisfied as long as at least one candidate remains
        return len(survivors) > 0

    @property
    def modifies_filter_names(self):
        # Only the monitored filter's candidate list is ever pruned
        return {self._filter_name}
class ChangeConditionWrapper(RuleCondition):
    """
    Wrapper condition that additionally requires the wrapped condition's set of valid
    candidates to have CHANGED since the previous invocation.
    """
    def __init__(
        self,
        condition,
    ):
        """
        Args:
            condition (RuleCondition): Condition whose output will be additionally filtered whether or not its relevant
                values have changed since the previous time this condition was called
        """
        self._condition = condition
        # Per-filter record of the candidates validated on the previous call
        self._last_valid_candidates = {filter_name: set() for filter_name in self.modifies_filter_names}

    def refresh(self, object_candidates):
        # Delegate refreshing to the wrapped condition
        self._condition.refresh(object_candidates=object_candidates)

    def __call__(self, object_candidates):
        # Evaluate the wrapped condition first
        valid = self._condition(object_candidates=object_candidates)
        for filter_name in self.modifies_filter_names:
            current = object_candidates[filter_name]
            # Newly-valid candidates are those that were NOT valid on the previous call
            fresh = [obj for obj in current if obj not in self._last_valid_candidates[filter_name]]
            # Snapshot the full current set BEFORE replacing it, so next call compares correctly
            self._last_valid_candidates[filter_name] = set(current)
            object_candidates[filter_name] = fresh
            valid = valid and len(fresh) > 0
        # Only valid if the wrapped condition passed AND something actually changed
        return valid

    @property
    def modifies_filter_names(self):
        # Mirror the wrapped condition's modified filters
        return self._condition.modifies_filter_names
class OrConditionWrapper(RuleCondition):
    """
    Composite condition satisfied when ANY wrapped condition is satisfied; the surviving
    candidates per filter are the UNION across all sub-conditions.
    """
    def __init__(self, conditions):
        """
        Args:
            conditions (list of RuleConditions): Conditions to take logical OR over. This will generate
                the UNION of all candidates.
        """
        self._conditions = conditions

    def refresh(self, object_candidates):
        # Propagate the refresh to every sub-condition
        for sub in self._conditions:
            sub.refresh(object_candidates=object_candidates)

    def __call__(self, object_candidates):
        # Evaluate each sub-condition on its own shallow copy, since conditions mutate
        # the candidate dict in place
        per_condition = dict()
        for sub in self._conditions:
            per_condition[sub] = copy(object_candidates)
            sub(object_candidates=per_condition[sub])
        # Union the survivors per filter; bail out as soon as any filter is left empty
        for filter_name in object_candidates:
            merged = set()
            for pruned in per_condition.values():
                merged |= set(pruned[filter_name])
            object_candidates[filter_name] = list(merged)
            if not object_candidates[filter_name]:
                return False
        return True

    @property
    def modifies_filter_names(self):
        # Union of every sub-condition's modified filters
        return set.union(*(sub.modifies_filter_names for sub in self._conditions))
class AndConditionWrapper(RuleCondition):
    """
    Composite condition satisfied only when ALL wrapped conditions are satisfied; the
    surviving candidates per filter are the INTERSECTION across all sub-conditions.
    """
    def __init__(self, conditions):
        """
        Args:
            conditions (list of RuleConditions): Conditions to take logical AND over. This will generate
                the INTERSECTION of all candidates.
        """
        self._conditions = conditions

    def refresh(self, object_candidates):
        # Propagate the refresh to every sub-condition
        for sub in self._conditions:
            sub.refresh(object_candidates=object_candidates)

    def __call__(self, object_candidates):
        # Evaluate each sub-condition on its own shallow copy, since conditions mutate
        # the candidate dict in place
        per_condition = dict()
        for sub in self._conditions:
            per_condition[sub] = copy(object_candidates)
            sub(object_candidates=per_condition[sub])
        # Intersect the survivors per filter; bail out as soon as any filter is left empty
        for filter_name in object_candidates:
            object_candidates[filter_name] = \
                list(set.intersection(*[set(pruned[filter_name]) for pruned in per_condition.values()]))
            if not object_candidates[filter_name]:
                return False
        return True

    @property
    def modifies_filter_names(self):
        # Union of every sub-condition's modified filters
        return set.union(*(sub.modifies_filter_names for sub in self._conditions))
class BaseTransitionRule(Registerable):
    """
    Defines a set of categories of objects and how to transition their states.

    Subclasses are auto-registered into RULES_REGISTRY upon declaration (via
    __init_subclass__) unless listed in _do_not_register_classes. Both the generated
    conditions and the current candidate mapping are stored at the CLASS level.
    """
    # list of RuleCondition: populated per-subclass in __init_subclass__
    conditions = None
    # dict: filter name -> list of candidate objects, populated by refresh()
    candidates = None
    def __init_subclass__(cls, **kwargs):
        super().__init_subclass__(**kwargs)
        # Register this system, and
        # make sure at least one filter is specified -- in general, there should never be a rule
        # where no filter is specified
        # Only run this check for actual rules that are being registered
        if cls.__name__ not in cls._do_not_register_classes:
            global RULES_REGISTRY
            RULES_REGISTRY.add(obj=cls)
            assert len(cls.candidate_filters) > 0, \
                "At least one of individual_filters or group_filters must be specified!"
            # Store conditions
            cls.conditions = cls._generate_conditions()
    @classproperty
    def candidate_filters(cls):
        """
        Object candidate filters that this transition rule cares about.
        For each name, filter key-value pair, the global transition rule step will produce a
        single dictionary of valid filtered objects.
        For example, if the group filters are:
            {"apple": CategoryFilter("apple"), "knife": CategoryFilter("knife")},
        the transition rule step will produce the following dictionary:
            {"apple": [apple0, apple1, ...], "knife": [knife0, knife1, ...]}
        based on the current instances of each object type in the scene and pass them to conditions in @self.conditions
        NOTE: There should always be at least one filter applied for every rule!

        Returns:
            dict: Maps filter name to filter for inferring valid object candidates for this transition rule
        """
        raise NotImplementedError
    @classmethod
    def _generate_conditions(cls):
        """
        Generates rule condition(s)s for this transition rule. These conditions are used to prune object
        candidates at runtime, to determine whether a transition rule should occur at the given timestep

        Returns:
            list of RuleCondition: Condition(s) to enforce to determine whether a transition rule should occur
        """
        raise NotImplementedError
    @classmethod
    def get_object_candidates(cls, objects):
        """
        Given the set of objects @objects, compute the valid object candidate combinations that may be valid for
        this TransitionRule

        Args:
            objects (list of BaseObject): Objects to filter for valid transition rule candidates

        Returns:
            dict: Maps filter name to valid object(s) that satisfy that filter
        """
        # Iterate over all objects and add to dictionary if valid
        filters = cls.candidate_filters
        obj_dict = {filter_name: [] for filter_name in filters.keys()}
        for obj in objects:
            # An object may satisfy several filters simultaneously
            for fname, f in filters.items():
                if f(obj):
                    obj_dict[fname].append(obj)
        return obj_dict
    @classmethod
    def refresh(cls, object_candidates):
        """
        Refresh any internal state for this rule, given set of input object candidates @object_candidates

        Args:
            object_candidates (dict): Maps filter name to valid object(s) that satisfy that filter
        """
        # Store candidates
        cls.candidates = object_candidates
        # Refresh all conditions
        for condition in cls.conditions:
            condition.refresh(object_candidates=object_candidates)
    @classmethod
    def transition(cls, object_candidates):
        """
        Rule to apply for each set of objects satisfying the condition.

        Args:
            object_candidates (dict): Dictionary mapping corresponding keys from @cls.filters to list of individual
                object instances where the filter is satisfied

        Returns:
            TransitionResults: results from the executed transition
        """
        raise NotImplementedError()
    @classmethod
    def step(cls):
        """
        Takes a step for this transition rule, checking if all of @cls.conditions are satisfied, and if so, taking
        a transition via @cls.transition()

        Returns:
            None or TransitionResults: If a transition occurs, returns its results, otherwise, returns None
        """
        # Copy the candidates dictionary since it may be mutated in place by @conditions
        object_candidates = {filter_name: candidates.copy() for filter_name, candidates in cls.candidates.items()}
        for condition in cls.conditions:
            if not condition(object_candidates=object_candidates):
                # Condition was not met, so immediately terminate
                return
        # All conditions are met, take the transition
        return cls.transition(object_candidates=object_candidates)
    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("BaseTransitionRule")
        return classes
    @classproperty
    def _cls_registry(cls):
        # Global registry
        global REGISTERED_RULES
        return REGISTERED_RULES
# Global registry of all concrete BaseTransitionRule subclasses, keyed by class name.
# Must be placed here immediately AFTER BaseTransitionRule, since class_types references it!
RULES_REGISTRY = Registry(
    name="TransitionRuleRegistry",
    class_types=BaseTransitionRule,
    default_key="__name__",
)
class WasherDryerRule(BaseTransitionRule):
    """
    Abstract transition rule shared by cloth washers and dryers. Fires once each time the
    (single-filter) machine transitions to toggled-on with its door closed, and provides
    helpers for finding objects inside the machine's containment volume.
    """
    @classmethod
    def _generate_conditions(cls):
        # Subclasses must declare exactly one filter (the machine itself)
        assert len(cls.candidate_filters.keys()) == 1
        machine_type = list(cls.candidate_filters.keys())[0]
        # Fire only on the CHANGE to (toggled on AND closed), not continuously while true
        return [ChangeConditionWrapper(
            condition=AndConditionWrapper(conditions=[
                StateCondition(filter_name=machine_type, state=ToggledOn, val=True, op=operator.eq),
                StateCondition(filter_name=machine_type, state=Open, val=False, op=operator.eq),
            ])
        )]
    @classmethod
    def _compute_global_rule_info(cls):
        """
        Helper function to compute global information necessary for checking rules. This is executed exactly
        once per cls.transition() step

        Returns:
            dict: Keyword-mapped global rule information ("obj_positions": (N, 3)-style array
                of AABB centers for every object in the scene)
        """
        # Compute all obj
        obj_positions = np.array([obj.aabb_center for obj in og.sim.scene.objects])
        return dict(obj_positions=obj_positions)
    @classmethod
    def _compute_container_info(cls, object_candidates, container, global_info):
        """
        Helper function to compute container-specific information necessary for checking rules. This is executed once
        per container per cls.transition() step

        Args:
            object_candidates (dict): Dictionary mapping corresponding keys from @cls.filters to list of individual
                object instances where the filter is satisfied
            container (StatefulObject): Relevant container object for computing information
            global_info (dict): Output of @cls._compute_global_rule_info(); global information which may be
                relevant for computing container information

        Returns:
            dict: Keyword-mapped container information ("in_volume_objs": list of objects whose
                AABB centers lie inside the container's containment volume, excluding the container)
        """
        del object_candidates
        obj_positions = global_info["obj_positions"]
        # Boolean mask over all scene objects, aligned with obj_positions
        in_volume = container.states[ContainedParticles].check_in_volume(obj_positions)
        in_volume_objs = list(np.array(og.sim.scene.objects)[in_volume])
        # Remove the container itself
        if container in in_volume_objs:
            in_volume_objs.remove(container)
        return dict(in_volume_objs=in_volume_objs)
    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("WasherDryerRule")
        return classes
class WasherRule(WasherDryerRule):
    """
    Transition rule to apply to cloth washers.
    1. remove "dirty" particles from the washer if the necessary solvent is present.
    2. wet the objects inside by making them either Saturated with or Covered by water.
    """
    # dict: system name -> None / list of solvent system names; see register_cleaning_conditions
    cleaning_conditions = None
    @classmethod
    def register_cleaning_conditions(cls, conditions):
        """
        Register cleaning conditions for this rule.

        Args:
            conditions (dict): Dictionary mapping the system name (str) to None or list of system names (str). None
                represents "never", empty list represents "always", or non-empty list represents at least one of the
                systems in the list needs to be present in the washer for the key system to be removed.
                E.g. "rust" -> None: "never remove rust from the washer"
                E.g. "dust" -> []: "always remove dust from the washer"
                E.g. "cooking_oil" -> ["sodium_carbonate", "vinegar"]: "remove cooking_oil from the washer if either
                sodium_carbonate or vinegar is present"
                For keys not present in the dictionary, the default is []: "always remove"
        """
        cls.cleaning_conditions = conditions
    @classproperty
    def candidate_filters(cls):
        # Only objects of the washer category can trigger this rule
        return {
            "washer": CategoryFilter("washer"),
        }
    @classmethod
    def transition(cls, object_candidates):
        water = get_system("water")
        global_info = cls._compute_global_rule_info()
        for washer in object_candidates["washer"]:
            # Remove the systems if the conditions are met
            systems_to_remove = []
            for system in ParticleRemover.supported_active_systems.values():
                # Never remove
                if system.name in cls.cleaning_conditions and cls.cleaning_conditions[system.name] is None:
                    continue
                # Skip systems not actually present in this washer
                if not washer.states[Contains].get_value(system):
                    continue
                solvents = cls.cleaning_conditions.get(system.name, [])
                # Always remove
                if len(solvents) == 0:
                    systems_to_remove.append(system)
                else:
                    # Only consider solvents whose systems are currently active in the scene
                    solvents = [get_system(solvent) for solvent in solvents if is_system_active(solvent)]
                    # If any of the solvents are present
                    if any(washer.states[Contains].get_value(solvent) for solvent in solvents):
                        systems_to_remove.append(system)
            # Deferred removal: avoid mutating Contains state while iterating the system list
            for system in systems_to_remove:
                washer.states[Contains].set_value(system, False)
            # Make the objects wet
            container_info = cls._compute_container_info(object_candidates=object_candidates, container=washer, global_info=global_info)
            in_volume_objs = container_info["in_volume_objs"]
            for obj in in_volume_objs:
                # Prefer Saturated when the object supports it, otherwise fall back to Covered
                if Saturated in obj.states:
                    obj.states[Saturated].set_value(water, True)
                else:
                    obj.states[Covered].set_value(water, True)
        # This rule neither spawns nor deletes objects
        return TransitionResults(add=[], remove=[])
class DryerRule(WasherDryerRule):
    """
    Transition rule for clothes dryers:
    1. dry the contained objects by clearing their Saturated-with-water state.
    2. purge any remaining water from the dryer itself.
    """
    @classproperty
    def candidate_filters(cls):
        # Only objects of the clothes_dryer category can trigger this rule
        return {
            "dryer": CategoryFilter("clothes_dryer"),
        }

    @classmethod
    def transition(cls, object_candidates):
        water = get_system("water")
        global_info = cls._compute_global_rule_info()
        for dryer in object_candidates["dryer"]:
            # Figure out which objects currently sit inside this dryer's volume
            container_info = cls._compute_container_info(
                object_candidates=object_candidates, container=dryer, global_info=global_info)
            for contained_obj in container_info["in_volume_objs"]:
                # Only objects that can be saturated need drying
                if Saturated in contained_obj.states:
                    contained_obj.states[Saturated].set_value(water, False)
            # Finally, empty the dryer itself of water
            dryer.states[Contains].set_value(water, False)
        # This rule neither spawns nor deletes objects
        return TransitionResults(add=[], remove=[])
class SlicingRule(BaseTransitionRule):
    """
    Transition rule to apply to sliced / slicer object pairs: when an active slicer touches
    a sliceable object, the object is removed and replaced by its annotated "half" parts.
    """
    @classproperty
    def candidate_filters(cls):
        return {
            "sliceable": AbilityFilter("sliceable"),
            "slicer": AbilityFilter("slicer"),
        }
    @classmethod
    def _generate_conditions(cls):
        # sliceables should be touching any slicer, and that slicer must currently be active
        return [TouchingAnyCondition(filter_1_name="sliceable", filter_2_name="slicer"),
                StateCondition(filter_name="slicer", state=SlicerActive, val=True, op=operator.eq)]
    @classmethod
    def transition(cls, object_candidates):
        """
        Replaces each sliceable candidate with its annotated half objects.

        Args:
            object_candidates (dict): Maps "sliceable" / "slicer" to lists of valid objects

        Returns:
            TransitionResults: Half-object attrs to add and original sliceables to remove
        """
        objs_to_add, objs_to_remove = [], []
        for sliceable_obj in object_candidates["sliceable"]:
            # Object parts offset annotation are w.r.t the base link of the whole object.
            pos, orn = sliceable_obj.get_position_orientation()
            # Load object parts
            for i, part in enumerate(sliceable_obj.metadata["object_parts"].values()):
                # List of dicts gets replaced by {'0':dict, '1':dict, ...}
                # Get bounding box info
                part_bb_pos = np.array(part["bb_pos"])
                part_bb_orn = np.array(part["bb_orn"])
                # Determine the relative scale to apply to the object part from the original object
                # Note that proper (rotated) scaling can only be applied when the relative orientation of
                # the object part is a multiple of 90 degrees wrt the parent object, so we assert that here
                assert T.check_quat_right_angle(part_bb_orn), "Sliceable objects should only have relative object part orientations that are factors of 90 degrees!"
                # Scale the offset accordingly.
                scale = np.abs(T.quat2mat(part_bb_orn) @ sliceable_obj.scale)
                # Calculate global part bounding box pose.
                part_bb_pos = pos + T.quat2mat(orn) @ (part_bb_pos * scale)
                part_bb_orn = T.quat_multiply(orn, part_bb_orn)
                part_obj_name = f"half_{sliceable_obj.name}_{i}"
                part_obj = DatasetObject(
                    name=part_obj_name,
                    category=part["category"],
                    model=part["model"],
                    bounding_box=part["bb_size"] * scale,  # equiv. to scale=(part["bb_size"] / self.native_bbox) * (scale)
                )
                sliceable_obj_state = sliceable_obj.dump_state()
                # Propagate non-physical states of the whole object to the half objects, e.g. cooked, saturated, etc.
                # NOTE: bind the dumped state through a default argument -- a plain closure would
                # late-bind `sliceable_obj_state`, making every deferred callback (executed later in
                # TransitionRuleAPI.step) load the LAST sliceable's state instead of its own
                # Add the new object to the results.
                new_obj_attrs = ObjectAttrs(
                    obj=part_obj,
                    bb_pos=part_bb_pos,
                    bb_orn=part_bb_orn,
                    callback=lambda obj, state=sliceable_obj_state: obj.load_non_kin_state(state),
                )
                objs_to_add.append(new_obj_attrs)
            # Delete original object from stage.
            objs_to_remove.append(sliceable_obj)
        return TransitionResults(add=objs_to_add, remove=objs_to_remove)
class DicingRule(BaseTransitionRule):
    """
    Transition rule to apply to diceable / slicer object pairs: when an active slicer
    touches a diceable object, the object is replaced by particles of its corresponding
    "diced" physical particle system.
    """
    @classproperty
    def candidate_filters(cls):
        return {
            "diceable": AbilityFilter("diceable"),
            "slicer": AbilityFilter("slicer"),
        }
    @classmethod
    def _generate_conditions(cls):
        # diceables should be touching any slicer, and that slicer must currently be active
        return [TouchingAnyCondition(filter_1_name="diceable", filter_2_name="slicer"),
                StateCondition(filter_name="slicer", state=SlicerActive, val=True, op=operator.eq)]
    @classmethod
    def transition(cls, object_candidates):
        """
        Replaces each diceable candidate with its diced particle system.

        Args:
            object_candidates (dict): Maps "diceable" / "slicer" to lists of valid objects

        Returns:
            TransitionResults: No objects are added; all diced objects are removed
        """
        objs_to_remove = []
        for diceable_obj in object_candidates["diceable"]:
            # We expect all diced particle systems to follow the naming convention (cooked__)diced__<category>
            # A "half_" prefix (from a prior slicing) is stripped from the category first
            system_name = "diced__" + diceable_obj.category.removeprefix("half_")
            if Cooked in diceable_obj.states and diceable_obj.states[Cooked].get_value():
                system_name = "cooked__" + system_name
            system = get_system(system_name)
            # Spawn the particles in place of the object's root link
            system.generate_particles_from_link(diceable_obj, diceable_obj.root_link, check_contact=False, use_visual_meshes=False)
            # Delete original object from stage.
            objs_to_remove.append(diceable_obj)
        return TransitionResults(add=[], remove=objs_to_remove)
class MeltingRule(BaseTransitionRule):
    """
    Transition rule simulating melting: once a meltable object's recorded max temperature
    reaches the melting threshold, the object is removed and replaced in place by particles
    of its melted substance system.
    """
    @classproperty
    def candidate_filters(cls):
        # Any object flagged with the "meltable" ability is a candidate
        return {"meltable": AbilityFilter("meltable")}

    @classmethod
    def _generate_conditions(cls):
        # Trigger once the object's max temperature reaches the melting point
        return [StateCondition(filter_name="meltable", state=MaxTemperature, val=m.MELTING_TEMPERATURE, op=operator.ge)]

    @classmethod
    def transition(cls, object_candidates):
        doomed = []
        # Convert each meltable object into its melted substance
        for meltable_obj in object_candidates["meltable"]:
            # xyz, half_xyz and diced__xyz categories all melt into the melted__xyz system
            root_category = meltable_obj.category.removeprefix("half_").removeprefix("diced__")
            melted_system = get_system(f"melted__{root_category}")
            # Spawn the substance where the object's root link currently is
            melted_system.generate_particles_from_link(
                meltable_obj, meltable_obj.root_link, check_contact=False, use_visual_meshes=False)
            # The original rigid object gets removed from the scene
            doomed.append(meltable_obj)
        return TransitionResults(add=[], remove=doomed)
class RecipeRule(BaseTransitionRule):
    """
    Transition rule to approximate recipe-based transitions.

    A recipe maps input objects / systems (optionally with required object states)
    contained inside a fillable "container" object to output objects / systems that
    replace them once the rule's conditions are met.
    """
    # Maps recipe name to recipe information
    _RECIPES = None

    # Maps active recipe name to recipe information
    _ACTIVE_RECIPES = None

    # Maps object category name to indices in the flattened object array for efficient computation
    _CATEGORY_IDXS = None

    # Flattened array of all simulator objects, sorted by category
    _OBJECTS = None

    # Maps object to idx within the _OBJECTS array
    _OBJECTS_TO_IDX = None

    def __init_subclass__(cls, **kwargs):
        # Run super first
        super().__init_subclass__(**kwargs)
        # Initialize a per-subclass recipe registry, so each rule type owns its own recipe set
        cls._RECIPES = dict()

    @classmethod
    def add_recipe(
        cls,
        name,
        input_objects,
        input_systems,
        output_objects,
        output_systems,
        input_states=None,
        output_states=None,
        fillable_categories=None,
        **kwargs,
    ):
        """
        Adds a recipe to this recipe rule to check against. This defines a valid mapping of inputs that will transform
        into the outputs

        Args:
            name (str): Name of the recipe
            input_objects (dict): Maps object categories to number of instances required for the recipe
            input_systems (list): List of system names required for the recipe
            output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes
            output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1.
            input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute
            output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system"] to a list of states that should be set after the output objects are spawned
            fillable_categories (None or set of str): If specified, set of fillable categories which are allowed
                for this recipe. If None, any fillable is allowed
            kwargs (dict): Any additional keyword-arguments to be stored as part of this recipe
        """
        input_states = input_states if input_states is not None else defaultdict(lambda: defaultdict(list))
        output_states = output_states if output_states is not None else defaultdict(lambda: defaultdict(list))
        input_object_tree = None
        if cls.is_multi_instance and len(input_objects) > 0:
            # Build a tree of input object categories according to the kinematic binary states
            # Example: 'raw_egg': {'binary_object': [(OnTop, 'bagel_dough', True)]} results in an edge
            # from 'bagel_dough' to 'raw_egg', i.e. 'bagel_dough' is the parent of 'raw_egg'.
            input_object_tree = nx.DiGraph()
            for obj_category, state_checks in input_states.items():
                for state_class, second_obj_category, state_value in state_checks["binary_object"]:
                    input_object_tree.add_edge(second_obj_category, obj_category)
            if nx.is_empty(input_object_tree):
                input_object_tree = None
            else:
                assert nx.is_tree(input_object_tree), f"Input object tree must be a tree! Now: {input_object_tree}."
                root_nodes = [node for node in input_object_tree.nodes() if input_object_tree.in_degree(node) == 0]
                assert len(root_nodes) == 1, f"Input object tree must have exactly one root node! Now: {root_nodes}."
                # NOTE: reference @input_objects directly in the message -- the recipe has not been
                # stored in cls._RECIPES yet, so indexing cls._RECIPES[name] here would raise
                # KeyError and mask the AssertionError.
                assert input_objects[root_nodes[0]] == 1, f"Input object tree root node must have exactly one instance! Now: {input_objects[root_nodes[0]]}."
        # Store information for this recipe
        cls._RECIPES[name] = {
            "name": name,
            "input_objects": input_objects,
            "input_systems": input_systems,
            "output_objects": output_objects,
            "output_systems": output_systems,
            "input_states": input_states,
            "output_states": output_states,
            "fillable_categories": fillable_categories,
            "input_object_tree": input_object_tree,
            **kwargs,
        }

    @classmethod
    def _validate_recipe_container_is_valid(cls, recipe, container):
        """
        Validates that @container's category satisfies @recipe's fillable_categories

        Args:
            recipe (dict): Recipe whose fillable_categories should be checked against @container
            container (StatefulObject): Container whose category should match one of @recipe's fillable_categories,
                if specified

        Returns:
            bool: True if @container is valid, else False
        """
        fillable_categories = recipe["fillable_categories"]
        return fillable_categories is None or container.category in fillable_categories

    @classmethod
    def _validate_recipe_systems_are_contained(cls, recipe, container):
        """
        Validates whether @recipe's input_systems are all contained in @container or not

        Args:
            recipe (dict): Recipe whose systems should be checked
            container (BaseObject): Container object that should contain all of @recipe's input systems

        Returns:
            bool: True if all the input systems are contained
        """
        for system_name in recipe["input_systems"]:
            system = get_system(system_name=system_name)
            if not container.states[Contains].get_value(system=system):
                return False
        return True

    @classmethod
    def _validate_nonrecipe_systems_not_contained(cls, recipe, container):
        """
        Validates whether all systems not relevant to @recipe are not contained in @container

        Args:
            recipe (dict): Recipe whose systems should be checked
            container (BaseObject): Container object that should contain all of @recipe's input systems

        Returns:
            bool: True if none of the non-relevant systems are contained
        """
        for system in og.sim.scene.system_registry.objects:
            # Skip cloth system
            if system.name == "cloth":
                continue
            if system.name not in recipe["input_systems"] and container.states[Contains].get_value(system=system):
                return False
        return True

    @classmethod
    def _validate_recipe_objects_are_contained_and_states_satisfied(cls, recipe, container_info):
        """
        Validates whether @recipe's input_objects are contained in the container and whether their states are satisfied

        Args:
            recipe (dict): Recipe whose objects should be checked
            container_info (dict): Output of @cls._compute_container_info(); container-specific information which may
                be relevant for computing whether recipe is executable. This will be populated with execution info.

        Returns:
            bool: True if all the input object quantities are contained
        """
        in_volume = container_info["in_volume"]
        # Store necessary information for execution
        container_info["execution_info"] = dict()
        category_to_valid_indices = cls._filter_input_objects_by_unary_and_binary_system_states(recipe=recipe)
        container_info["execution_info"]["category_to_valid_indices"] = category_to_valid_indices
        if not cls.is_multi_instance:
            return cls._validate_recipe_objects_non_multi_instance(
                recipe=recipe, category_to_valid_indices=category_to_valid_indices, in_volume=in_volume,
            )
        else:
            return cls._validate_recipe_objects_multi_instance(
                recipe=recipe, category_to_valid_indices=category_to_valid_indices, container_info=container_info,
            )

    @classmethod
    def _filter_input_objects_by_unary_and_binary_system_states(cls, recipe):
        # Filter input objects based on a subset of input states (unary states and binary system states);
        # binary OBJECT states are handled separately by the kinematic-tree check for multi-instance recipes.
        # Map object categories (str) to valid indices (np.ndarray)
        category_to_valid_indices = dict()
        for obj_category in recipe["input_objects"]:
            if obj_category not in recipe["input_states"]:
                # If there are no input states, all objects of this category are valid
                category_to_valid_indices[obj_category] = cls._CATEGORY_IDXS[obj_category]
            else:
                category_to_valid_indices[obj_category] = []
                for idx in cls._CATEGORY_IDXS[obj_category]:
                    obj = cls._OBJECTS[idx]
                    success = True
                    # Check if unary states are satisfied
                    for state_class, state_value in recipe["input_states"][obj_category]["unary"]:
                        if obj.states[state_class].get_value() != state_value:
                            success = False
                            break
                    if not success:
                        continue
                    # Check if binary system states are satisfied
                    for state_class, system_name, state_value in recipe["input_states"][obj_category]["binary_system"]:
                        if obj.states[state_class].get_value(system=get_system(system_name)) != state_value:
                            success = False
                            break
                    if not success:
                        continue
                    category_to_valid_indices[obj_category].append(idx)
                # Convert to numpy array for faster indexing
                category_to_valid_indices[obj_category] = np.array(category_to_valid_indices[obj_category], dtype=int)
        return category_to_valid_indices

    @classmethod
    def _validate_recipe_objects_non_multi_instance(cls, recipe, category_to_valid_indices, in_volume):
        # Check if a sufficient number of objects of each category are contained in the container
        for obj_category, obj_quantity in recipe["input_objects"].items():
            if np.sum(in_volume[category_to_valid_indices[obj_category]]) < obj_quantity:
                return False
        return True

    @classmethod
    def _validate_recipe_objects_multi_instance(cls, recipe, category_to_valid_indices, container_info):
        in_volume = container_info["in_volume"]
        input_object_tree = recipe["input_object_tree"]
        # Map object category to a set of objects that are used in this execution
        relevant_objects = defaultdict(set)
        # Map system name to a set of particle indices that are used in this execution
        relevant_systems = defaultdict(set)
        # Number of instances of this recipe that can be produced
        num_instances = 0

        # Define a recursive function to check the kinematic tree.
        # NOTE: the closure reads @input_states, which is bound in the `else` branch below
        # before the first call, so the late binding is safe.
        def check_kinematic_tree(obj, should_check_in_volume=False):
            """
            Recursively check if the kinematic tree is satisfied.
            Return True/False, and a set of objects that belong to the subtree rooted at the current node

            Args:
                obj (BaseObject): Subtree root node to check
                should_check_in_volume (bool): Whether to check if the object is in the volume or not

            Returns:
                bool: True if the subtree rooted at the current node is satisfied
                set: Set of objects that belong to the subtree rooted at the current node
            """
            # Check if obj is in volume
            if should_check_in_volume and not in_volume[cls._OBJECTS_TO_IDX[obj]]:
                return False, set()
            # If the object is a leaf node, return True and the set containing the object
            if input_object_tree.out_degree(obj.category) == 0:
                return True, set([obj])
            children_categories = list(input_object_tree.successors(obj.category))
            all_subtree_objs = set()
            for child_cat in children_categories:
                assert len(input_states[child_cat]["binary_object"]) == 1, \
                    "Each child node should have exactly one binary object state, i.e. one parent in the input_object_tree"
                state_class, _, state_value = input_states[child_cat]["binary_object"][0]
                num_valid_children = 0
                children_objs = cls._OBJECTS[category_to_valid_indices[child_cat]]
                for child_obj in children_objs:
                    # If the child doesn't satisfy the binary object state, skip
                    if child_obj.states[state_class].get_value(obj) != state_value:
                        continue
                    # Recursively check if the subtree rooted at the child is valid
                    subtree_valid, subtree_objs = check_kinematic_tree(child_obj)
                    # If the subtree is valid, increment the number of valid children and aggregate the objects
                    if subtree_valid:
                        num_valid_children += 1
                        all_subtree_objs |= subtree_objs
                # If there are not enough valid children, return False
                if num_valid_children < recipe["input_objects"][child_cat]:
                    return False, set()
            # If all children categories have sufficient number of objects that satisfy the binary object state,
            # e.g. five pieces of pepperoni and two pieces of basil on the pizza, the subtree rooted at the
            # current node is valid. Return True and the set of objects in the subtree (all descendants plus
            # the current node)
            return True, all_subtree_objs | {obj}

        # If multi-instance is True but doesn't require kinematic states between objects
        if input_object_tree is None:
            num_instances = np.inf
            # Compute how many instances of this recipe can be produced.
            # Example: if a recipe requires 1 apple and 2 bananas, and there are 3 apples and 4 bananas in the
            # container, then 2 instance of the recipe can be produced.
            for obj_category, obj_quantity in recipe["input_objects"].items():
                quantity_in_volume = np.sum(in_volume[category_to_valid_indices[obj_category]])
                num_inst = quantity_in_volume // obj_quantity
                if num_inst < 1:
                    return False
                num_instances = min(num_instances, num_inst)
            # If at least one instance of the recipe can be executed, add all valid objects to be relevant_objects.
            # This can be considered as a special case of below where there are no binary kinematic states required.
            for obj_category in recipe["input_objects"]:
                relevant_objects[obj_category] = set(cls._OBJECTS[category_to_valid_indices[obj_category]])
        # If multi-instance is True and requires kinematic states between objects
        else:
            root_node_category = [node for node in input_object_tree.nodes()
                                  if input_object_tree.in_degree(node) == 0][0]
            # A list of objects belonging to the root node category
            root_nodes = cls._OBJECTS[category_to_valid_indices[root_node_category]]
            input_states = recipe["input_states"]
            for root_node in root_nodes:
                # should_check_in_volume is True only for the root nodes.
                # Example: the bagel dough needs to be in_volume of the container, but the raw egg on top doesn't.
                tree_valid, relevant_object_set = check_kinematic_tree(obj=root_node, should_check_in_volume=True)
                if tree_valid:
                    # For each valid tree, increment the number of instances and aggregate the objects
                    num_instances += 1
                    for obj in relevant_object_set:
                        relevant_objects[obj.category].add(obj)
            # If there are no valid trees, return False
            if num_instances == 0:
                return False

        # Note that for multi instance recipes, the relevant system particles are NOT the ones in the container.
        # Instead, they are the ones that are related to the relevant objects, e.g. salt covering the bagel dough.
        for obj_category, objs in relevant_objects.items():
            for state_class, system_name, state_value in recipe["input_states"][obj_category]["binary_system"]:
                # If the state value is False, skip
                if not state_value:
                    continue
                for obj in objs:
                    if state_class in [Filled, Contains]:
                        contained_particle_idx = obj.states[ContainedParticles].get_value(get_system(system_name)).in_volume.nonzero()[0]
                        # Wrap in set() -- the nonzero() result is a numpy index array, and
                        # set |= ndarray raises TypeError
                        relevant_systems[system_name] |= set(contained_particle_idx)
                    elif state_class in [Covered]:
                        covered_particle_idx = obj.states[ContactParticles].get_value(get_system(system_name))
                        relevant_systems[system_name] |= covered_particle_idx

        # Now we populate the execution info with the relevant objects and systems as well as the number of
        # instances of the recipe that can be produced.
        container_info["execution_info"]["relevant_objects"] = relevant_objects
        container_info["execution_info"]["relevant_systems"] = relevant_systems
        container_info["execution_info"]["num_instances"] = num_instances
        return True

    @classmethod
    def _validate_nonrecipe_objects_not_contained(cls, recipe, container_info):
        """
        Validates whether all objects not relevant to @recipe are not contained in the container
        represented by @in_volume

        Args:
            recipe (dict): Recipe whose systems should be checked
            container_info (dict): Output of @cls._compute_container_info(); container-specific information
                which may be relevant for computing whether recipe is executable

        Returns:
            bool: True if none of the non-relevant objects are contained
        """
        in_volume = container_info["in_volume"]
        # These are object indices whose objects satisfy the input states
        category_to_valid_indices = container_info["execution_info"]["category_to_valid_indices"]
        nonrecipe_objects_in_volume = in_volume if len(recipe["input_objects"]) == 0 else \
            np.delete(in_volume, np.concatenate([category_to_valid_indices[obj_category]
                                                 for obj_category in category_to_valid_indices]))
        return not np.any(nonrecipe_objects_in_volume)

    @classmethod
    def _validate_recipe_systems_exist(cls, recipe):
        """
        Validates whether @recipe's input_systems are all active or not

        Args:
            recipe (dict): Recipe whose systems should be checked

        Returns:
            bool: True if all the input systems are active
        """
        for system_name in recipe["input_systems"]:
            if not is_system_active(system_name=system_name):
                return False
        return True

    @classmethod
    def _validate_recipe_objects_exist(cls, recipe):
        """
        Validates whether @recipe's input_objects exist in the current scene or not

        Args:
            recipe (dict): Recipe whose objects should be checked

        Returns:
            bool: True if all the input objects exist in the scene
        """
        for obj_category, obj_quantity in recipe["input_objects"].items():
            if len(og.sim.scene.object_registry("category", obj_category, default_val=set())) < obj_quantity:
                return False
        return True

    @classmethod
    def _validate_recipe_fillables_exist(cls, recipe):
        """
        Validates that recipe @recipe's necessary fillable category(s) exist in the current scene

        Args:
            recipe (dict): Recipe whose fillable categories should be checked

        Returns:
            bool: True if there is at least a single valid fillable category in the current scene, else False
        """
        fillable_categories = recipe["fillable_categories"]
        if fillable_categories is None:
            # Any is valid
            return True
        # Otherwise, at least one valid type must exist
        for category in fillable_categories:
            if len(og.sim.scene.object_registry("category", category, default_val=set())) > 0:
                return True
        # None found, return False
        return False

    @classmethod
    def _is_recipe_active(cls, recipe):
        """
        Helper function to determine whether a given recipe @recipe should be actively checked for or not.

        Args:
            recipe (dict): Maps relevant keyword to corresponding recipe info

        Returns:
            bool: True if the recipe is active, else False
        """
        # Check valid active systems
        if not cls._validate_recipe_systems_exist(recipe=recipe):
            return False
        # Check valid object quantities
        if not cls._validate_recipe_objects_exist(recipe=recipe):
            return False
        # Check valid fillable categories
        if not cls._validate_recipe_fillables_exist(recipe=recipe):
            return False
        return True

    @classmethod
    def _is_recipe_executable(cls, recipe, container, global_info, container_info):
        """
        Helper function to determine whether a given recipe @recipe should be immediately executed or not.

        Args:
            recipe (dict): Maps relevant keyword to corresponding recipe info
            container (StatefulObject): Container in which @recipe may be executed
            global_info (dict): Output of @cls._compute_global_rule_info(); global information which may be
                relevant for computing whether recipe is executable
            container_info (dict): Output of @cls._compute_container_info(); container-specific information
                which may be relevant for computing whether recipe is executable

        Returns:
            bool: True if the recipe is executable, else False
        """
        # Verify the container category is valid
        if not cls._validate_recipe_container_is_valid(recipe=recipe, container=container):
            return False
        # Verify all required systems are contained in the container
        if not cls.relax_recipe_systems and not cls._validate_recipe_systems_are_contained(recipe=recipe, container=container):
            return False
        # Verify all required object quantities are contained in the container and their states are satisfied
        if not cls._validate_recipe_objects_are_contained_and_states_satisfied(recipe=recipe, container_info=container_info):
            return False
        # Verify no non-relevant system is contained
        if not cls.ignore_nonrecipe_systems and not cls._validate_nonrecipe_systems_not_contained(recipe=recipe, container=container):
            return False
        # Verify no non-relevant object is contained if we're not ignoring them
        if not cls.ignore_nonrecipe_objects and not cls._validate_nonrecipe_objects_not_contained(recipe=recipe, container_info=container_info):
            return False
        return True

    @classmethod
    def _compute_global_rule_info(cls):
        """
        Helper function to compute global information necessary for checking rules. This is executed exactly
        once per cls.transition() step

        Returns:
            dict: Keyword-mapped global rule information
        """
        # Compute all relevant object AABB positions
        obj_positions = np.array([obj.aabb_center for obj in cls._OBJECTS])
        return dict(obj_positions=obj_positions)

    @classmethod
    def _compute_container_info(cls, object_candidates, container, global_info):
        """
        Helper function to compute container-specific information necessary for checking rules. This is executed once
        per container per cls.transition() step

        Args:
            object_candidates (dict): Dictionary mapping corresponding keys from @cls.filters to list of individual
                object instances where the filter is satisfied
            container (StatefulObject): Relevant container object for computing information
            global_info (dict): Output of @cls._compute_global_rule_info(); global information which may be
                relevant for computing container information

        Returns:
            dict: Keyword-mapped container information
        """
        # Unused here; deleted to make that explicit
        del object_candidates
        obj_positions = global_info["obj_positions"]
        # Compute in volume for all relevant object positions
        # We check for either the object AABB being contained OR the object being on top of the container, in the
        # case that the container is too flat for the volume to contain the object
        in_volume = container.states[ContainedParticles].check_in_volume(obj_positions) | \
                    np.array([obj.states[OnTop].get_value(container) for obj in cls._OBJECTS])
        # Container itself is never within its own volume
        in_volume[cls._OBJECTS_TO_IDX[container]] = False
        return dict(in_volume=in_volume)

    @classmethod
    def refresh(cls, object_candidates):
        # Run super first
        super().refresh(object_candidates=object_candidates)
        # Cache active recipes given the current set of objects
        cls._ACTIVE_RECIPES = dict()
        cls._CATEGORY_IDXS = dict()
        cls._OBJECTS = []
        cls._OBJECTS_TO_IDX = dict()
        # Prune any recipes whose objects / system requirements are not met by the current set of objects / systems
        objects_by_category = og.sim.scene.object_registry.get_dict("category")
        for name, recipe in cls._RECIPES.items():
            # If all pre-requisites met, add to active recipes
            if cls._is_recipe_active(recipe=recipe):
                cls._ACTIVE_RECIPES[name] = recipe
        # Finally, compute relevant objects and category mapping based on relevant categories;
        # i is the running index of each object in the flattened _OBJECTS array
        i = 0
        for category, objects in objects_by_category.items():
            cls._CATEGORY_IDXS[category] = i + np.arange(len(objects))
            cls._OBJECTS += list(objects)
            for obj in objects:
                cls._OBJECTS_TO_IDX[obj] = i
                i += 1
        # Wrap relevant objects as numpy array so we can index into it efficiently
        cls._OBJECTS = np.array(cls._OBJECTS)

    @classproperty
    def candidate_filters(cls):
        # Fillable object required
        return {"container": AbilityFilter(ability="fillable")}

    @classmethod
    def transition(cls, object_candidates):
        objs_to_add, objs_to_remove = [], []
        # Compute global info
        global_info = cls._compute_global_rule_info()
        # Iterate over all fillable objects, to execute recipes for each one
        for container in object_candidates["container"]:
            recipe_results = None
            # Compute container info
            container_info = cls._compute_container_info(
                object_candidates=object_candidates,
                container=container,
                global_info=global_info,
            )
            # Check every recipe to find if any is valid
            for name, recipe in cls._ACTIVE_RECIPES.items():
                if cls._is_recipe_executable(recipe=recipe, container=container, global_info=global_info, container_info=container_info):
                    # Otherwise, all conditions met, we found a valid recipe and so we execute and terminate early
                    og.log.info(f"Executing recipe: {name} in container {container.name}!")
                    # Take the transform and terminate early
                    recipe_results = cls._execute_recipe(
                        container=container,
                        recipe=recipe,
                        container_info=container_info,
                    )
                    objs_to_add += recipe_results.add
                    objs_to_remove += recipe_results.remove
                    break
            # Otherwise, if we didn't find a valid recipe, we execute a garbage transition instead if requested
            if recipe_results is None and cls.use_garbage_fallback_recipe:
                og.log.info(f"Did not find a valid recipe for rule {cls.__name__}; generating {m.DEFAULT_GARBAGE_SYSTEM} in {container.name}!")
                # Generate garbage fluid
                garbage_results = cls._execute_recipe(
                    container=container,
                    recipe=dict(
                        name="garbage",
                        input_objects=dict(),
                        input_systems=[],
                        output_objects=dict(),
                        output_systems=[m.DEFAULT_GARBAGE_SYSTEM],
                        output_states=defaultdict(lambda: defaultdict(list)),
                    ),
                    container_info=container_info,
                )
                objs_to_add += garbage_results.add
                objs_to_remove += garbage_results.remove
        return TransitionResults(add=objs_to_add, remove=objs_to_remove)

    @classmethod
    def _execute_recipe(cls, container, recipe, container_info):
        """
        Transforms all items contained in @container into @output_system, generating volume of @output_system
        proportional to the number of items transformed.

        Args:
            container (BaseObject): Container object which will have its contained elements transformed into
                @output_system
            recipe (dict): Recipe to execute. Should include, at the minimum, "input_objects", "input_systems",
                "output_objects", and "output_systems" keys
            container_info (dict): Output of @cls._compute_container_info(); container-specific information which may
                be relevant for computing whether recipe is executable.

        Returns:
            TransitionResults: Results of the executed recipe transition
        """
        objs_to_add, objs_to_remove = [], []
        in_volume = container_info["in_volume"]
        if cls.is_multi_instance:
            execution_info = container_info["execution_info"]
        # BUGFIX: bind the contained-particles state unconditionally. It was previously only bound
        # in the non-multi-instance branch below, but its .link is also read at the end of this
        # method when spawning output systems, which would raise NameError for multi-instance recipes.
        contained_particles_state = container.states[ContainedParticles]
        # Compute total volume of all contained items
        volume = 0
        if not cls.is_multi_instance:
            # Remove either all systems or only the ones specified in the input systems of the recipe
            for system in PhysicalParticleSystem.get_active_systems().values():
                if not cls.ignore_nonrecipe_systems or system.name in recipe["input_systems"]:
                    if container.states[Contains].get_value(system):
                        # Accumulate the volume of the removed particles (sphere volume per particle)
                        volume += contained_particles_state.get_value(system).n_in_volume * np.pi * (system.particle_radius ** 3) * 4 / 3
                        container.states[Contains].set_value(system, False)
            for system in VisualParticleSystem.get_active_systems().values():
                if not cls.ignore_nonrecipe_systems or system.name in recipe["input_systems"]:
                    if container.states[Contains].get_value(system):
                        container.states[Contains].set_value(system, False)
        else:
            # Remove the particles that are involved in this execution
            for system_name, particle_idxs in execution_info["relevant_systems"].items():
                system = get_system(system_name)
                volume += len(particle_idxs) * np.pi * (system.particle_radius ** 3) * 4 / 3
                system.remove_particles(idxs=np.array(list(particle_idxs)))
        if not cls.is_multi_instance:
            # Remove either all objects or only the ones specified in the input objects of the recipe
            object_mask = in_volume.copy()
            if cls.ignore_nonrecipe_objects:
                object_category_mask = np.zeros_like(object_mask, dtype=bool)
                for obj_category in recipe["input_objects"].keys():
                    object_category_mask[cls._CATEGORY_IDXS[obj_category]] = True
                object_mask &= object_category_mask
            objs_to_remove.extend(cls._OBJECTS[object_mask])
        else:
            # Remove the objects that are involved in this execution
            for obj_category, objs in execution_info["relevant_objects"].items():
                objs_to_remove.extend(objs)
        volume += sum(obj.volume for obj in objs_to_remove)

        # Define callback for spawning new objects inside container
        def _spawn_object_in_container(obj):
            # For simplicity sake, sample only OnTop
            # TODO: Can we sample inside intelligently?
            state = OnTop
            # TODO: What to do if setter fails?
            if not obj.states[state].set_value(container, True):
                log.warning(f"Failed to spawn object {obj.name} in container {container.name}! Directly placing on top instead.")
                pos = np.array(container.aabb_center) + np.array([0, 0, container.aabb_extent[2] / 2.0 + obj.aabb_extent[2] / 2.0])
                obj.set_bbox_center_position_orientation(position=pos)

        # Spawn in new objects
        for category, n_instances in recipe["output_objects"].items():
            # Multiply by number of instances of execution if this is a multi-instance recipe
            if cls.is_multi_instance:
                n_instances *= execution_info["num_instances"]
            output_states = dict()
            for state_type, state_value in recipe["output_states"][category]["unary"]:
                output_states[state_type] = (state_value,)
            for state_type, system_name, state_value in recipe["output_states"][category]["binary_system"]:
                output_states[state_type] = (get_system(system_name), state_value)
            n_category_objs = len(og.sim.scene.object_registry("category", category, []))
            models = get_all_object_category_models(category=category)
            for i in range(n_instances):
                obj = DatasetObject(
                    name=f"{category}_{n_category_objs + i}",
                    category=category,
                    model=np.random.choice(models),
                )
                new_obj_attrs = ObjectAttrs(
                    obj=obj,
                    callback=_spawn_object_in_container,
                    states=output_states,
                    pos=np.ones(3) * (100.0 + i),
                )
                objs_to_add.append(new_obj_attrs)

        # Spawn in new fluid
        if len(recipe["output_systems"]) > 0:
            # Only one system is allowed to be spawned
            assert len(recipe["output_systems"]) == 1, "Only a single output system can be spawned for a given recipe!"
            out_system = get_system(recipe["output_systems"][0])
            out_system.generate_particles_from_link(
                obj=container,
                link=contained_particles_state.link,
                # When ignore_nonrecipe_objects is True, we don't necessarily remove all objects in the container.
                # Therefore, we need to check for contact when generating output systems.
                check_contact=cls.ignore_nonrecipe_objects,
                max_samples=int(volume / (np.pi * (out_system.particle_radius ** 3) * 4 / 3)),
            )

        # Return transition results
        return TransitionResults(add=objs_to_add, remove=objs_to_remove)

    @classproperty
    def relax_recipe_systems(cls):
        """
        Returns:
            bool: Whether to relax the requirement of having all systems in the recipe contained in the container
        """
        raise NotImplementedError("Must be implemented by subclass!")

    @classproperty
    def ignore_nonrecipe_systems(cls):
        """
        Returns:
            bool: Whether contained systems not relevant to the recipe should be ignored or not
        """
        raise NotImplementedError("Must be implemented by subclass!")

    @classproperty
    def ignore_nonrecipe_objects(cls):
        """
        Returns:
            bool: Whether contained rigid objects not relevant to the recipe should be ignored or not
        """
        raise NotImplementedError("Must be implemented by subclass!")

    @classproperty
    def use_garbage_fallback_recipe(cls):
        """
        Returns:
            bool: Whether this recipe rule should use a garbage fallback recipe if all conditions are met but no
                valid recipe is found for a given container
        """
        raise NotImplementedError("Must be implemented by subclass!")

    @classproperty
    def is_multi_instance(cls):
        """
        Returns:
            bool: Whether this rule can be applied multiple times to the same container, e.g. to cook multiple doughs
        """
        return False

    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("RecipeRule")
        return classes
class CookingPhysicalParticleRule(RecipeRule):
    """
    Transition rule that "cooks" physical particles inside a heated container.

    Two recipe forms are supported:
        1. xyz -> cooked__xyz, e.g. diced__chicken -> cooked__diced__chicken
        2. xyz + cooked__water -> cooked__xyz, e.g. rice + cooked__water -> cooked__rice
    On execution, the input particles (xyz) are swapped for output particles
    (cooked__xyz) at identical positions, and any cooked__water input is consumed.
    """
    @classmethod
    def add_recipe(
        cls,
        name,
        input_objects,
        input_systems,
        output_objects,
        output_systems,
        **kwargs,
    ):
        """
        Adds a recipe to this recipe rule to check against. This defines a valid mapping of inputs that will transform
        into the outputs

        Args:
            name (str): Name of the recipe
            input_objects (dict): Maps object categories to number of instances required for the recipe
            input_systems (list): List of system names required for the recipe
            output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes
            output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1.
        """
        # This rule operates purely on systems -- objects are disallowed on both ends
        assert len(input_objects) == 0, f"No input objects can be specified for {cls.__name__}, recipe: {name}!"
        assert len(output_objects) == 0, f"No output objects can be specified for {cls.__name__}, recipe: {name}!"
        n_input_systems = len(input_systems)
        assert n_input_systems in (1, 2), \
            f"Only one or two input systems can be specified for {cls.__name__}, recipe: {name}!"
        if n_input_systems == 2:
            # The optional second system is always the cooking liquid
            assert input_systems[1] == "cooked__water", \
                f"Second input system must be cooked__water for {cls.__name__}, recipe: {name}!"
        assert len(output_systems) == 1, \
            f"Exactly one output system needs to be specified for {cls.__name__}, recipe: {name}!"
        super().add_recipe(
            name=name,
            input_objects=input_objects,
            input_systems=input_systems,
            output_objects=output_objects,
            output_systems=output_systems,
            **kwargs,
        )

    @classproperty
    def candidate_filters(cls):
        # On top of the fillable requirement, the container must also be heatable
        filters = super().candidate_filters
        filters["container"] = AndFilter(filters=[filters["container"], AbilityFilter(ability="heatable")])
        return filters

    @classmethod
    def _generate_conditions(cls):
        # The recipe only fires while the container is heated
        return [StateCondition(filter_name="container", state=Heated, val=True, op=operator.eq)]

    @classproperty
    def relax_recipe_systems(cls):
        # All input systems must actually be inside the container
        return False

    @classproperty
    def ignore_nonrecipe_systems(cls):
        # Unrelated systems in the container do not block the recipe
        return True

    @classproperty
    def ignore_nonrecipe_objects(cls):
        # Unrelated objects in the container do not block the recipe
        return True

    @classproperty
    def use_garbage_fallback_recipe(cls):
        # No garbage substitute when no recipe matches
        return False

    @classmethod
    def _execute_recipe(cls, container, recipe, container_info):
        # Find which raw particles currently sit inside the container's volume
        raw_system = get_system(recipe["input_systems"][0])
        contained_value = container.states[ContainedParticles].get_value(raw_system)
        inside_idxs = contained_value.in_volume.nonzero()[0]
        assert len(inside_idxs) > 0, "No particles found in the container when executing recipe!"
        # Swap raw particles for cooked ones at the exact same positions
        raw_system.remove_particles(idxs=inside_idxs)
        cooked_system = get_system(recipe["output_systems"][0])
        cooked_system.generate_particles(positions=contained_value.positions[inside_idxs])
        # Consume the cooking water if this recipe uses it
        if len(recipe["input_systems"]) > 1:
            container.states[Contains].set_value(get_system(recipe["input_systems"][1]), False)
        return TransitionResults(add=[], remove=[])
class ToggleableMachineRule(RecipeRule):
    """
    Transition mixing rule that leverages a single toggleable machine (e.g. electric mixer, coffee machine, blender),
    which require toggledOn in order to trigger the recipe event.
    It comes with two forms of recipes:
        1. output is a single object, e.g. flour + butter + sugar -> dough, machine is electric mixer
        2. output is a system, e.g. strawberry + milk -> smoothie, machine is blender
    """

    @classmethod
    def add_recipe(
        cls,
        name,
        input_objects,
        input_systems,
        output_objects,
        output_systems,
        input_states=None,
        output_states=None,
        fillable_categories=None,
        **kwargs,
    ):
        """
        Adds a recipe to this recipe rule to check against. This defines a valid mapping of inputs that will transform
        into the outputs

        Args:
            name (str): Name of the recipe
            input_objects (dict): Maps object categories to number of instances required for the recipe
            input_systems (list): List of system names required for the recipe
            output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes
            output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1.
            input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute
            output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system"] to a list of states that should be set after the output objects are spawned
            fillable_categories (None or set of str): If specified, set of fillable categories which are allowed
                for this recipe. If None, any fillable is allowed
        """
        # Form 1 recipes (object output) are limited to a single instance of a single category
        if len(output_objects) > 0:
            assert len(output_objects) == 1, f"Only one category of output object can be specified for {cls.__name__}, recipe: {name}!"
            assert output_objects[list(output_objects.keys())[0]] == 1, f"Only one instance of output object can be specified for {cls.__name__}, recipe: {name}!"

        super().add_recipe(
            name=name,
            input_objects=input_objects,
            input_systems=input_systems,
            output_objects=output_objects,
            output_systems=output_systems,
            input_states=input_states,
            output_states=output_states,
            fillable_categories=fillable_categories,
            **kwargs,
        )

    @classproperty
    def candidate_filters(cls):
        # Modify the container filter to include toggleable ability as well
        candidate_filters = super().candidate_filters
        candidate_filters["container"] = AndFilter(filters=[
            candidate_filters["container"],
            AbilityFilter(ability="toggleable"),
            # Exclude washer and clothes dryer because they are handled by WasherRule and DryerRule
            NotFilter(CategoryFilter("washer")),
            NotFilter(CategoryFilter("clothes_dryer")),
            NotFilter(CategoryFilter("hot_tub")),
        ])
        return candidate_filters

    @classmethod
    def _generate_conditions(cls):
        # Container must be toggledOn, and should only be triggered once
        return [ChangeConditionWrapper(
            condition=StateCondition(filter_name="container", state=ToggledOn, val=True, op=operator.eq)
        )]

    @classproperty
    def relax_recipe_systems(cls):
        # Recipe systems must actually be inside the machine for the recipe to match
        return False

    @classproperty
    def ignore_nonrecipe_systems(cls):
        # Extraneous systems in the machine invalidate the recipe
        return False

    @classproperty
    def ignore_nonrecipe_objects(cls):
        # Extraneous objects in the machine invalidate the recipe
        return False

    @classproperty
    def use_garbage_fallback_recipe(cls):
        # A failed match while the machine runs produces the garbage fallback output
        return True
class MixingToolRule(RecipeRule):
    """
    Transition mixing rule that leverages "mixingTool" ability objects, which require touching between a mixing tool
    and a container in order to trigger the recipe event.
    Example: water + lemon_juice + sugar -> lemonade, mixing tool is spoon
    """

    @classmethod
    def add_recipe(
        cls,
        name,
        input_objects,
        input_systems,
        output_objects,
        output_systems,
        input_states=None,
        output_states=None,
        **kwargs,
    ):
        """
        Adds a recipe to this recipe rule to check against. This defines a valid mapping of inputs that will transform
        into the outputs

        Args:
            name (str): Name of the recipe
            input_objects (dict): Maps object categories to number of instances required for the recipe
            input_systems (list): List of system names required for the recipe
            output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes
            output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1.
            input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute
            output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system"] to a list of states that should be set after the output objects are spawned
        """
        # Mixing recipes consume systems and produce exactly one system -- never objects
        assert len(output_objects) == 0, f"No output objects can be specified for {cls.__name__}, recipe: {name}!"
        assert len(input_systems) > 0, f"Some input systems need to be specified for {cls.__name__}, recipe: {name}!"
        assert len(output_systems) == 1, \
            f"Exactly one output system needs to be specified for {cls.__name__}, recipe: {name}!"

        super().add_recipe(
            name=name,
            input_objects=input_objects,
            input_systems=input_systems,
            output_objects=output_objects,
            output_systems=output_systems,
            input_states=input_states,
            output_states=output_states,
            **kwargs,
        )

    @classproperty
    def candidate_filters(cls):
        # Add mixing tool filter as well
        candidate_filters = super().candidate_filters
        candidate_filters["mixingTool"] = AbilityFilter(ability="mixingTool")
        return candidate_filters

    @classmethod
    def _generate_conditions(cls):
        # Mixing tool must be touching the container, and should only be triggered once
        return [ChangeConditionWrapper(
            condition=TouchingAnyCondition(filter_1_name="container", filter_2_name="mixingTool")
        )]

    @classproperty
    def relax_recipe_systems(cls):
        # Recipe systems must actually be inside the container for the recipe to match
        return False

    @classproperty
    def ignore_nonrecipe_systems(cls):
        # Extraneous systems in the container invalidate the recipe
        return False

    @classproperty
    def ignore_nonrecipe_objects(cls):
        # Extraneous objects in the container do not invalidate the recipe
        return True

    @classproperty
    def use_garbage_fallback_recipe(cls):
        # A failed match while mixing produces the garbage fallback output
        return True
class CookingRule(RecipeRule):
"""
Transition mixing rule that approximates cooking recipes via a container and heatsource.
It is subclassed by CookingObjectRule and CookingSystemRule.
"""
# Counter that increments monotonically
COUNTER = 0
# Maps recipe name to current number of consecutive heating steps
_HEAT_STEPS = None
# Maps recipe name to the last timestep that it was active
_LAST_HEAT_TIMESTEP = None
@classmethod
def refresh(cls, object_candidates):
# Run super first
super().refresh(object_candidates=object_candidates)
# Iterate through all (updated) active recipes and store in internal variables if not already recorded
cls._HEAT_STEPS = dict() if cls._HEAT_STEPS is None else cls._HEAT_STEPS
cls._LAST_HEAT_TIMESTEP = dict() if cls._LAST_HEAT_TIMESTEP is None else cls._LAST_HEAT_TIMESTEP
for name in cls._ACTIVE_RECIPES.keys():
if name not in cls._HEAT_STEPS:
cls._HEAT_STEPS[name] = 0
cls._LAST_HEAT_TIMESTEP[name] = -1
@classmethod
def _validate_recipe_fillables_exist(cls, recipe):
"""
Validates that recipe @recipe's necessary fillable categorie(s) exist in the current scene
Args:
recipe (dict): Recipe whose fillable categories should be checked
Returns:
bool: True if there is at least a single valid fillable category in the current scene, else False
"""
fillable_categories = recipe["fillable_categories"]
if fillable_categories is None:
# Any is valid
return True
# Otherwise, at least one valid type must exist
for category in fillable_categories:
if len(og.sim.scene.object_registry("category", category, default_val=set())) > 0:
return True
# None found, return False
return False
@classmethod
def _validate_recipe_heatsources_exist(cls, recipe):
"""
Validates that recipe @recipe's necessary heatsource categorie(s) exist in the current scene
Args:
recipe (dict): Recipe whose heatsource categories should be checked
Returns:
bool: True if there is at least a single valid heatsource category in the current scene, else False
"""
heatsource_categories = recipe["heatsource_categories"]
if heatsource_categories is None:
# Any is valid
return True
# Otherwise, at least one valid type must exist
for category in heatsource_categories:
if len(og.sim.scene.object_registry("category", category, default_val=set())) > 0:
return True
# None found, return False
return False
@classmethod
def _validate_recipe_heatsource_is_valid(cls, recipe, heatsource_categories):
"""
Validates that there is a valid heatsource category in @heatsource_categories compatible with @recipe
Args:
recipe (dict): Recipe whose heatsource_categories should be checked against @heatsource_categories
heatsource_categories (set of str): Set of potential heatsource categories
Returns:
bool: True if there is a compatible category in @heatsource_categories, else False
"""
required_heatsource_categories = recipe["heatsource_categories"]
# Either no specific required and there is at least 1 heatsource or there is at least 1 matching heatsource
# between the required and available
return (required_heatsource_categories is None and len(heatsource_categories) > 0) or \
len(required_heatsource_categories.intersection(heatsource_categories)) > 0
@classmethod
def _compute_container_info(cls, object_candidates, container, global_info):
# Run super first
info = super()._compute_container_info(object_candidates=object_candidates, container=container, global_info=global_info)
# Compute whether each heatsource is affecting the container
info["heatsource_categories"] = set(obj.category for obj in object_candidates["heatSource"] if
obj.states[HeatSourceOrSink].affects_obj(container))
return info
@classmethod
def _is_recipe_active(cls, recipe):
# Check for heatsource categories first
if not cls._validate_recipe_heatsources_exist(recipe=recipe):
return False
# Otherwise, run super normally
return super()._is_recipe_active(recipe=recipe)
@classmethod
def _is_recipe_executable(cls, recipe, container, global_info, container_info):
# Check for heatsource compatibility first
if not cls._validate_recipe_heatsource_is_valid(recipe=recipe, heatsource_categories=container_info["heatsource_categories"]):
return False
# Run super
executable = super()._is_recipe_executable(
recipe=recipe,
container=container,
global_info=global_info,
container_info=container_info,
)
# If executable, increment heat counter by 1, if we were also active last timestep, else, reset to 1
if executable:
name = recipe["name"]
cls._HEAT_STEPS[name] = cls._HEAT_STEPS[name] + 1 if \
cls._LAST_HEAT_TIMESTEP[name] == cls.COUNTER - 1 else 1
cls._LAST_HEAT_TIMESTEP[name] = cls.COUNTER
# If valid number of timesteps met, recipe is indeed executable
executable = cls._HEAT_STEPS[name] >= recipe["timesteps"]
return executable
@classmethod
def add_recipe(
cls,
name,
input_objects,
input_systems,
output_objects,
output_systems,
input_states=None,
output_states=None,
fillable_categories=None,
heatsource_categories=None,
timesteps=None,
):
"""
Adds a recipe to this cooking recipe rule to check against. This defines a valid mapping of inputs that
will transform into the outputs
Args:
name (str): Name of the recipe
input_objects (dict): Maps object categories to number of instances required for the recipe
input_systems (list): List of system names required for the recipe
output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes
output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1.
input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
["unary", "bianry_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute
output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
["unary", "bianry_system"] to a list of states that should be set after the output objects are spawned
fillable_categories (None or set of str): If specified, set of fillable categories which are allowed
for this recipe. If None, any fillable is allowed
heatsource_categories (None or set of str): If specified, set of heatsource categories which are allowed
for this recipe. If None, any heatsource is allowed
timesteps (None or int): Number of subsequent heating steps required for the recipe to execute. If None,
it will be set to be 1, i.e.: instantaneous execution
"""
super().add_recipe(
name=name,
input_objects=input_objects,
input_systems=input_systems,
output_objects=output_objects,
output_systems=output_systems,
input_states=input_states,
output_states=output_states,
fillable_categories=fillable_categories,
heatsource_categories=heatsource_categories,
timesteps=1 if timesteps is None else timesteps,
)
@classproperty
def candidate_filters(cls):
# Add mixing tool filter as well
candidate_filters = super().candidate_filters
candidate_filters["heatSource"] = AbilityFilter(ability="heatSource")
return candidate_filters
@classmethod
def _generate_conditions(cls):
# Define a class to increment this class's internal time counter every time it is triggered
class TimeIncrementCondition(RuleCondition):
def __init__(self, cls):
self.cls = cls
def __call__(self, object_candidates):
# This is just a pass-through, but also increment the time
self.cls.COUNTER += 1
return True
def modifies_filter_names(self):
return set()
# Any heatsource must be active
return [
TimeIncrementCondition(cls=cls),
StateCondition(filter_name="heatSource", state=HeatSourceOrSink, val=True, op=operator.eq),
]
@classproperty
def use_garbage_fallback_recipe(cls):
return False
@classproperty
def _do_not_register_classes(cls):
# Don't register this class since it's an abstract template
classes = super()._do_not_register_classes
classes.add("CookingRule")
return classes
class CookingObjectRule(CookingRule):
    """
    Cooking rule when output is objects (e.g. one dough can produce many bagels as output).
    Example: bagel_dough + egg + sesame_seed -> bagel, heat source is oven, fillable is baking_sheet.
    This is the only rule where is_multi_instance is True, where multiple copies of the recipe can be executed.
    """

    @classmethod
    def add_recipe(
        cls,
        name,
        input_objects,
        input_systems,
        output_objects,
        output_systems,
        input_states=None,
        output_states=None,
        fillable_categories=None,
        heatsource_categories=None,
        timesteps=None,
    ):
        """
        Adds a recipe to this cooking recipe rule to check against. This defines a valid mapping of inputs that
        will transform into the outputs

        Args:
            name (str): Name of the recipe
            input_objects (dict): Maps object categories to number of instances required for the recipe
            input_systems (list): List of system names required for the recipe
            output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes
            output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1.
            input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute
            output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system"] to a list of states that should be set after the output objects are spawned
            fillable_categories (None or set of str): If specified, set of fillable categories which are allowed
                for this recipe. If None, any fillable is allowed
            heatsource_categories (None or set of str): If specified, set of heatsource categories which are allowed
                for this recipe. If None, any heatsource is allowed
            timesteps (None or int): Number of subsequent heating steps required for the recipe to execute. If None,
                it will be set to be 1, i.e.: instantaneous execution
        """
        # This subclass produces objects only -- system outputs belong to CookingSystemRule
        assert len(output_systems) == 0, f"No output systems can be specified for {cls.__name__}, recipe: {name}!"
        super().add_recipe(
            name=name,
            input_objects=input_objects,
            input_systems=input_systems,
            output_objects=output_objects,
            output_systems=output_systems,
            input_states=input_states,
            output_states=output_states,
            fillable_categories=fillable_categories,
            heatsource_categories=heatsource_categories,
            timesteps=timesteps,
        )

    @classproperty
    def relax_recipe_systems(cls):
        # We don't require systems like seasoning/cheese/sesame seeds/etc. to be contained in the baking sheet
        return True

    @classproperty
    def ignore_nonrecipe_systems(cls):
        # Extraneous systems do not invalidate the recipe
        return True

    @classproperty
    def ignore_nonrecipe_objects(cls):
        # Extraneous objects do not invalidate the recipe
        return True

    @classproperty
    def is_multi_instance(cls):
        # Multiple copies of this recipe may execute at once (see class docstring)
        return True
class CookingSystemRule(CookingRule):
    """
    Cooking rule when output is a system.
    Example: beef + tomato + chicken_stock -> stew, heat source is stove, fillable is stockpot.
    """

    @classmethod
    def add_recipe(
        cls,
        name,
        input_objects,
        input_systems,
        output_objects,
        output_systems,
        input_states=None,
        output_states=None,
        fillable_categories=None,
        heatsource_categories=None,
        timesteps=None,
    ):
        """
        Adds a recipe to this cooking recipe rule to check against. This defines a valid mapping of inputs that
        will transform into the outputs

        Args:
            name (str): Name of the recipe
            input_objects (dict): Maps object categories to number of instances required for the recipe
            input_systems (list): List of system names required for the recipe
            output_objects (dict): Maps object categories to number of instances to be spawned in the container when the recipe executes
            output_systems (list): List of system names to be spawned in the container when the recipe executes. Currently the length is 1.
            input_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system", "binary_object"] to a list of states that must be satisfied for the recipe to execute
            output_states (None or defaultdict(lambda: defaultdict(list))): Maps object categories to
                ["unary", "binary_system"] to a list of states that should be set after the output objects are spawned
            fillable_categories (None or set of str): If specified, set of fillable categories which are allowed
                for this recipe. If None, any fillable is allowed
            heatsource_categories (None or set of str): If specified, set of heatsource categories which are allowed
                for this recipe. If None, any heatsource is allowed
            timesteps (None or int): Number of subsequent heating steps required for the recipe to execute. If None,
                it will be set to be 1, i.e.: instantaneous execution
        """
        # This subclass produces systems only -- object outputs belong to CookingObjectRule
        assert len(output_objects) == 0, f"No output objects can be specified for {cls.__name__}, recipe: {name}!"
        super().add_recipe(
            name=name,
            input_objects=input_objects,
            input_systems=input_systems,
            output_objects=output_objects,
            output_systems=output_systems,
            input_states=input_states,
            output_states=output_states,
            fillable_categories=fillable_categories,
            heatsource_categories=heatsource_categories,
            timesteps=timesteps,
        )

    @classproperty
    def relax_recipe_systems(cls):
        # Recipe systems must actually be inside the container for the recipe to match
        return False

    @classproperty
    def ignore_nonrecipe_systems(cls):
        # Extraneous systems invalidate the recipe
        return False

    @classproperty
    def ignore_nonrecipe_objects(cls):
        # Extraneous objects invalidate the recipe
        return False
def import_recipes():
    """
    Loads all BDDL transition recipes from their bundled JSON files and registers each with its
    corresponding transition rule (WasherRule gets cleaning conditions; RecipeRule subclasses get recipes).
    Missing JSON files are logged and skipped.
    """
    for json_file, rule_names in _JSON_FILES_TO_RULES.items():
        recipe_fpath = os.path.join(os.path.dirname(bddl.__file__), "generated_data", "transition_map", "tm_jsons", json_file)
        if not os.path.exists(recipe_fpath):
            log.warning(f"Cannot find recipe file at {recipe_fpath}. Skipping importing recipes.")
            # Skip this file -- previously this fell through and crashed on open() with FileNotFoundError
            continue
        with open(recipe_fpath, "r") as f:
            rule_recipes = json.load(f)

        for rule_name in rule_names:
            rule = REGISTERED_RULES[rule_name]
            if rule == WasherRule:
                rule.register_cleaning_conditions(translate_bddl_washer_rule_to_og_washer_rule(rule_recipes))
            elif issubclass(rule, RecipeRule):
                log.info(f"Adding recipes of rule {rule_name}...")
                for recipe in rule_recipes:
                    # Normalize BDDL field names into the kwargs expected by the og translator
                    if "rule_name" in recipe:
                        recipe["name"] = recipe.pop("rule_name")
                    if "container" in recipe:
                        recipe["fillable_synsets"] = set(recipe.pop("container").keys())
                    if "heat_source" in recipe:
                        recipe["heatsource_synsets"] = set(recipe.pop("heat_source").keys())
                    if "machine" in recipe:
                        recipe["fillable_synsets"] = set(recipe.pop("machine").keys())

                    # Route the recipe to the correct rule: CookingObjectRule or CookingSystemRule
                    og_recipe = translate_bddl_recipe_to_og_recipe(**recipe)
                    has_output_system = len(og_recipe["output_systems"]) > 0
                    if (rule == CookingObjectRule and has_output_system) or (rule == CookingSystemRule and not has_output_system):
                        # Recipe belongs to the sibling cooking rule; don't register it here
                        continue
                    rule.add_recipe(**og_recipe)
                log.info(f"All recipes of rule {rule_name} imported successfully.")
import_recipes() | 108,938 | Python | 42.785772 | 179 | 0.630863 |
StanfordVL/OmniGibson/omnigibson/__init__.py | import logging
import os
import shutil
import signal
import tempfile
import builtins
# TODO: Need to fix somehow -- omnigibson gets imported first BEFORE we can actually modify the macros
from omnigibson.macros import gm
from omnigibson.envs import Environment
from omnigibson.scenes import REGISTERED_SCENES
from omnigibson.objects import REGISTERED_OBJECTS
from omnigibson.robots import REGISTERED_ROBOTS
from omnigibson.controllers import REGISTERED_CONTROLLERS
from omnigibson.tasks import REGISTERED_TASKS
from omnigibson.sensors import ALL_SENSOR_MODALITIES
from omnigibson.simulator import launch_simulator as launch
# Create logger
logging.basicConfig(format='[%(levelname)s] [%(name)s] %(message)s')
log = logging.getLogger(__name__)
builtins.ISAAC_LAUNCHED_FROM_JUPYTER = (
os.getenv("ISAAC_JUPYTER_KERNEL") is not None
) # We set this in the kernel.json file
# Always enable nest_asyncio because MaterialPrim calls asyncio.run()
import nest_asyncio
nest_asyncio.apply()
__version__ = "1.0.0"
log.setLevel(logging.DEBUG if gm.DEBUG else logging.INFO)
root_path = os.path.dirname(os.path.realpath(__file__))
# Store paths to example configs
example_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "configs")
# Initialize global variables
app = None # (this is a singleton so it's okay that it's global)
sim = None # (this is a singleton so it's okay that it's global)
# Create and expose a temporary directory for any use cases. It will get destroyed upon omni
# shutdown by the shutdown function.
tempdir = tempfile.mkdtemp()
def cleanup(*args, **kwargs):
    """Best-effort removal of OmniGibson's temporary directory, followed by the shutdown banner log."""
    # TODO: Currently tempfile removal will fail due to CopyPrim command (for example, GranularSystem in dicing_apple example.)
    try:
        shutil.rmtree(tempdir)
    except PermissionError:
        log.info("Permission error when removing temp files. Ignoring")
    # NOTE(review): imported here rather than at module top -- presumably to avoid an import cycle; confirm
    from omnigibson.simulator import logo_small
    dashes = "-" * 10
    log.info(f"{dashes} Shutting Down {logo_small()} {dashes}")
def shutdown(due_to_signal=False):
    """
    Cleanly tear down OmniGibson: run temp-file cleanup and close the Isaac app if one is running.

    Args:
        due_to_signal (bool): Whether this shutdown was triggered by a signal handler. If False and no
            app is running, the process exits explicitly via SystemExit(0).
    """
    if app is not None:
        # If Isaac is running, we do the cleanup in its shutdown callback to avoid open handles.
        # TODO: Automated cleanup in callback doesn't work for some reason. Need to investigate.
        # Manually call cleanup for now.
        cleanup()
        app.close()
    else:
        # Otherwise, we do the cleanup here.
        cleanup()

        # If we're not shutting down due to a signal, we need to manually exit
        if not due_to_signal:
            import sys
            # Use sys.exit instead of the site-injected exit() builtin, which may be absent
            # (e.g. under `python -S` or in frozen apps); both raise SystemExit(0).
            sys.exit(0)
def shutdown_handler(*args, **kwargs):
    """SIGINT handler: shut OmniGibson down first, then defer to Python's default KeyboardInterrupt behavior."""
    shutdown(due_to_signal=True)
    return signal.default_int_handler(*args, **kwargs)
# Something somewhere disables the default SIGINT handler, so we need to re-enable it
signal.signal(signal.SIGINT, shutdown_handler)
| 2,812 | Python | 34.1625 | 127 | 0.726885 |
StanfordVL/OmniGibson/omnigibson/macros.py | """
Set of macros to use globally for OmniGibson. These are generally magic numbers that were tuned heuristically.
NOTE: This is generally decentralized -- the monolithic @settings variable is created here with some global values,
but submodules within OmniGibson may import this dictionary and add to it dynamically
"""
import os
import pathlib
from addict import Dict
# Initialize settings
macros = Dict()
gm = macros.globals
# Path (either relative to OmniGibson/omnigibson directory or global absolute path) for data
# Assets correspond to non-objects / scenes (e.g.: robots), and dataset incliudes objects + scene
# can override assets_path and dataset_path from environment variable
gm.ASSET_PATH = "data/assets"
if "OMNIGIBSON_ASSET_PATH" in os.environ:
gm.ASSET_PATH = os.environ["OMNIGIBSON_ASSET_PATH"]
gm.ASSET_PATH = os.path.expanduser(gm.ASSET_PATH)
if not os.path.isabs(gm.ASSET_PATH):
gm.ASSET_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), gm.ASSET_PATH)
gm.DATASET_PATH = "data/og_dataset"
if "OMNIGIBSON_DATASET_PATH" in os.environ:
gm.DATASET_PATH = os.environ["OMNIGIBSON_DATASET_PATH"]
gm.DATASET_PATH = os.path.expanduser(gm.DATASET_PATH)
if not os.path.isabs(gm.DATASET_PATH):
gm.DATASET_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), gm.DATASET_PATH)
gm.KEY_PATH = "data/omnigibson.key"
if "OMNIGIBSON_KEY_PATH" in os.environ:
gm.KEY_PATH = os.environ["OMNIGIBSON_KEY_PATH"]
gm.KEY_PATH = os.path.expanduser(gm.KEY_PATH)
if not os.path.isabs(gm.KEY_PATH):
gm.KEY_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), gm.KEY_PATH)
# Which GPU to use -- None will result in omni automatically using an appropriate GPU. Otherwise, set with either
# integer or string-form integer
gm.GPU_ID = os.getenv("OMNIGIBSON_GPU_ID", None)
# Whether to generate a headless or non-headless application upon OmniGibson startup
gm.HEADLESS = (os.getenv("OMNIGIBSON_HEADLESS", 'False').lower() in ('true', '1', 't'))
# Whether to enable remote streaming. None disables it, other valid options are "native", "webrtc".
gm.REMOTE_STREAMING = os.getenv("OMNIGIBSON_REMOTE_STREAMING", None)
# What port the webrtc and http servers should run on. This is only used if REMOTE_STREAMING is set to "webrtc"
gm.HTTP_PORT = os.getenv("OMNIGIBSON_HTTP_PORT", 8211)
gm.WEBRTC_PORT = os.getenv("OMNIGIBSON_WEBRTC_PORT", 49100)
# Whether only the viewport should be shown in the GUI or not (if not, other peripherals are additionally shown)
# CANNOT be set at runtime
gm.GUI_VIEWPORT_ONLY = False
# Whether to use the viewer camera or not
gm.RENDER_VIEWER_CAMERA = True
# Do not suppress known omni warnings / errors, and also put omnigibson in a debug state
# This includes extra information for things such as object sampling, and also any debug
# logging messages
gm.DEBUG = (os.getenv("OMNIGIBSON_DEBUG", 'False').lower() in ('true', '1', 't'))
# Whether to print out disclaimers (i.e.: known failure cases resulting from Omniverse's current bugs / limitations)
gm.SHOW_DISCLAIMERS = False
# Whether to use omni's GPU dynamics
# This is necessary for certain features; e.g. particles (fluids / cloth)
gm.USE_GPU_DYNAMICS = False
# Whether to use high-fidelity rendering (this includes, e.g., isosurfaces)
gm.ENABLE_HQ_RENDERING = False
# Whether to use omni's flatcache feature or not (can speed up simulation)
gm.ENABLE_FLATCACHE = False
# Whether to use continuous collision detection or not (slower simulation, but can prevent
# objects from tunneling through each other)
gm.ENABLE_CCD = False
# Pairs setting -- USD default is 256 * 1024, physx default apparently is 32 * 1024.
gm.GPU_PAIRS_CAPACITY = 256 * 1024
# Aggregate pairs setting -- default is 1024, but is often insufficient for large scenes
gm.GPU_AGGR_PAIRS_CAPACITY = (2 ** 14) * 1024
# Maximum particle contacts allowed
gm.GPU_MAX_PARTICLE_CONTACTS = 1024 * 1024
# Maximum rigid contacts -- 524288 is default value from omni, but increasing too much can sometimes lead to crashes
gm.GPU_MAX_RIGID_CONTACT_COUNT = 524288 * 4
# Maximum rigid patches -- 81920 is default value from omni, but increasing too much can sometimes lead to crashes
gm.GPU_MAX_RIGID_PATCH_COUNT = 81920 * 4
# Whether to enable object state logic or not
gm.ENABLE_OBJECT_STATES = True
# Whether to enable transition rules or not
gm.ENABLE_TRANSITION_RULES = True
# Default settings for the omni UI viewer
gm.DEFAULT_VIEWER_WIDTH = 1280
gm.DEFAULT_VIEWER_HEIGHT = 720
# (Demo-purpose) Whether to activate Assistive Grasping mode for Cloth (it's handled differently from RigidBody)
gm.AG_CLOTH = False
# Forced light intensity for all DatasetObjects. None if the USD-provided intensities should be respected.
gm.FORCE_LIGHT_INTENSITY = 150000
# Forced roughness for all DatasetObjects. None if the USD-provided roughness maps should be respected.
gm.FORCE_ROUGHNESS = 0.7
# Create helper function for generating sub-dictionaries
def create_module_macros(module_path):
    """
    Creates a dictionary that can be populated with module macros based on the module's @module_path

    Args:
        module_path (str): Relative path from the package root directory pointing to the module. This will be parsed
            to generate the appropriate sub-macros dictionary, e.g., for module "dirty" in
            omnigibson/object_states_dirty.py, this would generate a dictionary existing at macros.object_states.dirty

    Returns:
        Dict: addict dictionary which can be populated with values

    Raises:
        ValueError: If @module_path does not live under the omnigibson package root directory
    """
    # Sanity check module path, make sure omnigibson/ is in the path
    module_path = pathlib.Path(module_path)
    omnigibson_path = pathlib.Path(__file__).parent

    # Trim the .py, and anything before and including omnigibson/, and split into its appropriate parts
    try:
        subsections = module_path.with_suffix("").relative_to(omnigibson_path).parts
    except ValueError:
        # Bug fix: the message previously lacked the f-prefix, so it printed the literal "{module_path}"
        raise ValueError(f"module_path is expected to be a filepath including the omnigibson root directory, got: {module_path}!") from None

    # Create and return the generated sub-dictionary
    def _recursively_get_or_create_dict(dic, keys):
        # If no entry is in @keys, it returns @dic
        # Otherwise, checks whether the dictionary contains the first entry in @keys, if so, it grabs the
        # corresponding nested dictionary, otherwise, generates a new Dict() as the value
        # It then recursively calls this function with the new dic and the remaining keys
        if len(keys) == 0:
            return dic
        else:
            key = keys[0]
            if key not in dic:
                dic[key] = Dict()
            return _recursively_get_or_create_dict(dic=dic[key], keys=keys[1:])

    return _recursively_get_or_create_dict(dic=macros, keys=subsections)
| 6,808 | Python | 42.647436 | 129 | 0.735605 |
StanfordVL/OmniGibson/omnigibson/lazy.py | import sys
from omnigibson.utils.lazy_import_utils import LazyImporter
sys.modules[__name__] = LazyImporter("", None)
| 119 | Python | 22.999995 | 59 | 0.764706 |
StanfordVL/OmniGibson/omnigibson/scenes/__init__.py | from omnigibson.scenes.scene_base import Scene, REGISTERED_SCENES
from omnigibson.scenes.traversable_scene import TraversableScene
from omnigibson.scenes.static_traversable_scene import StaticTraversableScene
from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
| 297 | Python | 58.599988 | 87 | 0.888889 |
StanfordVL/OmniGibson/omnigibson/scenes/static_traversable_scene.py | import os
import numpy as np
from omnigibson.scenes.traversable_scene import TraversableScene
from omnigibson.prims.geom_prim import CollisionVisualGeomPrim
from omnigibson.utils.asset_utils import get_scene_path
from omnigibson.utils.usd_utils import add_asset_to_stage
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
class StaticTraversableScene(TraversableScene):
    """
    Static traversable scene class for OmniGibson, where the scene is defined by a single
    mesh (no interactable objects).
    """

    def __init__(
        self,
        scene_model,
        scene_file=None,
        trav_map_resolution=0.1,
        default_erosion_radius=0.0,
        trav_map_with_objects=True,
        num_waypoints=10,
        waypoint_resolution=0.2,
        floor_plane_visible=False,
        floor_plane_color=(1.0, 1.0, 1.0),
    ):
        """
        Args:
            scene_model (str): Scene model name, e.g.: Adrian
            scene_file (None or str): If specified, full path of JSON file to load (with .json).
                None results in no additional objects being loaded into the scene
            trav_map_resolution (float): traversability map resolution
            default_erosion_radius (float): default map erosion radius in meters
            trav_map_with_objects (bool): whether to use objects or not when constructing graph
            num_waypoints (int): number of way points returned
            waypoint_resolution (float): resolution of adjacent way points
            floor_plane_visible (bool): whether to render the additionally added floor plane
            floor_plane_color (3-array): if @floor_plane_visible is True, this determines the (R,G,B) color assigned
                to the generated floor plane
        """
        # Store and initialize additional variables (populated during _load())
        self._floor_heights = None
        self._scene_mesh = None

        # Run super init
        super().__init__(
            scene_model=scene_model,
            scene_file=scene_file,
            trav_map_resolution=trav_map_resolution,
            default_erosion_radius=default_erosion_radius,
            trav_map_with_objects=trav_map_with_objects,
            num_waypoints=num_waypoints,
            waypoint_resolution=waypoint_resolution,
            use_floor_plane=True,
            floor_plane_visible=floor_plane_visible,
            floor_plane_color=floor_plane_color,
        )

    def _load(self):
        # Run super first
        super()._load()

        # Load the scene mesh (use downsampled one if available)
        filename = os.path.join(get_scene_path(self.scene_model), "mesh_z_up_downsampled.obj")
        if not os.path.isfile(filename):
            filename = os.path.join(get_scene_path(self.scene_model), "mesh_z_up.obj")

        # Added purely for its stage side effect; the returned prim is not needed here
        add_asset_to_stage(
            asset_path=filename,
            prim_path=f"/World/scene_{self.scene_model}",
        )

        # Grab the actual mesh prim
        self._scene_mesh = CollisionVisualGeomPrim(
            prim_path=f"/World/scene_{self.scene_model}/mesh_z_up/{self.scene_model}_mesh_texture",
            name=f"{self.scene_model}_mesh",
        )

        # Load floor metadata
        floor_height_path = os.path.join(get_scene_path(self.scene_model), "floors.txt")
        # BUGFIX: error message previously referenced "floor_heights.txt", which is not the file read here
        assert os.path.isfile(floor_height_path), f"floors.txt cannot be found in model: {self.scene_model}"
        with open(floor_height_path, "r") as f:
            # BUGFIX: store on self._floor_heights (previously self.floor_heights), so that
            # n_floors -- which reads self._floor_heights -- no longer operates on None
            self._floor_heights = sorted(map(float, f.readlines()))
        log.debug("Floors {}".format(self._floor_heights))

        # Move the floor plane to the first floor by default
        self.move_floor_plane(floor=0)

        # Filter the collision between the scene mesh and the floor plane
        self._scene_mesh.add_filtered_collision_pair(prim=self._floor_plane)

        # Load the traversability map
        self._trav_map.load_map(get_scene_path(self.scene_model))

    @property
    def floor_heights(self):
        """
        Returns:
            None or list of float: sorted floor heights (m) for this scene once loaded;
                backwards-compatible accessor for the attribute previously set in _load()
        """
        return self._floor_heights

    def move_floor_plane(self, floor=0, additional_elevation=0.02, height=None):
        """
        Resets the floor plane to a new floor

        Args:
            floor (int): Integer identifying the floor to move the floor plane to
            additional_elevation (float): Additional elevation with respect to the height of the floor
            height (None or float): If specified, alternative parameter to directly control the height of the ground
                plane. Note that this will override @additional_elevation and @floor!
        """
        height = height if height is not None else self._floor_heights[floor] + additional_elevation
        self._floor_plane.set_position(np.array([0, 0, height]))

    def get_floor_height(self, floor=0):
        """
        Return the height (in meters) of the given floor

        Args:
            floor (int): index of the floor to query

        Returns:
            float: height of the requested floor
        """
        return self._floor_heights[floor]

    @property
    def n_floors(self):
        return len(self._floor_heights)
| 5,029 | Python | 39.24 | 119 | 0.6379 |
StanfordVL/OmniGibson/omnigibson/scenes/traversable_scene.py | from omnigibson.scenes.scene_base import Scene
from omnigibson.maps.traversable_map import TraversableMap
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
class TraversableScene(Scene):
    """
    Scene that supports navigation queries (shortest paths, random reachable points)
    via an underlying traversability map.
    """

    def __init__(
        self,
        scene_model,
        scene_file=None,
        trav_map_resolution=0.1,
        default_erosion_radius=0.0,
        trav_map_with_objects=True,
        num_waypoints=10,
        waypoint_resolution=0.2,
        use_floor_plane=True,
        floor_plane_visible=True,
        floor_plane_color=(1.0, 1.0, 1.0),
    ):
        """
        Args:
            scene_model (str): Scene model name, e.g.: Adrian or Rs_int
            scene_file (None or str): If specified, full path of JSON file to load (with .json).
                None results in no additional objects being loaded into the scene
            trav_map_resolution (float): traversability map resolution
            default_erosion_radius (float): default map erosion radius in meters
            trav_map_with_objects (bool): whether to use objects or not when constructing graph
            num_waypoints (int): number of way points returned
            waypoint_resolution (float): resolution of adjacent way points
            use_floor_plane (bool): whether to load a flat floor plane into the simulator
            floor_plane_visible (bool): whether to render the additionally added floor plane
            floor_plane_color (3-array): if @floor_plane_visible is True, this determines the (R,G,B) color assigned
                to the generated floor plane
        """
        log.info(f"TraversableScene model: {scene_model}")
        self.scene_model = scene_model

        # Build the navigation (traversability) map before the base class initializes
        self._trav_map = TraversableMap(
            map_resolution=trav_map_resolution,
            default_erosion_radius=default_erosion_radius,
            trav_map_with_objects=trav_map_with_objects,
            num_waypoints=num_waypoints,
            waypoint_resolution=waypoint_resolution,
        )

        # Hand the remaining configuration off to the base Scene
        super().__init__(
            scene_file=scene_file,
            use_floor_plane=use_floor_plane,
            floor_plane_visible=floor_plane_visible,
            floor_plane_color=floor_plane_color,
        )

    @property
    def trav_map(self):
        """
        Returns:
            TraversableMap: Map for computing connectivity between nodes for this scene
        """
        return self._trav_map

    def get_random_point(self, floor=None, reference_point=None, robot=None):
        # Thin passthrough to the traversability map
        return self._trav_map.get_random_point(floor=floor, reference_point=reference_point, robot=robot)

    def get_shortest_path(self, floor, source_world, target_world, entire_path=False, robot=None):
        # Thin passthrough to the traversability map
        return self._trav_map.get_shortest_path(
            floor=floor,
            source_world=source_world,
            target_world=target_world,
            entire_path=entire_path,
            robot=robot,
        )
| 3,174 | Python | 38.19753 | 116 | 0.63012 |
StanfordVL/OmniGibson/omnigibson/scenes/interactive_traversable_scene.py | import os
from omnigibson.robots.robot_base import REGISTERED_ROBOTS
from omnigibson.robots.robot_base import m as robot_macros
from omnigibson.scenes.traversable_scene import TraversableScene
from omnigibson.maps.segmentation_map import SegmentationMap
from omnigibson.utils.asset_utils import get_og_scene_path
from omnigibson.utils.constants import STRUCTURE_CATEGORIES
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
class InteractiveTraversableScene(TraversableScene):
    """
    Create an interactive scene defined from a scene json file.
    In general, this supports curated, pre-defined scene layouts with annotated objects.
    This adds semantic support via a segmentation map generated for this specific scene.
    """

    def __init__(
        self,
        scene_model,
        scene_instance=None,
        scene_file=None,
        trav_map_resolution=0.1,
        default_erosion_radius=0.0,
        trav_map_with_objects=True,
        num_waypoints=10,
        waypoint_resolution=0.2,
        load_object_categories=None,
        not_load_object_categories=None,
        load_room_types=None,
        load_room_instances=None,
        load_task_relevant_only=False,
        seg_map_resolution=0.1,
        include_robots=True,
    ):
        """
        Args:
            scene_model (str): Scene model name, e.g.: Rs_int
            scene_instance (None or str): name of json file to load (without .json); if None,
                defaults to og_dataset/scenes/<scene_model>/json/<scene_instance>.urdf
            scene_file (None or str): If specified, full path of JSON file to load (with .json).
                This will override scene_instance and scene_model!
            trav_map_resolution (float): traversability map resolution
            default_erosion_radius (float): default map erosion radius in meters
            trav_map_with_objects (bool): whether to use objects or not when constructing graph
            num_waypoints (int): number of way points returned
            waypoint_resolution (float): resolution of adjacent way points
            load_object_categories (None or list): if specified, only load these object categories into the scene
            not_load_object_categories (None or list): if specified, do not load these object categories into the scene
            load_room_types (None or list): only load objects in these room types into the scene
            load_room_instances (None or list): if specified, only load objects in these room instances into the scene
            load_task_relevant_only (bool): Whether only task relevant objects (and building structure) should be loaded
            seg_map_resolution (float): room segmentation map resolution
            include_robots (bool): whether to also include the robot(s) defined in the scene
        """
        # Store attributes from inputs
        self.include_robots = include_robots

        # Infer scene directory
        self.scene_dir = get_og_scene_path(scene_model)

        # Other values that will be loaded at runtime
        self.load_object_categories = None
        self.not_load_object_categories = None
        self.load_room_instances = None
        self.load_task_relevant_only = load_task_relevant_only

        # Get scene information
        if scene_file is None:
            scene_file = self.get_scene_loading_info(
                scene_model=scene_model,
                scene_instance=scene_instance,
            )

        # Load room semantic and instance segmentation map (must occur AFTER inferring scene directory)
        self._seg_map = SegmentationMap(scene_dir=self.scene_dir, map_resolution=seg_map_resolution)

        # Decide which room(s) and object categories to load
        self.filter_rooms_and_object_categories(
            load_object_categories, not_load_object_categories, load_room_types, load_room_instances
        )

        # Run super init first
        super().__init__(
            scene_model=scene_model,
            scene_file=scene_file,
            trav_map_resolution=trav_map_resolution,
            default_erosion_radius=default_erosion_radius,
            trav_map_with_objects=trav_map_with_objects,
            num_waypoints=num_waypoints,
            waypoint_resolution=waypoint_resolution,
            use_floor_plane=False,
        )

    def get_scene_loading_info(self, scene_model, scene_instance=None):
        """
        Gets scene loading info to know what single USD file to load, specified indirectly via @scene_instance if it
        is specified, otherwise, will grab the "best" scene file to load.

        Args:
            scene_model (str): Name of the scene to load, e.g, Rs_int, etc.
            scene_instance (None or str): If specified, should be name of json file to load. (without .json), default to
                og_dataset/scenes/<scene_model>/json/<scene_instance>.json

        Returns:
            str: Absolute path to the desired scene file (.json) to load
        """
        # Infer scene file from model and directory
        fname = "{}_best".format(scene_model) if scene_instance is None else scene_instance
        return os.path.join(self.scene_dir, "json", "{}.json".format(fname))

    def filter_rooms_and_object_categories(
        self, load_object_categories, not_load_object_categories, load_room_types, load_room_instances
    ):
        """
        Handle partial scene loading based on object categories, room types or room instances

        Args:
            load_object_categories (None or list): if specified, only load these object categories into the scene
            not_load_object_categories (None or list): if specified, do not load these object categories into the scene
            load_room_types (None or list): only load objects in these room types into the scene
            load_room_instances (None or list): if specified, only load objects in these room instances into the scene
        """
        # Normalize bare strings into single-element lists
        self.load_object_categories = [load_object_categories] if \
            isinstance(load_object_categories, str) else load_object_categories
        self.not_load_object_categories = [not_load_object_categories] if \
            isinstance(not_load_object_categories, str) else not_load_object_categories

        if load_room_instances is not None:
            if isinstance(load_room_instances, str):
                load_room_instances = [load_room_instances]
            # Keep only room instances that actually exist in this scene's segmentation map
            load_room_instances_filtered = []
            for room_instance in load_room_instances:
                if room_instance in self._seg_map.room_ins_name_to_ins_id:
                    load_room_instances_filtered.append(room_instance)
                else:
                    log.warning("room_instance [{}] does not exist.".format(room_instance))
            self.load_room_instances = load_room_instances_filtered
        elif load_room_types is not None:
            if isinstance(load_room_types, str):
                load_room_types = [load_room_types]
            # Expand room types into all of their member room instances
            load_room_instances_filtered = []
            for room_type in load_room_types:
                if room_type in self._seg_map.room_sem_name_to_ins_name:
                    load_room_instances_filtered.extend(self._seg_map.room_sem_name_to_ins_name[room_type])
                else:
                    log.warning("room_type [{}] does not exist.".format(room_type))
            self.load_room_instances = load_room_instances_filtered
        else:
            self.load_room_instances = None

    def _load(self):
        # Run super first
        super()._load()

        # Load the traversability map if we have the connectivity graph
        maps_path = os.path.join(self.scene_dir, "layout")
        self._trav_map.load_map(maps_path)

    def _should_load_object(self, obj_info, task_metadata):
        """
        Checks whether the object described by @obj_info passes this scene's category / room /
        task-relevance / robot filters.

        Args:
            obj_info (dict): Dictionary of object kwargs that will be used to load the object
            task_metadata (dict): Task metadata from the scene file; "inst_to_name" (if present)
                maps task instances to object names considered task-relevant

        Returns:
            bool: Whether this object should be loaded or not
        """
        name = obj_info["args"]["name"]
        category = obj_info["args"].get("category", "object")
        in_rooms = obj_info["args"].get("in_rooms", None)
        # Normalize a bare room string into a single-element list (FIX: removed redundant
        # duplicate isinstance check)
        if isinstance(in_rooms, str):
            assert "," not in in_rooms
            in_rooms = [in_rooms]

        # Do not load these object categories (can blacklist building structures as well)
        not_blacklisted = self.not_load_object_categories is None or category not in self.not_load_object_categories

        # Only load these object categories (no need to white list building structures)
        task_relevant_names = set(task_metadata["inst_to_name"].values()) if "inst_to_name" in task_metadata else set()
        is_task_relevant = name in task_relevant_names or category in STRUCTURE_CATEGORIES
        whitelisted = (
            # Either no whitelisting-only mode is on
            (self.load_object_categories is None and not self.load_task_relevant_only) or
            # Or the object is in the whitelist
            (self.load_object_categories is not None and category in self.load_object_categories) or
            # Or it's in the task relevant list
            (self.load_task_relevant_only and is_task_relevant)
        )

        # This object is not located in one of the selected rooms, skip
        # ROBUSTNESS FIX: in_rooms may be None (no room annotation); previously this raised
        # TypeError when room filtering was active. Such objects are now filtered out instead.
        valid_room = self.load_room_instances is None or (
            in_rooms is not None and len(set(self.load_room_instances) & set(in_rooms)) > 0
        )

        # Check whether this is an agent and we allow agents
        agent_ok = self.include_robots or obj_info["class_name"] not in REGISTERED_ROBOTS

        # We only load this model if all the above conditions are met
        return not_blacklisted and whitelisted and valid_room and agent_ok

    @property
    def seg_map(self):
        """
        Returns:
            SegmentationMap: Map for segmenting this scene
        """
        return self._seg_map

    @classmethod
    def modify_init_info_for_restoring(cls, init_info):
        # Run super first
        super().modify_init_info_for_restoring(init_info=init_info)

        # We also make sure we load in any robots, and also pop any filters that were stored
        init_info["args"]["include_robots"] = True
        init_info["args"]["load_object_categories"] = None
        init_info["args"]["not_load_object_categories"] = None
        init_info["args"]["load_room_types"] = None
        init_info["args"]["load_room_instances"] = None
| 10,344 | Python | 46.893518 | 120 | 0.647235 |
StanfordVL/OmniGibson/omnigibson/scenes/scene_base.py | import json
from abc import ABC
from itertools import combinations
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros, gm
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.prims.material_prim import MaterialPrim
from omnigibson.utils.constants import STRUCTURE_CATEGORIES
from omnigibson.utils.python_utils import classproperty, Serializable, Registerable, Recreatable, \
create_object_from_init_info
from omnigibson.utils.registry_utils import SerializableRegistry
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.usd_utils import CollisionAPI
from omnigibson.objects.object_base import BaseObject
from omnigibson.objects.dataset_object import DatasetObject
from omnigibson.systems.system_base import SYSTEM_REGISTRY, clear_all_systems, get_system
from omnigibson.objects.light_object import LightObject
from omnigibson.robots.robot_base import m as robot_macros
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Default texture to use for skybox
m.DEFAULT_SKYBOX_TEXTURE = f"{gm.ASSET_PATH}/models/background/sky.jpg"
# Global dicts that will contain mappings
REGISTERED_SCENES = dict()
class Scene(Serializable, Registerable, Recreatable, ABC):
"""
Base class for all Scene objects.
Contains the base functionalities for an arbitrary scene with an arbitrary set of added objects
"""
def __init__(
    self,
    scene_file=None,
    use_floor_plane=True,
    floor_plane_visible=True,
    use_skybox=True,
    floor_plane_color=(1.0, 1.0, 1.0),
):
    """
    Args:
        scene_file (None or str): If specified, full path of JSON file to load (with .json).
            None results in no additional objects being loaded into the scene
        use_floor_plane (bool): whether to load a flat floor plane into the simulator
        floor_plane_visible (bool): whether to render the additionally added floor plane
        use_skybox (bool): whether to load a dome-light skybox into the simulator during _load()
        floor_plane_color (3-array): if @floor_plane_visible is True, this determines the (R,G,B) color assigned
            to the generated floor plane
    """
    # Store internal variables
    self.scene_file = scene_file
    self._loaded = False  # Whether this scene exists in the stage or not
    self._initialized = False  # Whether this scene has its internal handles / info initialized or not (occurs AFTER and INDEPENDENTLY from loading!)
    self._registry = None
    self._world_prim = None
    self._initial_state = None
    self._objects_info = None  # Information associated with this scene
    self._use_floor_plane = use_floor_plane
    self._floor_plane_visible = floor_plane_visible
    self._floor_plane_color = floor_plane_color
    self._floor_plane = None
    self._use_skybox = use_skybox
    self._skybox = None

    # Call super init
    super().__init__()
@property
def registry(self):
    """
    Returns:
        SerializableRegistry: Master registry containing sub-registries of objects, robots, systems, etc.
    """
    return self._registry
@property
def skybox(self):
    """
    Returns:
        None or LightObject: Skybox light associated with this scene, if it is used
            (None until _load() runs with use_skybox=True)
    """
    return self._skybox
@property
def floor_plane(self):
    """
    Returns:
        None or XFormPrim: Generated floor plane prim, if it is used
    """
    return self._floor_plane
@property
def object_registry(self):
    """
    Returns:
        SerializableRegistry: Object registry containing all active standalone objects in the scene
    """
    # Sub-registry is looked up by name within the master registry
    return self._registry(key="name", value="object_registry")
@property
def system_registry(self):
    """
    Returns:
        SerializableRegistry: System registry containing all systems in the scene (e.g.: water, dust, etc.)
    """
    # Sub-registry is looked up by name within the master registry
    return self._registry(key="name", value="system_registry")
@property
def objects(self):
    """
    Get the objects in the scene.

    Returns:
        list of BaseObject: Standalone object(s) that are currently in this scene
    """
    return self.object_registry.objects
@property
def robots(self):
    """
    Robots in the scene

    Returns:
        list of BaseRobot: Robot(s) that are currently in this scene
    """
    # Robots are objects registered under the dedicated robot category
    return list(self.object_registry("category", robot_macros.ROBOT_CATEGORY, []))
@property
def systems(self):
    """
    Systems in the scene

    Returns:
        list of BaseSystem: System(s) that are available to use in this scene
    """
    return self.system_registry.objects
@property
def object_registry_unique_keys(self):
    """
    Returns:
        list of str: Keys with which to index into the object registry. These should be valid public attributes of
            prims that we can use as unique IDs to reference prims, e.g., prim.prim_path, prim.name, etc.
    """
    return ["name", "prim_path", "uuid"]
@property
def object_registry_group_keys(self):
    """
    Returns:
        list of str: Keys with which to index into the object registry. These should be valid public attributes of
            prims that we can use as grouping IDs to reference prims, e.g., prim.in_rooms
    """
    return ["prim_type", "states", "category", "fixed_base", "in_rooms", "abilities"]
@property
def loaded(self):
    """
    Returns:
        bool: Whether this scene has been loaded into the stage (set True at the end of load())
    """
    return self._loaded
@property
def initialized(self):
    """
    Returns:
        bool: Whether this scene's internal handles / info have been initialized
            (set True at the end of initialize(); independent of loading)
    """
    return self._initialized
def _load(self):
    """
    Load the scene into simulator
    The elements to load may include: floor, building, objects, etc.

    Sets up global collision groups / filters, then optionally adds the ground plane
    and a dome-light skybox. Subclasses extend this with scene-specific loading.
    """
    # Create collision group for fixed base objects' non root links, root links, and building structures
    CollisionAPI.create_collision_group(col_group="fixed_base_nonroot_links", filter_self_collisions=False)
    # Disable collision between root links of fixed base objects
    CollisionAPI.create_collision_group(col_group="fixed_base_root_links", filter_self_collisions=True)
    # Disable collision between building structures
    CollisionAPI.create_collision_group(col_group="structures", filter_self_collisions=True)

    # Disable collision between building structures and 1. fixed base objects, 2. attached objects
    CollisionAPI.add_group_filter(col_group="structures", filter_group="fixed_base_nonroot_links")
    CollisionAPI.add_group_filter(col_group="structures", filter_group="fixed_base_root_links")

    # We just add a ground plane if requested
    if self._use_floor_plane:
        self.add_ground_plane(color=self._floor_plane_color, visible=self._floor_plane_visible)

    # Also add skybox if requested
    if self._use_skybox:
        self._skybox = LightObject(
            prim_path="/World/skybox",
            name="skybox",
            category="background",
            light_type="Dome",
            intensity=1500,
            fixed_base=True,
        )
        # Not registered: the skybox is infrastructure, not a scene object
        og.sim.import_object(self._skybox, register=False)
        self._skybox.color = (1.07, 0.85, 0.61)
        self._skybox.texture_file_path = m.DEFAULT_SKYBOX_TEXTURE
def _load_objects_from_scene_file(self):
    """
    Loads scene objects based on metadata information found in the current USD stage's scene info
    (information stored in the world prim's CustomData)
    """
    # Grab objects info from the scene file
    with open(self.scene_file, "r") as f:
        scene_info = json.load(f)
    init_info = scene_info["objects_info"]["init_info"]
    init_state = scene_info["state"]["object_registry"]
    init_systems = scene_info["state"]["system_registry"].keys()
    # FIX: replaced a bare try/except-pass (which silently swallowed every error type) with
    # an explicit .get() chain; missing "metadata" or "task" keys simply yield an empty dict
    task_metadata = scene_info.get("metadata", dict()).get("task", dict())

    # Create desired systems
    for system_name in init_systems:
        if gm.USE_GPU_DYNAMICS:
            get_system(system_name)
        else:
            log.warning(f"System {system_name} is not supported without GPU dynamics! Skipping...")

    # Iterate over all scene info, and instantiate object classes linked to the objects found on the stage
    # accordingly
    for obj_name, obj_info in init_info.items():
        # Check whether we should load the object or not
        if not self._should_load_object(obj_info=obj_info, task_metadata=task_metadata):
            continue
        # Create object class instance
        obj = create_object_from_init_info(obj_info)
        # Import into the simulator
        og.sim.import_object(obj)
        # Set the init pose accordingly
        obj.set_position_orientation(
            position=init_state[obj_name]["root_link"]["pos"],
            orientation=init_state[obj_name]["root_link"]["ori"],
        )
def _load_metadata_from_scene_file(self):
    """
    Loads metadata from self.scene_file and stores it within the world prim's CustomData
    """
    with open(self.scene_file, "r") as fp:
        metadata = json.load(fp).get("metadata", dict())

    # Mirror every metadata entry into the simulator's world-prim custom data
    for key, data in metadata.items():
        og.sim.write_metadata(key=key, data=data)
def _should_load_object(self, obj_info, task_metadata):
    """
    Helper function to check whether we should load an object given its init_info. Useful for potentially filtering
    objects based on, e.g., their category, size, etc.

    Subclasses can implement additional logic. By default, this returns True

    Args:
        obj_info (dict): Dictionary of object kwargs that will be used to load the object
        task_metadata (dict): Task metadata loaded from the scene file; unused by this default
            implementation but available to subclass overrides

    Returns:
        bool: Whether this object should be loaded or not
    """
    return True
def load(self):
    """
    Load the scene into simulator
    The elements to load may include: floor, building, objects, etc.

    Raises:
        ValueError: if this scene has already been loaded
    """
    # Make sure simulator is stopped
    assert og.sim.is_stopped(), "Simulator should be stopped when loading this scene!"

    # Do not override this function. Override _load instead.
    if self._loaded:
        raise ValueError("This scene is already loaded.")

    # Create the registry for tracking all objects in the scene
    self._registry = self._create_registry()

    # Store world prim and load the scene into the simulator
    self._world_prim = og.sim.world_prim
    self._load()

    # If we have any scene file specified, use it to load the objects within it and also update the initial state
    # and metadata
    if self.scene_file is not None:
        self._load_objects_from_scene_file()
        self._load_metadata_from_scene_file()

    # We're now loaded
    self._loaded = True

    # Always stop the sim if we started it internally
    if not og.sim.is_stopped():
        og.sim.stop()
def clear(self):
    """
    Clears any internal state before the scene is destroyed
    """
    # Clears systems so they can be re-initialized
    clear_all_systems()
def _initialize(self):
    """
    Initializes state of this scene and sets up any references necessary post-loading. Should be implemented by
    sub-class for extended utility
    """
    # No-op by default; subclass hook
    pass
def initialize(self):
    """
    Initializes state of this scene and sets up any references necessary post-loading. Subclasses should
    implement / extend the _initialize() method.

    Captures (or loads from self.scene_file) the scene's initial state, which reset() later restores.
    """
    assert not self._initialized, "Scene can only be initialized once! (It is already initialized)"
    self._initialize()

    # Grab relevant objects info
    self.update_objects_info()
    self.wake_scene_objects()

    self._initialized = True

    # Store initial state, which may be loaded from a scene file if specified
    if self.scene_file is None:
        init_state = self.dump_state(serialized=False)
    else:
        with open(self.scene_file, "r") as f:
            scene_info = json.load(f)
        init_state = scene_info["state"]
        # Push the file's state into the sim so the live scene matches the stored snapshot
        og.sim.load_state(init_state, serialized=False)

    self._initial_state = init_state
def _create_registry(self):
    """
    Creates the internal registry used for tracking all objects

    Returns:
        SerializableRegistry: registry for tracking all objects
    """
    # Create meta registry and populate with internal registries for robots, objects, and systems
    registry = SerializableRegistry(
        name="master_registry",
        class_types=SerializableRegistry,
    )

    # Add registry for systems -- this is already created externally, so we just update it and pull it directly
    registry.add(obj=SYSTEM_REGISTRY)

    # Add registry for objects
    registry.add(obj=SerializableRegistry(
        name="object_registry",
        class_types=BaseObject,
        default_key="name",
        unique_keys=self.object_registry_unique_keys,
        group_keys=self.object_registry_group_keys,
    ))

    return registry
def wake_scene_objects(self):
    """Force-wake every object tracked by this scene so physics stops treating them as asleep."""
    for scene_obj in self.objects:
        scene_obj.wake()
def get_objects_with_state(self, state):
    """
    Get the objects with a given state in the scene. Matches @state exactly; see
    get_objects_with_state_recursive() to also include subclasses of @state.

    Args:
        state (BaseObjectState): state of the objects to get

    Returns:
        set: all objects with the given state
    """
    return self.object_registry("states", state, set())
def get_objects_with_state_recursive(self, state):
    """
    Get the objects with a given state and its subclasses in the scene.

    Args:
        state (BaseObjectState): state of the objects to get

    Returns:
        set: all objects with the given state and its subclasses
    """
    # Breadth-first walk over @state's subclass tree, collecting matches level by level
    matches = set()
    frontier = [state]
    while frontier:
        next_frontier = []
        for current in frontier:
            matches |= self.object_registry("states", current, set())
            next_frontier.extend(current.__subclasses__())
        frontier = next_frontier
    return matches
def _add_object(self, obj):
    """
    Add an object to the scene's internal object tracking mechanisms.

    Note that if the scene is not loaded, it should load this added object alongside its other objects when
    scene.load() is called. The object should also be accessible through scene.objects.

    Args:
        obj (BaseObject): the object to load into the simulator
    """
    # No-op by default; subclass hook for scene-specific bookkeeping
    pass
def add_object(self, obj, register=True, _is_call_from_simulator=False):
    """
    Add an object to the scene, loading it if the scene is already loaded.

    Note that calling add_object to an already loaded scene should only be done by the simulator's import_object()
    function.

    Args:
        obj (BaseObject): the object to load
        register (bool): whether to track this object internally in the scene registry
        _is_call_from_simulator (bool): whether the caller is the simulator. This should
            **not** be set by any callers that are not the Simulator class

    Returns:
        Usd.Prim: the prim of the loaded object (obj.load() is always invoked here)
    """
    # Make sure the simulator is the one calling this function
    assert _is_call_from_simulator, "Use import_object() for adding objects to a simulator and scene!"

    # If the scene is already loaded, we need to load this object separately. Otherwise, don't do anything now,
    # let scene._load() load the object when called later on.
    prim = obj.load()

    # If this object is fixed and is NOT an agent, disable collisions between the fixed links of the fixed objects
    # This is to account for cases such as Tiago, which has a fixed base which is needed for its global base joints
    # We do this by adding the object to our tracked collision groups
    if obj.fixed_base and obj.category != robot_macros.ROBOT_CATEGORY and not obj.visual_only:
        # TODO: Remove structure hotfix once asset collision meshes are fixed!!
        if obj.category in STRUCTURE_CATEGORIES:
            CollisionAPI.add_to_collision_group(col_group="structures", prim_path=obj.prim_path)
        else:
            for link in obj.links.values():
                CollisionAPI.add_to_collision_group(
                    col_group="fixed_base_root_links" if link == obj.root_link else "fixed_base_nonroot_links",
                    prim_path=link.prim_path,
                )

    # Add this object to our registry based on its type, if we want to register it
    if register:
        self.object_registry.add(obj)

    # Run any additional scene-specific logic with the created object
    self._add_object(obj)

    return prim
def remove_object(self, obj):
    """
    Method to remove an object from the simulator

    Args:
        obj (BaseObject): Object to remove
    """
    # Remove from the appropriate registry if registered.
    # Sometimes we don't register objects to the object registry during import_object (e.g. particle templates)
    if self.object_registry.object_is_registered(obj):
        self.object_registry.remove(obj)

    # Remove from omni stage
    obj.remove()
def reset(self):
    """
    Resets this scene to the initial state captured (or loaded) during initialize()
    """
    # Make sure the simulator is playing
    assert og.sim.is_playing(), "Simulator must be playing in order to reset the scene!"

    # Reset the states of all objects (including robots), including (non-)kinematic states and internal variables.
    assert self._initial_state is not None
    self.load_state(self._initial_state)
    # Step physics once so the restored state propagates through the simulation
    og.sim.step_physics()
@property
def n_floors(self):
    """
    Returns:
        int: Number of floors in this scene
    """
    # Default is a single floor
    return 1
@property
def n_objects(self):
    """
    Returns:
        int: number of objects currently tracked in this scene
    """
    return len(self.objects)
@property
def fixed_objects(self):
    """
    Returns:
        dict: Keyword-mapped objects that are fixed in the scene, IGNORING any robots.
            Maps object name to their object class instances (DatasetObject)
    """
    return {obj.name: obj for obj in self.object_registry("fixed_base", True, default_val=[]) if obj.category != robot_macros.ROBOT_CATEGORY}
def get_random_floor(self):
    """
    Sample a random floor among all existing floor_heights in the scene.
    Most scenes in OmniGibson only have a single floor.

    Uses numpy's global RNG, so results follow any np.random.seed() set elsewhere.

    Returns:
        int: an integer between 0 and self.n_floors-1
    """
    return np.random.randint(0, self.n_floors)
def get_random_point(self, floor=None, reference_point=None, robot=None):
    """
    Sample a random point on the given floor number. If not given, sample a random floor number.
    If @reference_point is given, sample a point in the same connected component as the previous point.

    Args:
        floor (None or int): floor number. None means the floor is randomly sampled
            Warning: if @reference_point is given, @floor must be given;
            otherwise, this would lead to undefined behavior
        reference_point (3-array): (x,y,z) if given, sample a point in the same connected component as this point
        robot (None or BaseRobot): passed through to subclass implementations;
            presumably used to constrain sampling to robot-reachable space -- see overrides

    Returns:
        2-tuple:
            - int: floor number. This is the sampled floor number if @floor is None
            - 3-array: (x,y,z) randomly sampled point

    Raises:
        NotImplementedError: always; subclasses must override
    """
    raise NotImplementedError()
    def get_shortest_path(self, floor, source_world, target_world, entire_path=False, robot=None):
        """
        Get the shortest path from one point to another point.

        Args:
            floor (int): floor number
            source_world (2-array): (x,y) 2D source location in world reference frame (metric)
            target_world (2-array): (x,y) 2D target location in world reference frame (metric)
            entire_path (bool): whether to return the entire path
            robot (None or BaseRobot): if given, erode the traversability map to account for the robot's size

        Returns:
            2-tuple:
                - (N, 2) array: array of path waypoints, where N is the number of generated waypoints
                - float: geodesic distance of the path

        Raises:
            NotImplementedError: Always; this base implementation must be overridden by subclasses
        """
        raise NotImplementedError()
    def get_floor_height(self, floor=0):
        """
        Get the height of the given floor. Default is 0.0, since we only have a single floor

        Args:
            floor: an integer identifying the floor

        Returns:
            float: height of the given floor (this base implementation always returns 0.0)
        """
        return 0.0
    def add_ground_plane(
        self,
        size=None,
        z_position: float = 0,
        name="ground_plane",
        prim_path: str = "/World/groundPlane",
        static_friction: float = 0.5,
        dynamic_friction: float = 0.5,
        restitution: float = 0.8,
        color=None,
        visible=True,
    ):
        """
        Generate a ground plane into the simulator

        Args:
            size (None or float): If specified, sets the (x,y) size of the generated plane
            z_position (float): Z position of the generated plane
            name (str): Name to assign to the generated plane
            prim_path (str): Prim path for the generated plane
            static_friction (float): Static friction of the generated plane.
                NOTE: currently unused pending the new PhysicsMaterial API (see TODO below)
            dynamic_friction (float): Dynamics friction of the generated plane.
                NOTE: currently unused pending the new PhysicsMaterial API (see TODO below)
            restitution (float): Restitution of the generated plane.
                NOTE: currently unused pending the new PhysicsMaterial API (see TODO below)
            color (None or 3-array): If specified, sets the (R,G,B) color of the generated plane
            visible (bool): Whether the plane should be visible or not
        """
        plane = lazy.omni.isaac.core.objects.ground_plane.GroundPlane(
            prim_path=prim_path,
            name=name,
            z_position=z_position,
            size=size,
            color=None if color is None else np.array(color),
            visible=visible,
            # TODO: update with new PhysicsMaterial API
            # static_friction=static_friction,
            # dynamic_friction=dynamic_friction,
            # restitution=restitution,
        )
        # Wrap the generated plane prim in an XFormPrim handle so the scene can reference it later
        self._floor_plane = XFormPrim(
            prim_path=plane.prim_path,
            name=plane.name,
        )
        # Assign floors category to the floor plane so it is labeled in semantic observations
        lazy.omni.isaac.core.utils.semantics.add_update_semantics(
            prim=self._floor_plane.prim,
            semantic_label="floors",
            type_label="class",
        )
def update_initial_state(self, state=None):
"""
Updates the initial state for this scene (which the scene will get reset to upon calling reset())
Args:
state (None or dict): If specified, the state to set internally. Otherwise, will set the initial state to
be the current state
"""
self._initial_state = self.dump_state(serialized=False) if state is None else state
def update_objects_info(self):
"""
Updates the scene-relevant information and saves it to the active USD. Useful for reloading a scene directly
from a saved USD in this format.
"""
# Save relevant information
# Iterate over all objects and save their init info
init_info = {obj.name: obj.get_init_info() for obj in self.object_registry.objects}
# Compose as single dictionary and store internally
self._objects_info = dict(init_info=init_info)
    def get_objects_info(self):
        """
        Stored information, if any, for this scene. Structure is:

            "init_info":
                "<obj0>": <obj0> init kw/args
                ...
                "<robot0>": <robot0> init kw/args
                ...

        Returns:
            None or dict: If it exists, nested dictionary of relevant objects' information
                (populated by update_objects_info())
        """
        return self._objects_info
    @property
    def state_size(self):
        # Total state size is the state size of our registry,
        # since the registry owns all stateful objects in the scene
        return self._registry.state_size
    def _dump_state(self):
        # Default (non-serialized) state for the scene is from the registry alone
        return self._registry.dump_state(serialized=False)
    def _load_state(self, state):
        # Default state for the scene is from the registry alone; delegate restoring to it
        self._registry.load_state(state=state, serialized=False)
    def _serialize(self, state):
        # Default state for the scene is from the registry alone; delegate serialization to it
        return self._registry.serialize(state=state)
    def _deserialize(self, state):
        # Default state for the scene is from the registry alone
        # We split this into two explicit steps, because the actual registry state size might dynamically change
        # as we're deserializing
        state_dict = self._registry.deserialize(state=state)
        # Return the deserialized dict along with the registry's (possibly updated) state size
        return state_dict, self._registry.state_size
    @classproperty
    def _cls_registry(cls):
        # Global registry shared by all scene classes
        global REGISTERED_SCENES
        return REGISTERED_SCENES
    @classmethod
    def modify_init_info_for_restoring(cls, init_info):
        """
        Helper function to modify a given init info for restoring a scene from corresponding scene info.
        Note that this function modifies IN-PLACE!

        Args:
            init_info (dict): Information for this scene from @self.get_init_info()
        """
        # Default is pass (no modification needed); subclasses may override
        pass
| 26,770 | Python | 36.652602 | 166 | 0.615241 |
StanfordVL/OmniGibson/omnigibson/examples/scenes/scene_selector.py | import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils.asset_utils import get_available_g_scenes, get_available_og_scenes
from omnigibson.utils.ui_utils import choose_from_options
# Configure macros for maximum performance:
# GPU dynamics + flatcache enabled; object states and transition rules disabled
# since this demo only steps random navigation actions
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_FLATCACHE = True
gm.ENABLE_OBJECT_STATES = False
gm.ENABLE_TRANSITION_RULES = False
def main(random_selection=False, headless=False, short_exec=False):
    """
    Prompts the user to select any available interactive scene and loads a turtlebot into it.
    It steps the environment 100 times with random actions sampled from the action space,
    using the Gym interface, resetting it 10 times.

    Args:
        random_selection (bool): If True, menu choices are sampled randomly instead of prompting the user
        headless (bool): Unused here; the code checks gm.HEADLESS instead
        short_exec (bool): If True, only run a single reset/rollout iteration
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Choose the scene type to load
    scene_options = {
        "InteractiveTraversableScene": "Procedurally generated scene with fully interactive objects",
        # "StaticTraversableScene": "Monolithic scene mesh with no interactive objects",
    }
    scene_type = choose_from_options(options=scene_options, name="scene type", random_selection=random_selection)
    # Choose the scene model to load
    scenes = get_available_og_scenes() if scene_type == "InteractiveTraversableScene" else get_available_g_scenes()
    scene_model = choose_from_options(options=scenes, name="scene model", random_selection=random_selection)
    cfg = {
        "scene": {
            "type": scene_type,
            "scene_model": scene_model,
        },
        "robots": [
            {
                "type": "Turtlebot",
                "obs_modalities": ["scan", "rgb", "depth"],
                "action_type": "continuous",
                "action_normalize": True,
            },
        ],
    }
    # If the scene type is interactive, also check if we want to quick load or full load the scene
    if scene_type == "InteractiveTraversableScene":
        load_options = {
            "Quick": "Only load the building assets (i.e.: the floors, walls, ceilings)",
            "Full": "Load all interactive objects in the scene",
        }
        load_mode = choose_from_options(options=load_options, name="load mode", random_selection=random_selection)
        if load_mode == "Quick":
            cfg["scene"]["load_object_categories"] = ["floors", "walls", "ceilings"]
    # Load the environment
    env = og.Environment(configs=cfg)
    # Allow user to move camera more easily
    if not gm.HEADLESS:
        og.sim.enable_viewer_camera_teleoperation()
    # Run a simple loop and reset periodically
    max_iterations = 10 if not short_exec else 1
    for j in range(max_iterations):
        og.log.info("Resetting environment")
        env.reset()
        for i in range(100):
            # Sample a random action from the Gym action space and step the environment
            action = env.action_space.sample()
            state, reward, done, info = env.step(action)
            if done:
                og.log.info("Episode finished after {} timesteps".format(i + 1))
                break
    # Always close the environment at the end
    env.close()
if __name__ == "__main__":
main()
| 3,113 | Python | 36.975609 | 115 | 0.633151 |
StanfordVL/OmniGibson/omnigibson/examples/scenes/scene_tour_demo.py | import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm
from omnigibson.utils.asset_utils import get_available_g_scenes, get_available_og_scenes
from omnigibson.utils.ui_utils import choose_from_options, KeyboardEventHandler
def main(random_selection=False, headless=False, short_exec=False):
    """
    Prompts the user to select any available interactive scene and loads it.
    It sets the camera to various poses and records images, and then generates a trajectory from a set of waypoints
    and records the resulting video.

    Args:
        random_selection (bool): If True, menu choices are sampled randomly instead of prompting the user
        headless (bool): Unused here; the code checks gm.HEADLESS instead and refuses to run headless
        short_exec (bool): Unused here; accepted for a consistent example entrypoint signature
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Make sure the example is not being run headless. If so, terminate early
    if gm.HEADLESS:
        print("This demo should only be run not headless! Exiting early.")
        og.shutdown()
    # Choose the scene type to load
    scene_options = {
        "InteractiveTraversableScene": "Procedurally generated scene with fully interactive objects",
        # "StaticTraversableScene": "Monolithic scene mesh with no interactive objects",
    }
    scene_type = choose_from_options(options=scene_options, name="scene type", random_selection=random_selection)
    # Choose the scene model to load
    scenes = get_available_og_scenes() if scene_type == "InteractiveTraversableScene" else get_available_g_scenes()
    scene_model = choose_from_options(options=scenes, name="scene model", random_selection=random_selection)
    print(f"scene model: {scene_model}")
    cfg = {
        "scene": {
            "type": scene_type,
            "scene_model": scene_model,
        },
    }
    # If the scene type is interactive, also check if we want to quick load or full load the scene
    if scene_type == "InteractiveTraversableScene":
        load_options = {
            "Quick": "Only load the building assets (i.e.: the floors, walls, ceilings)",
            "Full": "Load all interactive objects in the scene",
        }
        load_mode = choose_from_options(options=load_options, name="load mode", random_selection=random_selection)
        if load_mode == "Quick":
            cfg["scene"]["load_object_categories"] = ["floors", "walls", "ceilings"]
    # Load the environment
    env = og.Environment(configs=cfg)
    # Allow user to teleoperate the camera
    cam_mover = og.sim.enable_viewer_camera_teleoperation()
    # Create a keyboard event handler for generating waypoints
    waypoints = []
    def add_waypoint():
        # Record the current camera position as a new trajectory waypoint
        nonlocal waypoints
        pos = cam_mover.cam.get_position()
        print(f"Added waypoint at {pos}")
        waypoints.append(pos)
    def clear_waypoints():
        # Discard all recorded waypoints
        nonlocal waypoints
        print("Cleared all waypoints!")
        waypoints = []
    KeyboardEventHandler.initialize()
    KeyboardEventHandler.add_keyboard_callback(
        key=lazy.carb.input.KeyboardInput.X,
        callback_fn=add_waypoint,
    )
    KeyboardEventHandler.add_keyboard_callback(
        key=lazy.carb.input.KeyboardInput.C,
        callback_fn=clear_waypoints,
    )
    KeyboardEventHandler.add_keyboard_callback(
        key=lazy.carb.input.KeyboardInput.J,
        callback_fn=lambda: cam_mover.record_trajectory_from_waypoints(
            waypoints=np.array(waypoints),
            per_step_distance=0.02,
            fps=30,
            steps_per_frame=1,
            fpath=None,  # This corresponds to the default path inferred from cam_mover.save_dir
        ),
    )
    KeyboardEventHandler.add_keyboard_callback(
        key=lazy.carb.input.KeyboardInput.ESCAPE,
        callback_fn=lambda: env.close(),
    )
    # Print out additional keyboard commands
    # (plain string literals here -- the originals were f-strings without placeholders)
    print("\t X: Save the current camera pose as a waypoint")
    print("\t C: Clear all waypoints")
    print("\t J: Record the camera trajectory from the current set of waypoints")
    print("\t ESC: Terminate the demo")
    # Loop indefinitely
    while True:
        env.step([])
if __name__ == "__main__":
main()
| 4,049 | Python | 36.5 | 115 | 0.659669 |
StanfordVL/OmniGibson/omnigibson/examples/scenes/traversability_map_example.py | import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import omnigibson as og
from omnigibson.utils.asset_utils import get_og_scene_path, get_available_og_scenes
from omnigibson.utils.ui_utils import choose_from_options
def main(random_selection=False, headless=False, short_exec=False):
    """
    Traversable map demo
    Loads the floor plan and obstacles for the requested scene, and overlays them in a visual figure such that the
    highlighted area reflects the traversable (free-space) area

    Args:
        random_selection (bool): If True, the scene model is sampled randomly instead of prompting the user
        headless (bool): If True, skip all matplotlib visualization
        short_exec (bool): Unused here; accepted for a consistent example entrypoint signature
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    scenes = get_available_og_scenes()
    scene_model = choose_from_options(options=scenes, name="scene model", random_selection=random_selection)
    print(f"Generating traversability map for scene {scene_model}")
    trav_map_size = 200
    trav_map_erosion = 2
    # Load the raw traversability image, downsample it to a square map, then erode the free space
    # by the erosion kernel for a conservative traversability estimate
    trav_map = Image.open(os.path.join(get_og_scene_path(scene_model), "layout", "floor_trav_0.png"))
    trav_map = np.array(trav_map.resize((trav_map_size, trav_map_size)))
    trav_map = cv2.erode(trav_map, np.ones((trav_map_erosion, trav_map_erosion)))
    # Visualize the resulting map (the two identical consecutive `if not headless:` checks were merged)
    if not headless:
        plt.figure(figsize=(12, 12))
        plt.imshow(trav_map)
        plt.title(f"Traversable area of {scene_model} scene")
        plt.show()
    # Shut down omnigibson at the end
    og.shutdown()
if __name__ == "__main__":
main()
| 1,469 | Python | 30.956521 | 114 | 0.676651 |
StanfordVL/OmniGibson/omnigibson/examples/learning/navigation_policy_demo.py | """
Example training code using stable-baselines3 PPO for one BEHAVIOR activity.
Note that due to the sparsity of the reward, this training code will not converge and achieve task success.
This only serves as a starting point that users can further build upon.
"""
import argparse
import os, time, cv2
import yaml
import omnigibson as og
from omnigibson import example_config_path
from omnigibson.macros import gm
from omnigibson.utils.python_utils import meets_minimum_version
try:
import gym
import torch as th
import torch.nn as nn
import tensorboard
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.preprocessing import maybe_transpose
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor
from stable_baselines3.common.utils import set_random_seed
from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, EvalCallback
except ModuleNotFoundError:
og.log.error("torch, stable-baselines3, or tensorboard is not installed. "
"See which packages are missing, and then run the following for any missing packages:\n"
"pip install stable-baselines3[extra]\n"
"pip install tensorboard\n"
"pip install shimmy>=0.2.1\n"
"Also, please update gym to >=0.26.1 after installing sb3: pip install gym>=0.26.1")
exit(1)
# Enforce the minimum gym version this example was written against
assert meets_minimum_version(gym.__version__, "0.26.1"), "Please install/update gym to version >= 0.26.1"
# We don't need object states nor transitions rules, so we disable them now, and also enable flatcache for maximum speed
gm.ENABLE_OBJECT_STATES = False
gm.ENABLE_TRANSITION_RULES = False
gm.ENABLE_FLATCACHE = True
class CustomCombinedExtractor(BaseFeaturesExtractor):
    """
    stable-baselines3 feature extractor that encodes each RGB observation key with a small CNN + FC head
    and concatenates the per-key feature vectors into a single flat feature tensor.
    Non-RGB observation keys are ignored.
    """
    def __init__(self, observation_space: gym.spaces.Dict):
        # We do not know features-dim here before going over all the items,
        # so put something dummy for now; PyTorch requires calling super().__init__
        # before any submodules are registered. The real dim is set at the end.
        super().__init__(observation_space, features_dim=1)
        extractors = {}
        # NOTE(review): step_index and img_save_dir look like debugging leftovers -- step_index is
        # incremented in forward() but never read here, and img_save_dir is created but never written to
        self.step_index = 0
        self.img_save_dir = 'img_save_dir'
        os.makedirs(self.img_save_dir, exist_ok=True)
        total_concat_size = 0
        feature_size = 128
        for key, subspace in observation_space.spaces.items():
            # For now, only keep RGB observations
            if "rgb" in key:
                og.log.info(f"obs {key} shape: {subspace.shape}")
                n_input_channels = subspace.shape[0]  # channel first
                cnn = nn.Sequential(
                    nn.Conv2d(n_input_channels, 4, kernel_size=8, stride=4, padding=0),
                    nn.ReLU(),
                    nn.MaxPool2d(2),
                    nn.Conv2d(4, 8, kernel_size=4, stride=2, padding=0),
                    nn.ReLU(),
                    nn.MaxPool2d(2),
                    nn.Conv2d(8, 4, kernel_size=3, stride=1, padding=0),
                    nn.ReLU(),
                    nn.Flatten(),
                )
                # Run a dummy zero tensor through the CNN once to infer the flattened output size
                test_tensor = th.zeros(subspace.shape)
                with th.no_grad():
                    n_flatten = cnn(test_tensor[None]).shape[1]
                fc = nn.Sequential(nn.Linear(n_flatten, feature_size), nn.ReLU())
                extractors[key] = nn.Sequential(cnn, fc)
                total_concat_size += feature_size
        self.extractors = nn.ModuleDict(extractors)
        # Update the features dim manually
        self._features_dim = total_concat_size
    def forward(self, observations) -> th.Tensor:
        encoded_tensor_list = []
        self.step_index += 1
        # self.extractors contain nn.Modules that do all the processing.
        for key, extractor in self.extractors.items():
            encoded_tensor_list.append(extractor(observations[key]))
        # Concatenate per-key features along the feature dimension
        feature = th.cat(encoded_tensor_list, dim=1)
        return feature
def main():
    """
    Parse CLI args, build the turtlebot navigation environment, then either train a PPO policy
    from scratch or evaluate an existing checkpoint (--eval with --checkpoint).
    """
    # Parse args
    parser = argparse.ArgumentParser(description="Train or evaluate a PPO agent in BEHAVIOR")
    parser.add_argument(
        "--checkpoint",
        type=str,
        default=None,
        help="Absolute path to desired PPO checkpoint to load for evaluation",
    )
    parser.add_argument(
        "--eval",
        action="store_true",
        help="If set, will evaluate the PPO agent found from --checkpoint",
    )
    args = parser.parse_args()
    # Timestamped log directory so repeated runs don't clobber each other
    tensorboard_log_dir = os.path.join("log_dir", time.strftime("%Y%m%d-%H%M%S"))
    os.makedirs(tensorboard_log_dir, exist_ok=True)
    prefix = ''
    seed = 0
    # Load config
    with open(f"{example_config_path}/turtlebot_nav.yaml", "r") as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    # Make sure flattened obs and action space is used
    cfg["env"]["flatten_action_space"] = True
    cfg["env"]["flatten_obs_space"] = True
    # Only use RGB obs
    cfg["robots"][0]["obs_modalities"] = ["rgb"]
    # If we're not eval, turn off the start / goal markers so the agent doesn't see them
    if not args.eval:
        cfg["task"]["visualize_goal"] = False
    env = og.Environment(configs=cfg)
    # If we're evaluating, hide the ceilings and enable camera teleoperation so the user can easily
    # visualize the rollouts dynamically
    if args.eval:
        ceiling = env.scene.object_registry("name", "ceilings")
        ceiling.visible = False
        og.sim.enable_viewer_camera_teleoperation()
    # Seed the RNGs for reproducibility
    set_random_seed(seed)
    env.reset()
    policy_kwargs = dict(
        features_extractor_class=CustomCombinedExtractor,
    )
    os.makedirs(tensorboard_log_dir, exist_ok=True)
    if args.eval:
        assert args.checkpoint is not None, "If evaluating a PPO policy, @checkpoint argument must be specified!"
        model = PPO.load(args.checkpoint)
        og.log.info("Starting evaluation...")
        mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=50)
        og.log.info("Finished evaluation!")
        og.log.info(f"Mean reward: {mean_reward} +/- {std_reward:.2f}")
    else:
        model = PPO(
            "MultiInputPolicy",
            env,
            verbose=1,
            tensorboard_log=tensorboard_log_dir,
            policy_kwargs=policy_kwargs,
            n_steps=20 * 10,
            batch_size=8,
            device='cuda',
        )
        # Save checkpoints and run periodic evaluation during training
        checkpoint_callback = CheckpointCallback(save_freq=1000, save_path=tensorboard_log_dir, name_prefix=prefix)
        eval_callback = EvalCallback(eval_env=env, eval_freq=1000, n_eval_episodes=20)
        callback = CallbackList([checkpoint_callback, eval_callback])
        og.log.debug(model.policy)
        og.log.info(f"model: {model}")
        og.log.info("Starting training...")
        model.learn(
            total_timesteps=10000000,
            callback=callback,
        )
        og.log.info("Finished training!")
if __name__ == "__main__":
main()
| 6,953 | Python | 35.989362 | 120 | 0.627355 |
StanfordVL/OmniGibson/omnigibson/examples/simulator/sim_save_load_example.py | import os
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.utils.ui_utils import KeyboardEventHandler
TEST_OUT_PATH = "" # Define output directory here. NOTE: left empty, the saved JSON lands relative to the CWD
def main(random_selection=False, headless=False, short_exec=False):
    """
    Prompts the user to select whether they are saving or loading an environment, and interactively
    shows how an environment can be saved or restored.

    Args:
        random_selection (bool): If True, skip the interactive manipulation phases
        headless (bool): Unused here; accepted for a consistent example entrypoint signature
        short_exec (bool): If True, skip the interactive manipulation phases
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    cfg = {
        "scene": {
            "type": "InteractiveTraversableScene",
            "scene_model": "Rs_int",
            "load_object_categories": ["floors", "walls", "bed", "bottom_cabinet", "chair"],
        },
        "robots": [
            {
                "type": "Turtlebot",
                "obs_modalities": ["rgb", "depth"],
            },
        ],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set the camera to a good angle
    def set_camera_pose():
        og.sim.viewer_camera.set_position_orientation(
            position=np.array([-0.229375, -3.40576 , 7.26143 ]),
            orientation=np.array([ 0.27619733, -0.00230233, -0.00801152, 0.9610648 ]),
        )
    set_camera_pose()
    # Give user instructions, and then loop until completed
    completed = short_exec
    if not short_exec and not random_selection:
        # Notify user to manipulate environment until ready, then press Z to exit
        print()
        print("Modify the scene by SHIFT + left clicking objects and dragging them. Once finished, press Z.")
        # Register callback so user knows to press space once they're done manipulating the scene
        def complete_loop():
            nonlocal completed
            completed = True
        KeyboardEventHandler.add_keyboard_callback(lazy.carb.input.KeyboardInput.Z, complete_loop)
        while not completed:
            env.step(np.random.uniform(-1, 1, env.robots[0].action_dim))
    print("Completed scene modification, saving scene...")
    save_path = os.path.join(TEST_OUT_PATH, "saved_stage.json")
    og.sim.save(json_path=save_path)
    print("Re-loading scene...")
    og.sim.restore(json_path=save_path)
    # Take a sim step and play
    og.sim.step()
    og.sim.play()
    set_camera_pose()
    # Loop until user terminates
    completed = short_exec
    if not short_exec and not random_selection:
        # Notify user to manipulate environment until ready, then press Z to exit
        print()
        print("View reloaded scene. Once finished, press Z.")
        # Register callback so user knows to press space once they're done manipulating the scene
        # (reuses complete_loop from the first phase -- same guard condition, so it is always defined here)
        KeyboardEventHandler.add_keyboard_callback(lazy.carb.input.KeyboardInput.Z, complete_loop)
        while not completed:
            env.step(np.zeros(env.robots[0].action_dim))
    # Shutdown omnigibson at the end
    og.shutdown()
if __name__ == "__main__":
main()
| 2,983 | Python | 34.105882 | 109 | 0.630238 |
StanfordVL/OmniGibson/omnigibson/examples/teleoperation/robot_teleoperate_demo.py | """
Example script for using external devices to teleoperate a robot.
"""
import omnigibson as og
from omnigibson.utils.ui_utils import choose_from_options
# Robot options presented to the user (option name -> human-readable description)
ROBOTS = {
    "FrankaPanda": "Franka Emika Panda (default)",
    "Fetch": "Mobile robot with one arm",
    "Tiago": "Mobile robot with two arms",
}
# Teleoperation input methods presented to the user (option name -> human-readable description)
TELEOP_METHOD = {
    "keyboard": "Keyboard (default)",
    "spacemouse": "SpaceMouse",
    "oculus": "Oculus Quest",
    "vision": "Human Keypoints with Camera",
}
def main():
    """
    Spawn a robot in an empty scene with a breakfast table and some toys.
    Users can try pick and place the toy into the basket using selected external devices and robot of their choice.
    """
    # Imported lazily here since telemoma is an optional dependency of this demo
    from omnigibson.utils.teleop_utils import TeleopSystem
    from telemoma.utils.camera_utils import RealSenseCamera
    from telemoma.configs.base_config import teleop_config
    robot_name = choose_from_options(options=ROBOTS, name="robot")
    arm_teleop_method = choose_from_options(options=TELEOP_METHOD, name="robot arm teleop method")
    if robot_name != "FrankaPanda":
        base_teleop_method = choose_from_options(options=TELEOP_METHOD, name="robot base teleop method")
    else:
        base_teleop_method = "keyboard" # Dummy value since FrankaPanda does not have a base
    # Generate teleop config
    teleop_config.arm_left_controller = arm_teleop_method
    teleop_config.arm_right_controller = arm_teleop_method
    teleop_config.base_controller = base_teleop_method
    teleop_config.interface_kwargs["keyboard"] = {"arm_speed_scaledown": 0.04}
    teleop_config.interface_kwargs["spacemouse"] = {"arm_speed_scaledown": 0.04}
    if arm_teleop_method == "vision" or base_teleop_method == "vision":
        teleop_config.interface_kwargs["vision"] = {"camera": RealSenseCamera()}
    # Create the config for generating the environment we want
    scene_cfg = {"type": "Scene"}
    # Add the robot we want to load
    robot_cfg = {
        "type": robot_name,
        "obs_modalities": ["rgb"],
        "action_normalize": False,
        "grasping_mode": "assisted",
    }
    # Tiago is bimanual; all other supported robots expose a single arm named "0"
    arms = ["left", "right"] if robot_name == "Tiago" else ["0"]
    robot_cfg["controller_config"] = {}
    for arm in arms:
        robot_cfg["controller_config"][f"arm_{arm}"] = {
            "name": "InverseKinematicsController",
            "command_input_limits": None,
        }
        robot_cfg["controller_config"][f"gripper_{arm}"] = {
            "name": "MultiFingerGripperController",
            "command_input_limits": (0.0, 1.0),
            "mode": "smooth",
        }
    # Table, basket ("frail"), and three toy figures to manipulate
    object_cfg = [
        {
            "type": "DatasetObject",
            "prim_path": "/World/breakfast_table",
            "name": "breakfast_table",
            "category": "breakfast_table",
            "model": "kwmfdg",
            "bounding_box": [2, 1, 0.4],
            "position": [0.8, 0, 0.3],
            "orientation": [0, 0, 0.707, 0.707],
        },
        {
            "type": "DatasetObject",
            "prim_path": "/World/frail",
            "name": "frail",
            "category": "frail",
            "model": "zmjovr",
            "scale": [2, 2, 2],
            "position": [0.6, -0.35, 0.5],
        },
        {
            "type": "DatasetObject",
            "prim_path": "/World/toy_figure1",
            "name": "toy_figure1",
            "category": "toy_figure",
            "model": "issvzv",
            "scale": [0.75, 0.75, 0.75],
            "position": [0.6, 0, 0.5],
        },
        {
            "type": "DatasetObject",
            "prim_path": "/World/toy_figure2",
            "name": "toy_figure2",
            "category": "toy_figure",
            "model": "nncqfn",
            "scale": [0.75, 0.75, 0.75],
            "position": [0.6, 0.15, 0.5],
        },
        {
            "type": "DatasetObject",
            "prim_path": "/World/toy_figure3",
            "name": "toy_figure3",
            "category": "toy_figure",
            "model": "eulekw",
            "scale": [0.25, 0.25, 0.25],
            "position": [0.6, 0.3, 0.5],
        }
    ]
    cfg = dict(scene=scene_cfg, robots=[robot_cfg], objects=object_cfg)
    # Create the environment
    env = og.Environment(configs=cfg)
    env.reset()
    # update viewer camera pose
    og.sim.viewer_camera.set_position_orientation([-0.22, 0.99, 1.09], [-0.14, 0.47, 0.84, -0.23])
    # Start teleoperation system
    robot = env.robots[0]
    # Initialize teleoperation system
    teleop_sys = TeleopSystem(config=teleop_config, robot=robot, show_control_marker=True)
    teleop_sys.start()
    # main simulation loop: poll the teleop device, convert its state into a robot action, and step
    for _ in range(10000):
        action = teleop_sys.get_action(teleop_sys.get_obs())
        env.step(action)
    # Shut down the environment cleanly at the end
    teleop_sys.stop()
    env.close()
if __name__ == "__main__":
main() | 4,868 | Python | 34.80147 | 115 | 0.566763 |
StanfordVL/OmniGibson/omnigibson/examples/teleoperation/vr_simple_demo.py | """
Example script for interacting with OmniGibson scenes with VR and BehaviorRobot.
"""
import omnigibson as og
from omnigibson.utils.teleop_utils import OVXRSystem
def main():
    """
    Spawn a Tiago robot in an empty scene that users can control and interact with through VR.

    NOTE: the code below loads a plain empty "Scene" (the Rs_int interactive scene is commented out)
    and a Tiago robot, not a BehaviorRobot as the module description suggests.
    """
    # Create the config for generating the environment we want
    scene_cfg = {"type": "Scene"} #"InteractiveTraversableScene", "scene_model": "Rs_int"}
    robot0_cfg = {
        "type": "Tiago",
        "controller_config": {
            "gripper_left": {"command_input_limits": "default"},
            "gripper_right": {"command_input_limits": "default"},
        }
    }
    cfg = dict(scene=scene_cfg, robots=[robot0_cfg])
    # Create the environment
    env = og.Environment(configs=cfg)
    env.reset()
    # start vrsys: OVXRSystem bridges the SteamVR runtime and the robot
    vrsys = OVXRSystem(robot=env.robots[0], show_control_marker=False, system="SteamVR", align_anchor_to_robot_base=True)
    vrsys.start()
    # set headset position to be 1m above ground and facing +x
    vrsys.set_initial_transform(pos=[0, 0, 1], orn=[0, 0, 0, 1])
    # main simulation loop
    for _ in range(10000):
        # step the VR system to get the latest data from VR runtime
        vrsys.update()
        # generate robot action and step the environment
        action = vrsys.teleop_data_to_action()
        env.step(action)
    # Shut down the environment cleanly at the end
    vrsys.stop()
    env.close()
if __name__ == "__main__":
main() | 1,512 | Python | 33.386363 | 121 | 0.634259 |
StanfordVL/OmniGibson/omnigibson/examples/action_primitives/rs_int_example.py | import os
import yaml
import numpy as np
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives, StarterSemanticActionPrimitiveSet
# Don't use GPU dynamics and use flatcache for performance boost
# gm.USE_GPU_DYNAMICS = True
# gm.ENABLE_FLATCACHE = True
def execute_controller(ctrl_gen, env):
    """Drain ``ctrl_gen``, feeding every generated primitive action into ``env.step``."""
    for primitive_action in ctrl_gen:
        env.step(primitive_action)
def main():
    """
    Demonstrates how to use the action primitives to pick and place an object in a crowded scene.
    It loads Rs_int with a Fetch robot, and the robot picks and places an apple.
    """
    # Load the config (use a context manager so the file handle is closed deterministically
    # instead of leaking via yaml.load(open(...)))
    config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml")
    with open(config_filename, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    # Update it to run a grocery shopping task
    config["scene"]["scene_model"] = "Rs_int"
    config["scene"]["not_load_object_categories"] = ["ceilings"]
    config["objects"] = [
        {
            "type": "DatasetObject",
            "name": "apple",
            "category": "apple",
            "model": "agveuv",
            "position": [-0.3, -1.1, 0.5],
            "orientation": [0, 0, 0, 1]
        },
    ]
    # Load the environment
    env = og.Environment(configs=config)
    scene = env.scene
    robot = env.robots[0]  # NOTE(review): fetched but unused below; kept for symmetry with sibling examples
    # Allow user to move camera more easily
    og.sim.enable_viewer_camera_teleoperation()
    controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False)
    cabinet = scene.object_registry("name", "bottom_cabinet_slgzfc_0")
    apple = scene.object_registry("name", "apple")
    # Grasp apple
    print("Executing controller")
    execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, apple), env)
    print("Finished executing grasp")
    # Place on cabinet
    print("Executing controller")
    execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_ON_TOP, cabinet), env)
    print("Finished executing place")
if __name__ == "__main__":
main() | 2,119 | Python | 32.124999 | 142 | 0.674847 |
StanfordVL/OmniGibson/omnigibson/examples/action_primitives/wip_solve_behavior_task.py | import os
import yaml
import numpy as np
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives, StarterSemanticActionPrimitiveSet
# Don't use GPU dynamics and use flatcache for performance boost
# gm.USE_GPU_DYNAMICS = True
# gm.ENABLE_FLATCACHE = True
def execute_controller(ctrl_gen, env):
    """Execute every action yielded by the primitive generator on the environment, in order."""
    for act in ctrl_gen:
        env.step(act)
def main():
    """
    Demonstrates how to use the action primitives to solve a simple BEHAVIOR-1K task.
    It loads Benevolence_1_int with a Fetch robot, and the robot attempts to solve the
    picking_up_trash task using a hardcoded sequence of primitives.
    """
    # Load the config (use a context manager so the file handle is closed deterministically
    # instead of leaking via yaml.load(open(...)))
    config_filename = os.path.join(og.example_config_path, "fetch_primitives.yaml")
    with open(config_filename, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    # Update it to run a grocery shopping task
    config["scene"]["scene_model"] = "Benevolence_1_int"
    config["scene"]["load_task_relevant_only"] = True
    config["scene"]["not_load_object_categories"] = ["ceilings"]
    config["task"] = {
        "type": "BehaviorTask",
        "activity_name": "picking_up_trash",
        "activity_definition_id": 0,
        "activity_instance_id": 0,
        "predefined_problem": None,
        "online_object_sampling": False,
    }
    # Load the environment
    env = og.Environment(configs=config)
    scene = env.scene
    robot = env.robots[0]  # NOTE(review): fetched but unused below; kept for symmetry with sibling examples
    # Allow user to move camera more easily
    og.sim.enable_viewer_camera_teleoperation()
    controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False)
    # Grasp can of soda
    grasp_obj = scene.object_registry("name", "can_of_soda_89")
    print("Executing controller")
    execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.GRASP, grasp_obj), env)
    print("Finished executing grasp")
    # Place can in trash can
    print("Executing controller")
    trash = scene.object_registry("name", "trash_can_85")
    execute_controller(controller.apply_ref(StarterSemanticActionPrimitiveSet.PLACE_INSIDE, trash), env)
    print("Finished executing place")
if __name__ == "__main__":
main() | 2,253 | Python | 34.218749 | 142 | 0.700843 |
StanfordVL/OmniGibson/omnigibson/examples/environments/navigation_env_demo.py | import os
import yaml
import omnigibson as og
from omnigibson.utils.ui_utils import choose_from_options
def main(random_selection=False, headless=False, short_exec=False):
    """
    Prompts the user to select a type of scene and loads a turtlebot into it, generating a Point-Goal navigation
    task within the environment.
    It steps the environment 100 times with random actions sampled from the action space,
    using the Gym interface, resetting it 10 times.

    Args:
        random_selection (bool): If True, menu choices are sampled randomly instead of prompting the user
        headless (bool): Unused here; accepted for a consistent example entrypoint signature
        short_exec (bool): If True, only run a single reset/rollout iteration
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Load the config (plain string literal -- the original f-string had no placeholders;
    # also close the file deterministically instead of leaking via yaml.load(open(...)))
    config_filename = os.path.join(og.example_config_path, "turtlebot_nav.yaml")
    with open(config_filename, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    # check if we want to quick load or full load the scene
    load_options = {
        "Quick": "Only load the building assets (i.e.: the floors, walls, ceilings)",
        "Full": "Load all interactive objects in the scene",
    }
    load_mode = choose_from_options(options=load_options, name="load mode", random_selection=random_selection)
    if load_mode == "Quick":
        config["scene"]["load_object_categories"] = ["floors", "walls", "ceilings"]
    # Load the environment
    env = og.Environment(configs=config)
    # Allow user to move camera more easily
    og.sim.enable_viewer_camera_teleoperation()
    # Run a simple loop and reset periodically
    max_iterations = 10 if not short_exec else 1
    for j in range(max_iterations):
        og.log.info("Resetting environment")
        env.reset()
        for i in range(100):
            action = env.action_space.sample()
            state, reward, done, info = env.step(action)
            if done:
                og.log.info("Episode finished after {} timesteps".format(i + 1))
                break
    # Always close the environment at the end
    env.close()
# Entry-point guard: run the demo only when this file is executed directly as a script.
if __name__ == "__main__":
    main()
| 1,944 | Python | 33.732142 | 112 | 0.645062 |
StanfordVL/OmniGibson/omnigibson/examples/environments/behavior_env_demo.py | import os
import yaml
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils.ui_utils import choose_from_options
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Generates a BEHAVIOR Task environment in an online fashion.
    It steps the environment 100 times with random actions sampled from the action space,
    using the Gym interface, resetting it 10 times.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Ask the user whether they want online object sampling or not
    sampling_options = {
        False: "Use a pre-sampled cached BEHAVIOR activity scene",
        True: "Sample the BEHAVIOR activity in an online fashion",
    }
    should_sample = choose_from_options(options=sampling_options, name="online object sampling", random_selection=random_selection)
    # Load the pre-selected configuration and set the online_sampling flag.
    # Use a context manager so the file handle is closed promptly (the original leaked it).
    config_filename = os.path.join(og.example_config_path, "fetch_behavior.yaml")
    with open(config_filename, "r") as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    cfg["task"]["online_object_sampling"] = should_sample
    # Load the environment
    env = og.Environment(configs=cfg)
    # Allow user to move camera more easily
    og.sim.enable_viewer_camera_teleoperation()
    # Run a simple loop and reset periodically
    max_iterations = 10 if not short_exec else 1
    for j in range(max_iterations):
        og.log.info("Resetting environment")
        env.reset()
        for i in range(100):
            # Random policy: sample an action and step via the Gym interface
            action = env.action_space.sample()
            state, reward, done, info = env.step(action)
            if done:
                og.log.info("Episode finished after {} timesteps".format(i + 1))
                break
    # Always close the environment at the end
    env.close()
# Entry-point guard: run the demo only when this file is executed directly as a script.
if __name__ == "__main__":
    main()
| 2,003 | Python | 32.966101 | 131 | 0.667998 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/attachment_demo.py | import yaml
import numpy as np
import omnigibson as og
from omnigibson.macros import gm
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of attachment of different parts of a shelf.

    Builds a shelf from individually-spawned "attachable" parts (back panel, sides,
    shelves, top, baseboard) stacked at slightly increasing z so they settle onto the
    fixed back panel and auto-attach, then lets the user interactively break the
    assembly apart in the viewer.
    """
    # Use a context manager so the config file handle is closed promptly
    # (the original leaked the handle).
    with open(f"{og.example_config_path}/default_cfg.yaml", "r") as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    # Add objects that we want to create
    obj_cfgs = []
    obj_cfgs.append(dict(
        type="LightObject",
        name="light",
        light_type="Sphere",
        radius=0.01,
        intensity=5000,
        position=[0, 0, 1.0],
    ))
    # Each successive part spawns delta_z higher than the previous one so the parts
    # don't interpenetrate before they settle.
    base_z = 0.2
    delta_z = 0.01
    idx = 0
    # Back panel is the fixed anchor the other parts attach to
    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_back_panel",
        category="shelf_back_panel",
        model="gjsnrt",
        position=[0, 0, 0.01],
        fixed_base=True,
        abilities={"attachable": {}},
    ))
    idx += 1
    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_side_left",
        category="shelf_side",
        model="bxfkjj",
        position=[-0.4, 0, base_z + delta_z * idx],
        abilities={"attachable": {}},
    ))
    idx += 1
    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_side_right",
        category="shelf_side",
        model="yujrmw",
        position=[0.4, 0, base_z + delta_z * idx],
        abilities={"attachable": {}},
    ))
    idx += 1
    ys = [-0.93, -0.61, -0.29, 0.03, 0.35, 0.68]
    for i in range(6):
        obj_cfgs.append(dict(
            type="DatasetObject",
            name=f"shelf_shelf_{i}",
            category="shelf_shelf",
            model="ymtnqa",
            position=[0, ys[i], base_z + delta_z * idx],
            abilities={"attachable": {}},
        ))
        idx += 1
    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_top_0",
        category="shelf_top",
        model="pfiole",
        position=[0, 1.0, base_z + delta_z * idx],
        abilities={"attachable": {}},
    ))
    idx += 1
    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_baseboard",
        category="shelf_baseboard",
        model="hlhneo",
        position=[0, -0.97884506, base_z + delta_z * idx],
        abilities={"attachable": {}},
    ))
    idx += 1
    cfg["objects"] = obj_cfgs
    env = og.Environment(configs=cfg)
    # Set viewer camera pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-1.689292, -2.11718198, 0.93332228]),
        orientation=np.array([0.57687967, -0.22995655, -0.29022759, 0.72807814]),
    )
    # Let the parts settle for a few steps
    for _ in range(10):
        env.step([])
    # Nudge the baseboard toward the back panel so it attaches
    shelf_baseboard = og.sim.scene.object_registry("name", "shelf_baseboard")
    shelf_baseboard.set_position_orientation([0, -0.979, 0.26], [0, 0, 0, 1])
    shelf_baseboard.keep_still()
    shelf_baseboard.set_linear_velocity(np.array([-0.2, 0, 0]))
    shelf_side_left = og.sim.scene.object_registry("name", "shelf_side_left")
    shelf_side_left.set_position_orientation([-0.4, 0.0, 0.2], [0, 0, 0, 1])
    shelf_side_left.keep_still()
    input("\n\nShelf parts fall to their correct poses and get automatically attached to the back panel.\n"
          "You can try to drag (Shift + Left-CLICK + Drag) parts of the shelf to break it apart (you may need to zoom out and drag with a larger force).\n"
          "Press [ENTER] to continue.\n")
    # Free-run the simulation for interactive play, then shut down
    for _ in range(5000):
        og.sim.step()
    og.shutdown()
# Entry-point guard: run the demo only when this file is executed directly as a script.
if __name__ == "__main__":
    main()
| 3,567 | Python | 27.31746 | 155 | 0.563218 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/heat_source_or_sink_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
def main():
    """
    Demo of the HeatSourceOrSink object state.

    Loads an empty scene with a single stove whose heat source requires it to be
    toggled on, then walks the user (via input() prompts) through toggling the
    stove on/off and moving it, asserting the expected heat-source state each time.
    """
    # Create the scene config to load -- empty scene with a stove object added
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [
            {
                "type": "DatasetObject",
                "name": "stove",
                "category": "stove",
                "model": "qbjiva",
                "bounding_box": [1.611, 0.769, 1.147],
                # Heat source only becomes active while the stove is toggled on
                "abilities": {
                    "heatSource": {"requires_toggled_on": True},
                    "toggleable": {},
                },
                "position": [0, 0, 0.61],
            }
        ],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Get reference to stove object
    stove = env.scene.object_registry("name", "stove")
    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-0.0792399, -1.30104, 1.51981]),
        orientation=np.array([0.54897692, 0.00110359, 0.00168013, 0.83583509]),
    )
    # Make sure necessary object states are included with the stove
    assert object_states.HeatSourceOrSink in stove.states
    assert object_states.ToggledOn in stove.states
    # Take a few steps so that visibility propagates
    for _ in range(5):
        env.step(np.array([]))
    # Heat source is off.
    print("Heat source is OFF.")
    heat_source_state = stove.states[object_states.HeatSourceOrSink].get_value()
    assert not heat_source_state
    # Toggle on stove, notify user
    input("Heat source will now turn ON: Press ENTER to continue.")
    stove.states[object_states.ToggledOn].set_value(True)
    assert stove.states[object_states.ToggledOn].get_value()
    # Need to take a step to update the state.
    env.step(np.array([]))
    # Heat source is on
    heat_source_state = stove.states[object_states.HeatSourceOrSink].get_value()
    assert heat_source_state
    for _ in range(500):
        env.step(np.array([]))
    # Toggle off stove, notify user
    input("Heat source will now turn OFF: Press ENTER to continue.")
    stove.states[object_states.ToggledOn].set_value(False)
    assert not stove.states[object_states.ToggledOn].get_value()
    for _ in range(200):
        env.step(np.array([]))
    # Move stove, notify user
    input("Heat source is now moving: Press ENTER to continue.")
    stove.set_position(np.array([0, 1.0, 0.61]))
    for i in range(100):
        env.step(np.array([]))
    # Toggle on stove again, notify user
    input("Heat source will now turn ON: Press ENTER to continue.")
    stove.states[object_states.ToggledOn].set_value(True)
    assert stove.states[object_states.ToggledOn].get_value()
    for i in range(500):
        env.step(np.array([]))
    # Shutdown environment at end
    env.close()
# Entry-point guard: run the demo only when this file is executed directly as a script.
if __name__ == "__main__":
    main()
| 3,025 | Python | 29.877551 | 80 | 0.611901 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/onfire_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of on fire state.
    Loads a stove (toggled on), and two apples.
    The first apple will be ignited by the stove first, then the second apple will be ignited by the first apple.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Define specific objects we want to load in with the scene directly
    obj_configs = []
    # Light
    obj_configs.append(dict(
        type="LightObject",
        light_type="Sphere",
        name="light",
        radius=0.01,
        intensity=1e8,
        position=[-2.0, -2.0, 1.0],
    ))
    # Stove
    obj_configs.append(dict(
        type="DatasetObject",
        name="stove",
        category="stove",
        model="yhjzwg",
        bounding_box=[1.185, 0.978, 1.387],
        position=[0, 0, 0.69],
    ))
    # 2 Apples, both flammable: they ignite at 100 degrees when within 0.5m of a heat source
    for i in range(2):
        obj_configs.append(dict(
            type="DatasetObject",
            name=f"apple{i}",
            category="apple",
            model="agveuv",
            bounding_box=[0.065, 0.065, 0.077],
            position=[0, i * 0.07, 2.0],
            abilities={"flammable": {"ignition_temperature": 100, "distance_threshold": 0.5}},
        ))
    # Create the scene config to load -- empty scene with desired objects
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": obj_configs,
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Get reference to relevant objects
    stove = env.scene.object_registry("name", "stove")
    apples = list(env.scene.object_registry("category", "apple"))
    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-0.42246569, -0.34745704, 1.56810353]),
        orientation=np.array([0.50083786, -0.10407796, -0.17482619, 0.84128772]),
    )
    # Let objects settle
    for _ in range(10):
        env.step(np.array([]))
    # Turn on the stove
    stove.states[object_states.ToggledOn].set_value(True)
    # The first apple will be affected by the stove (placed within the heat source's distance threshold)
    apples[0].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0.11, 0, 0.1]))
    # The second apple will NOT be affected by the stove, but will be affected by the first apple once it's on fire.
    apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0.32, 0, 0.1]))
    steps = 0
    # -1 is a sentinel: `steps != max_steps` never becomes True, so the loop runs until interrupted
    max_steps = -1 if not short_exec else 1000
    # Main recording loop
    while steps != max_steps:
        env.step(np.array([]))
        temps = [f"{apple.states[object_states.Temperature].get_value():>20.2f}" for apple in apples]
        # end="\r" keeps the temperature readout on a single, continuously-updated console line
        print(f"{'Apple temperature:':<20}", *temps, end="\r")
        steps += 1
    # Always close env at the end
    env.close()
# Entry-point guard: run the demo only when this file is executed directly as a script.
if __name__ == "__main__":
    main()
| 3,147 | Python | 29.26923 | 119 | 0.599619 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/temperature_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of temperature change
    Loads a stove, a microwave and an oven, all toggled on, and five frozen apples
    The user can move the apples to see them change from frozen, to normal temperature, to cooked and burnt
    This demo also shows how to load objects ToggledOn and how to set the initial temperature of an object
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Define specific objects we want to load in with the scene directly
    obj_configs = []
    # Light
    obj_configs.append(dict(
        type="LightObject",
        light_type="Sphere",
        name="light",
        radius=0.01,
        intensity=1e8,
        position=[-2.0, -2.0, 1.0],
    ))
    # Stove
    obj_configs.append(dict(
        type="DatasetObject",
        name="stove",
        category="stove",
        model="yhjzwg",
        bounding_box=[1.185, 0.978, 1.387],
        position=[0, 0, 0.69],
    ))
    # Microwave
    obj_configs.append(dict(
        type="DatasetObject",
        name="microwave",
        category="microwave",
        model="hjjxmi",
        bounding_box=[0.384, 0.256, 0.196],
        position=[2.5, 0, 0.10],
    ))
    # Oven
    obj_configs.append(dict(
        type="DatasetObject",
        name="oven",
        category="oven",
        model="wuinhm",
        bounding_box=[1.075, 0.926, 1.552],
        position=[-1.25, 0, 0.88],
    ))
    # Tray
    obj_configs.append(dict(
        type="DatasetObject",
        name="tray",
        category="tray",
        model="xzcnjq",
        bounding_box=[0.319, 0.478, 0.046],
        position=[-0.25, -0.12, 1.26],
    ))
    # Fridge, configured as a cold source that only affects objects placed inside it
    obj_configs.append(dict(
        type="DatasetObject",
        name="fridge",
        category="fridge",
        model="hivvdf",
        bounding_box=[1.065, 1.149, 1.528],
        abilities={
            "coldSource": {
                "temperature": -100.0,
                "requires_inside": True,
            }
        },
        position=[1.25, 0, 0.81],
    ))
    # 5 Apples
    for i in range(5):
        obj_configs.append(dict(
            type="DatasetObject",
            name=f"apple{i}",
            category="apple",
            model="agveuv",
            bounding_box=[0.065, 0.065, 0.077],
            position=[0, i * 0.1, 5.0],
        ))
    # Create the scene config to load -- empty scene with desired objects
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": obj_configs,
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Get reference to relevant objects
    stove = env.scene.object_registry("name", "stove")
    microwave = env.scene.object_registry("name", "microwave")
    oven = env.scene.object_registry("name", "oven")
    tray = env.scene.object_registry("name", "tray")
    fridge = env.scene.object_registry("name", "fridge")
    apples = list(env.scene.object_registry("category", "apple"))
    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.46938863, -3.97887141, 1.64106008]),
        orientation=np.array([0.63311689, 0.00127259, 0.00155577, 0.77405359]),
    )
    # Let objects settle
    for _ in range(25):
        env.step(np.array([]))
    # Turn on all scene objects
    stove.states[object_states.ToggledOn].set_value(True)
    microwave.states[object_states.ToggledOn].set_value(True)
    oven.states[object_states.ToggledOn].set_value(True)
    # Set initial temperature of the apples to -50 degrees Celsius, and move the apples to different objects
    for apple in apples:
        apple.states[object_states.Temperature].set_value(-50)
    apples[0].states[object_states.Inside].set_value(oven, True)
    apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0, 0, 0.1]))
    apples[2].states[object_states.OnTop].set_value(tray, True)
    apples[3].states[object_states.Inside].set_value(fridge, True)
    apples[4].states[object_states.Inside].set_value(microwave, True)
    steps = 0
    # -1 is a sentinel: `steps != max_steps` never becomes True, so the loop runs until interrupted
    max_steps = -1 if not short_exec else 1000
    # Main recording loop: print a header row, then continuously report per-apple temperature
    locations = [f'{loc:>20}' for loc in ["Inside oven", "On stove", "On tray", "Inside fridge", "Inside microwave"]]
    print()
    print(f"{'Apple location:':<20}", *locations)
    while steps != max_steps:
        env.step(np.array([]))
        temps = [f"{apple.states[object_states.Temperature].get_value():>20.2f}" for apple in apples]
        # end="\r" keeps the readout on a single, continuously-updated console line
        print(f"{'Apple temperature:':<20}", *temps, end="\r")
        steps += 1
    # Always close env at the end
    env.close()
# Entry-point guard: run the demo only when this file is executed directly as a script.
if __name__ == "__main__":
    main()
| 4,976 | Python | 29.722222 | 117 | 0.590836 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/sample_kinematics_demo.py | import os
import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
from omnigibson.objects import DatasetObject
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo to use the raycasting-based sampler to load objects onTop and/or inside another
    Loads a cabinet, a microwave open on top of it, and two plates with apples on top, one inside and one on top of the cabinet
    Then loads a shelf and cracker boxes inside of it
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene
    cfg = {
        "scene": {
            "type": "Scene",
        },
    }
    # Define objects we want to sample at runtime
    microwave_cfg = dict(
        type="DatasetObject",
        name="microwave",
        category="microwave",
        model="hjjxmi",
        bounding_box=[0.768, 0.512, 0.392],
    )
    cabinet_cfg = dict(
        type="DatasetObject",
        name="cabinet",
        category="bottom_cabinet",
        model="bamfsz",
        bounding_box=[1.075, 1.131, 1.355],
    )
    plate_cfgs = [dict(
        type="DatasetObject",
        name=f"plate{i}",
        category="plate",
        model="iawoof",
        bounding_box=np.array([0.20, 0.20, 0.05]),
    ) for i in range(2)]
    apple_cfgs = [dict(
        type="DatasetObject",
        name=f"apple{i}",
        category="apple",
        model="agveuv",
        bounding_box=[0.065, 0.065, 0.077],
    ) for i in range(4)]
    shelf_cfg = dict(
        type="DatasetObject",
        name="shelf",  # fixed: plain string, no placeholder so no f-prefix needed
        category="shelf",
        model="pkgbcp",
        bounding_box=np.array([1.0, 0.4, 2.0]),
    )
    box_cfgs = [dict(
        type="DatasetObject",
        name=f"box{i}",
        category="box_of_crackers",
        model="cmdigf",
        bounding_box=np.array([0.2, 0.05, 0.3]),
    ) for i in range(5)]
    # Compose objects cfg
    objects_cfg = [
        microwave_cfg,
        cabinet_cfg,
        *plate_cfgs,
        *apple_cfgs,
        shelf_cfg,
        *box_cfgs,
    ]
    # Update their spawn positions so they don't collide immediately
    for i, obj_cfg in enumerate(objects_cfg):
        obj_cfg["position"] = [100 + i, 100 + i, 100 + i]
    cfg["objects"] = objects_cfg
    # Create the environment
    env = og.Environment(configs=cfg)
    env.step([])
    # Sample microwave and boxes
    sample_boxes_on_shelf(env)
    sample_microwave_plates_apples(env)
    # -1 is a sentinel: `step != max_steps` never becomes True, so the loop runs until interrupted
    max_steps = 100 if short_exec else -1
    step = 0
    while step != max_steps:
        env.step(np.array([]))
        step += 1
    # Always close environment at the end
    env.close()
def sample_microwave_plates_apples(env):
    """
    Place the cabinet on the floor, sample the microwave OnTop of it (opened),
    then distribute the plates (first one Inside the microwave, the rest OnTop)
    and evenly split the apples OnTop of the plates.

    Args:
        env: the active og.Environment; must already contain objects named
            "microwave" and "cabinet" plus "plate" and "apple" category objects.
    """
    microwave = env.scene.object_registry("name", "microwave")
    cabinet = env.scene.object_registry("name", "cabinet")
    plates = list(env.scene.object_registry("category", "plate"))
    apples = list(env.scene.object_registry("category", "apple"))
    # Place the cabinet at a pre-determined location on the floor
    og.log.info("Placing cabinet on the floor...")
    cabinet.set_orientation([0, 0, 0, 1.0])
    env.step(np.array([]))
    # Shift so the cabinet's AABB (not its origin) sits flush on the floor
    offset = cabinet.get_position()[2] - cabinet.aabb_center[2]
    cabinet.set_position(np.array([1.0, 0, cabinet.aabb_extent[2] / 2]) + offset)
    env.step(np.array([]))
    # Set microwave on top of the cabinet, open it, and step 100 times
    og.log.info("Placing microwave OnTop of the cabinet...")
    # set_value returns False if sampling failed, hence the asserts
    assert microwave.states[object_states.OnTop].set_value(cabinet, True)
    assert microwave.states[object_states.Open].set_value(True)
    og.log.info("Microwave placed.")
    for _ in range(50):
        env.step(np.array([]))
    og.log.info("Placing plates")
    n_apples_per_plate = int(len(apples) / len(plates))
    for i, plate in enumerate(plates):
        # Put the 1st plate in the microwave
        if i == 0:
            og.log.info(f"Placing plate {i} Inside the microwave...")
            assert plate.states[object_states.Inside].set_value(microwave, True)
        else:
            og.log.info(f"Placing plate {i} OnTop the microwave...")
            assert plate.states[object_states.OnTop].set_value(microwave, True)
        og.log.info(f"Plate {i} placed.")
        for _ in range(50):
            env.step(np.array([]))
        og.log.info(f"Placing {n_apples_per_plate} apples OnTop of the plate...")
        for j in range(n_apples_per_plate):
            apple_idx = i * n_apples_per_plate + j
            apple = apples[apple_idx]
            assert apple.states[object_states.OnTop].set_value(plate, True)
            og.log.info(f"Apple {apple_idx} placed.")
            for _ in range(50):
                env.step(np.array([]))
def sample_boxes_on_shelf(env):
    """
    Drop the shelf at a fixed floor location, then sample every cracker box
    Inside it, letting the simulation settle between placements.

    Args:
        env: the active og.Environment; must already contain an object named
            "shelf" and objects of category "box_of_crackers".
    """
    registry = env.scene.object_registry
    shelf = registry("name", "shelf")
    cracker_boxes = list(registry("category", "box_of_crackers"))

    def settle(n_steps):
        # Advance the sim with an empty action to let physics settle
        for _ in range(n_steps):
            env.step(np.array([]))

    # Place the shelf at a pre-determined location on the floor
    og.log.info("Placing shelf on the floor...")
    shelf.set_orientation([0, 0, 0, 1.0])
    settle(1)
    # Shift so the shelf's AABB (not its origin) sits flush on the floor
    z_offset = shelf.get_position()[2] - shelf.aabb_center[2]
    shelf.set_position(np.array([-1.0, 0, shelf.aabb_extent[2] / 2]) + z_offset)
    settle(1)  # One step is needed for the object to be fully initialized
    og.log.info("Shelf placed.")
    settle(50)

    # Sample each box inside the shelf, settling after every placement
    og.log.info("Placing boxes...")
    for idx, box in enumerate(cracker_boxes):
        box.states[object_states.Inside].set_value(shelf, True)
        og.log.info(f"Box {idx} placed.")
        settle(50)
# Entry-point guard: run the demo only when this file is executed directly as a script.
if __name__ == "__main__":
    main()
| 5,940 | Python | 30.94086 | 127 | 0.584343 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/object_state_texture_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm, macros
from omnigibson.systems import get_system
from omnigibson.utils.constants import ParticleModifyMethod
# Make sure object states are enabled, we're using GPU dynamics, and HQ rendering is enabled
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_HQ_RENDERING = True
def main():
    """
    Demo of object-state-driven texture changes.

    Loads an empty scene with a single cabinet that is freezable, cookable,
    burnable, and saturable, then walks the user (via input() prompts) through
    freezing, cooking, burning, resetting, and saturating it with water while
    reporting each state and the object's active textures.
    """
    # Create the scene config to load -- empty scene plus a cabinet
    cfg = {
        "scene": {
            "type": "Scene",
            "floor_plane_visible": True,
        },
        "objects": [
            {
                "type": "DatasetObject",
                "name": "cabinet",
                "category": "bottom_cabinet",
                "model": "zuwvdo",
                "bounding_box": [1.595, 0.537, 1.14],
                "abilities": {
                    "freezable": {},
                    "cookable": {},
                    "burnable": {},
                    "saturable": {},
                    "particleRemover": {
                        "method": ParticleModifyMethod.ADJACENCY,
                        "conditions": {
                            # For a specific particle system, this specifies what conditions are required in order for the
                            # particle applier / remover to apply / remove particles associated with that system
                            # The list should contain functions with signature condition() --> bool,
                            # where True means the condition is satisfied
                            # In this case, we only allow our cabinet to absorb water, with no conditions needed.
                            # This is needed for the Saturated ("saturable") state so that we can modify the texture
                            # according to the water.
                            # NOTE: This will only change color if gm.ENABLE_HQ_RENDERING and gm.USE_GPU_DYNAMICS is
                            # enabled!
                            "water": [],
                        },
                    },
                },
                "position": [0, 0, 0.59],
            },
        ],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 1.7789 , -1.68822, 1.13551]),
        orientation=np.array([0.57065614, 0.20331904, 0.267029 , 0.74947212]),
    )
    # Grab reference to object of interest
    obj = env.scene.object_registry("name", "cabinet")
    # Make sure all the appropriate states are in the object
    assert object_states.Frozen in obj.states
    assert object_states.Cooked in obj.states
    assert object_states.Burnt in obj.states
    assert object_states.Saturated in obj.states
    def report_states():
        # Print the cabinet's current temperature, binary states, and textures.
        # Make sure states are propagated before printing
        for i in range(5):
            env.step(np.array([]))
        print("=" * 20)
        print("temperature:", obj.states[object_states.Temperature].get_value())
        print("obj is frozen:", obj.states[object_states.Frozen].get_value())
        print("obj is cooked:", obj.states[object_states.Cooked].get_value())
        print("obj is burnt:", obj.states[object_states.Burnt].get_value())
        print("obj is soaked:", obj.states[object_states.Saturated].get_value(get_system("water")))
        print("obj textures:", obj.get_textures())
    # Report default states
    print("==== Initial state ====")
    report_states()
    # Notify user that we're about to freeze the object, and then freeze the object
    input("\nObject will be frozen. Press ENTER to continue.")
    obj.states[object_states.Temperature].set_value(-50)
    report_states()
    # Notify user that we're about to cook the object, and then cook the object
    input("\nObject will be cooked. Press ENTER to continue.")
    obj.states[object_states.Temperature].set_value(100)
    report_states()
    # Notify user that we're about to burn the object, and then burn the object
    input("\nObject will be burned. Press ENTER to continue.")
    obj.states[object_states.Temperature].set_value(250)
    report_states()
    # Notify user that we're about to reset the object to its default state, and then reset state
    # MaxTemperature must be reset too, since Cooked/Burnt track the historical maximum
    input("\nObject will be reset to default state. Press ENTER to continue.")
    obj.states[object_states.Temperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE)
    obj.states[object_states.MaxTemperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE)
    report_states()
    # Notify user that we're about to soak the object, and then soak the object
    input("\nObject will be saturated with water. Press ENTER to continue.")
    obj.states[object_states.Saturated].set_value(get_system("water"), True)
    report_states()
    # Notify user that we're about to unsoak the object, and then unsoak the object
    input("\nObject will be unsaturated with water. Press ENTER to continue.")
    obj.states[object_states.Saturated].set_value(get_system("water"), False)
    report_states()
    # Close environment at the end
    input("Demo completed. Press ENTER to shutdown environment.")
    env.close()
# Entry-point guard: run the demo only when this file is executed directly as a script.
if __name__ == "__main__":
    main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/particle_applier_remover_demo.py | import numpy as np
import omnigibson as og
from omnigibson.object_states import Covered
from omnigibson.objects import DatasetObject
from omnigibson.macros import gm, macros
from omnigibson.systems import get_system
from omnigibson.utils.usd_utils import create_joint
from omnigibson.utils.ui_utils import choose_from_options
from omnigibson.utils.constants import ParticleModifyMethod
# Set macros for this example
macros.object_states.particle_modifier.VISUAL_PARTICLES_REMOVAL_LIMIT = 1000
macros.object_states.particle_modifier.PHYSICAL_PARTICLES_REMOVAL_LIMIT = 8000
macros.object_states.particle_modifier.MAX_VISUAL_PARTICLES_APPLIED_PER_STEP = 4
macros.object_states.particle_modifier.MAX_PHYSICAL_PARTICLES_APPLIED_PER_STEP = 40
macros.object_states.covered.MAX_VISUAL_PARTICLES = 300
# Make sure object states and GPU dynamics are enabled (GPU dynamics needed for fluids)
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_HQ_RENDERING = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of ParticleApplier and ParticleRemover object states, which enable objects to either apply arbitrary
    particles and remove arbitrary particles from the simulator, respectively.
    Loads an empty scene with a table, and starts clean to allow particles to be applied or pre-covers the table
    with particles to be removed. The ParticleApplier / ParticleRemover state is applied to an imported cloth object
    and allowed to interact with the table, applying / removing particles from the table.
    NOTE: The key difference between ParticleApplier/Removers and ParticleSource/Sinks is that Applier/Removers
    requires contact (if using ParticleProjectionMethod.ADJACENCY) or overlap
    (if using ParticleProjectionMethod.PROJECTION) in order to spawn / remove particles, and generally only spawn
    particles at the contact points. ParticleSource/Sinks are special cases of ParticleApplier/Removers that
    always use ParticleProjectionMethod.PROJECTION and always spawn / remove particles within their projection volume,
    irregardless of overlap with other objects!
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Choose what configuration to load
    modifier_type = choose_from_options(
        options={
            "particleApplier": "Demo object's ability to apply particles in the simulator",
            "particleRemover": "Demo object's ability to remove particles from the simulator",
        },
        name="particle modifier type",
        random_selection=random_selection,
    )
    # Name of the metalink prim that each modifier type expects on the object
    modification_metalink = {
        "particleApplier": "particleapplier_link",
        "particleRemover": "particleremover_link",
    }
    particle_types = ["stain", "water"]
    particle_type = choose_from_options(
        options={name: f"{name} particles will be applied or removed from the simulator" for name in particle_types},
        name="particle type",
        random_selection=random_selection,
    )
    modification_method = {
        "Adjacency": ParticleModifyMethod.ADJACENCY,
        "Projection": ParticleModifyMethod.PROJECTION,
    }
    projection_mesh_params = {
        "Adjacency": None,
        "Projection": {
            # Either Cone or Cylinder; shape of the projection where particles can be applied / removed
            "type": "Cone",
            # Size of the cone
            "extents": np.array([0.1875, 0.1875, 0.375]),
        },
    }
    method_type = choose_from_options(
        options={
            "Adjacency": "Close proximity to the object will be used to determine whether particles can be applied / removed",
            "Projection": "A Cone or Cylinder shape protruding from the object will be used to determine whether particles can be applied / removed",
        },
        name="modifier method type",
        random_selection=random_selection,
    )
    # Create the ability kwargs to pass to the object state
    abilities = {
        modifier_type: {
            "method": modification_method[method_type],
            "conditions": {
                # For a specific particle system, this specifies what conditions are required in order for the
                # particle applier / remover to apply / remove particles associated with that system
                # The list should contain functions with signature condition() --> bool,
                # where True means the condition is satisfied
                particle_type: [],
            },
            "projection_mesh_params": projection_mesh_params[method_type],
        }
    }
    table_cfg = dict(
        type="DatasetObject",
        name="table",
        category="breakfast_table",
        model="kwmfdg",
        bounding_box=[3.402, 1.745, 1.175],
        position=[0, 0, 0.98],
    )
    # Create the scene config to load -- empty scene with a light and table
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [table_cfg],
    }
    # Sanity check inputs: Remover + Adjacency + Fluid will not work because we are using a visual_only
    # object, so contacts will not be triggered with this object
    # NOTE(review): no such check is actually implemented below, and visual_only is only set for the
    # Projection method — confirm whether this comment is stale or a guard is missing.
    # Load the environment, then immediately stop the simulator since we need to add in the modifier object
    env = og.Environment(configs=cfg)
    og.sim.stop()
    # Grab references to table
    table = env.scene.object_registry("name", "table")
    # Set the viewer camera appropriately
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-1.61340969, -1.79803028, 2.53167412]),
        orientation=np.array([ 0.46291845, -0.12381886, -0.22679218, 0.84790371]),
    )
    # If we're using a projection volume, we manually add in the required metalink required in order to use the volume
    modifier = DatasetObject(
        name="modifier",
        category="dishtowel",
        model="dtfspn",
        bounding_box=[0.34245, 0.46798, 0.07],
        visual_only=method_type == "Projection", # Non-fluid adjacency requires the object to have collision geoms active
        abilities=abilities,
    )
    modifier_root_link_path = f"{modifier.prim_path}/base_link"
    # Manually drive the load sequence (normally done by import_object) so the metalink
    # prim and fixed joint can be injected between _load() and _post_load()
    modifier._prim = modifier._load()
    if method_type == "Projection":
        metalink_path = f"{modifier.prim_path}/{modification_metalink[modifier_type]}"
        og.sim.stage.DefinePrim(metalink_path, "Xform")
        create_joint(
            prim_path=f"{modifier_root_link_path}/{modification_metalink[modifier_type]}_joint",
            body0=modifier_root_link_path,
            body1=metalink_path,
            joint_type="FixedJoint",
            enabled=True,
        )
    modifier._post_load()
    modifier._loaded = True
    og.sim.import_object(modifier)
    modifier.set_position(np.array([0, 0, 5.0]))
    # Play the simulator and take some environment steps to let the objects settle
    og.sim.play()
    for _ in range(25):
        env.step(np.array([]))
    # If we're removing particles, set the table's covered state to be True
    if modifier_type == "particleRemover":
        table.states[Covered].set_value(get_system(particle_type), True)
        # Take a few steps to let particles settle
        for _ in range(25):
            env.step(np.array([]))
    # Enable camera teleoperation for convenience
    og.sim.enable_viewer_camera_teleoperation()
    # Set the modifier object to be in position to modify particles
    if method_type == "Projection":
        # Higher z to showcase projection volume at work
        z = 1.85
    elif particle_type == "stain":
        # Lower z needed to allow for adjacency bounding box to overlap properly
        z = 1.175
    else:
        # Higher z needed for actual physical interaction to accommodate non-negligible particle radius
        z = 1.22
    modifier.keep_still()
    modifier.set_position_orientation(
        position=np.array([0, 0.3, z]),
        orientation=np.array([0, 0, 0, 1.0]),
    )
    # Move object in square around table: (steps, per-step position delta) pairs
    deltas = [
        [130, np.array([-0.01, 0, 0])],
        [60, np.array([0, -0.01, 0])],
        [130, np.array([0.01, 0, 0])],
        [60, np.array([0, 0.01, 0])],
    ]
    for t, delta in deltas:
        for i in range(t):
            modifier.set_position(modifier.get_position() + delta)
            env.step(np.array([]))
    # Always shut down environment at the end
    env.close()
# Entry-point guard: run the demo only when this file is executed directly as a script.
if __name__ == "__main__":
    main()
| 8,488 | Python | 38.85446 | 149 | 0.661051 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/folded_unfolded_state_demo.py | from omnigibson.utils.constants import PrimType
from omnigibson.object_states import Folded, Unfolded
from omnigibson.macros import gm
import numpy as np
import omnigibson as og
# Make sure object states and GPU dynamics are enabled (GPU dynamics needed for cloth)
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of cloth objects that can potentially be folded.

    Loads three cloth objects (a carpet, a dishtowel, and a t-shirt), lets them
    settle, then kinematically drags a subset of each cloth's particles through a
    lift-and-fold trajectory while continuously printing each object's Folded /
    Unfolded state values.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene + custom cloth object
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [
            {
                "type": "DatasetObject",
                "name": "carpet",
                "category": "carpet",
                "model": "ctclvd",
                "bounding_box": [0.897, 0.568, 0.012],
                "prim_type": PrimType.CLOTH,
                "abilities": {"cloth": {}},
                "position": [0, 0, 0.5],
            },
            {
                "type": "DatasetObject",
                "name": "dishtowel",
                "category": "dishtowel",
                "model": "dtfspn",
                "bounding_box": [0.852, 1.1165, 0.174],
                "prim_type": PrimType.CLOTH,
                "abilities": {"cloth": {}},
                "position": [1, 1, 0.5],
            },
            {
                "type": "DatasetObject",
                "name": "shirt",
                "category": "t_shirt",
                "model": "kvidcx",
                "bounding_box": [0.472, 1.243, 1.158],
                "prim_type": PrimType.CLOTH,
                "abilities": {"cloth": {}},
                "position": [-1, 1, 0.5],
                "orientation": [0.7071, 0., 0.7071, 0.],
            },
        ],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Grab object references
    carpet = env.scene.object_registry("name", "carpet")
    dishtowel = env.scene.object_registry("name", "dishtowel")
    shirt = env.scene.object_registry("name", "shirt")
    objs = [carpet, dishtowel, shirt]
    # Set viewer camera
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([0.46382895, -2.66703958, 1.22616824]),
        orientation=np.array([0.58779174, -0.00231237, -0.00318273, 0.80900271]),
    )
    def print_state():
        # Print all three cloths' Folded/Unfolded values on one carriage-returned line
        folded = carpet.states[Folded].get_value()
        unfolded = carpet.states[Unfolded].get_value()
        info = "carpet: [folded] %d [unfolded] %d" % (folded, unfolded)
        folded = dishtowel.states[Folded].get_value()
        unfolded = dishtowel.states[Unfolded].get_value()
        info += " || dishtowel: [folded] %d [unfolded] %d" % (folded, unfolded)
        folded = shirt.states[Folded].get_value()
        unfolded = shirt.states[Unfolded].get_value()
        info += " || tshirt: [folded] %d [unfolded] %d" % (folded, unfolded)
        # Pad to a fixed width so a shorter line fully overwrites the previous one
        print(f"{info}{' ' * (110 - len(info))}", end="\r")
    # Let the cloths settle under gravity before manipulating them
    for _ in range(100):
        og.sim.step()
    print("\nCloth state:\n")
    if not short_exec:
        # Fold all three cloths along the x-axis
        for i in range(3):
            obj = objs[i]
            pos = obj.root_link.compute_particle_positions()
            x_min, x_max = np.min(pos, axis=0)[0], np.max(pos, axis=0)[0]
            x_extent = x_max - x_min
            # Get indices for the bottom 10 percent vertices in the x-axis
            indices = np.argsort(pos, axis=0)[:, 0][:(pos.shape[0] // 10)]
            start = np.copy(pos[indices])
            # lift up a bit
            mid = np.copy(start)
            mid[:, 2] += x_extent * 0.2
            # move towards x_max
            end = np.copy(mid)
            end[:, 0] += x_extent * 0.9
            increments = 25
            # Drag the grabbed particles along start -> mid -> end, stepping physics each increment
            for ctrl_pts in np.concatenate([np.linspace(start, mid, increments), np.linspace(mid, end, increments)]):
                obj.root_link.set_particle_positions(ctrl_pts, idxs=indices)
                og.sim.step()
                print_state()
        # Fold the t-shirt twice again along the y-axis
        for direction in [-1, 1]:
            obj = shirt
            pos = obj.root_link.compute_particle_positions()
            y_min, y_max = np.min(pos, axis=0)[1], np.max(pos, axis=0)[1]
            y_extent = y_max - y_min
            # Grab the leading 5 percent of vertices from whichever y-end this fold starts at
            if direction == 1:
                indices = np.argsort(pos, axis=0)[:, 1][:(pos.shape[0] // 20)]
            else:
                indices = np.argsort(pos, axis=0)[:, 1][-(pos.shape[0] // 20):]
            start = np.copy(pos[indices])
            # lift up a bit
            mid = np.copy(start)
            mid[:, 2] += y_extent * 0.2
            # move towards y_max
            end = np.copy(mid)
            end[:, 1] += direction * y_extent * 0.4
            increments = 25
            for ctrl_pts in np.concatenate([np.linspace(start, mid, increments), np.linspace(mid, end, increments)]):
                obj.root_link.set_particle_positions(ctrl_pts, idxs=indices)
                env.step(np.array([]))
                print_state()
        # Keep reporting state until the user kills the demo
        while True:
            env.step(np.array([]))
            print_state()
    # Shut down env at the end
    print()
    env.close()
if __name__ == "__main__":
    main()
| 5,379 | Python | 33.487179 | 117 | 0.5066 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/overlaid_demo.py | import numpy as np
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils.constants import PrimType
from omnigibson.object_states import Overlaid
# Make sure object states and GPU dynamics are enabled (GPU dynamics needed for cloth)
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of cloth objects that can be overlaid on rigid objects.
    Loads a carpet on top of a table. Initially Overlaid will be True because the carpet largely covers the table.
    If you drag the carpet off the table or even just fold it into half, Overlaid will become False.

    Args:
        random_selection (bool): Accepted for API consistency with the other examples; unused here.
        headless (bool): Accepted for API consistency with the other examples; unused here.
        short_exec (bool): If True, cap the demo at 100 environment steps instead of running forever.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene + custom cloth object + custom rigid object
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [
            {
                "type": "DatasetObject",
                "name": "carpet",
                "category": "carpet",
                "model": "ctclvd",
                "bounding_box": [1.346, 0.852, 0.017],
                "prim_type": PrimType.CLOTH,
                "abilities": {"cloth": {}},
                "position": [0, 0, 1.0],
            },
            {
                "type": "DatasetObject",
                "name": "breakfast_table",
                "category": "breakfast_table",
                "model": "rjgmmy",
                "bounding_box": [1.36, 1.081, 0.84],
                "prim_type": PrimType.RIGID,
                "position": [0, 0, 0.58],
            },
        ],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Grab object references
    carpet = env.scene.object_registry("name", "carpet")
    breakfast_table = env.scene.object_registry("name", "breakfast_table")
    # Set camera pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.88215526, -1.40086216, 2.00311063]),
        orientation=np.array([0.42013364, 0.12342107, 0.25339685, 0.86258043]),
    )
    # -1 is a sentinel meaning "run forever"; short_exec caps the loop at 100 steps
    max_steps = 100 if short_exec else -1
    steps = 0
    print("\nTry dragging cloth around with CTRL + Left-Click to see the Overlaid state change:\n")
    while steps != max_steps:
        print(f"Overlaid {carpet.states[Overlaid].get_value(breakfast_table)} ", end="\r")
        env.step(np.array([]))
        # BUGFIX: the counter was never incremented, so the loop ignored max_steps
        # and short_exec could never terminate the demo
        steps += 1
    # Shut down env at the end
    env.close()
if __name__ == "__main__":
    main()
| 2,554 | Python | 31.341772 | 114 | 0.568912 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/particle_source_sink_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
from omnigibson.utils.constants import ParticleModifyCondition
# Make sure object states are enabled and GPU dynamics are used
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_HQ_RENDERING = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of ParticleSource and ParticleSink object states, which enable objects to either spawn arbitrary
    particles and remove arbitrary particles from the simulator, respectively.
    Loads an empty scene with a sink, which is enabled with both the ParticleSource and ParticleSink states.
    The sink's particle source is located at the faucet spout and spawns a continuous stream of water particles,
    which is then destroyed ("sunk") by the sink's particle sink located at the drain.
    NOTE: The key difference between ParticleApplier/Removers and ParticleSource/Sinks is that Applier/Removers
    requires contact (if using ParticleProjectionMethod.ADJACENCY) or overlap
    (if using ParticleProjectionMethod.PROJECTION) in order to spawn / remove particles, and generally only spawn
    particles at the contact points. ParticleSource/Sinks are special cases of ParticleApplier/Removers that
    always use ParticleProjectionMethod.PROJECTION and always spawn / remove particles within their projection volume,
    irregardless of overlap with other objects!
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene
    cfg = {
        "scene": {
            "type": "Scene",
        }
    }
    # Define objects to load into the environment
    sink_cfg = dict(
        type="DatasetObject",
        name="sink",
        category="sink",
        model="egwapq",
        bounding_box=[2.427, 0.625, 1.2],
        abilities={
            "toggleable": {},
            "particleSource": {
                "conditions": {
                    "water": [(ParticleModifyCondition.TOGGLEDON, True)],  # Must be toggled on for water source to be active
                },
                "initial_speed": 0.0,  # Water merely falls out of the spout
            },
            "particleSink": {
                "conditions": {
                    "water": [],  # No conditions, always sinking nearby particles
                },
            },
        },
        position=[0.0, 0, 0.42],
    )
    cfg["objects"] = [sink_cfg]
    # Create the environment!
    env = og.Environment(configs=cfg)
    # Set camera to ideal angle for viewing objects
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.37860532, -0.65396566, 1.4067066 ]),
        orientation=np.array([0.49909498, 0.15201752, 0.24857062, 0.81609284]),
    )
    # Take a few steps to let the objects settle, and then turn on the sink
    for _ in range(10):
        env.step(np.array([]))  # Empty action since no robots are in the scene
    sink = env.scene.object_registry("name", "sink")
    # Toggling on activates the water source (see the particleSource conditions above)
    assert sink.states[object_states.ToggledOn].set_value(True)
    # Take a step, and save the state
    env.step(np.array([]))
    initial_state = og.sim.dump_state()
    # Main simulation loop.
    max_steps = 1000
    # -1 is a sentinel meaning "loop forever"; short_exec runs a single iteration
    max_iterations = -1 if not short_exec else 1
    iteration = 0
    try:
        while iteration != max_iterations:
            # Keep stepping until table or bowl are clean, or we reach 1000 steps
            steps = 0
            while steps != max_steps:
                steps += 1
                env.step(np.array([]))
            og.log.info("Max steps reached; resetting.")
            # Reset to the initial state
            og.sim.load_state(initial_state)
            iteration += 1
    finally:
        # Always shut down environment at the end
        env.close()
if __name__ == "__main__":
    main()
| 3,985 | Python | 34.90991 | 126 | 0.627353 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/slicing_demo.py | import numpy as np
import omnigibson as og
from omnigibson.macros import gm
import omnigibson.utils.transform_utils as T
# Make sure object states and transition rules are enabled
gm.ENABLE_OBJECT_STATES = True
gm.ENABLE_TRANSITION_RULES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of slicing an apple into two apple slices

    Drops a table knife onto an apple resting on a breakfast table; the transition
    rule machinery then replaces the whole apple with two apple-slice objects.

    Args:
        random_selection (bool): Accepted for API consistency with the other examples; unused here.
        headless (bool): Accepted for API consistency with the other examples; unused here.
        short_exec (bool): If True, skip the blocking input() prompts so the demo
            can run unattended (e.g. from automated tests). Previously this
            parameter was accepted but ignored, hanging headless runs.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene with table, knife, and apple
    table_cfg = dict(
        type="DatasetObject",
        name="table",
        category="breakfast_table",
        model="rjgmmy",
        bounding_box=[1.36, 1.081, 0.84],
        position=[0, 0, 0.58],
    )
    apple_cfg = dict(
        type="DatasetObject",
        name="apple",
        category="apple",
        model="agveuv",
        bounding_box=[0.098, 0.098, 0.115],
        position=[0.085, 0, 0.92],
    )
    # Knife spawns far above the scene; it is teleported into place once the apple settles
    knife_cfg = dict(
        type="DatasetObject",
        name="knife",
        category="table_knife",
        model="lrdmpf",
        bounding_box=[0.401, 0.044, 0.009],
        position=[0, 0, 20.0],
    )
    light0_cfg = dict(
        type="LightObject",
        name="light0",
        light_type="Sphere",
        radius=0.01,
        intensity=4000.0,
        position=[1.217, -0.848, 1.388],
    )
    light1_cfg = dict(
        type="LightObject",
        name="light1",
        light_type="Sphere",
        radius=0.01,
        intensity=4000.0,
        position=[-1.217, 0.848, 1.388],
    )
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [table_cfg, apple_cfg, knife_cfg, light0_cfg, light1_cfg]
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Grab reference to apple and knife
    apple = env.scene.object_registry("name", "apple")
    knife = env.scene.object_registry("name", "knife")
    # Update the simulator's viewer camera's pose so it points towards the table
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.544888, -0.412084, 1.11569 ]),
        orientation=np.array([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
    )
    # Let apple settle
    for _ in range(50):
        env.step(np.array([]))
    # Position the knife blade-down just above the apple so gravity performs the slice
    knife.keep_still()
    knife.set_position_orientation(
        position=apple.get_position() + np.array([-0.15, 0.0, 0.2]),
        orientation=T.euler2quat([-np.pi / 2, 0, 0]),
    )
    if not short_exec:
        input("The knife will fall on the apple and slice it. Press [ENTER] to continue.")
    # Step simulation for a bit so that apple is sliced
    for i in range(1000):
        env.step(np.array([]))
    if not short_exec:
        input("Apple has been sliced! Press [ENTER] to terminate the demo.")
    # Always close environment at the end
    env.close()
if __name__ == "__main__":
    main()
| 2,901 | Python | 25.87037 | 103 | 0.577732 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/heated_state_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of the Heated object state with steam visual effects.

    Loads a sphere light plus three bowls of increasing scale, heats them all
    above the Heated temperature threshold, then lets them cool while steam
    rises, continuously printing their temperatures.

    Args:
        random_selection (bool): Accepted for consistency with the other example scripts; unused here.
        headless (bool): Accepted for consistency with the other example scripts; unused here.
        short_exec (bool): If True, skip the blocking input() prompts and shorten
            the cool-down loop so the demo can run unattended.

    NOTE: The standard (random_selection, headless, short_exec) signature was added
    so this example matches its siblings and can be invoked by the shared test
    harness; default behavior is unchanged.
    """
    # Define object configurations for objects to load -- we want to load a light and three bowls
    obj_configs = []
    obj_configs.append(dict(
        type="LightObject",
        light_type="Sphere",
        name="light",
        radius=0.01,
        intensity=1e8,
        position=[-2.0, -2.0, 1.0],
    ))
    for i, (scale, x) in enumerate(zip([0.5, 1.0, 2.0], [-0.6, 0, 0.8])):
        obj_configs.append(dict(
            type="DatasetObject",
            name=f"bowl{i}",
            category="bowl",
            model="ajzltc",
            bounding_box=np.array([0.329, 0.293, 0.168]) * scale,
            abilities={"heatable": {}},
            position=[x, 0, 0.2],
        ))
    # Create the scene config to load -- empty scene with light object and bowls
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": obj_configs,
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.182103, -2.07295 , 0.14017 ]),
        orientation=np.array([0.77787037, 0.00267566, 0.00216149, 0.62841535]),
    )
    # Dim the skybox so we can see the bowls' steam effectively
    env.scene.skybox.intensity = 100.0
    # Grab reference to objects of relevance
    objs = list(env.scene.object_registry("category", "bowl"))
    def report_states(objs):
        # Print temperature and Heated state for every bowl
        for obj in objs:
            print("=" * 20)
            print("object:", obj.name)
            print("temperature:", obj.states[object_states.Temperature].get_value())
            print("obj is heated:", obj.states[object_states.Heated].get_value())
    # Report default states
    print("==== Initial state ====")
    report_states(objs)
    # Notify user that we're about to heat the object
    if not short_exec:
        input("Objects will be heated, and steam will slowly rise. Press ENTER to continue.")
    # Heated.
    for obj in objs:
        obj.states[object_states.Temperature].set_value(50)
    env.step(np.array([]))
    report_states(objs)
    # Take a look at the steam effect.
    # After a while, objects will be below the Steam temperature threshold.
    print("==== Objects are now heated... ====")
    print()
    for _ in range(100 if short_exec else 2000):
        env.step(np.array([]))
        # Also print temperatures
        temps = [f"{obj.states[object_states.Temperature].get_value():>7.2f}" for obj in objs]
        # (was an f-string with no placeholders -- plain literal is equivalent)
        print("obj temps:", *temps, end="\r")
    print()
    # Objects are not heated anymore.
    print("==== Objects are no longer heated... ====")
    report_states(objs)
    # Close environment at the end
    if not short_exec:
        input("Demo completed. Press ENTER to shutdown environment.")
    env.close()
if __name__ == "__main__":
    main()
| 2,977 | Python | 29.080808 | 97 | 0.595566 |
StanfordVL/OmniGibson/omnigibson/examples/objects/draw_bounding_box.py | import matplotlib.pyplot as plt
import numpy as np
import omnigibson as og
def main(random_selection=False, headless=False, short_exec=False):
    """
    Shows how to obtain the bounding box of an articulated object.
    Draws the bounding box around the loaded object, a cabinet, and writes the visualized image to disk at the
    current directory named 'bbox_2d_[loose / tight]_img.png'.
    NOTE: In the GUI, bounding boxes can be natively viewed by clicking on the sensor ((*)) icon at the top,
    and then selecting the appropriate bounding box modalities, and clicking "Show". See:
    https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/visualization.html#the-visualizer
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Specify objects to load
    banana_cfg = dict(
        type="DatasetObject",
        name="banana",
        category="banana",
        model="vvyyyv",
        bounding_box=[0.643, 0.224, 0.269],
        position=[-0.906661, -0.545106, 0.136824],
        orientation=[0, 0, 0.76040583, -0.6494482],
    )
    door_cfg = dict(
        type="DatasetObject",
        name="door",
        category="door",
        model="ohagsq",
        bounding_box=[1.528, 0.064, 1.299],
        position=[-2.0, 0, 0.70000001],
        orientation=[0, 0, -0.38268343, 0.92387953],
    )
    # Create the scene config to load -- empty scene with a few objects
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [banana_cfg, door_cfg],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set camera to appropriate viewing pose
    cam = og.sim.viewer_camera
    cam.set_position_orientation(
        position=np.array([-4.62785 , -0.418575, 0.933943]),
        orientation=np.array([ 0.52196595, -0.4231939 , -0.46640436, 0.5752612 ]),
    )
    # Add bounding boxes to camera sensor
    bbox_modalities = ["bbox_3d", "bbox_2d_loose", "bbox_2d_tight"]
    for bbox_modality in bbox_modalities:
        cam.add_modality(bbox_modality)
    # Take a few steps to let objects settle
    for i in range(100):
        env.step(np.array([]))
    # Grab observations from viewer camera and write them to disk
    obs, _ = cam.get_obs()
    # Hoisted loop-invariant import (was previously re-executed on every loop iteration)
    from omnigibson.utils.deprecated_utils import colorize_bboxes
    for bbox_modality in bbox_modalities:
        # Print out each of the modalities
        og.log.info(f"Observation modality {bbox_modality}:\n{obs[bbox_modality]}")
        # Also write the 2d loose bounding box to disk
        if "3d" not in bbox_modality:
            colorized_img = colorize_bboxes(bboxes_2d_data=obs[bbox_modality], bboxes_2d_rgb=obs["rgb"], num_channels=4)
            fpath = f"{bbox_modality}_img.png"
            plt.imsave(fpath, colorized_img)
            og.log.info(f"Saving modality [{bbox_modality}] image to: {fpath}")
    # Always close environment down at end
    env.close()
if __name__ == "__main__":
    main()
| 3,048 | Python | 33.647727 | 120 | 0.621391 |
StanfordVL/OmniGibson/omnigibson/examples/objects/highlight_objects.py | import numpy as np
import omnigibson as og
def main(random_selection=False, headless=False, short_exec=False):
    """
    Highlights visually all object instances of windows and then removes the highlighting
    It also demonstrates how to apply an action on all instances of objects of a given category
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Build the environment directly from an inline config: the Rs_int interactive scene
    env = og.Environment(
        configs={
            "scene": {
                "type": "InteractiveTraversableScene",
                "scene_model": "Rs_int",
            }
        }
    )
    # Collect every object categorized as a window
    windows = og.sim.scene.object_registry("category", "window")
    # Step the simulation, flipping every window's highlight once every 50 steps.
    # A max_steps of -1 means "run until killed"; short_exec caps it at 1000 steps.
    max_steps = 1000 if short_exec else -1
    is_highlighted = False
    step_count = 0
    while step_count != max_steps:
        env.step(np.array([]))
        if step_count % 50 == 0:
            is_highlighted = not is_highlighted
            og.log.info(f"Toggling window highlight to: {is_highlighted}")
            for window in windows:
                # Note that this property is R/W!
                window.highlighted = is_highlighted
        step_count += 1
    # Always close the environment at the end
    env.close()
if __name__ == "__main__":
    main()
| 1,359 | Python | 28.565217 | 103 | 0.595291 |
StanfordVL/OmniGibson/omnigibson/examples/objects/load_object_selector.py | import numpy as np
import omnigibson as og
from omnigibson.utils.asset_utils import (
get_all_object_categories,
get_og_avg_category_specs,
get_all_object_category_models,
)
from omnigibson.utils.ui_utils import choose_from_options
def main(random_selection=False, headless=False, short_exec=False):
    """
    This demo shows how to load any scaled objects from the OG object model dataset
    The user selects an object model to load
    The objects can be loaded into an empty scene or an interactive scene (OG)
    The example also shows how to use the Environment API or directly the Simulator API, loading objects and robots
    and executing actions
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    scene_options = ["Scene", "InteractiveTraversableScene"]
    scene_type = choose_from_options(options=scene_options, name="scene type", random_selection=random_selection)
    # -- Choose the object to load --
    # Select a category to load
    available_obj_categories = get_all_object_categories()
    obj_category = choose_from_options(options=available_obj_categories, name="object category", random_selection=random_selection)
    # Select a model to load
    available_obj_models = get_all_object_category_models(obj_category)
    obj_model = choose_from_options(options=available_obj_models, name="object model", random_selection=random_selection)
    # NOTE: removed a dead call to get_og_avg_category_specs() whose return value was never used
    # Create and load this object into the simulator
    obj_cfg = dict(
        type="DatasetObject",
        name="obj",
        category=obj_category,
        model=obj_model,
        position=[0, 0, 50.0],
    )
    cfg = {
        "scene": {
            "type": scene_type,
        },
        "objects": [obj_cfg],
    }
    # Interactive scenes additionally need a concrete scene model to load
    if scene_type == "InteractiveTraversableScene":
        cfg["scene"]["scene_model"] = "Rs_int"
    # Create the environment
    env = og.Environment(configs=cfg)
    # Place the object so it rests on the floor
    obj = env.scene.object_registry("name", "obj")
    center_offset = obj.get_position() - obj.aabb_center + np.array([0, 0, obj.aabb_extent[2] / 2.0])
    obj.set_position(center_offset)
    # Step through the environment
    max_steps = 100 if short_exec else 10000
    for i in range(max_steps):
        env.step(np.array([]))
    # Always close the environment at the end
    env.close()
if __name__ == "__main__":
    main()
| 2,553 | Python | 33.986301 | 131 | 0.66275 |
StanfordVL/OmniGibson/omnigibson/examples/objects/visualize_object.py | import argparse
import numpy as np
import omnigibson as og
from omnigibson.utils.asset_utils import (
get_all_object_categories,
get_all_object_category_models,
)
from omnigibson.utils.ui_utils import choose_from_options
import omnigibson.utils.transform_utils as T
def main(random_selection=False, headless=False, short_exec=False):
    """
    Visualizes object as specified by its USD path, @usd_path. If None if specified, will instead
    result in an object selection from OmniGibson's object dataset
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Assuming that if random_selection=True, headless=True, short_exec=True, we are calling it from tests and we
    # do not want to parse args (it would fail because the calling function is pytest "testfile.py")
    usd_path = None
    if not (random_selection and headless and short_exec):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--usd_path",
            default=None,
            help="USD Model to load",
        )
        args = parser.parse_args()
        usd_path = args.usd_path
    # Define objects to load
    light0_cfg = dict(
        type="LightObject",
        light_type="Sphere",
        name="sphere_light0",
        radius=0.01,
        intensity=1e5,
        position=[-2.0, -2.0, 2.0],
    )
    light1_cfg = dict(
        type="LightObject",
        light_type="Sphere",
        name="sphere_light1",
        radius=0.01,
        intensity=1e5,
        position=[-2.0, 2.0, 2.0],
    )
    # Make sure we have a valid usd path
    if usd_path is None:
        # Select a category to load
        available_obj_categories = get_all_object_categories()
        obj_category = choose_from_options(options=available_obj_categories, name="object category",
                                           random_selection=random_selection)
        # Select a model to load
        available_obj_models = get_all_object_category_models(obj_category)
        obj_model = choose_from_options(options=available_obj_models, name="object model",
                                        random_selection=random_selection)
        kwargs = {
            "type": "DatasetObject",
            "category": obj_category,
            "model": obj_model,
        }
    else:
        kwargs = {
            "type": "USDObject",
            "usd_path": usd_path,
        }
    # Import the desired object.
    # BUGFIX: kwargs already carries the type-specific keys -- including "usd_path"
    # for the USDObject branch -- so passing usd_path=usd_path again here raised
    # "TypeError: dict() got multiple values for keyword argument 'usd_path'"
    # whenever --usd_path was supplied.
    obj_cfg = dict(
        **kwargs,
        name="obj",
        visual_only=True,
        position=[0, 0, 10.0],
    )
    # Create the scene config to load -- empty scene
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [light0_cfg, light1_cfg, obj_cfg],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-0.00913503, -1.95750906, 1.36407314]),
        orientation=np.array([0.6350064 , 0.        , 0.        , 0.77250687]),
    )
    # Grab the object references
    obj = env.scene.object_registry("name", "obj")
    # Standardize the scale of the object so it fits in a [1,1,1] box -- note that we have to stop the simulator
    # in order to set the scale
    extents = obj.aabb_extent
    og.sim.stop()
    obj.scale = (np.ones(3) / extents).min()
    og.sim.play()
    env.step(np.array([]))
    # Move the object so that its center is at [0, 0, 1]
    center_offset = obj.get_position() - obj.aabb_center + np.array([0, 0, 1.0])
    obj.set_position(center_offset)
    # Allow the user to easily move the camera around
    og.sim.enable_viewer_camera_teleoperation()
    # Rotate the object in place
    steps_per_rotate = 360
    steps_per_joint = steps_per_rotate / 10
    max_steps = 100 if short_exec else 10000
    for i in range(max_steps):
        z_angle = (2 * np.pi * (i % steps_per_rotate) / steps_per_rotate)
        quat = T.euler2quat(np.array([0, 0, z_angle]))
        pos = T.quat2mat(quat) @ center_offset
        if obj.n_dof > 0:
            # Sweep each articulated joint back and forth across its normalized range
            frac = (i % steps_per_joint) / steps_per_joint
            j_frac = -1.0 + 2.0 * frac if (i // steps_per_joint) % 2 == 0 else 1.0 - 2.0 * frac
            obj.set_joint_positions(positions=j_frac * np.ones(obj.n_dof), normalized=True, drive=False)
        obj.keep_still()
        obj.set_position_orientation(position=pos, orientation=quat)
        env.step(np.array([]))
    # Shut down at the end
    og.shutdown()
if __name__ == "__main__":
    main()
| 4,641 | Python | 31.921986 | 113 | 0.587373 |
StanfordVL/OmniGibson/omnigibson/examples/renderer_settings/renderer_settings_example.py | import numpy as np
import omnigibson as og
from omnigibson.renderer_settings.renderer_settings import RendererSettings
def main(random_selection=False, headless=False, short_exec=False):
    """
    Shows how to use RendererSettings class

    Walks through the RendererSettings singleton API: switching renderers,
    setting / getting / resetting individual settings, and demonstrating the
    errors raised for invalid values, out-of-range values, and unknown paths.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Specify objects to load
    banana_cfg = dict(
        type="DatasetObject",
        name="banana",
        category="banana",
        model="vvyyyv",
        scale=[3.0, 5.0, 2.0],
        position=[-0.906661, -0.545106, 0.136824],
        orientation=[0, 0, 0.76040583, -0.6494482 ],
    )
    door_cfg = dict(
        type="DatasetObject",
        name="door",
        category="door",
        model="ohagsq",
        position=[-2.0, 0, 0.70000001],
        orientation=[0, 0, -0.38268343, 0.92387953],
    )
    # Create the scene config to load -- empty scene with a few objects
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [banana_cfg, door_cfg],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set camera to appropriate viewing pose
    cam = og.sim.viewer_camera
    cam.set_position_orientation(
        position=np.array([-4.62785 , -0.418575, 0.933943]),
        orientation=np.array([ 0.52196595, -0.4231939 , -0.46640436, 0.5752612 ]),
    )
    def steps(n):
        # Convenience helper: advance the environment n steps with an empty action
        for _ in range(n):
            env.step(np.array([]))
    # Take a few steps to let objects settle
    steps(25)
    # Create renderer settings object.
    renderer_setting = RendererSettings()
    # RendererSettings is a singleton.
    renderer_setting2 = RendererSettings()
    assert renderer_setting == renderer_setting2
    # Set current renderer.
    input("Setting renderer to Real-Time. Press [ENTER] to continue.")
    renderer_setting.set_current_renderer("Real-Time")
    assert renderer_setting.get_current_renderer() == "Real-Time"
    steps(5)
    input("Setting renderer to Interactive (Path Tracing). Press [ENTER] to continue.")
    renderer_setting.set_current_renderer("Interactive (Path Tracing)")
    assert renderer_setting.get_current_renderer() == "Interactive (Path Tracing)"
    steps(5)
    # Get all available settings.
    print(renderer_setting.settings.keys())
    input("Showcasing how to use RendererSetting APIs. Please see example script for more information. "
          "Press [ENTER] to continue.")
    # Set setting (2 lines below are equivalent).
    renderer_setting.set_setting(path="/app/renderer/skipMaterialLoading", value=True)
    renderer_setting.common_settings.materials_settings.skip_material_loading.set(True)
    # Get setting (3 lines below are equivalent).
    assert renderer_setting.get_setting_from_path(path="/app/renderer/skipMaterialLoading") == True
    assert renderer_setting.common_settings.materials_settings.skip_material_loading.value == True
    assert renderer_setting.common_settings.materials_settings.skip_material_loading.get() == True
    # Reset setting (2 lines below are equivalent).
    renderer_setting.reset_setting(path="/app/renderer/skipMaterialLoading")
    renderer_setting.common_settings.materials_settings.skip_material_loading.reset()
    assert renderer_setting.get_setting_from_path(path="/app/renderer/skipMaterialLoading") == False
    # Set setting to an unallowed value using top-level method.
    # Examples below will use the "top-level" setting method.
    try:
        renderer_setting.set_setting(path="/app/renderer/skipMaterialLoading", value="foo")
    except AssertionError as e:
        print(e)  # All good. We got an AssertionError.
    # Set setting to a value out-of-range.
    try:
        renderer_setting.set_setting(path="/rtx/fog/fogColorIntensity", value=0.0)
    except AssertionError as e:
        print(e)  # All good. We got an AssertionError.
    # Set unallowed setting.
    try:
        renderer_setting.set_setting(path="foo", value="bar")
    except NotImplementedError as e:
        print(e)  # All good. We got a NotImplementedError.
    # Set setting but the setting group is not enabled.
    # Setting is successful but there will be a warning message printed.
    renderer_setting.set_setting(path="/rtx/fog/fogColorIntensity", value=1.0)
    # Shutdown sim
    input("Completed demo. Press [ENTER] to shutdown simulation.")
    og.shutdown()
if __name__ == "__main__":
    main()
| 4,453 | Python | 34.919355 | 104 | 0.662924 |
StanfordVL/OmniGibson/omnigibson/examples/robots/grasping_mode_example.py | """
Example script demo'ing robot manipulation control with grasping.
"""
import numpy as np
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.sensors import VisionSensor
from omnigibson.utils.ui_utils import choose_from_options, KeyboardRobotController
GRASPING_MODES = dict(
sticky="Sticky Mitten - Objects are magnetized when they touch the fingers and a CLOSE command is given",
assisted="Assisted Grasping - Objects are magnetized when they touch the fingers, are within the hand, and a CLOSE command is given",
physical="Physical Grasping - No additional grasping assistance applied",
)
# Don't use GPU dynamics and Use flatcache for performance boost
gm.USE_GPU_DYNAMICS = False
gm.ENABLE_FLATCACHE = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Robot grasping mode demo with selection
    Queries the user to select a type of grasping mode

    Spawns a Fetch robot in an empty scene with a table, chair, and small cube,
    then hands control to a keyboard teleop loop so the chosen grasping mode
    (sticky / assisted / physical) can be tried interactively.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Choose type of grasping
    grasping_mode = choose_from_options(options=GRASPING_MODES, name="grasping mode", random_selection=random_selection)
    # Create environment configuration to use
    scene_cfg = dict(type="Scene")
    robot0_cfg = dict(
        type="Fetch",
        obs_modalities=["rgb"],  # we're just doing a grasping demo so we don't need all observation modalities
        action_type="continuous",
        action_normalize=True,
        grasping_mode=grasping_mode,
    )
    # Define objects to load
    table_cfg = dict(
        type="DatasetObject",
        name="table",
        category="breakfast_table",
        model="lcsizg",
        bounding_box=[0.5, 0.5, 0.8],
        fixed_base=True,
        position=[0.7, -0.1, 0.6],
        orientation=[0, 0, 0.707, 0.707],
    )
    chair_cfg = dict(
        type="DatasetObject",
        name="chair",
        category="straight_chair",
        model="amgwaw",
        bounding_box=None,
        fixed_base=False,
        position=[0.45, 0.65, 0.425],
        orientation=[0, 0, -0.9990215, -0.0442276],
    )
    # Small graspable cube placed on the table top
    box_cfg = dict(
        type="PrimitiveObject",
        name="box",
        primitive_type="Cube",
        rgba=[1.0, 0, 0, 1.0],
        size=0.05,
        position=[0.53, -0.1, 0.97],
    )
    # Compile config
    cfg = dict(scene=scene_cfg, robots=[robot0_cfg], objects=[table_cfg, chair_cfg, box_cfg])
    # Create the environment
    env = og.Environment(configs=cfg)
    # Reset the robot
    robot = env.robots[0]
    robot.set_position([0, 0, 0])
    robot.reset()
    robot.keep_still()
    # Make the robot's camera(s) high-res
    for sensor in robot.sensors.values():
        if isinstance(sensor, VisionSensor):
            sensor.image_height = 720
            sensor.image_width = 720
    # Update the simulator's viewer camera's pose so it points towards the robot
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-2.39951, 2.26469, 2.66227]),
        orientation=np.array([-0.23898481, 0.48475231, 0.75464013, -0.37204802]),
    )
    # Create teleop controller
    action_generator = KeyboardRobotController(robot=robot)
    # Print out relevant keyboard info if using keyboard teleop
    action_generator.print_keyboard_teleop_info()
    # Other helpful user info
    print("Running demo with grasping mode {}.".format(grasping_mode))
    print("Press ESC to quit")
    # Loop control until user quits; -1 sentinel means "run until ESC", short_exec caps at 100 actions
    max_steps = -1 if not short_exec else 100
    step = 0
    while step != max_steps:
        # Each chosen action is held for 10 physics steps
        action = action_generator.get_random_action() if random_selection else action_generator.get_teleop_action()
        for _ in range(10):
            env.step(action)
        step += 1
    # Always shut down the environment cleanly at the end
    env.close()
if __name__ == "__main__":
    main()
| 3,902 | Python | 30.731707 | 137 | 0.639672 |
StanfordVL/OmniGibson/omnigibson/examples/robots/all_robots_visualizer.py | import numpy as np
import omnigibson as og
from omnigibson.robots import REGISTERED_ROBOTS
def main(random_selection=False, headless=False, short_exec=False):
    """
    Robot demo
    Loads all robots in an empty scene, generate random actions
    """
    # NOTE: main.__doc__ is embedded in this banner, so the docstring text is part of the runtime log output.
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create empty scene with no robots in it initially
    cfg = {
        "scene": {
            "type": "Scene",
        }
    }
    env = og.Environment(configs=cfg)
    # Iterate over all robots and demo their motion
    for robot_name, robot_cls in REGISTERED_ROBOTS.items():
        # Create and import robot
        robot = robot_cls(
            prim_path=f"/World/{robot_name}",
            name=robot_name,
            obs_modalities=[],  # We're just moving robots around so don't load any observation modalities
        )
        og.sim.import_object(robot)
        # At least one step is always needed while sim is playing for any imported object to be fully initialized
        og.sim.play()
        og.sim.step()
        # Reset robot and make sure it's not moving
        robot.reset()
        robot.keep_still()
        # Log information
        og.log.info(f"Loaded {robot_name}")
        og.log.info(f"Moving {robot_name}")
        if not headless:
            # Set viewer in front facing robot
            og.sim.viewer_camera.set_position_orientation(
                position=np.array([ 2.69918369, -3.63686664, 4.57894564]),
                orientation=np.array([0.39592411, 0.1348514 , 0.29286304, 0.85982 ]),
            )
            og.sim.enable_viewer_camera_teleoperation()
        # Hold still briefly so viewer can see robot
        for _ in range(100):
            og.sim.step()
        # Then apply random actions for a bit: 30 distinct random commands, each held for 10 sim steps
        for _ in range(30):
            action = np.random.uniform(-1, 1, robot.action_dim)
            if robot_name == "Tiago":
                # NOTE(review): Tiago's base command range is deliberately narrowed to [-0.1, 0.1] —
                # presumably to limit base drift during the demo; confirm intent
                action[robot.base_action_idx] = np.random.uniform(-0.1, 0.1, len(robot.base_action_idx))
            for _ in range(10):
                env.step(action)
        # Stop the simulator and remove the robot
        og.sim.stop()
        og.sim.remove_object(obj=robot)
    # Always shut down the environment cleanly at the end
    env.close()
# Script entry point: iterate through every registered robot and demo it.
if __name__ == "__main__":
    main()
| 2,369 | Python | 30.6 | 118 | 0.57577 |
StanfordVL/OmniGibson/omnigibson/examples/robots/robot_control_example.py | """
Example script demo'ing robot control.
Options for random actions, as well as selection of robot action space
"""
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm
from omnigibson.robots import REGISTERED_ROBOTS
from omnigibson.utils.ui_utils import choose_from_options, KeyboardRobotController
# User-facing descriptions for the two supported control modes (keys are the selectable option names)
CONTROL_MODES = dict(
    random="Use autonomous random actions (default)",
    teleop="Use keyboard control",
)
# User-facing descriptions for the scenes this demo can load
SCENES = dict(
    Rs_int="Realistic interactive home environment (default)",
    empty="Empty environment with no objects",
)
# Don't use GPU dynamics and use flatcache for performance boost
gm.USE_GPU_DYNAMICS = False
gm.ENABLE_FLATCACHE = True
def choose_controllers(robot, random_selection=False):
    """
    Query the user (or pick randomly) for a controller type for every controllable component of @robot.

    :param robot: BaseRobot, robot class from which to infer relevant valid controller options
    :param random_selection: bool, if the selection is random (for automatic demo execution). Default False
    :return dict: Mapping from individual robot component (e.g.: base, arm, etc.) to selected controller names
    """
    # The default controller config acts as a registry of every valid controller per component
    default_config = robot._default_controller_config
    # Prompt once per component, preserving the component ordering of the config
    return {
        component: choose_from_options(
            options=list(sorted(per_component_options.keys())),
            name="{} controller".format(component),
            random_selection=random_selection,
        )
        for component, per_component_options in default_config.items()
    }
def main(random_selection=False, headless=False, short_exec=False):
    """
    Robot control demo with selection
    Queries the user to select a robot, the controllers, a scene and a type of input (random actions or teleop)
    """
    # NOTE: main.__doc__ is embedded in this banner, so the docstring text is part of the runtime log output.
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Choose scene to load
    scene_model = choose_from_options(options=SCENES, name="scene", random_selection=random_selection)
    # Choose robot to create
    robot_name = choose_from_options(
        options=list(sorted(REGISTERED_ROBOTS.keys())), name="robot", random_selection=random_selection
    )
    scene_cfg = dict()
    if scene_model == "empty":
        scene_cfg["type"] = "Scene"
    else:
        scene_cfg["type"] = "InteractiveTraversableScene"
        scene_cfg["scene_model"] = scene_model
    # Add the robot we want to load
    robot0_cfg = dict()
    robot0_cfg["type"] = robot_name
    robot0_cfg["obs_modalities"] = ["rgb", "depth", "seg_instance", "normal", "scan", "occupancy_grid"]
    robot0_cfg["action_type"] = "continuous"
    robot0_cfg["action_normalize"] = True
    # Compile config
    cfg = dict(scene=scene_cfg, robots=[robot0_cfg])
    # Create the environment
    env = og.Environment(configs=cfg)
    # Choose robot controller to use
    robot = env.robots[0]
    controller_choices = choose_controllers(robot=robot, random_selection=random_selection)
    # Choose control mode (forced to "random" for automated/test execution)
    if random_selection:
        control_mode = "random"
    else:
        control_mode = choose_from_options(options=CONTROL_MODES, name="control mode")
    # Update the control mode of the robot
    controller_config = {component: {"name": name} for component, name in controller_choices.items()}
    robot.reload_controllers(controller_config=controller_config)
    # Because the controllers have been updated, we need to update the initial state so the correct controller state
    # is preserved
    env.scene.update_initial_state()
    # Update the simulator's viewer camera's pose so it points towards the robot
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([1.46949, -3.97358, 2.21529]),
        orientation=np.array([0.56829048, 0.09569975, 0.13571846, 0.80589577]),
    )
    # Reset environment and robot
    env.reset()
    robot.reset()
    # Create teleop controller
    action_generator = KeyboardRobotController(robot=robot)
    # Register custom binding to reset the environment
    action_generator.register_custom_keymapping(
        key=lazy.carb.input.KeyboardInput.R,
        description="Reset the robot",
        callback_fn=lambda: env.reset(),
    )
    # Print out relevant keyboard info if using keyboard teleop
    if control_mode == "teleop":
        action_generator.print_keyboard_teleop_info()
    # Other helpful user info
    print("Running demo.")
    print("Press ESC to quit")
    # Loop control until user quits
    # short_exec caps the demo at 100 steps; otherwise max_steps stays -1, never equals step, and we loop until ESC
    max_steps = -1 if not short_exec else 100
    step = 0
    while step != max_steps:
        action = action_generator.get_random_action() if control_mode == "random" else action_generator.get_teleop_action()
        env.step(action=action)
        step += 1
    # Always shut down the environment cleanly at the end
    env.close()
# Script entry point: run the interactive robot-control demo.
if __name__ == "__main__":
    main()
| 5,275 | Python | 33.038709 | 123 | 0.68872 |
StanfordVL/OmniGibson/omnigibson/examples/robots/advanced/ik_example.py | import argparse
import time
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.objects import PrimitiveObject
from omnigibson.robots import Fetch
from omnigibson.scenes import Scene
from omnigibson.utils.control_utils import IKSolver
def main(random_selection=False, headless=False, short_exec=False):
    """
    Minimal example of usage of inverse kinematics solver
    This example showcases how to construct your own IK functionality using omniverse's native lula library
    without explicitly utilizing all of OmniGibson's class abstractions, and also showcases how to manipulate
    the simulator at a lower-level than the main Environment entry point.
    """
    # NOTE: main.__doc__ is embedded in this banner, so the docstring text is part of the runtime log output.
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Assuming that if random_selection=True, headless=True, short_exec=True, we are calling it from tests and we
    # do not want to parse args (it would fail because the calling function is pytest "testfile.py")
    if not (random_selection and headless and short_exec):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--programmatic",
            "-p",
            dest="programmatic_pos",
            action="store_true",
            help="if the IK solvers should be used with the GUI or programmatically",
        )
        args = parser.parse_args()
        programmatic_pos = args.programmatic_pos
    else:
        programmatic_pos = True
    # Import scene and robot (Fetch)
    scene_cfg = {"type": "Scene"}
    # Create Fetch robot
    # Note that since we only care about IK functionality, we fix the base (this also makes the robot more stable)
    # (any object can also have its fixed_base attribute set to True!)
    # Note that since we're going to be setting joint position targets, we also need to make sure the robot's arm joints
    # (which includes the trunk) are being controlled using joint positions
    robot_cfg = {
        "type": "Fetch",
        "fixed_base": True,
        "controller_config": {
            "arm_0": {
                "name": "NullJointController",
                "motor_type": "position",
            }
        }
    }
    cfg = dict(scene=scene_cfg, robots=[robot_cfg])
    env = og.Environment(configs=cfg)
    # Update the viewer camera's pose so that it points towards the robot
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([4.32248, -5.74338, 6.85436]),
        orientation=np.array([0.39592, 0.13485, 0.29286, 0.85982]),
    )
    robot = env.robots[0]
    # Set robot base at the origin
    robot.set_position_orientation(np.array([0, 0, 0]), np.array([0, 0, 0, 1]))
    # At least one simulation step while the simulator is playing must occur for the robot (or in general, any object)
    # to be fully initialized after it is imported into the simulator
    og.sim.play()
    og.sim.step()
    # Make sure none of the joints are moving
    robot.keep_still()
    # Since this demo aims to showcase how users can directly control the robot with IK,
    # we will need to disable the built-in controllers in OmniGibson
    robot.control_enabled = False
    # Create the IK solver -- note that we are controlling both the trunk and the arm since both are part of the
    # controllable kinematic chain for the end-effector!
    control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
    ik_solver = IKSolver(
        robot_description_path=robot.robot_arm_descriptor_yamls[robot.default_arm],
        robot_urdf_path=robot.urdf_path,
        reset_joint_pos=robot.get_joint_positions()[control_idx],
        eef_name=robot.eef_link_names[robot.default_arm],
    )
    # Define a helper function for executing specific end-effector commands using the ik solver
    # (closure over robot / ik_solver / control_idx defined above)
    def execute_ik(pos, quat=None, max_iter=100):
        og.log.info("Querying joint configuration to current marker position")
        # Grab the joint positions in order to reach the desired pose target
        joint_pos = ik_solver.solve(
            target_pos=pos,
            target_quat=quat,
            tolerance_pos=0.002,
            tolerance_quat=0.01,
            weight_pos=20.0,
            weight_quat=0.05,
            max_iterations=max_iter,
            initial_joint_pos=robot.get_joint_positions()[control_idx],
        )
        if joint_pos is not None:
            og.log.info("Solution found. Setting new arm configuration.")
            robot.set_joint_positions(joint_pos, indices=control_idx, drive=True)
        else:
            og.log.info("EE position not reachable.")
        og.sim.step()
    if programmatic_pos or headless:
        # Sanity check IK using pre-defined hardcoded positions
        query_positions = [[1, 0, 0.8], [1, 1, 1], [0.5, 0.5, 0], [0.5, 0.5, 0.5]]
        for query_pos in query_positions:
            execute_ik(query_pos)
            time.sleep(2)
    else:
        # Create a visual marker to be moved by the user, representing desired end-effector position
        marker = PrimitiveObject(
            prim_path=f"/World/marker",
            name="marker",
            primitive_type="Sphere",
            radius=0.03,
            visual_only=True,
            rgba=[1.0, 0, 0, 1.0],
        )
        og.sim.import_object(marker)
        # Get initial EE position and set marker to that location
        command = robot.get_eef_position()
        marker.set_position(command)
        og.sim.step()
        # Setup callbacks for grabbing keyboard inputs from omni
        exit_now = False
        # The handler mutates the enclosing `command` / `exit_now` state via nonlocal
        def keyboard_event_handler(event, *args, **kwargs):
            nonlocal command, exit_now
            # Check if we've received a key press or repeat
            if event.type == lazy.carb.input.KeyboardEventType.KEY_PRESS \
                    or event.type == lazy.carb.input.KeyboardEventType.KEY_REPEAT:
                if event.input == lazy.carb.input.KeyboardInput.ENTER:
                    # Execute the command
                    execute_ik(pos=command)
                elif event.input == lazy.carb.input.KeyboardInput.ESCAPE:
                    # Quit
                    og.log.info("Quit.")
                    exit_now = True
                else:
                    # We see if we received a valid delta command, and if so, we update our command and visualized
                    # marker position
                    delta_cmd = input_to_xyz_delta_command(inp=event.input)
                    if delta_cmd is not None:
                        command = command + delta_cmd
                        marker.set_position(command)
                        og.sim.step()
            # Callback must return True if valid
            return True
        # Hook up the callback function with omni's user interface
        appwindow = lazy.omni.appwindow.get_default_app_window()
        input_interface = lazy.carb.input.acquire_input_interface()
        keyboard = appwindow.get_keyboard()
        # NOTE(review): the returned subscription handle is kept in sub_keyboard — presumably it must stay
        # referenced for the callback to remain active; confirm against the carb.input API
        sub_keyboard = input_interface.subscribe_to_keyboard_events(keyboard, keyboard_event_handler)
        # Print out helpful information to the user
        print_message()
        # Loop until the user requests an exit
        while not exit_now:
            og.sim.step()
    # Always shut the simulation down cleanly at the end
    og.app.close()
def input_to_xyz_delta_command(inp, delta=0.01):
    """
    Translate a keyboard input into a small cartesian translation delta.

    Args:
        inp: carb KeyboardInput value received from the event handler
        delta (float): step size in meters for a single key press

    Returns:
        None or np.ndarray: (x, y, z) delta command for a recognized key, otherwise None
    """
    Key = lazy.carb.input.KeyboardInput
    deltas = {
        Key.W: (delta, 0, 0),
        Key.S: (-delta, 0, 0),
        Key.DOWN: (0, 0, -delta),
        Key.UP: (0, 0, delta),
        Key.A: (0, delta, 0),
        Key.D: (0, -delta, 0),
    }
    cmd = deltas.get(inp)
    return None if cmd is None else np.array(cmd)
def print_message():
    """Print the usage instructions for the interactive IK marker demo."""
    instructions = (
        "*" * 80,
        "Move the marker to a desired position to query IK and press ENTER",
        "W/S: move marker further away or closer to the robot",
        "A/D: move marker to the left or the right of the robot",
        "UP/DOWN: move marker up and down",
        "ESC: quit",
    )
    for line in instructions:
        print(line)
# Script entry point: run the IK demo (GUI marker mode unless --programmatic is passed).
if __name__ == "__main__":
    main()
| 8,259 | Python | 39.891089 | 120 | 0.623683 |
StanfordVL/OmniGibson/omnigibson/sensors/sensor_noise_base.py | from abc import ABCMeta, abstractmethod
from omnigibson.utils.python_utils import classproperty, Registerable
# Registered sensor noises
REGISTERED_SENSOR_NOISES = dict()
class BaseSensorNoise(Registerable, metaclass=ABCMeta):
    """
    Base SensorNoise class.
    Sensor noise-specific behavior is implemented in subclasses via _corrupt().

    Args:
        enabled (bool): Whether this sensor noise model should be enabled by default
    """
    def __init__(self, enabled=True):
        # Store whether this noise model is enabled or not
        self._enabled = enabled

    def __call__(self, obs):
        """
        If this noise is enabled, corrupts observation @obs by adding sensor noise to sensor reading. This is an
        identical call to self.corrupt(...)

        Args:
            obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]

        Returns:
            np.array: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
        """
        return self.corrupt(obs=obs)

    def corrupt(self, obs):
        """
        If this noise is enabled, corrupts observation @obs by adding sensor noise to sensor reading.

        Args:
            obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]

        Returns:
            np.array: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
        """
        # Sanity check: every reading must lie within [0.0, 1.0].
        # Improvement: .any() on the boolean mask replaces the original
        # `len(obs[(obs < 0.0) | (obs > 1.0)]) == 0`, which materialized a throwaway array of
        # out-of-range elements just to count them. Semantics (including NaN handling, where both
        # comparisons are False) are unchanged, and AssertionError is still the failure mode.
        # NOTE: `assert` is stripped under `python -O`; kept as an assert for backward compatibility.
        assert not ((obs < 0.0) | (obs > 1.0)).any(), "sensor reading has to be between [0.0, 1.0]"
        return self._corrupt(obs=obs) if self._enabled else obs

    @abstractmethod
    def _corrupt(self, obs):
        """
        Corrupts observation @obs by adding sensor noise to sensor reading

        Args:
            obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]

        Returns:
            np.array: Corrupted observation numpy array
        """
        raise NotImplementedError()

    @property
    def enabled(self):
        """
        Returns:
            bool: Whether this noise model is enabled or not
        """
        return self._enabled

    @enabled.setter
    def enabled(self, enabled):
        """
        En/disables this noise model

        Args:
            enabled (bool): Whether this noise model should be enabled or not
        """
        self._enabled = enabled

    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("BaseSensorNoise")
        return classes

    @classproperty
    def _cls_registry(cls):
        # Global registry of all concrete sensor noise classes
        global REGISTERED_SENSOR_NOISES
        return REGISTERED_SENSOR_NOISES
| 2,917 | Python | 30.376344 | 115 | 0.633528 |
StanfordVL/OmniGibson/omnigibson/sensors/dropout_sensor_noise.py | import numpy as np
from omnigibson.sensors.sensor_noise_base import BaseSensorNoise
class DropoutSensorNoise(BaseSensorNoise):
    """
    Naive dropout sensor noise model: a random fraction of each observation is
    overwritten with a fixed constant value.

    Args:
        dropout_prob (float): Value in [0.0, 1.0] representing fraction of a single observation to be replaced
            with @dropout_value
        dropout_value (float): Value in [0.0, 1.0] to replace observations selected to be dropped out
        enabled (bool): Whether this sensor should be enabled by default
    """
    def __init__(
        self,
        dropout_prob=0.05,
        dropout_value=1.0,
        enabled=True,
    ):
        # Validate both settings up front, then store them
        assert 0.0 <= dropout_prob <= 1.0, f"dropout_prob should be in range [0.0, 1.0], got: {dropout_prob}"
        assert 0.0 <= dropout_value <= 1.0, f"dropout_value should be in range [0.0, 1.0], got: {dropout_value}"
        self._dropout_prob = dropout_prob
        self._dropout_value = dropout_value
        # Run super method
        super().__init__(enabled=enabled)

    def _corrupt(self, obs):
        # Nothing to corrupt when the dropout rate is zero
        if self._dropout_prob == 0.0:
            return obs
        # Draw a keep/drop flag per element: 0 => dropped, 1 => kept
        keep_mask = np.random.choice(2, obs.shape, p=[self._dropout_prob, 1.0 - self._dropout_prob])
        # NOTE: @obs is mutated in place before being returned
        obs[keep_mask == 0] = self._dropout_value
        return obs

    @property
    def dropout_prob(self):
        """
        Returns:
            float: Fraction in [0.0, 1.0] of each observation replaced with self.dropout_value
        """
        return self._dropout_prob

    @dropout_prob.setter
    def dropout_prob(self, prob):
        """
        Set the dropout probability for this noise model.

        Args:
            prob (float): Fraction in [0.0, 1.0] of each observation to replace with self.dropout_value
        """
        assert 0.0 <= prob <= 1.0, f"dropout_prob should be in range [0.0, 1.0], got: {prob}"
        self._dropout_prob = prob

    @property
    def dropout_value(self):
        """
        Returns:
            float: Constant in [0.0, 1.0] written over the dropped-out entries
        """
        return self._dropout_value

    @dropout_value.setter
    def dropout_value(self, value):
        """
        Set the dropout replacement value for this noise model.

        Args:
            value (float): Constant in [0.0, 1.0] written over the dropped-out entries
        """
        assert 0.0 <= value <= 1.0, f"dropout_value should be in range [0.0, 1.0], got: {value}"
        self._dropout_value = value
| 2,676 | Python | 32.049382 | 110 | 0.583707 |
StanfordVL/OmniGibson/omnigibson/sensors/__init__.py | from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.sensors.sensor_base import BaseSensor, REGISTERED_SENSORS, ALL_SENSOR_MODALITIES
from omnigibson.sensors.scan_sensor import ScanSensor
from omnigibson.sensors.vision_sensor import VisionSensor
from omnigibson.sensors.sensor_noise_base import BaseSensorNoise, REGISTERED_SENSOR_NOISES
from omnigibson.sensors.dropout_sensor_noise import DropoutSensorNoise
# Map sensor prim names to corresponding sensor classes
# (consulted before REGISTERED_SENSORS in create_sensor, so raw prim type names resolve first)
SENSOR_PRIMS_TO_SENSOR_CLS = {
    "Lidar": ScanSensor,
    "Camera": VisionSensor,
}
def create_sensor(
    sensor_type,
    prim_path,
    name,
    modalities="all",
    enabled=True,
    sensor_kwargs=None,
    noise_type=None,
    noise_kwargs=None
):
    """
    Factory for sensors: instantiate a sensor of @sensor_type, optionally wiring in a noise model of @noise_type.

    Args:
        sensor_type (str): Type of sensor to create. Should be either one of SENSOR_PRIMS_TO_SENSOR_CLS.keys() or
            one of REGISTERED_SENSORS (i.e.: the string name of the desired class to create)
        prim_path (str): prim path of the Sensor to encapsulate or create.
        name (str): Name for the sensor. Names need to be unique per scene.
        modalities (str or list of str): Modality(s) supported by this sensor. Valid options are part of
            sensor.all_modalities. Default is "all", which corresponds to all modalities being used
        enabled (bool): Whether this sensor should be enabled or not
        sensor_kwargs (dict): Any keyword kwargs to pass to the sensor constructor
        noise_type (str): Type of noise to create. Should be one of REGISTERED_SENSOR_NOISES
            (i.e.: the string name of the desired class to create)
        noise_kwargs (dict): Any keyword kwargs to pass to the noise constructor

    Returns:
        BaseSensor: Created sensor with specified params

    Raises:
        ValueError: If @sensor_type matches neither a known prim name nor a registered sensor class
    """
    # Run basic sanity check
    assert isinstance(sensor_type, str), "Inputted sensor_type must be a string!"
    # Resolve the sensor class; the prim-name mapping takes precedence over the class registry
    sensor_cls = SENSOR_PRIMS_TO_SENSOR_CLS.get(sensor_type)
    if sensor_cls is None:
        sensor_cls = REGISTERED_SENSORS.get(sensor_type)
    if sensor_cls is None:
        raise ValueError(f"No sensor found with corresponding sensor_type: {sensor_type}")
    # Optionally construct the noise model, validating the requested type first
    noise = None
    if noise_type is not None:
        assert_valid_key(key=noise_type, valid_keys=REGISTERED_SENSOR_NOISES, name="sensor noise type")
        noise = REGISTERED_SENSOR_NOISES[noise_type](**(dict() if noise_kwargs is None else noise_kwargs))
    # Construct and return the sensor itself
    return sensor_cls(
        prim_path=prim_path,
        name=name,
        modalities=modalities,
        enabled=enabled,
        noise=noise,
        **(dict() if sensor_kwargs is None else sensor_kwargs),
    )
| 3,282 | Python | 40.556962 | 112 | 0.696222 |
StanfordVL/OmniGibson/omnigibson/sensors/sensor_base.py | from abc import ABCMeta
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.utils.python_utils import classproperty, assert_valid_key, Registerable
from omnigibson.utils.gym_utils import GymObservable
from gym.spaces import Space
# Registered sensors
REGISTERED_SENSORS = dict()
# All possible modalities across all sensors
ALL_SENSOR_MODALITIES = set()
class BaseSensor(XFormPrim, GymObservable, Registerable, metaclass=ABCMeta):
    """
    Base Sensor class.
    Sensor-specific get_obs method is implemented in subclasses

    Args:
        prim_path (str): prim path of the Sensor to encapsulate or create.
        name (str): Name for the sensor. Names need to be unique per scene.
        modalities (str or list of str): Modality(s) supported by this sensor. Default is "all", which corresponds
            to all modalities being used. Otherwise, valid options should be part of cls.all_modalities.
        enabled (bool): Whether this sensor should be enabled by default
        noise (None or BaseSensorNoise): If specified, sensor noise model to apply to this sensor.
        load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
            loading this sensor's prim at runtime.
    """
    def __init__(
        self,
        prim_path,
        name,
        modalities="all",
        enabled=True,
        noise=None,
        load_config=None,
    ):
        # Store inputs (and sanity check modalities along the way)
        if modalities == "all":
            modalities = self.all_modalities
        else:
            # Allow a bare string to stand in for a single-element list
            modalities = [modalities] if isinstance(modalities, str) else modalities
            for modality in modalities:
                assert_valid_key(key=modality, valid_keys=self.all_modalities, name="modality")
        self._modalities = set(modalities)
        self._enabled = enabled
        self._noise = noise

        # Run super method
        super().__init__(
            prim_path=prim_path,
            name=name,
            load_config=load_config,
        )

    def _load(self):
        # Sub-sensors must implement this class directly! Cannot use parent XForm class by default
        raise NotImplementedError("Sensor class must implement _load!")

    def _post_load(self):
        # Run super first
        super()._post_load()

        # Set the enabled property based on the internal value
        # This is done so that any subclassed sensors which require simulator specific enabling can handle this now
        self.enabled = self._enabled

    def get_obs(self):
        """
        Get sensor readings, corrupting them with self._noise (when set) for every modality not listed in
        self.no_noise_modalities.

        Returns:
            2-tuple:
                dict: Keyword-mapped observations mapping modality names to numpy arrays of arbitrary dimension.
                    Only filled in if this sensor is enabled!
                dict: Additional information about the observations.
        """
        if not self._enabled:
            # Bugfix: this branch previously returned a bare dict while the enabled path returns an
            # (obs, info) 2-tuple, so any caller unpacking `obs, info = sensor.get_obs()` crashed on a
            # disabled sensor. Return a consistent 2-tuple of empty dicts instead.
            return dict(), dict()

        obs, info = self._get_obs()
        if self._noise is not None:
            for k, v in obs.items():
                if k not in self.no_noise_modalities:
                    obs[k] = self._noise(v)

        return obs, info

    def _get_obs(self):
        """
        Get sensor reading. Should generally be extended by subclass.

        Returns:
            2-tuple:
                dict: Keyword-mapped observations mapping modality names to numpy arrays of arbitrary dimension
                dict: Additional information about the observations.
        """
        # Default is returning an empty dict
        return dict(), dict()

    def _load_observation_space(self):
        # Fill in observation space based on mapping and active modalities
        obs_space = dict()
        for modality, space in self._obs_space_mapping.items():
            if modality in self._modalities:
                if isinstance(space, Space):
                    # Directly add this space
                    obs_space[modality] = space
                else:
                    # Assume we are procedurally generating a box space
                    shape, low, high, dtype = space
                    obs_space[modality] = self._build_obs_box_space(shape=shape, low=low, high=high, dtype=dtype)

        return obs_space

    def add_modality(self, modality):
        """
        Add a modality to this sensor. Must be a valid modality (one of self.all_modalities)

        Args:
            modality (str): Name of the modality to add to this sensor
        """
        assert_valid_key(key=modality, valid_keys=self.all_modalities, name="modality")
        if modality not in self._modalities:
            self._modalities.add(modality)
            # Update observation space so the new modality is exposed
            self.load_observation_space()

    def remove_modality(self, modality):
        """
        Remove a modality from this sensor. Must be a valid modality that is active (one of self.modalities)

        Args:
            modality (str): Name of the modality to remove from this sensor
        """
        assert_valid_key(key=modality, valid_keys=self._modalities, name="modality")
        if modality in self._modalities:
            self._modalities.remove(modality)
            # Update observation space so the removed modality disappears
            self.load_observation_space()

    @property
    def modalities(self):
        """
        Returns:
            set: Name of modalities provided by this sensor. This should correspond to all the keys provided
                in self.get_obs()
        """
        return self._modalities

    @property
    def _obs_space_mapping(self):
        """
        Returns:
            dict: Keyword-mapped observation space settings for each modality. For each modality in
                cls.all_modalities, its name should map directly to the corresponding gym space Space for that modality
                or a 4-tuple entry (shape, low, high, dtype) for procedurally generating the appropriate Box Space
                for that modality
        """
        raise NotImplementedError()

    @classproperty
    def all_modalities(cls):
        """
        Returns:
            set: All possible valid modalities for this sensor. Should be implemented by subclass.
        """
        raise NotImplementedError()

    @property
    def noise(self):
        """
        Returns:
            None or BaseSensorNoise: Noise model to use for this sensor
        """
        return self._noise

    @classproperty
    def no_noise_modalities(cls):
        """
        Returns:
            set: Modalities that should NOT be passed through noise, irregardless of whether noise is enabled or not.
                This is useful for some modalities which are not exclusively numerical arrays.
        """
        raise NotImplementedError()

    @property
    def enabled(self):
        """
        Returns:
            bool: Whether this sensor is enabled or not
        """
        # By default, we simply return the internal value. Subclasses may need to extend this functionality,
        # e.g. by disabling actual sim functionality for better computational efficiency
        return self._enabled

    @enabled.setter
    def enabled(self, enabled):
        """
        Args:
            enabled (bool): Whether this sensor should be enabled or not
        """
        # By default, we simply store the value internally. Subclasses may need to extend this functionality,
        # e.g. by disabling actual sim functionality for better computational efficiency
        self._enabled = enabled

    @classproperty
    def sensor_type(cls):
        """
        Returns:
            str: Type of this sensor. By default, this is the sensor class name
        """
        return cls.__name__

    @classmethod
    def _register_cls(cls):
        global ALL_SENSOR_MODALITIES

        # Run super first
        super()._register_cls()

        # Also store modalities from this sensor class if we're registering it
        if cls.__name__ not in cls._do_not_register_classes:
            # Bugfix: set.union() returns a NEW set and leaves the operand untouched, so the original
            # `ALL_SENSOR_MODALITIES.union(...)` call silently discarded its result and the global was
            # never populated. update() mutates the global in place as intended.
            ALL_SENSOR_MODALITIES.update(cls.all_modalities)

    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("BaseSensor")
        return classes

    @classproperty
    def _cls_registry(cls):
        # Global registry
        global REGISTERED_SENSORS
        return REGISTERED_SENSORS
| 8,385 | Python | 34.837607 | 119 | 0.620155 |
StanfordVL/OmniGibson/omnigibson/sensors/scan_sensor.py | import cv2
import numpy as np
from collections.abc import Iterable
from transforms3d.quaternions import quat2mat
import omnigibson.lazy as lazy
from omnigibson.sensors.sensor_base import BaseSensor
from omnigibson.utils.constants import OccupancyGridState
from omnigibson.utils.python_utils import classproperty
class ScanSensor(BaseSensor):
"""
General 2D LiDAR range sensor and occupancy grid sensor.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
modalities (str or list of str): Modality(s) supported by this sensor. Default is "all", which corresponds
to all modalities being used. Otherwise, valid options should be part of cls.all_modalities.
For this scan sensor, this includes any of:
{scan, occupancy_grid}
Note that in order for "occupancy_grid" to be used, "scan" must also be included.
enabled (bool): Whether this sensor should be enabled by default
noise (None or BaseSensorNoise): If specified, sensor noise model to apply to this sensor.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this sensor's prim at runtime.
min_range (float): Minimum range to sense in meters
max_range (float): Maximum range to sense in meters
horizontal_fov (float): Field of view of sensor, in degrees
vertical_fov (float): Field of view of sensor, in degrees
yaw_offset (float): Degrees for offsetting this sensors horizontal FOV.
Useful in cases where this sensor's forward direction is different than expected
horizontal_resolution (float): Degrees in between each horizontal scan hit
vertical_resolution (float): Degrees in between each vertical scan hit
rotation_rate (float): How fast the range sensor is rotating, in rotations per sec. Set to 0 for all scans
be to hit at once
draw_points (bool): Whether to draw the points hit by this sensor
draw_lines (bool): Whether to draw the lines representing the scans from this sensor
occupancy_grid_resolution (int): How many discretized nodes in the occupancy grid. This will specify the
height == width of the map
occupancy_grid_range (float): Range of the occupancy grid, in meters
occupancy_grid_inner_radius (float): Inner range of the occupancy grid that will assumed to be empty, in meters
occupancy_grid_local_link (None or XFormPrim): XForm prim that represents the "origin" of any generated
occupancy grid, e.g.: if this scan sensor is attached to a robot, then this should possibly be the base link
for that robot. If None is specified, then this will default to this own sensor's frame as the origin.
"""
def __init__(
self,
prim_path,
name,
modalities="all",
enabled=True,
noise=None,
load_config=None,
# Basic LIDAR kwargs
min_range=0.05,
max_range=10.0,
horizontal_fov=360.0,
vertical_fov=1.0,
yaw_offset=0.0,
horizontal_resolution=1.0,
vertical_resolution=1.0,
rotation_rate=0.0,
draw_points=False,
draw_lines=False,
# Occupancy Grid kwargs
occupancy_grid_resolution=128,
occupancy_grid_range=5.0,
occupancy_grid_inner_radius=0.5,
occupancy_grid_local_link=None,
):
# Store settings
self.occupancy_grid_resolution = occupancy_grid_resolution
self.occupancy_grid_range = occupancy_grid_range
self.occupancy_grid_inner_radius = int(occupancy_grid_inner_radius * occupancy_grid_resolution
/ occupancy_grid_range)
self.occupancy_grid_local_link = self if occupancy_grid_local_link is None else occupancy_grid_local_link
# Create variables that will be filled in at runtime
self._rs = None # Range sensor interface, analagous to others, e.g.: dynamic control interface
# Create load config from inputs
load_config = dict() if load_config is None else load_config
load_config["min_range"] = min_range
load_config["max_range"] = max_range
load_config["horizontal_fov"] = horizontal_fov
load_config["vertical_fov"] = vertical_fov
load_config["yaw_offset"] = yaw_offset
load_config["horizontal_resolution"] = horizontal_resolution
load_config["vertical_resolution"] = vertical_resolution
load_config["rotation_rate"] = rotation_rate
load_config["draw_points"] = draw_points
load_config["draw_lines"] = draw_lines
# Sanity check modalities -- if we're using occupancy_grid without scan modality, raise an error
if isinstance(modalities, Iterable) and not isinstance(modalities, str) and "occupancy_grid" in modalities:
assert "scan" in modalities, f"'scan' modality must be included in order to get occupancy_grid modality!"
# Run super method
super().__init__(
prim_path=prim_path,
name=name,
modalities=modalities,
enabled=enabled,
noise=noise,
load_config=load_config,
)
def _load(self):
# Define a LIDAR prim at the current stage
result, lidar = lazy.omni.kit.commands.execute("RangeSensorCreateLidar", path=self._prim_path)
return lidar.GetPrim()
def _post_load(self):
# run super first
super()._post_load()
# Set all the lidar kwargs
self.min_range = self._load_config["min_range"]
self.max_range = self._load_config["max_range"]
self.horizontal_fov = self._load_config["horizontal_fov"]
self.vertical_fov = self._load_config["vertical_fov"]
self.yaw_offset = self._load_config["yaw_offset"]
self.horizontal_resolution = self._load_config["horizontal_resolution"]
self.vertical_resolution = self._load_config["vertical_resolution"]
self.rotation_rate = self._load_config["rotation_rate"]
self.draw_points = self._load_config["draw_points"]
self.draw_lines = self._load_config["draw_lines"]
def _initialize(self):
# run super first
super()._initialize()
# Initialize lidar sensor interface
self._rs = lazy.omni.isaac.range_sensor._range_sensor.acquire_lidar_sensor_interface()
@property
def _obs_space_mapping(self):
# Set the remaining modalities' values
# (obs modality, shape, low, high)
obs_space_mapping = dict(
scan=((self.n_horizontal_rays, self.n_vertical_rays), 0.0, 1.0, np.float32),
occupancy_grid=((self.occupancy_grid_resolution, self.occupancy_grid_resolution, 1), 0.0, 1.0, np.float32),
)
return obs_space_mapping
    def get_local_occupancy_grid(self, scan):
        """
        Compute a 2D local occupancy grid, in the base frame of @self.occupancy_grid_local_link,
        from the current 1D scan.

        Args:
            scan (n-array): Normalized 1D LiDAR scan, with values in [0, 1] mapping linearly
                onto [self.min_range, self.max_range]

        Returns:
            2D-array: (occupancy_grid_resolution, occupancy_grid_resolution)-sized numpy array of the
                local occupancy grid, with float values in [0, 1] encoding OccupancyGridState
        """
        # Run sanity checks first
        assert "occupancy_grid" in self._modalities, "Occupancy grid is not enabled for this range sensor!"
        assert self.n_vertical_rays == 1, "Occupancy grid is only valid for a 1D range sensor (n_vertical_rays = 1)!"
        # Grab vector of corresponding angles for each scan line
        angles = np.arange(
            -np.radians(self.horizontal_fov / 2),
            np.radians(self.horizontal_fov / 2),
            np.radians(self.horizontal_resolution),
        )
        # Convert into 3D unit vectors for each angle
        unit_vector_laser = np.array([[np.cos(ang), np.sin(ang), 0.0] for ang in angles])
        # Scale unit vectors by corresponding laser scan distances (de-normalize [0, 1] back to meters)
        assert ((scan >= 0.0) & (scan <= 1.0)).all(), "scan out of valid range [0, 1]"
        scan_laser = unit_vector_laser * (scan * (self.max_range - self.min_range) + self.min_range)
        # Convert scans from laser frame to world frame
        pos, ori = self.get_position_orientation()
        scan_world = quat2mat(ori).dot(scan_laser.T).T + pos
        # Convert scans from world frame to local base frame
        base_pos, base_ori = self.occupancy_grid_local_link.get_position_orientation()
        scan_local = quat2mat(base_ori).T.dot((scan_world - base_pos).T).T
        scan_local = scan_local[:, :2]
        # Prepend / append the origin so the scan endpoints form a closed polygon around the sensor
        scan_local = np.concatenate([np.array([[0, 0]]), scan_local, np.array([[0, 0]])], axis=0)
        # flip y axis
        scan_local[:, 1] *= -1
        # Initialize occupancy grid -- default is unknown values
        occupancy_grid = np.zeros((self.occupancy_grid_resolution, self.occupancy_grid_resolution)).astype(np.uint8)
        occupancy_grid.fill(int(OccupancyGridState.UNKNOWN * 2.0))
        # Convert local scans into the corresponding OG square it should belong to (note now all values are > 0, since
        # OG ranges from [0, resolution] x [0, resolution])
        scan_local_in_map = scan_local / self.occupancy_grid_range * self.occupancy_grid_resolution + \
            (self.occupancy_grid_resolution / 2)
        scan_local_in_map = scan_local_in_map.reshape((1, -1, 1, 2)).astype(np.int32)
        # For each scan hit, stamp a small filled circle of OBSTACLES (radius 2 cells for robustness
        # against discretization)
        for i in range(scan_local_in_map.shape[1]):
            cv2.circle(
                img=occupancy_grid,
                center=(scan_local_in_map[0, i, 0, 0], scan_local_in_map[0, i, 0, 1]),
                radius=2,
                color=int(OccupancyGridState.OBSTACLES * 2.0),
                thickness=-1,
            )
        # Fill the polygon enclosed by the scan endpoints (sensor origin included) as FREESPACE
        cv2.fillPoly(
            img=occupancy_grid, pts=scan_local_in_map, color=int(OccupancyGridState.FREESPACE * 2.0), lineType=1
        )
        # Mark the sensor's immediate surroundings (inner radius) as FREESPACE as well
        cv2.circle(
            img=occupancy_grid,
            center=(self.occupancy_grid_resolution // 2, self.occupancy_grid_resolution // 2),
            radius=self.occupancy_grid_inner_radius,
            color=int(OccupancyGridState.FREESPACE * 2.0),
            thickness=-1,
        )
        # Scale the uint8 encoding back into [0, 1] floats, adding a trailing channel dim
        return occupancy_grid[:, :, None].astype(np.float32) / 2.0
def _get_obs(self):
# Run super first to grab any upstream obs
obs, info = super()._get_obs()
# Add scan info (normalized to [0.0, 1.0])
if "scan" in self._modalities:
raw_scan = self._rs.get_linear_depth_data(self._prim_path)
# Sometimes get_linear_depth_data will return values that are slightly out of range, needs clipping
raw_scan = np.clip(raw_scan, self.min_range, self.max_range)
obs["scan"] = (raw_scan - self.min_range) / (self.max_range - self.min_range)
# Optionally add occupancy grid info
if "occupancy_grid" in self._modalities:
obs["occupancy_grid"] = self.get_local_occupancy_grid(scan=obs["scan"])
return obs, info
@property
def n_horizontal_rays(self):
"""
Returns:
int: Number of horizontal rays for this range sensor
"""
return int(self.horizontal_fov // self.horizontal_resolution)
@property
def n_vertical_rays(self):
"""
Returns:
int: Number of vertical rays for this range sensor
"""
return int(self.vertical_fov // self.vertical_resolution)
@property
def min_range(self):
"""
Gets this range sensor's min_range (minimum distance in meters which will register a hit)
Returns:
float: minimum range for this range sensor, in meters
"""
return self.get_attribute("minRange")
@min_range.setter
def min_range(self, val):
"""
Sets this range sensor's min_range (minimum distance in meters which will register a hit)
Args:
val (float): minimum range for this range sensor, in meters
"""
self.set_attribute("minRange", val)
@property
def max_range(self):
"""
Gets this range sensor's max_range (maximum distance in meters which will register a hit)
Returns:
float: maximum range for this range sensor, in meters
"""
return self.get_attribute("maxRange")
@max_range.setter
def max_range(self, val):
"""
Sets this range sensor's max_range (maximum distance in meters which will register a hit)
Args:
val (float): maximum range for this range sensor, in meters
"""
self.set_attribute("maxRange", val)
@property
def draw_lines(self):
"""
Gets whether range lines are drawn for this sensor
Returns:
bool: Whether range lines are drawn for this sensor
"""
return self.get_attribute("drawLines")
@draw_lines.setter
def draw_lines(self, draw):
"""
Sets whether range lines are drawn for this sensor
Args:
draw (float): Whether range lines are drawn for this sensor
"""
self.set_attribute("drawLines", draw)
@property
def draw_points(self):
"""
Gets whether range points are drawn for this sensor
Returns:
bool: Whether range points are drawn for this sensor
"""
return self.get_attribute("drawPoints")
@draw_points.setter
def draw_points(self, draw):
"""
Sets whether range points are drawn for this sensor
Args:
draw (float): Whether range points are drawn for this sensor
"""
self.set_attribute("drawPoints", draw)
@property
def horizontal_fov(self):
"""
Gets this range sensor's horizontal_fov
Returns:
float: horizontal field of view for this range sensor
"""
return self.get_attribute("horizontalFov")
@horizontal_fov.setter
def horizontal_fov(self, fov):
"""
Sets this range sensor's horizontal_fov
Args:
fov (float): horizontal field of view to set
"""
self.set_attribute("horizontalFov", fov)
@property
def horizontal_resolution(self):
"""
Gets this range sensor's horizontal_resolution (degrees in between each horizontal hit)
Returns:
float: horizontal resolution for this range sensor, in degrees
"""
return self.get_attribute("horizontalResolution")
@horizontal_resolution.setter
def horizontal_resolution(self, resolution):
"""
Sets this range sensor's horizontal_resolution (degrees in between each horizontal hit)
Args:
resolution (float): horizontal resolution to set, in degrees
"""
self.set_attribute("horizontalResolution", resolution)
@property
def vertical_fov(self):
"""
Gets this range sensor's vertical_fov
Returns:
float: vertical field of view for this range sensor
"""
return self.get_attribute("verticalFov")
@vertical_fov.setter
def vertical_fov(self, fov):
"""
Sets this range sensor's vertical_fov
Args:
fov (float): vertical field of view to set
"""
self.set_attribute("verticalFov", fov)
@property
def vertical_resolution(self):
"""
Gets this range sensor's vertical_resolution (degrees in between each vertical hit)
Returns:
float: vertical resolution for this range sensor, in degrees
"""
return self.get_attribute("verticalResolution")
@vertical_resolution.setter
def vertical_resolution(self, resolution):
"""
Sets this range sensor's vertical_resolution (degrees in between each vertical hit)
Args:
resolution (float): vertical resolution to set, in degrees
"""
self.set_attribute("verticalResolution", resolution)
@property
def yaw_offset(self):
"""
Gets this range sensor's yaw_offset (used in cases where this sensor's forward direction is different than expected)
Returns:
float: yaw offset for this range sensor in degrees
"""
return self.get_attribute("yawOffset")
@yaw_offset.setter
def yaw_offset(self, offset):
"""
Sets this range sensor's yaw_offset (used in cases where this sensor's forward direction is different than expected)
Args:
offset (float): yaw offset to set in degrees.
"""
self.set_attribute("yawOffset", offset)
@property
def rotation_rate(self):
"""
Gets this range sensor's rotation_rate, in degrees per second. Note that a 0 value corresponds to no rotation,
and all range hits are assumed to be received at the exact same time.
Returns:
float: rotation rate for this range sensor in degrees per second
"""
return self.get_attribute("rotationRate")
@rotation_rate.setter
def rotation_rate(self, rate):
"""
Sets this range sensor's rotation_rate, in degrees per second. Note that a 0 value corresponds to no rotation,
and all range hits are assumed to be received at the exact same time.
Args:
rate (float): rotation rate for this range sensor in degrees per second
"""
self.set_attribute("rotationRate", rate)
@classproperty
def all_modalities(cls):
return {"scan", "occupancy_grid"}
@classproperty
def no_noise_modalities(cls):
# Occupancy grid should have no noise
return {"occupancy_grid"}
    @property
    def enabled(self):
        """
        Returns:
            bool: Whether this sensor is currently enabled
        """
        # Just use super
        return super().enabled
    @enabled.setter
    def enabled(self, enabled):
        """
        Enable / disable this sensor, both at the OmniGibson level (via the parent class's
        setter) and directly on the underlying lidar prim's "enabled" attribute.

        Args:
            enabled (bool): Whether this sensor should be enabled
        """
        # We must use super and additionally directly en/disable the sensor in the simulation
        # Note: weird syntax below required to "extend" super class's implementation, see:
        # https://stackoverflow.com/a/37663266
        super(ScanSensor, self.__class__).enabled.fset(self, enabled)
        self.set_attribute("enabled", enabled)
| 18,414 | Python | 37.205394 | 124 | 0.624796 |
StanfordVL/OmniGibson/omnigibson/sensors/vision_sensor.py | import numpy as np
import time
import gym
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.sensors.sensor_base import BaseSensor
from omnigibson.systems.system_base import REGISTERED_SYSTEMS
from omnigibson.utils.constants import MAX_CLASS_COUNT, MAX_INSTANCE_COUNT, MAX_VIEWER_SIZE, semantic_class_name_to_id, semantic_class_id_to_name
from omnigibson.utils.python_utils import assert_valid_key, classproperty
from omnigibson.utils.sim_utils import set_carb_setting
from omnigibson.utils.ui_utils import dock_window
from omnigibson.utils.vision_utils import Remapper
# Duplicate of simulator's render method, used so that this can be done before simulator is created!
def render():
    """
    Refresh the Isaac Sim app's rendering components (UI elements, viewports, etc.).
    """
    # Pause sim playback while the app refreshes so no physics steps occur, then restore it
    set_carb_setting(og.app._carb_settings, "/app/player/playSimulations", False)
    og.app.update()
    set_carb_setting(og.app._carb_settings, "/app/player/playSimulations", True)
class VisionSensor(BaseSensor):
"""
Vision sensor that handles a variety of modalities, including:
- RGB (normal)
- Depth (normal, linear)
- Normals
- Segmentation (semantic, instance)
- Optical flow
- 2D Bounding boxes (tight, loose)
- 3D Bounding boxes
- Camera state
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
modalities (str or list of str): Modality(s) supported by this sensor. Default is "all", which corresponds
to all modalities being used. Otherwise, valid options should be part of cls.all_modalities.
For this vision sensor, this includes any of:
{rgb, depth, depth_linear, normal, seg_semantic, seg_instance, flow, bbox_2d_tight,
bbox_2d_loose, bbox_3d, camera}
enabled (bool): Whether this sensor should be enabled by default
noise (None or BaseSensorNoise): If specified, sensor noise model to apply to this sensor.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this sensor's prim at runtime.
image_height (int): Height of generated images, in pixels
image_width (int): Width of generated images, in pixels
focal_length (float): Focal length to set
clipping_range (2-tuple): (min, max) viewing range of this vision sensor
viewport_name (None or str): If specified, will link this camera to the specified viewport, overriding its
current camera. Otherwise, creates a new viewport
"""
ALL_MODALITIES = (
"rgb",
"depth",
"depth_linear",
"normal",
"seg_semantic", # Semantic segmentation shows the category each pixel belongs to
"seg_instance", # Instance segmentation shows the name of the object each pixel belongs to
"seg_instance_id", # Instance ID segmentation shows the prim path of the visual mesh each pixel belongs to
"flow",
"bbox_2d_tight",
"bbox_2d_loose",
"bbox_3d",
"camera_params",
)
# Documentation for the different types of segmentation for particle systems:
# - Cloth (e.g. `dishtowel`):
# - semantic: all shows up under one semantic label (e.g. `"4207839377": "dishtowel"`)
# - instance: entire cloth shows up under one label (e.g. `"87": "dishtowel_0"`)
# - instance id: entire cloth shows up under one label (e.g. `"31": "/World/dishtowel_0/base_link_cloth"`)
# - MicroPhysicalParticleSystem - FluidSystem (e.g. `water`):
# - semantic: all shows up under one semantic label (e.g. `"3330677804": "water"`)
# - instance: all shows up under one instance label (e.g. `"21": "water"`)
# - instance id: all shows up under one instance ID label (e.g. `"36": "water"`)
# - MicroPhysicalParticleSystem - GranularSystem (e.g. `sesame seed`):
# - semantic: all shows up under one semantic label (e.g. `"2975304485": "sesame_seed"`)
# - instance: all shows up under one instance label (e.g. `"21": "sesame_seed"`)
# - instance id: all shows up under one instance ID label (e.g. `"36": "sesame_seed"`)
# - MacroPhysicalParticleSystem (e.g. `diced__carrot`):
# - semantic: all shows up under one semantic label (e.g. `"2419487146": "diced__carrot"`)
# - instance: all shows up under one instance label (e.g. `"21": "diced__carrot"`)
# - instance id: all shows up under one instance ID label (e.g. `"36": "diced__carrot"`)
# - MacroVisualParticleSystem (e.g. `stain`):
# - semantic: all shows up under one semantic label (e.g. `"884110082": "stain"`)
# - instance: all shows up under one instance label (e.g. `"21": "stain"`)
# - instance id: all shows up under one instance ID label (e.g. `"36": "stain"`)
# Persistent dictionary of sensors, mapped from prim_path to sensor
SENSORS = dict()
SEMANTIC_REMAPPER = Remapper()
INSTANCE_REMAPPER = Remapper()
INSTANCE_ID_REMAPPER = Remapper()
INSTANCE_REGISTRY = {0: "background", 1: "unlabelled"}
INSTANCE_ID_REGISTRY = {0: "background"}
def __init__(
self,
prim_path,
name,
modalities="all",
enabled=True,
noise=None,
load_config=None,
image_height=128,
image_width=128,
focal_length=17.0, # Default 17.0 since this is roughly the human eye focal length
clipping_range=(0.001, 10000000.0),
viewport_name=None,
):
# Create load config from inputs
load_config = dict() if load_config is None else load_config
load_config["image_height"] = image_height
load_config["image_width"] = image_width
load_config["focal_length"] = focal_length
load_config["clipping_range"] = clipping_range
load_config["viewport_name"] = viewport_name
# Create variables that will be filled in later at runtime
self._viewport = None # Viewport from which to grab data
self._annotators = None
self._render_product = None
self._RAW_SENSOR_TYPES = dict(
rgb="rgb",
depth="distance_to_camera",
depth_linear="distance_to_image_plane",
normal="normals",
# Semantic segmentation shows the category each pixel belongs to
seg_semantic="semantic_segmentation",
# Instance segmentation shows the name of the object each pixel belongs to
seg_instance="instance_segmentation",
# Instance ID segmentation shows the prim path of the visual mesh each pixel belongs to
seg_instance_id="instance_id_segmentation",
flow="motion_vectors",
bbox_2d_tight="bounding_box_2d_tight",
bbox_2d_loose="bounding_box_2d_loose",
bbox_3d="bounding_box_3d",
camera_params="camera_params",
)
assert {key for key in self._RAW_SENSOR_TYPES.keys() if key != "camera_params"} == set(self.all_modalities), \
"VisionSensor._RAW_SENSOR_TYPES must have the same keys as VisionSensor.all_modalities!"
modalities = set([modalities]) if isinstance(modalities, str) else modalities
# 1) seg_instance and seg_instance_id require seg_semantic to be enabled (for rendering particle systems)
# 2) bounding box observations require seg_semantic to be enabled (for remapping bounding box semantic IDs)
semantic_dependent_modalities = {"seg_instance", "seg_instance_id", "bbox_2d_loose", "bbox_2d_tight", "bbox_3d"}
# if any of the semantic dependent modalities are enabled, then seg_semantic must be enabled
if semantic_dependent_modalities.intersection(modalities) and "seg_semantic" not in modalities:
modalities.add("seg_semantic")
# Run super method
super().__init__(
prim_path=prim_path,
name=name,
modalities=modalities,
enabled=enabled,
noise=noise,
load_config=load_config,
)
def _load(self):
# Define a new camera prim at the current stage
# Note that we can't use og.sim.stage here because the vision sensors get loaded first
return lazy.pxr.UsdGeom.Camera.Define(lazy.omni.isaac.core.utils.stage.get_current_stage(), self._prim_path).GetPrim()
    def _post_load(self):
        # run super first
        super()._post_load()
        # Add this sensor to the list of global sensors
        self.SENSORS[self._prim_path] = self
        # Create the replicator render product this sensor's annotators will attach to
        resolution = (self._load_config["image_width"], self._load_config["image_height"])
        self._render_product = lazy.omni.replicator.core.create.render_product(self._prim_path, resolution)
        # Create a new viewport to link to this camera or link to a pre-existing one
        viewport_name = self._load_config["viewport_name"]
        if viewport_name is not None:
            vp_names_to_handles = {vp.name: vp for vp in lazy.omni.kit.viewport.window.get_viewport_window_instances()}
            assert_valid_key(key=viewport_name, valid_keys=vp_names_to_handles, name="viewport name")
            viewport = vp_names_to_handles[viewport_name]
        else:
            viewport = lazy.omni.kit.viewport.utility.create_viewport_window()
            # Take a render step to make sure the viewport is generated before docking it
            render()
            # Grab the newly created viewport and dock it to the GUI
            # The first viewport is always the "main" global camera, and any additional cameras are auxiliary views
            # These auxiliary views will be stacked in a single column
            # Thus, the first auxiliary viewport should be generated to the left of the main dockspace, and any
            # subsequent viewports should be equally spaced according to the number of pre-existing auxiliary views
            n_auxiliary_sensors = len(self.SENSORS) - 1
            if n_auxiliary_sensors == 1:
                # This is the first auxiliary viewport, dock to the left of the main dockspace
                dock_window(space=lazy.omni.ui.Workspace.get_window("DockSpace"), name=viewport.name,
                            location=lazy.omni.ui.DockPosition.LEFT, ratio=0.25)
            elif n_auxiliary_sensors > 1:
                # This is any additional auxiliary viewports, dock equally-spaced in the auxiliary column
                # We also need to re-dock any prior viewports!
                for i in range(2, n_auxiliary_sensors + 1):
                    dock_window(space=lazy.omni.ui.Workspace.get_window(f"Viewport {i - 1}"), name=f"Viewport {i}",
                                location=lazy.omni.ui.DockPosition.BOTTOM, ratio=(1 + n_auxiliary_sensors - i) / (2 + n_auxiliary_sensors - i))
        self._viewport = viewport
        # Link the camera and viewport together
        self._viewport.viewport_api.set_active_camera(self._prim_path)
        # Requires 3 render updates to propagate changes
        for i in range(3):
            render()
        # Set the viewer size (requires taking one render step afterwards)
        self._viewport.viewport_api.set_texture_resolution(resolution)
        # Also update focal length and clipping range
        self.focal_length = self._load_config["focal_length"]
        self.clipping_range = self._load_config["clipping_range"]
        # Requires 3 render updates to propagate changes
        for i in range(3):
            render()
def _initialize(self):
# Run super first
super()._initialize()
self._annotators = {modality: None for modality in self._modalities}
# Initialize sensors
self.initialize_sensors(names=self._modalities)
for _ in range(3):
render()
def initialize_sensors(self, names):
"""Initializes a raw sensor in the simulation.
Args:
names (str or list of str): Name of the raw sensor(s) to initialize.
If they are not part of self._RAW_SENSOR_TYPES' keys, we will simply pass over them
"""
names = {names} if isinstance(names, str) else set(names)
for name in names:
self._add_modality_to_backend(modality=name)
    def _get_obs(self):
        # Make sure we're initialized
        assert self.initialized, "Cannot grab vision observations without first initializing this VisionSensor!"
        # Run super first to grab any upstream obs
        obs, info = super()._get_obs()
        # Reorder modalities to ensure that seg_semantic is always ran before seg_instance or seg_instance_id,
        # since the instance remappers below consume the semantic image + labels produced in the same pass
        if "seg_semantic" in self._modalities:
            reordered_modalities = ["seg_semantic"] + [modality for modality in self._modalities if modality != "seg_semantic"]
        else:
            reordered_modalities = self._modalities
        for modality in reordered_modalities:
            raw_obs = self._annotators[modality].get_data()
            # Obs is either a dictionary of {"data":, ..., "info": ...} or a direct array
            obs[modality] = raw_obs["data"] if isinstance(raw_obs, dict) else raw_obs
            # Segmentation modalities additionally get remapped into our own stable ID spaces
            if modality == "seg_semantic":
                id_to_labels = raw_obs["info"]["idToLabels"]
                obs[modality], info[modality] = self._remap_semantic_segmentation(obs[modality], id_to_labels)
            elif modality == "seg_instance":
                id_to_labels = raw_obs["info"]["idToLabels"]
                obs[modality], info[modality] = self._remap_instance_segmentation(
                    obs[modality], id_to_labels, obs["seg_semantic"], info["seg_semantic"], id=False)
            elif modality == "seg_instance_id":
                id_to_labels = raw_obs["info"]["idToLabels"]
                obs[modality], info[modality] = self._remap_instance_segmentation(
                    obs[modality], id_to_labels, obs["seg_semantic"], info["seg_semantic"], id=True)
            elif "bbox" in modality:
                # Bounding boxes only need their semantic IDs translated into our ID space
                obs[modality] = self._remap_bounding_box_semantic_ids(obs[modality])
        return obs, info
    def _remap_semantic_segmentation(self, img, id_to_labels):
        """
        Remap the semantic segmentation image to the class IDs defined in semantic_class_name_to_id().
        Also, correct the id_to_labels input with the labels from semantic_class_name_to_id() and return it.

        Args:
            img (np.ndarray): Semantic segmentation image to remap
            id_to_labels (dict): Dictionary of semantic IDs to class labels

        Returns:
            np.ndarray: Remapped semantic segmentation image
            dict: Corrected id_to_labels dictionary
        """
        # Preprocess id_to_labels to feed into the remapper
        replicator_mapping = {}
        for key, val in id_to_labels.items():
            key = int(key)
            # Class names are compared case-insensitively throughout
            replicator_mapping[key] = val["class"].lower()
            if "," in replicator_mapping[key]:
                # If there are multiple class names, grab the one that is a registered system
                # This happens with MacroVisual particles, e.g. {"11": {"class": "breakfast_table,stain"}}
                categories = [cat for cat in replicator_mapping[key].split(",") if cat in REGISTERED_SYSTEMS]
                assert len(categories) == 1, "There should be exactly one category that belongs to REGISTERED_SYSTEMS"
                replicator_mapping[key] = categories[0]
            assert replicator_mapping[key] in semantic_class_id_to_name().values(), f"Class {val['class']} does not exist in the semantic class name to id mapping!"
        # Every pixel value must have a corresponding label before remapping
        assert set(np.unique(img)).issubset(set(replicator_mapping.keys())), "Semantic segmentation image does not match the original id_to_labels mapping."
        return VisionSensor.SEMANTIC_REMAPPER.remap(replicator_mapping, semantic_class_id_to_name(), img)
    def _remap_instance_segmentation(self, img, id_to_labels, semantic_img, semantic_labels, id=False):
        """
        Remap the instance segmentation image to our own instance IDs.
        Also, correct the id_to_labels input with our new labels and return it.

        Args:
            img (np.ndarray): Instance segmentation image to remap
            id_to_labels (dict): Dictionary of instance IDs to class labels
            semantic_img (np.ndarray): Semantic segmentation image to use for instance registry
            semantic_labels (dict): Dictionary of semantic IDs to class labels
            id (bool): Whether to remap for instance ID segmentation (prim paths of visual meshes)
                rather than plain instance segmentation (object names). Note: parameter name
                shadows the builtin `id`, kept for caller compatibility.

        Returns:
            np.ndarray: Remapped instance segmentation image
            dict: Corrected id_to_labels dictionary
        """
        # Sometimes 0 and 1 show up in the image, but they are not in the id_to_labels mapping
        id_to_labels.update({"0": "BACKGROUND"})
        if not id:
            id_to_labels.update({"1": "UNLABELLED"})
        # Preprocess id_to_labels and update instance registry
        replicator_mapping = {}
        for key, value in id_to_labels.items():
            key = int(key)
            if value in ["BACKGROUND", "UNLABELLED"]:
                value = value.lower()
            else:
                assert "/" in value, f"Instance segmentation (ID) label {value} is not a valid prim path!"
                prim_name = value.split("/")[-1]
                # Hacky way to get the particles of MacroVisual/PhysicalParticleSystem
                # Remap instance segmentation and instance segmentation ID labels to system name
                if "Particle" in prim_name:
                    category_name = prim_name.split("Particle")[0]
                    assert category_name in REGISTERED_SYSTEMS, f"System name {category_name} is not in the registered systems!"
                    value = category_name
                else:
                    # Remap instance segmentation labels to object name
                    if not id:
                        # value is the prim path of the object
                        if value == "/World/groundPlane":
                            value = "groundPlane"
                        else:
                            obj = og.sim.scene.object_registry("prim_path", value)
                            # Remap instance segmentation labels from prim path to object name
                            assert obj is not None, f"Object with prim path {value} cannot be found in objct registry!"
                            value = obj.name
                    # Keep the instance segmentation ID labels intact (prim paths of visual meshes)
                    else:
                        pass
            self._register_instance(value, id=id)
            replicator_mapping[key] = value
        # Handle the cases for MicroPhysicalParticleSystem (FluidSystem, GranularSystem).
        # They show up in the image, but not in the info (id_to_labels).
        # We identify these values, find the corresponding semantic label (system name), and add the mapping.
        # np.unique(return_index=True) gives each unique pixel value together with one flat index
        # where it occurs, which we use to look up the matching semantic pixel.
        for key, img_idx in zip(*np.unique(img, return_index=True)):
            if str(key) not in id_to_labels:
                semantic_label = semantic_img.flatten()[img_idx]
                assert semantic_label in semantic_labels, f"Semantic map value {semantic_label} is not in the semantic labels!"
                category_name = semantic_labels[semantic_label]
                if category_name in REGISTERED_SYSTEMS:
                    value = category_name
                    self._register_instance(value, id=id)
                # If the category name is not in the registered systems,
                # which happens because replicator sometimes returns segmentation map and id_to_labels that are not in sync,
                # we will label this as "unlabelled" for now
                else:
                    value = "unlabelled"
                replicator_mapping[key] = value
        # Pick the registry / remapper matching the segmentation flavor requested
        registry = VisionSensor.INSTANCE_ID_REGISTRY if id else VisionSensor.INSTANCE_REGISTRY
        remapper = VisionSensor.INSTANCE_ID_REMAPPER if id else VisionSensor.INSTANCE_REMAPPER
        assert set(np.unique(img)).issubset(set(replicator_mapping.keys())), "Instance segmentation image does not match the original id_to_labels mapping."
        return remapper.remap(replicator_mapping, registry, img)
def _register_instance(self, instance_name, id=False):
registry = VisionSensor.INSTANCE_ID_REGISTRY if id else VisionSensor.INSTANCE_REGISTRY
if instance_name not in registry.values():
registry[len(registry)] = instance_name
def _remap_bounding_box_semantic_ids(self, bboxes):
"""
Remap the semantic IDs of the bounding boxes to our own semantic IDs.
Args:
bboxes (list of dict): List of bounding boxes to remap
Returns:
list of dict: Remapped list of bounding boxes
"""
for bbox in bboxes:
bbox["semanticId"] = VisionSensor.SEMANTIC_REMAPPER.remap_bbox(bbox["semanticId"])
return bboxes
def add_modality(self, modality):
# Check if we already have this modality (if so, no need to initialize it explicitly)
should_initialize = modality not in self._modalities
# Run super
super().add_modality(modality=modality)
# We also need to initialize this new modality
if should_initialize:
self.initialize_sensors(names=modality)
def remove_modality(self, modality):
# Check if we don't have this modality (if not, no need to remove it explicitly)
should_remove = modality in self._modalities
# Run super
super().remove_modality(modality=modality)
if should_remove:
self._remove_modality_from_backend(modality=modality)
def _add_modality_to_backend(self, modality):
"""
Helper function to add specified modality @modality to the omniverse Replicator backend so that its data is
generated during get_obs()
Args:
modality (str): Name of the modality to add to the Replicator backend
"""
if self._annotators.get(modality, None) is None:
self._annotators[modality] = lazy.omni.replicator.core.AnnotatorRegistry.get_annotator(self._RAW_SENSOR_TYPES[modality])
self._annotators[modality].attach([self._render_product])
def _remove_modality_from_backend(self, modality):
"""
Helper function to remove specified modality @modality from the omniverse Replicator backend so that its data is
no longer generated during get_obs()
Args:
modality (str): Name of the modality to remove from the Replicator backend
"""
if self._annotators.get(modality, None) is not None:
self._annotators[modality].detach([self._render_product])
self._annotators[modality] = None
def remove(self):
# Remove from global sensors dictionary
self.SENSORS.pop(self._prim_path)
# Remove viewport
self._viewport.destroy()
# Run super
super().remove()
    @property
    def camera_parameters(self):
        """
        Returns a dictionary of keyword-mapped relevant intrinsic and extrinsic camera parameters for this vision sensor.
        The returned dictionary includes the following keys and their corresponding data types:
        - "cameraAperture": np.ndarray (float32) - Camera aperture dimensions.
        - "cameraApertureOffset": np.ndarray (float32) - Offset of the camera aperture.
        - "cameraFisheyeLensP": np.ndarray (float32) - Fisheye lens P parameter.
        - "cameraFisheyeLensS": np.ndarray (float32) - Fisheye lens S parameter.
        - "cameraFisheyeMaxFOV": float - Maximum field of view for fisheye lens.
        - "cameraFisheyeNominalHeight": int - Nominal height for fisheye lens.
        - "cameraFisheyeNominalWidth": int - Nominal width for fisheye lens.
        - "cameraFisheyeOpticalCentre": np.ndarray (float32) - Optical center for fisheye lens.
        - "cameraFisheyePolynomial": np.ndarray (float32) - Polynomial parameters for fisheye lens distortion.
        - "cameraFocalLength": float - Focal length of the camera.
        - "cameraFocusDistance": float - Focus distance of the camera.
        - "cameraFStop": float - F-stop value of the camera.
        - "cameraModel": str - Camera model identifier.
        - "cameraNearFar": np.ndarray (float32) - Near and far plane distances.
        - "cameraProjection": np.ndarray (float32) - Camera projection matrix.
        - "cameraViewTransform": np.ndarray (float32) - Camera view transformation matrix.
        - "metersPerSceneUnit": float - Scale factor from scene units to meters.
        - "renderProductResolution": np.ndarray (int32) - Resolution of the rendered product.

        Returns:
            dict: Keyword-mapped relevant intrinsic and extrinsic camera parameters for this vision sensor.
        """
        # Add the camera params modality if it doesn't already exist
        if "camera_params" not in self._annotators:
            self.initialize_sensors(names="camera_params")
            # Requires 3 render updates for camera params annotator to become active
            for _ in range(3):
                render()
        # Grab and return the parameters
        return self._annotators["camera_params"].get_data()
@property
def viewer_visibility(self):
"""
Returns:
bool: Whether the viewer is visible or not
"""
return self._viewport.visible
@viewer_visibility.setter
def viewer_visibility(self, visible):
"""
Sets whether the viewer should be visible or not in the Omni UI
Args:
visible (bool): Whether the viewer should be visible or not
"""
self._viewport.visible = visible
# Requires 1 render update to propagate changes
render()
@property
def image_height(self):
"""
Returns:
int: Image height of this sensor, in pixels
"""
return self._viewport.viewport_api.get_texture_resolution()[1]
@image_height.setter
def image_height(self, height):
"""
Sets the image height @height for this sensor
Args:
height (int): Image height of this sensor, in pixels
"""
width, _ = self._viewport.viewport_api.get_texture_resolution()
self._viewport.viewport_api.set_texture_resolution((width, height))
# Requires 3 updates to propagate changes
for i in range(3):
render()
@property
def image_width(self):
"""
Returns:
int: Image width of this sensor, in pixels
"""
return self._viewport.viewport_api.get_texture_resolution()[0]
@image_width.setter
def image_width(self, width):
"""
Sets the image width @width for this sensor
Args:
width (int): Image width of this sensor, in pixels
"""
_, height = self._viewport.viewport_api.get_texture_resolution()
self._viewport.viewport_api.set_texture_resolution((width, height))
# Requires 3 updates to propagate changes
for i in range(3):
render()
@property
def clipping_range(self):
"""
Returns:
2-tuple: [min, max] value of the sensor's clipping range, in meters
"""
return np.array(self.get_attribute("clippingRange"))
    @clipping_range.setter
    def clipping_range(self, limits):
        """
        Sets the clipping range @limits for this sensor
        Args:
            limits (2-tuple): [min, max] value of the sensor's clipping range, in meters
        """
        # Write the new [near, far] planes as a USD Vec2f attribute
        self.set_attribute(attr="clippingRange", val=lazy.pxr.Gf.Vec2f(*limits))
        # In order for sensor changes to propagate, we must toggle its visibility
        self.visible = False
        # A single update step has to happen here before we toggle visibility for changes to propagate
        render()
        self.visible = True
@property
def horizontal_aperture(self):
"""
Returns:
float: horizontal aperture of this sensor, in mm
"""
return self.get_attribute("horizontalAperture")
    @horizontal_aperture.setter
    def horizontal_aperture(self, length):
        """
        Sets the horizontal aperture @length for this sensor
        Args:
            length (float): horizontal aperture of this sensor, in mm (same unit as the getter)
        """
        self.set_attribute("horizontalAperture", length)
@property
def focal_length(self):
"""
Returns:
float: focal length of this sensor, in mm
"""
return self.get_attribute("focalLength")
    @focal_length.setter
    def focal_length(self, length):
        """
        Sets the focal length @length for this sensor
        Args:
            length (float): focal length of this sensor, in mm
        """
        # Writes straight to the underlying USD camera attribute
        self.set_attribute("focalLength", length)
@property
def intrinsic_matrix(self):
"""
Returns:
n-array: (3, 3) camera intrinsic matrix. Transforming point p (x,y,z) in the camera frame via K * p will
produce p' (x', y', w) - the point in the image plane. To get pixel coordiantes, divide x' and y' by w
"""
projection_matrix = self.camera_parameters["cameraProjection"]
projection_matrix = np.array(projection_matrix).reshape(4, 4)
fx = projection_matrix[0, 0]
fy = projection_matrix[1, 1]
cx = projection_matrix[0, 2]
cy = projection_matrix[1, 2]
s = projection_matrix[0, 1] # Skew factor
intrinsic_matrix = np.array([[fx, s, cx],
[0.0, fy, cy],
[0.0, 0.0, 1.0]])
return intrinsic_matrix
@property
def _obs_space_mapping(self):
# Generate the complex space types for special modalities:
# {"bbox_2d_tight", "bbox_2d_loose", "bbox_3d"}
bbox_3d_space = gym.spaces.Sequence(space=gym.spaces.Tuple((
gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=np.uint32), # semanticId
gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # x_min
gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # y_min
gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # z_min
gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # x_max
gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # y_max
gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # z_max
gym.spaces.Box(low=-np.inf, high=np.inf, shape=(4, 4), dtype=np.float32), # transform
gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32), # occlusion ratio
)))
bbox_2d_space = gym.spaces.Sequence(space=gym.spaces.Tuple((
gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=np.uint32), # semanticId
gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # x_min
gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # y_min
gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # x_max
gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # y_max
gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32), # occlusion ratio
)))
obs_space_mapping = dict(
rgb=((self.image_height, self.image_width, 4), 0, 255, np.uint8),
depth=((self.image_height, self.image_width), 0.0, np.inf, np.float32),
depth_linear=((self.image_height, self.image_width), 0.0, np.inf, np.float32),
normal=((self.image_height, self.image_width, 4), -1.0, 1.0, np.float32),
seg_semantic=((self.image_height, self.image_width), 0, MAX_CLASS_COUNT, np.uint32),
seg_instance=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, np.uint32),
seg_instance_id=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, np.uint32),
flow=((self.image_height, self.image_width, 4), -np.inf, np.inf, np.float32),
bbox_2d_tight=bbox_2d_space,
bbox_2d_loose=bbox_2d_space,
bbox_3d=bbox_3d_space,
)
return obs_space_mapping
    @classmethod
    def clear(cls):
        """
        Clears all cached sensors that have been generated. Should be used when the simulator is completely reset; i.e.:
        all objects on the stage are destroyed
        """
        for sensor in cls.SENSORS.values():
            # Destroy any sensor that is not attached to the main viewport window
            if sensor._viewport.name != "Viewport":
                sensor._viewport.destroy()
        # Render to update
        render()
        # Reset all class-level registries back to their initial contents
        cls.SENSORS = dict()
        cls.KNOWN_SEMANTIC_IDS = set()
        cls.KEY_ARRAY = None
        cls.INSTANCE_REGISTRY = {0: "background", 1: "unlabelled"}
        cls.INSTANCE_ID_REGISTRY = {0: "background"}
@classproperty
def all_modalities(cls):
return {modality for modality in cls.ALL_MODALITIES if modality != "camera_params"}
@classproperty
def no_noise_modalities(cls):
# bounding boxes and camera state should not have noise
return {"bbox_2d_tight", "bbox_2d_loose", "bbox_3d"}
| 33,871 | Python | 46.175487 | 164 | 0.62059 |
StanfordVL/OmniGibson/omnigibson/envs/__init__.py | from omnigibson.envs.env_base import Environment
from omnigibson.envs.env_wrapper import EnvironmentWrapper, create_wrapper, REGISTERED_ENV_WRAPPERS
| 149 | Python | 48.999984 | 99 | 0.85906 |
StanfordVL/OmniGibson/omnigibson/envs/env_base.py | import gym
import numpy as np
from copy import deepcopy
import omnigibson as og
from omnigibson.objects import REGISTERED_OBJECTS
from omnigibson.robots import REGISTERED_ROBOTS
from omnigibson.scene_graphs.graph_builder import SceneGraphBuilder
from omnigibson.simulator import launch_simulator
from omnigibson.tasks import REGISTERED_TASKS
from omnigibson.scenes import REGISTERED_SCENES
from omnigibson.sensors import create_sensor
from omnigibson.utils.gym_utils import GymObservable, recursively_generate_flat_dict, recursively_generate_compatible_dict
from omnigibson.utils.config_utils import parse_config
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.python_utils import assert_valid_key, merge_nested_dicts, create_class_from_registry_and_config,\
Recreatable
from omnigibson.macros import gm
# Create module logger
log = create_module_logger(module_name=__name__)
class Environment(gym.Env, GymObservable, Recreatable):
"""
Core environment class that handles loading scene, robot(s), and task, following OpenAI Gym interface.
"""
    def __init__(self, configs):
        """
        Args:
            configs (str or dict or list of str or dict): config_file path(s) or raw config dictionaries.
                If multiple configs are specified, they will be merged sequentially in the order specified.
                This allows procedural generation of a "full" config from small sub-configs. For valid keys, please
                see @default_config below
        """
        # Call super first
        super().__init__()
        # Launch Isaac Sim
        launch_simulator()
        # Initialize other placeholders that will be filled in later
        self._task = None
        self._external_sensors = None
        self._loaded = None
        self._current_episode = 0
        # Variables reset at the beginning of each episode
        self._current_step = 0
        # Convert config file(s) into a single parsed dict
        configs = configs if isinstance(configs, list) or isinstance(configs, tuple) else [configs]
        # Initial default config
        self.config = self.default_config
        # Merge in specified configs (later configs override earlier ones)
        for config in configs:
            merge_nested_dicts(base_dict=self.config, extra_dict=parse_config(config), inplace=True)
        # Store settings and other initialized values
        self._automatic_reset = self.env_config["automatic_reset"]
        self._flatten_action_space = self.env_config["flatten_action_space"]
        self._flatten_obs_space = self.env_config["flatten_obs_space"]
        self.physics_frequency = self.env_config["physics_frequency"]
        self.action_frequency = self.env_config["action_frequency"]
        self.device = self.env_config["device"]
        self._initial_pos_z_offset = self.env_config["initial_pos_z_offset"]  # how high to offset object placement to account for one action step of dropping
        # Create the scene graph builder
        self._scene_graph_builder = None
        if "scene_graph" in self.config and self.config["scene_graph"] is not None:
            self._scene_graph_builder = SceneGraphBuilder(**self.config["scene_graph"])
        # Load this environment (builds scene, robots, objects, task, sensors and spaces)
        self.load()
def reload(self, configs, overwrite_old=True):
"""
Reload using another set of config file(s).
This allows one to change the configuration and hot-reload the environment on the fly.
Args:
configs (dict or str or list of dict or list of str): config_file dict(s) or path(s).
If multiple configs are specified, they will be merged sequentially in the order specified.
This allows procedural generation of a "full" config from small sub-configs.
overwrite_old (bool): If True, will overwrite the internal self.config with @configs. Otherwise, will
merge in the new config(s) into the pre-existing one. Setting this to False allows for minor
modifications to be made without having to specify entire configs during each reload.
"""
# Convert config file(s) into a single parsed dict
configs = [configs] if isinstance(configs, dict) or isinstance(configs, str) else configs
# Initial default config
new_config = self.default_config
# Merge in specified configs
for config in configs:
merge_nested_dicts(base_dict=new_config, extra_dict=parse_config(config), inplace=True)
# Either merge in or overwrite the old config
if overwrite_old:
self.config = new_config
else:
merge_nested_dicts(base_dict=self.config, extra_dict=new_config, inplace=True)
# Load this environment again
self.load()
    def reload_model(self, scene_model):
        """
        Reload another scene model.
        This allows one to change the scene on the fly.
        Args:
            scene_model (str): new scene model to load (eg.: Rs_int)
        """
        # Point the scene config at the requested model, then rebuild the whole environment
        self.scene_config["model"] = scene_model
        self.load()
    def _load_variables(self):
        """
        Load variables from config
        """
        # Store additional variables after config has been loaded fully
        self._initial_pos_z_offset = self.env_config["initial_pos_z_offset"]
        # Reset bookkeeping variables
        self._reset_variables()
        self._current_episode = 0  # Manually set this to 0 since resetting actually increments this
        # - Potentially overwrite the USD entry for the scene if none is specified and we're online sampling -
        # Make sure the requested scene is valid
        scene_type = self.scene_config["type"]
        assert_valid_key(key=scene_type, valid_keys=REGISTERED_SCENES, name="scene type")
        # Verify scene and task configs are valid for the given task type
        REGISTERED_TASKS[self.task_config["type"]].verify_scene_and_task_config(
            scene_cfg=self.scene_config,
            task_cfg=self.task_config,
        )
        # - Additionally run some sanity checks on these values -
        # Check to make sure our z offset is valid -- check that the distance travelled over 1 action timestep is
        # less than the offset we set (dist = 0.5 * gravity * (t^2))
        # NOTE(review): assumes Earth gravity (9.8) and meter-scale units -- confirm against sim gravity settings
        drop_distance = 0.5 * 9.8 * ((1. / self.action_frequency) ** 2)
        assert drop_distance < self._initial_pos_z_offset, "initial_pos_z_offset is too small for collision checking"
    def _load_task(self, task_config=None):
        """
        Load task
        Args:
            task_config (None or dict): If specified, custom task configuration to use. Otherwise, will use
                self.task_config. Note that if a custom task configuration is specified, the internal task config
                will be updated as well
        """
        # Update internal config if specified
        if task_config is not None:
            # Copy task config, in case self.task_config and task_config are the same!
            task_config = deepcopy(task_config)
            self.task_config.clear()
            self.task_config.update(task_config)
        # Sanity check task to make sure it's valid
        task_type = self.task_config["type"]
        assert_valid_key(key=task_type, valid_keys=REGISTERED_TASKS, name="task type")
        # Grab the kwargs relevant for the specific task and create the task
        self._task = create_class_from_registry_and_config(
            cls_name=self.task_config["type"],
            cls_registry=REGISTERED_TASKS,
            cfg=self.task_config,
            cls_type_descriptor="task",
        )
        assert og.sim.is_stopped(), "Simulator must be stopped before loading tasks!"
        # Load task. Should load additional task-relevant objects and configure the scene into its default initial state
        self._task.load(env=self)
        assert og.sim.is_stopped(), "Simulator must be stopped after loading tasks!"
    def _load_scene(self):
        """
        Load the scene specified in the config file (robots are loaded separately in _load_robots).
        """
        assert og.sim.is_stopped(), "Simulator must be stopped before loading scene!"
        # Set the simulator settings
        # NOTE: This must be done BEFORE the scene is loaded, or else all vision sensors can't retrieve observations
        og.sim.set_simulation_dt(physics_dt=(1. / self.physics_frequency), rendering_dt=(1. / self.action_frequency))
        # Create the scene from our scene config
        scene = create_class_from_registry_and_config(
            cls_name=self.scene_config["type"],
            cls_registry=REGISTERED_SCENES,
            cfg=self.scene_config,
            cls_type_descriptor="scene",
        )
        og.sim.import_scene(scene)
        # Set the rendering settings
        if gm.RENDER_VIEWER_CAMERA:
            og.sim.viewer_width = self.render_config["viewer_width"]
            og.sim.viewer_height = self.render_config["viewer_height"]
        og.sim.device = self.device
        assert og.sim.is_stopped(), "Simulator must be stopped after loading scene!"
    def _load_robots(self):
        """
        Load robots into the scene
        """
        # Only actually load robots if no robot has been imported from the scene loading directly yet
        if len(self.scene.robots) == 0:
            assert og.sim.is_stopped(), "Simulator must be stopped before loading robots!"
            # Iterate over all robots to generate in the robot config
            for i, robot_config in enumerate(self.robots_config):
                # Add a name for the robot if necessary
                if "name" not in robot_config:
                    robot_config["name"] = f"robot{i}"
                # Pose is popped so it isn't forwarded as a constructor kwarg
                position, orientation = robot_config.pop("position", None), robot_config.pop("orientation", None)
                # Make sure robot exists, grab its corresponding kwargs, and create / import the robot
                robot = create_class_from_registry_and_config(
                    cls_name=robot_config["type"],
                    cls_registry=REGISTERED_ROBOTS,
                    cfg=robot_config,
                    cls_type_descriptor="robot",
                )
                # Import the robot into the simulator
                og.sim.import_object(robot)
                robot.set_position_orientation(position=position, orientation=orientation)
            if len(self.robots_config) > 0:
                # Auto-initialize all robots (requires a transient play/stop cycle)
                og.sim.play()
                self.scene.reset()
                self.scene.update_initial_state()
                og.sim.stop()
        assert og.sim.is_stopped(), "Simulator must be stopped after loading robots!"
    def _load_objects(self):
        """
        Load any additional custom objects into the scene
        """
        assert og.sim.is_stopped(), "Simulator must be stopped before loading objects!"
        for i, obj_config in enumerate(self.objects_config):
            # Add a name for the object if necessary
            if "name" not in obj_config:
                obj_config["name"] = f"obj{i}"
            # Pop the desired position and orientation
            position, orientation = obj_config.pop("position", None), obj_config.pop("orientation", None)
            # Make sure object exists, grab its corresponding kwargs, and create / import the object
            obj = create_class_from_registry_and_config(
                cls_name=obj_config["type"],
                cls_registry=REGISTERED_OBJECTS,
                cfg=obj_config,
                cls_type_descriptor="object",
            )
            # Import the object into the simulator and set the pose
            og.sim.import_object(obj)
            obj.set_position_orientation(position=position, orientation=orientation)
        if len(self.objects_config) > 0:
            # Auto-initialize all objects (requires a transient play/stop cycle)
            og.sim.play()
            self.scene.reset()
            self.scene.update_initial_state()
            og.sim.stop()
        assert og.sim.is_stopped(), "Simulator must be stopped after loading objects!"
    def _load_external_sensors(self):
        """
        Load any additional custom external sensors into the scene
        """
        assert og.sim.is_stopped(), "Simulator must be stopped before loading external sensors!"
        sensors_config = self.env_config["external_sensors"]
        if sensors_config is not None:
            self._external_sensors = dict()
            for i, sensor_config in enumerate(sensors_config):
                # Add a name for the sensor if necessary
                if "name" not in sensor_config:
                    sensor_config["name"] = f"external_sensor{i}"
                # Determine prim path if not specified
                if "prim_path" not in sensor_config:
                    sensor_config["prim_path"] = f"/World/{sensor_config['name']}"
                # Pop the desired position and orientation
                local_position, local_orientation = sensor_config.pop("local_position", None), sensor_config.pop("local_orientation", None)
                # Make sure sensor exists, grab its corresponding kwargs, and create the sensor
                sensor = create_sensor(**sensor_config)
                # Load and initialize this sensor
                sensor.load()
                sensor.initialize()
                sensor.set_local_pose(local_position, local_orientation)
                self._external_sensors[sensor.name] = sensor
        assert og.sim.is_stopped(), "Simulator must be stopped after loading external sensors!"
def _load_observation_space(self):
# Grab robot(s) and task obs spaces
obs_space = dict()
for robot in self.robots:
# Load the observation space for the robot
obs_space[robot.name] = robot.load_observation_space()
# Also load the task obs space
obs_space["task"] = self._task.load_observation_space()
# Also load any external sensors
if self._external_sensors is not None:
external_obs_space = dict()
for sensor_name, sensor in self._external_sensors.items():
# Load the sensor observation space
external_obs_space[sensor_name] = sensor.load_observation_space()
obs_space["external"] = gym.spaces.Dict(external_obs_space)
return obs_space
def load_observation_space(self):
# Call super first
obs_space = super().load_observation_space()
# If we want to flatten it, modify the observation space by recursively searching through all
if self._flatten_obs_space:
self.observation_space = gym.spaces.Dict(recursively_generate_flat_dict(dic=obs_space))
return self.observation_space
def _load_action_space(self):
"""
Load action space for each robot
"""
action_space = gym.spaces.Dict({robot.name: robot.action_space for robot in self.robots})
# Convert into flattened 1D Box space if requested
if self._flatten_action_space:
lows = []
highs = []
for space in action_space.values():
assert isinstance(space, gym.spaces.Box), \
"Can only flatten action space where all individual spaces are gym.space.Box instances!"
assert len(space.shape) == 1, \
"Can only flatten action space where all individual spaces are 1D instances!"
lows.append(space.low)
highs.append(space.high)
action_space = gym.spaces.Box(np.concatenate(lows), np.concatenate(highs), dtype=np.float32)
# Store action space
self.action_space = action_space
    def load(self):
        """
        Load the scene and robot specified in the config file.
        """
        # This environment is not loaded
        self._loaded = False
        # Load config variables
        self._load_variables()
        # Load the scene, robots, and task (order matters: scene must exist before robots/objects/task)
        self._load_scene()
        self._load_robots()
        self._load_objects()
        self._load_task()
        self._load_external_sensors()
        og.sim.play()
        self.reset()
        # Load the obs / action spaces
        self.load_observation_space()
        self._load_action_space()
        # Start the scene graph builder
        if self._scene_graph_builder:
            self._scene_graph_builder.start(self.scene)
        # Denote that the scene is loaded
        self._loaded = True
    def update_task(self, task_config):
        """
        Updates the internal task using @task_config. NOTE: This will internally reset the environment as well!
        Args:
            task_config (dict): Task configuration for updating the new task
        """
        # Make sure sim is playing
        assert og.sim.is_playing(), "Update task should occur while sim is playing!"
        # Denote scene as not loaded yet
        self._loaded = False
        # Loading a task requires a stopped sim; restore play afterwards
        og.sim.stop()
        self._load_task(task_config=task_config)
        og.sim.play()
        self.reset()
        # Load obs / action spaces
        self.load_observation_space()
        self._load_action_space()
        # Scene is now loaded again
        self._loaded = True
    def close(self):
        """
        Clean up the environment and shut down the simulation.
        """
        # NOTE(review): og.shutdown() appears to tear down the whole simulator, not just this env -- the
        # environment is unusable afterwards; confirm against og.shutdown's documentation
        og.shutdown()
    def get_obs(self):
        """
        Get the current environment observation.
        Returns:
            2-tuple:
                dict: Keyword-mapped observations, which are possibly nested
                dict: Additional information about the observations
        """
        obs = dict()
        info = dict()
        # Grab all observations from each robot
        for robot in self.robots:
            obs[robot.name], info[robot.name] = robot.get_obs()
        # Add task observations (NOTE: task contributes no corresponding "info" entry)
        obs["task"] = self._task.get_obs(env=self)
        # Add external sensor observations if they exist
        if self._external_sensors is not None:
            external_obs = dict()
            external_info = dict()
            for sensor_name, sensor in self._external_sensors.items():
                external_obs[sensor_name], external_info[sensor_name] = sensor.get_obs()
            obs["external"] = external_obs
            info["external"] = external_info
        # Possibly flatten obs if requested
        if self._flatten_obs_space:
            obs = recursively_generate_flat_dict(dic=obs)
        return obs, info
def get_scene_graph(self):
"""
Get the current scene graph.
Returns:
SceneGraph: Current scene graph
"""
assert self._scene_graph_builder is not None, "Scene graph builder must be specified in config!"
return self._scene_graph_builder.get_scene_graph()
    def _populate_info(self, info):
        """
        Populate info dictionary with any useful information.
        NOTE: modifies @info in place; nothing is returned.
        Args:
            info (dict): Information dictionary to populate
        """
        info["episode_length"] = self._current_step
        if self._scene_graph_builder is not None:
            info["scene_graph"] = self.get_scene_graph()
def step(self, action):
"""
Apply robot's action and return the next state, reward, done and info,
following OpenAI Gym's convention
Args:
action (gym.spaces.Dict or dict or np.array): robot actions. If a dict is specified, each entry should
map robot name to corresponding action. If a np.array, it should be the flattened, concatenated set
of actions
Returns:
4-tuple:
- dict: state, i.e. next observation
- float: reward, i.e. reward at this current timestep
- bool: done, i.e. whether this episode is terminated
- dict: info, i.e. dictionary with any useful information
"""
try:
# If the action is not a dictionary, convert into a dictionary
if not isinstance(action, dict) and not isinstance(action, gym.spaces.Dict):
action_dict = dict()
idx = 0
for robot in self.robots:
action_dim = robot.action_dim
action_dict[robot.name] = action[idx: idx + action_dim]
idx += action_dim
else:
# Our inputted action is the action dictionary
action_dict = action
# Iterate over all robots and apply actions
for robot in self.robots:
robot.apply_action(action_dict[robot.name])
# Run simulation step
og.sim.step()
# Grab observations
obs, obs_info = self.get_obs()
# Step the scene graph builder if necessary
if self._scene_graph_builder is not None:
self._scene_graph_builder.step(self.scene)
# Grab reward, done, and info, and populate with internal info
reward, done, info = self.task.step(self, action)
self._populate_info(info)
info["obs_info"] = obs_info
if done and self._automatic_reset:
# Add lost observation to our information dict, and reset
info["last_observation"] = obs
obs = self.reset()
# Increment step
self._current_step += 1
return obs, reward, done, info
except:
raise ValueError(f"Failed to execute environment step {self._current_step} in episode {self._current_episode}")
def _reset_variables(self):
"""
Reset bookkeeping variables for the next new episode.
"""
self._current_episode += 1
self._current_step = 0
    # TODO: Match super class signature?
    def reset(self):
        """
        Reset episode.
        Returns:
            dict: initial observations after the reset (flattened if the env was configured to flatten obs)
        Raises:
            ValueError: if the environment is loaded and the returned observations do not match the
                declared observation space (details are logged before raising)
        """
        # Reset the task
        self.task.reset(self)
        # Reset internal variables
        self._reset_variables()
        # Run a single simulator step to make sure we can grab updated observations
        og.sim.step()
        # Grab and return observations
        obs, _ = self.get_obs()
        if self._loaded:
            # Sanity check to make sure received observations match expected observation space
            check_obs = recursively_generate_compatible_dict(dic=obs)
            if not self.observation_space.contains(check_obs):
                # Build flat (key -> dtype/shape) summaries of both sides to produce a readable diff
                exp_obs = dict()
                for key, value in recursively_generate_flat_dict(dic=self.observation_space).items():
                    exp_obs[key] = ("obs_space", key, value.dtype, value.shape)
                real_obs = dict()
                for key, value in recursively_generate_flat_dict(dic=check_obs).items():
                    if isinstance(value, np.ndarray):
                        real_obs[key] = ("obs", key, value.dtype, value.shape)
                    else:
                        real_obs[key] = ("obs", key, type(value), "()")
                exp_keys = set(exp_obs.keys())
                real_keys = set(real_obs.keys())
                shared_keys = exp_keys.intersection(real_keys)
                missing_keys = exp_keys - real_keys
                extra_keys = real_keys - exp_keys
                if missing_keys:
                    log.error("MISSING OBSERVATION KEYS:")
                    log.error(missing_keys)
                if extra_keys:
                    log.error("EXTRA OBSERVATION KEYS:")
                    log.error(extra_keys)
                mismatched_keys = []
                for k in shared_keys:
                    if exp_obs[k][2:] != real_obs[k][2:]:  # Compare dtypes and shapes
                        mismatched_keys.append(k)
                        log.error(f"MISMATCHED OBSERVATION FOR KEY '{k}':")
                        log.error(f"Expected: {exp_obs[k]}")
                        log.error(f"Received: {real_obs[k]}")
                raise ValueError("Observation space does not match returned observations!")
        return obs
@property
def episode_steps(self):
"""
Returns:
int: Current number of steps in episode
"""
return self._current_step
@property
def initial_pos_z_offset(self):
"""
Returns:
float: how high to offset object placement to test valid pose & account for one action step of dropping
"""
return self._initial_pos_z_offset
@property
def task(self):
"""
Returns:
BaseTask: Active task instance
"""
return self._task
@property
def scene(self):
"""
Returns:
Scene: Active scene in this environment
"""
return og.sim.scene
@property
def robots(self):
"""
Returns:
list of BaseRobot: Robots in the current scene
"""
return self.scene.robots
@property
def external_sensors(self):
"""
Returns:
None or dict: If self.env_config["external_sensors"] is specified, returns the dict mapping sensor name to
instantiated sensor. Otherwise, returns None
"""
return self._external_sensors
@property
def env_config(self):
"""
Returns:
dict: Environment-specific configuration kwargs
"""
return self.config["env"]
@property
def render_config(self):
"""
Returns:
dict: Render-specific configuration kwargs
"""
return self.config["render"]
@property
def scene_config(self):
"""
Returns:
dict: Scene-specific configuration kwargs
"""
return self.config["scene"]
@property
def robots_config(self):
"""
Returns:
dict: Robot-specific configuration kwargs
"""
return self.config["robots"]
@property
def objects_config(self):
"""
Returns:
dict: Object-specific configuration kwargs
"""
return self.config["objects"]
@property
def task_config(self):
"""
Returns:
dict: Task-specific configuration kwargs
"""
return self.config["task"]
@property
def wrapper_config(self):
"""
Returns:
dict: Wrapper-specific configuration kwargs
"""
return self.config["wrapper"]
    @property
    def default_config(self):
        """
        Returns:
            dict: Default configuration for this environment. May not be fully specified (i.e.: still requires @config
                to be specified during environment creation)
        """
        return {
            # Environment kwargs
            "env": {
                "action_frequency": 30,
                "physics_frequency": 120,
                "device": None,
                "automatic_reset": False,
                "flatten_action_space": False,
                "flatten_obs_space": False,
                "initial_pos_z_offset": 0.1,
                "external_sensors": None,
            },
            # Rendering kwargs
            "render": {
                "viewer_width": 1280,
                "viewer_height": 720,
            },
            # Scene kwargs
            "scene": {
                # Traversability map kwargs
                "waypoint_resolution": 0.2,
                "num_waypoints": 10,
                "trav_map_resolution": 0.1,
                "default_erosion_radius": 0.0,
                "trav_map_with_objects": True,
                "scene_instance": None,
                "scene_file": None,
            },
            # Robot kwargs
            "robots": [],  # no robots by default
            # Object kwargs
            "objects": [],  # no objects by default
            # Task kwargs
            "task": {
                "type": "DummyTask",
            },
            # Wrapper kwargs
            "wrapper": {
                "type": None,
            },
        }
| 28,380 | Python | 36.147906 | 160 | 0.583298 |
StanfordVL/OmniGibson/omnigibson/envs/env_wrapper.py | from omnigibson.utils.python_utils import Wrapper
from omnigibson.utils.python_utils import Registerable, classproperty, create_class_from_registry_and_config
from omnigibson.utils.ui_utils import create_module_logger
from copy import deepcopy
# Global dicts that will contain mappings
REGISTERED_ENV_WRAPPERS = dict()
# Create module logger
log = create_module_logger(module_name=__name__)
def create_wrapper(env):
    """
    Wraps environment @env with wrapper defined by env.wrapper_config
    """
    # Work on a copy so the env's stored wrapper config is not mutated by the pop below
    cfg = deepcopy(env.wrapper_config)
    wrapper_name = cfg.pop("type")
    cfg["env"] = env
    return create_class_from_registry_and_config(
        cls_name=wrapper_name,
        cls_registry=REGISTERED_ENV_WRAPPERS,
        cfg=cfg,
        cls_type_descriptor="wrapper",
    )
class EnvironmentWrapper(Wrapper, Registerable):
    """
    Base class for all environment wrappers in OmniGibson. In general, reset(), step(), and observation_spec() should
    be overwritten
    Args:
        env (OmniGibsonEnv): The environment to wrap.
    """
    def __init__(self, env):
        self.env = env
        # Run super
        super().__init__(obj=env)
    def step(self, action):
        """
        By default, run the normal environment step() function
        Args:
            action (np.array): action to take in environment
        Returns:
            4-tuple:
                - (dict) observations from the environment
                - (float) reward from the environment
                - (bool) whether the current episode is completed or not
                - (dict) misc information
        """
        return self.env.step(action)
    def reset(self):
        """
        By default, run the normal environment reset() function
        Returns:
            dict: Initial observations after the reset occurs
        """
        return self.env.reset()
    def observation_spec(self):
        """
        By default, grabs the normal environment observation_spec
        Returns:
            dict: Observations from the environment
        """
        return self.env.observation_spec()
    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("EnvironmentWrapper")
        return classes
    @classproperty
    def _cls_registry(cls):
        # Global environment wrapper registry
        global REGISTERED_ENV_WRAPPERS
        return REGISTERED_ENV_WRAPPERS
| 2,560 | Python | 27.455555 | 117 | 0.633984 |
StanfordVL/OmniGibson/omnigibson/reward_functions/potential_reward.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction
class PotentialReward(BaseRewardFunction):
    """
    Potential reward
    Assume task has get_potential implemented; Low potential is preferred
    (e.g. a common potential for goal-directed task is the distance to goal)
    Args:
        potential_fcn (method): function for calculating potential. Function signature should be:
            potential = potential_fcn(env)
            where @env is a Environment instance, and @potential is a float value representing the calculated potential
        r_potential (float): Reward weighting to give proportional to the potential difference calculated
            in between env timesteps
    """
    def __init__(self, potential_fcn, r_potential=1.0):
        # Store internal vars
        self._potential_fcn = potential_fcn
        self._r_potential = r_potential
        # Store internal vars that will be filled in at runtime
        self._potential = None
        # Run super
        super().__init__()
    def reset(self, task, env):
        """
        Compute the initial potential after episode reset
        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
        """
        # Reset potential
        self._potential = self._potential_fcn(env)
    def _step(self, task, env, action):
        # Reward is proportional to the potential difference between the current and previous timestep
        # (positive when potential decreased, since lower potential is preferred)
        new_potential = self._potential_fcn(env)
        reward = (self._potential - new_potential) * self._r_potential
        # Update internal potential
        self._potential = new_potential
        return reward, {}
| 1,683 | Python | 32.019607 | 119 | 0.660131 |
StanfordVL/OmniGibson/omnigibson/reward_functions/__init__.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction, REGISTERED_REWARD_FUNCTIONS
from omnigibson.reward_functions.collision_reward import CollisionReward
from omnigibson.reward_functions.point_goal_reward import PointGoalReward
from omnigibson.reward_functions.potential_reward import PotentialReward
from omnigibson.reward_functions.reaching_goal_reward import ReachingGoalReward
| 409 | Python | 67.333322 | 108 | 0.885086 |
StanfordVL/OmniGibson/omnigibson/reward_functions/reward_function_base.py | from abc import ABCMeta, abstractmethod
from copy import deepcopy
from omnigibson.utils.python_utils import classproperty, Registerable
REGISTERED_REWARD_FUNCTIONS = dict()
class BaseRewardFunction(Registerable, metaclass=ABCMeta):
    """
    Abstract base class for reward functions.

    Subclasses implement the per-timestep reward computation in _step(); any
    episode-specific state is cleared by overriding reset().
    """
    def __init__(self):
        # Most-recent reward / info, populated on the first step() call
        self._reward = None
        self._info = None

    @abstractmethod
    def _step(self, task, env, action):
        """
        Compute the reward at the current timestep. Must be overridden by subclasses.

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment

        Returns:
            2-tuple:
                - float: computed reward
                - dict: any reward-related information for this specific reward
        """
        raise NotImplementedError()

    def step(self, task, env, action):
        """
        Step the reward function, caching the computed reward and info internally.

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment

        Returns:
            2-tuple:
                - float: computed reward
                - dict: any reward-related information for this specific reward
        """
        # Delegate to the subclass implementation and cache the outputs
        reward, info = self._step(task=task, env=env, action=action)
        self._reward = reward
        self._info = info

        # Hand back the reward along with a defensive copy of the info
        return reward, deepcopy(info)

    def reset(self, task, env):
        """
        Reward function-specific reset: clear any cached per-episode values.

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
        """
        self._reward = None
        self._info = None

    @property
    def reward(self):
        """
        Returns:
            float: Current reward for this reward function
        """
        assert self._reward is not None, "At least one step() must occur before reward can be calculated!"
        return self._reward

    @property
    def info(self):
        """
        Returns:
            dict: Current info for this reward function
        """
        assert self._info is not None, "At least one step() must occur before info can be calculated!"
        return self._info

    @classproperty
    def _do_not_register_classes(cls):
        # Abstract template -- exclude it from the registry
        skip_registration = super()._do_not_register_classes
        skip_registration.add("BaseRewardFunction")
        return skip_registration

    @classproperty
    def _cls_registry(cls):
        # Module-level registry shared by all concrete reward functions
        global REGISTERED_REWARD_FUNCTIONS
        return REGISTERED_REWARD_FUNCTIONS
| 3,066 | Python | 30.618556 | 107 | 0.61546 |
StanfordVL/OmniGibson/omnigibson/reward_functions/point_goal_reward.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction
class PointGoalReward(BaseRewardFunction):
    """
    Sparse success reward paid out when the robot's base reaches the point goal.

    Args:
        pointgoal (PointGoal): Termination condition for checking whether a point goal is reached
        r_pointgoal (float): Reward for reaching the point goal
    """
    def __init__(self, pointgoal, r_pointgoal=10.0):
        # Keep references to the goal condition and the reward magnitude
        self._pointgoal = pointgoal
        self._r_pointgoal = r_pointgoal

        # Run super
        super().__init__()

    def _step(self, task, env, action):
        # Pay the sparse reward only once the pointgoal condition reports success
        if self._pointgoal.success:
            reward = self._r_pointgoal
        else:
            reward = 0.0
        return reward, {}
| 811 | Python | 29.074073 | 97 | 0.664612 |
StanfordVL/OmniGibson/omnigibson/reward_functions/collision_reward.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction
from omnigibson.object_states.contact_bodies import ContactBodies
class CollisionReward(BaseRewardFunction):
    """
    Collision reward

    Penalize robot collision. Typically collision_reward_weight is negative. Note that we ignore collisions with any
    floor objects.

    Args:
        robot_idn (int): robot identifier to evaluate collision penalty with. Default is 0, corresponding to the first
            robot added to the scene
        ignore_self_collisions (bool): Whether to ignore robot self-collisions or not
        r_collision (float): Penalty value (>0) to penalize collisions
    """
    def __init__(self, robot_idn=0, ignore_self_collisions=True, r_collision=0.1):
        # Store internal vars
        assert r_collision > 0, f"r_collision must be positive, got: {r_collision}!"
        self._robot_idn = robot_idn
        self._ignore_self_collisions = ignore_self_collisions
        self._r_collision = r_collision

        # Run super
        super().__init__()

    def _step(self, task, env, action):
        # Reward is -self._r_collision if there were any collisions in the last timestep
        robot = env.robots[self._robot_idn]

        # Always ignore floor objects; additionally ignore the robot's own prims only when
        # self-collisions should not be penalized.
        # BUGFIX: the original tested `self._ignore_self_collisions is None`, which made
        # ignore_self_collisions=False behave identically to True (self-collisions were
        # always ignored unless None was passed); test the boolean's truthiness instead.
        floors = list(env.scene.object_registry("category", "floors", []))
        ignore_objs = floors + [robot] if self._ignore_self_collisions else floors

        in_contact = len(robot.states[ContactBodies].get_value(ignore_objs=tuple(ignore_objs))) > 0
        reward = float(in_contact) * -self._r_collision
        return reward, {}
| 1,694 | Python | 44.81081 | 121 | 0.681818 |
StanfordVL/OmniGibson/omnigibson/reward_functions/reaching_goal_reward.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction
import omnigibson.utils.transform_utils as T
class ReachingGoalReward(BaseRewardFunction):
    """
    Reaching goal reward

    Sparse success reward for reaching the goal with the robot's end-effector.

    Args:
        robot_idn (int): robot identifier to evaluate point goal with. Default is 0, corresponding to the first
            robot added to the scene
        r_reach (float): reward for succeeding to reach the goal
        distance_tol (float): Distance (m) tolerance between goal position and @robot_idn's robot eef position
            that is accepted as a success
    """
    def __init__(self, robot_idn=0, r_reach=10.0, distance_tol=0.1):
        # Record which robot to monitor, the reward magnitude, and the success tolerance
        self._robot_idn = robot_idn
        self._r_reach = r_reach
        self._distance_tol = distance_tol

        # Run super
        super().__init__()

    def _step(self, task, env, action):
        # Sparse reward: paid out whenever the eef is within distance_tol of the task goal
        eef_pos = env.robots[self._robot_idn].get_eef_position()
        within_tol = T.l2_distance(eef_pos, task.goal_pos) < self._distance_tol
        reward = self._r_reach if within_tol else 0.0
        return reward, {}
return reward, {}
| 1,289 | Python | 36.941175 | 118 | 0.660978 |
StanfordVL/OmniGibson/omnigibson/tasks/point_reaching_task.py | import numpy as np
from omnigibson.tasks.point_navigation_task import PointNavigationTask
from omnigibson.termination_conditions.point_goal import PointGoal
import omnigibson.utils.transform_utils as T
# Valid point navigation reward types
# NOTE(review): this constant appears unused within this module -- PointReachingTask
# hard-codes reward_type="l2" when calling its parent constructor. Presumably kept for
# import parity with point_navigation_task; confirm before removing.
POINT_NAVIGATION_REWARD_TYPES = {"l2", "geodesic"}
class PointReachingTask(PointNavigationTask):
    """
    Point Reaching Task

    The goal is to reach a random goal position with the robot's end effector.

    Args:
        robot_idn (int): Which robot that this task corresponds to
        floor (int): Which floor to navigate on
        initial_pos (None or 3-array): If specified, should be (x,y,z) global initial position to place the robot
            at the start of each task episode. If None, a collision-free value will be randomly sampled
        initial_quat (None or 3-array): If specified, should be (r,p,y) global euler orientation to place the robot
            at the start of each task episode. If None, a value will be randomly sampled about the z-axis
        goal_pos (None or 3-array): If specified, should be (x,y,z) global goal position to reach for the given task
            episode. If None, a collision-free value will be randomly sampled
        goal_tolerance (float): Distance between goal position and current position below which is considered a task
            success
        goal_in_polar (bool): Whether to represent the goal in polar coordinates or not when capturing task observations
        path_range (None or 2-array): If specified, should be (min, max) values representing the range of valid
            total path lengths that are valid when sampling initial / goal positions
        height_range (None or 2-array): If specified, should be (min, max) values representing the range of valid
            total heights that are valid when sampling goal positions
        visualize_goal (bool): Whether to visualize the initial / goal locations
        visualize_path (bool): Whether to visualize the path from initial to goal location, as represented by
            discrete waypoints
        goal_height (float): If visualizing, specifies the height of the visual goals (m)
        waypoint_height (float): If visualizing, specifies the height of the visual waypoints (m)
        waypoint_width (float): If visualizing, specifies the width of the visual waypoints (m)
        n_vis_waypoints (int): If visualizing, specifies the number of waypoints to generate
        termination_config (None or dict): Keyword-mapped configuration to use to generate termination conditions. This
            should be specific to the task class. Default is None, which corresponds to a default config being used.
            Note that any keyword required by a specific task class but not specified in the config will automatically
            be filled in with the default config. See cls.default_termination_config for default values used
        reward_config (None or dict): Keyword-mapped configuration to use to generate reward functions. This should be
            specific to the task class. Default is None, which corresponds to a default config being used. Note that
            any keyword required by a specific task class but not specified in the config will automatically be filled
            in with the default config. See cls.default_reward_config for default values used
    """
    def __init__(
        self,
        robot_idn=0,
        floor=0,
        initial_pos=None,
        initial_quat=None,
        goal_pos=None,
        goal_tolerance=0.1,
        goal_in_polar=False,
        path_range=None,
        height_range=None,
        visualize_goal=False,
        visualize_path=False,
        goal_height=0.06,
        waypoint_height=0.05,
        waypoint_width=0.1,
        n_vis_waypoints=10,
        reward_config=None,
        termination_config=None,
    ):
        # The goal height range is the only input unique to the reaching variant
        self._height_range = height_range

        # Everything else is deferred to the navigation task
        super().__init__(
            robot_idn=robot_idn,
            floor=floor,
            initial_pos=initial_pos,
            initial_quat=initial_quat,
            goal_pos=goal_pos,
            goal_tolerance=goal_tolerance,
            goal_in_polar=goal_in_polar,
            path_range=path_range,
            visualize_goal=visualize_goal,
            visualize_path=visualize_path,
            goal_height=goal_height,
            waypoint_height=waypoint_height,
            waypoint_width=waypoint_width,
            n_vis_waypoints=n_vis_waypoints,
            reward_type="l2",  # Must use l2 for reaching task
            reward_config=reward_config,
            termination_config=termination_config,
        )

    def _create_termination_conditions(self):
        # Start from the navigation task's termination conditions
        terminations = super()._create_termination_conditions()

        # Swap in a 3D point-goal check: the reaching task measures distance along all of
        # x, y, and z rather than just the ground plane
        terminations["pointgoal"] = PointGoal(
            robot_idn=self._robot_idn,
            distance_tol=self._goal_tolerance,
            distance_axes="xyz",
        )

        return terminations

    def _sample_initial_pose_and_goal_pos(self, env, max_trials=100):
        # Sample a base pose and goal exactly as the navigation task does
        start_pos, start_ori, target_pos = super()._sample_initial_pose_and_goal_pos(env=env, max_trials=max_trials)

        # Lift the goal by a random offset within the requested height range, if specified
        if self._height_range is not None:
            target_pos[2] += np.random.uniform(*self._height_range)

        return start_pos, start_ori, target_pos

    def _get_l2_potential(self, env):
        # Potential is measured from the robot's end effector, not its base
        eef_pos = env.robots[self._robot_idn].get_eef_position()
        return T.l2_distance(eef_pos, self._goal_pos)

    def _get_obs(self, env):
        # Grab the navigation task's observations first
        low_dim_obs, obs = super()._get_obs(env=env)

        # Replace the planar goal offset with the full xyz goal vector in the robot frame
        low_dim_obs.pop("xy_pos_to_goal")
        low_dim_obs["eef_to_goal"] = self._global_pos_to_robot_frame(env=env, pos=self._goal_pos)

        # Also expose the eef position itself, expressed in the robot frame
        eef_pos = env.robots[self._robot_idn].get_eef_position()
        low_dim_obs["eef_local_pos"] = self._global_pos_to_robot_frame(env=env, pos=eef_pos)

        return low_dim_obs, obs

    def get_current_pos(self, env):
        # The "current position" for this task is the end effector, not the base
        return env.robots[self._robot_idn].get_eef_position()
| 6,615 | Python | 46.942029 | 131 | 0.654271 |
StanfordVL/OmniGibson/omnigibson/tasks/task_base.py | from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
from omnigibson.utils.python_utils import classproperty, Registerable
from omnigibson.utils.gym_utils import GymObservable
REGISTERED_TASKS = dict()
class BaseTask(GymObservable, Registerable, metaclass=ABCMeta):
    """
    Base Task class.

    Task-specific reset_scene, reset_agent, step methods are implemented in subclasses

    Args:
        termination_config (None or dict): Keyword-mapped configuration to use to generate termination conditions. This
            should be specific to the task class. Default is None, which corresponds to a default config being used.
            Note that any keyword required by a specific task class but not specified in the config will automatically
            be filled in with the default config. See cls.default_termination_config for default values used
        reward_config (None or dict): Keyword-mapped configuration to use to generate reward functions. This should be
            specific to the task class. Default is None, which corresponds to a default config being used. Note that
            any keyword required by a specific task class but not specified in the config will automatically be filled
            in with the default config. See cls.default_reward_config for default values used
    """
    def __init__(self, termination_config=None, reward_config=None):
        # Make sure configs are dictionaries
        termination_config = dict() if termination_config is None else termination_config
        reward_config = dict() if reward_config is None else reward_config

        # Sanity check termination and reward conditions -- any keys found in the inputted config but NOT
        # found in the default config should raise an error
        unknown_termination_keys = set(termination_config.keys()) - set(self.default_termination_config.keys())
        assert len(unknown_termination_keys) == 0, \
            f"Got unknown termination config keys inputted: {unknown_termination_keys}"
        unknown_reward_keys = set(reward_config.keys()) - set(self.default_reward_config.keys())
        assert len(unknown_reward_keys) == 0, f"Got unknown reward config keys inputted: {unknown_reward_keys}"

        # Combine with defaults and store internally. Copy the defaults first so that a subclass
        # returning a shared (e.g. class-level) dict from default_*_config is never mutated in place
        self._termination_config = dict(self.default_termination_config)
        self._termination_config.update(termination_config)
        self._reward_config = dict(self.default_reward_config)
        self._reward_config.update(reward_config)

        # Generate reward and termination functions
        self._termination_conditions = self._create_termination_conditions()
        self._reward_functions = self._create_reward_functions()

        # Store other internal vars that will be populated at runtime
        self._loaded = False
        self._reward = None
        self._done = None
        self._success = None
        self._info = None
        self._low_dim_obs_dim = None

        # Run super init
        super().__init__()

    @abstractmethod
    def _load(self, env):
        """
        Load this task. Should be implemented by subclass. Can include functionality, e.g.: loading dynamic objects
        into the environment
        """
        raise NotImplementedError()

    @abstractmethod
    def _load_non_low_dim_observation_space(self):
        """
        Loads any non-low dim observation spaces for this task.

        Returns:
            dict: Keyword-mapped observation space for this object mapping non low dim task observation name to
                observation space
        """
        raise NotImplementedError()

    @classmethod
    def verify_scene_and_task_config(cls, scene_cfg, task_cfg):
        """
        Runs any necessary sanity checks on the scene and task configs passed; and possibly modifies them in-place

        Args:
            scene_cfg (dict): Scene configuration
            task_cfg (dict): Task configuration
        """
        # Default is no-op
        pass

    def _load_observation_space(self):
        # Create the non low dim obs space
        obs_space = self._load_non_low_dim_observation_space()

        # Create the low dim obs space and add to the main obs space dict -- make sure we're flattening low dim obs
        obs_space["low_dim"] = self._build_obs_box_space(shape=(self._low_dim_obs_dim,), low=-np.inf, high=np.inf, dtype=np.float64)

        return obs_space

    def load(self, env):
        """
        Load this task

        Args:
            env (Environment): environment instance
        """
        # Make sure the scene is of the correct type!
        assert any(issubclass(env.scene.__class__, valid_cls) for valid_cls in self.valid_scene_types), \
            f"Got incompatible scene type {env.scene.__class__.__name__} for task {self.__class__.__name__}! " \
            f"Scene class must be a subclass of at least one of: " \
            f"{[cls_type.__name__ for cls_type in self.valid_scene_types]}"

        # Run internal method
        self._load(env=env)

        # We're now initialized
        self._loaded = True

    @abstractmethod
    def _create_termination_conditions(self):
        """
        Creates the termination functions in the environment

        Returns:
            dict of BaseTerminationCondition: Termination functions created for this task
        """
        raise NotImplementedError()

    @abstractmethod
    def _create_reward_functions(self):
        """
        Creates the reward functions in the environment

        Returns:
            dict of BaseRewardFunction: Reward functions created for this task
        """
        raise NotImplementedError()

    def _reset_scene(self, env):
        """
        Task-specific scene reset. Default is the normal scene reset

        Args:
            env (Environment): environment instance
        """
        env.scene.reset()

    def _reset_agent(self, env):
        """
        Task-specific agent reset

        Args:
            env (Environment): environment instance
        """
        # Default is no-op
        pass

    def _reset_variables(self, env):
        """
        Task-specific internal variable reset

        Args:
            env (Environment): environment instance
        """
        # By default, reset reward, done, and info
        self._reward = None
        self._done = False
        self._success = False
        self._info = None

    def reset(self, env):
        """
        Resets this task in the environment

        Args:
            env (Environment): environment instance to reset
        """
        # Reset the scene, agent, and variables
        self._reset_scene(env)
        self._reset_agent(env)
        self._reset_variables(env)

        # Also reset all termination conditions and reward functions
        for termination_condition in self._termination_conditions.values():
            termination_condition.reset(self, env)
        for reward_function in self._reward_functions.values():
            reward_function.reset(self, env)

        # Fill in low dim obs dim so we can use this to create the observation space later
        self._low_dim_obs_dim = len(self.get_obs(env=env, flatten_low_dim=True)["low_dim"])

    def _step_termination(self, env, action, info=None):
        """
        Step and aggregate termination conditions

        Args:
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment
            info (None or dict): Any info to return

        Returns:
            2-tuple:
                - bool: aggregated done flag at the current timestep
                - dict: any information passed through this function or generated by this function
        """
        # Get all dones and successes from individual termination conditions
        dones = []
        successes = []
        for termination_condition in self._termination_conditions.values():
            d, s = termination_condition.step(self, env, action)
            dones.append(d)
            successes.append(s)

        # Any True found corresponds to a done / success
        done = any(dones)
        success = any(successes)

        # Populate info
        info = dict() if info is None else info
        info["success"] = success
        return done, info

    def _step_reward(self, env, action, info=None):
        """
        Step and aggregate reward functions

        Args:
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment
            info (None or dict): Any info to return

        Returns:
            2-tuple:
                - float: aggregated reward at the current timestep
                - dict: any information passed through this function or generated by this function
        """
        # Make sure info is a dict
        total_info = dict() if info is None else info

        # We'll also store individual reward split as well
        breakdown_dict = dict()

        # Aggregate rewards over all reward functions
        total_reward = 0.0
        for reward_name, reward_function in self._reward_functions.items():
            reward, reward_info = reward_function.step(self, env, action)
            total_reward += reward
            breakdown_dict[reward_name] = reward
            total_info[reward_name] = reward_info

        # Store breakdown dict
        total_info["reward_breakdown"] = breakdown_dict

        return total_reward, total_info

    @abstractmethod
    def _get_obs(self, env):
        """
        Get task-specific observation

        Args:
            env (Environment): Environment instance

        Returns:
            2-tuple:
                - dict: Keyword-mapped low dimensional observations from this task
                - dict: All other keyword-mapped observations from this task
        """
        raise NotImplementedError()

    def _flatten_low_dim_obs(self, obs):
        """
        Flattens dictionary containing low-dimensional observations @obs and converts it from a dictionary into a
        1D numpy array

        Args:
            obs (dict): Low-dim observation dictionary where each value is a 1D array

        Returns:
            n-array: 1D-numpy array of flattened low-dim observations
        """
        # By default, we simply concatenate all values in our obs dict
        return np.concatenate(list(obs.values())) if len(obs) > 0 else np.array([])

    def get_obs(self, env, flatten_low_dim=True):
        """
        Get the current observations from this task.

        Args:
            env (Environment): environment instance
            flatten_low_dim (bool): Whether to flatten the low-dimensional observations into a single 1D array

        Returns:
            dict: Keyword-mapped observations, where the "low_dim" key holds the (possibly flattened)
                low-dimensional task observations
        """
        # Grab obs internally
        low_dim_obs, obs = self._get_obs(env=env)

        # Possibly flatten low dim and add to main observation dictionary
        obs["low_dim"] = self._flatten_low_dim_obs(obs=low_dim_obs) if flatten_low_dim else low_dim_obs

        return obs

    def step(self, env, action):
        """
        Perform task-specific step for every timestep

        Args:
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment

        Returns:
            3-tuple:
                - float: reward calculated after this step
                - bool: whether task is done or not
                - dict: nested dictionary of reward- and done-related info
        """
        # Make sure we're initialized
        assert self._loaded, "Task must be loaded using load() before calling step()!"

        # We calculate termination conditions first and then rewards
        # (since some rewards can rely on termination conditions to update)
        done, done_info = self._step_termination(env=env, action=action)
        reward, reward_info = self._step_reward(env=env, action=action)

        # Update the internal state of this task
        self._reward = reward
        self._done = done
        self._success = done_info["success"]
        self._info = {
            "reward": reward_info,
            "done": done_info,
        }

        return self._reward, self._done, deepcopy(self._info)

    @property
    def name(self):
        """
        Returns:
            str: Name of this task. Defaults to class name
        """
        return self.__class__.__name__

    @property
    def reward(self):
        """
        Returns:
            float: Current reward for this task
        """
        assert self._reward is not None, "At least one step() must occur before reward can be calculated!"
        return self._reward

    @property
    def done(self):
        """
        Returns:
            bool: Whether this task is done or not
        """
        assert self._done is not None, "At least one step() must occur before done can be calculated!"
        return self._done

    @property
    def success(self):
        """
        Returns:
            bool: Whether this task has succeeded or not
        """
        assert self._success is not None, "At least one step() must occur before success can be calculated!"
        return self._success

    @property
    def info(self):
        """
        Returns:
            dict: Nested dictionary of information for this task, including reward- and done-specific information
        """
        assert self._info is not None, "At least one step() must occur before info can be calculated!"
        return self._info

    @classproperty
    def valid_scene_types(cls):
        """
        Returns:
            set of Scene: Scene type(s) that are valid (i.e.: compatible) with this specific task. This will be
                used to sanity check the task + scene combination at runtime
        """
        raise NotImplementedError()

    @classproperty
    def default_reward_config(cls):
        """
        Returns:
            dict: Default reward configuration for this class. Should include any kwargs necessary for
                any of the reward classes generated in self._create_rewards(). Note: this default config
                should be fully verbose -- any keys inputted in the constructor but NOT found in this default config
                will raise an error!
        """
        raise NotImplementedError()

    @classproperty
    def default_termination_config(cls):
        """
        Returns:
            dict: Default termination configuration for this class. Should include any kwargs necessary for
                any of the termination classes generated in self._create_terminations(). Note: this default config
                should be fully verbose -- any keys inputted in the constructor but NOT found in this default config
                will raise an error!
        """
        raise NotImplementedError()

    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("BaseTask")
        return classes

    @classproperty
    def _cls_registry(cls):
        # Global registry
        global REGISTERED_TASKS
        return REGISTERED_TASKS
| 15,225 | Python | 36.046229 | 132 | 0.622266 |
StanfordVL/OmniGibson/omnigibson/tasks/point_navigation_task.py | import numpy as np
import omnigibson as og
from omnigibson.object_states import Pose
from omnigibson.objects.primitive_object import PrimitiveObject
from omnigibson.reward_functions.collision_reward import CollisionReward
from omnigibson.reward_functions.point_goal_reward import PointGoalReward
from omnigibson.reward_functions.potential_reward import PotentialReward
from omnigibson.scenes.traversable_scene import TraversableScene
from omnigibson.tasks.task_base import BaseTask
from omnigibson.termination_conditions.max_collision import MaxCollision
from omnigibson.termination_conditions.falling import Falling
from omnigibson.termination_conditions.point_goal import PointGoal
from omnigibson.termination_conditions.timeout import Timeout
from omnigibson.utils.python_utils import classproperty, assert_valid_key
from omnigibson.utils.sim_utils import land_object, test_valid_pose
import omnigibson.utils.transform_utils as T
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Valid point navigation reward types
POINT_NAVIGATION_REWARD_TYPES = {"l2", "geodesic"}
class PointNavigationTask(BaseTask):
"""
Point Navigation Task
The task is to navigate to a goal position
Args:
robot_idn (int): Which robot that this task corresponds to
floor (int): Which floor to navigate on
initial_pos (None or 3-array): If specified, should be (x,y,z) global initial position to place the robot
at the start of each task episode. If None, a collision-free value will be randomly sampled
initial_quat (None or 4-array): If specified, should be (x,y,z,w) global quaternion orientation to place the
robot at the start of each task episode. If None, a value will be randomly sampled about the z-axis
goal_pos (None or 3-array): If specified, should be (x,y,z) global goal position to reach for the given task
episode. If None, a collision-free value will be randomly sampled
goal_tolerance (float): Distance between goal position and current position below which is considered a task
success
goal_in_polar (bool): Whether to represent the goal in polar coordinates or not when capturing task observations
path_range (None or 2-array): If specified, should be (min, max) values representing the range of valid
total path lengths that are valid when sampling initial / goal positions
visualize_goal (bool): Whether to visualize the initial / goal locations
visualize_path (bool): Whether to visualize the path from initial to goal location, as represented by
discrete waypoints
goal_height (float): If visualizing, specifies the height of the visual goals (m)
waypoint_height (float): If visualizing, specifies the height of the visual waypoints (m)
waypoint_width (float): If visualizing, specifies the width of the visual waypoints (m)
n_vis_waypoints (int): If visualizing, specifies the number of waypoints to generate
reward_type (str): Type of reward to use. Valid options are: {"l2", "geodesic"}
termination_config (None or dict): Keyword-mapped configuration to use to generate termination conditions. This
should be specific to the task class. Default is None, which corresponds to a default config being usd.
Note that any keyword required by a specific task class but not specified in the config will automatically
be filled in with the default config. See cls.default_termination_config for default values used
reward_config (None or dict): Keyword-mapped configuration to use to generate reward functions. This should be
specific to the task class. Default is None, which corresponds to a default config being usd. Note that
any keyword required by a specific task class but not specified in the config will automatically be filled
in with the default config. See cls.default_reward_config for default values used
"""
def __init__(
    self,
    robot_idn=0,
    floor=0,
    initial_pos=None,
    initial_quat=None,
    goal_pos=None,
    goal_tolerance=0.5,
    goal_in_polar=False,
    path_range=None,
    visualize_goal=False,
    visualize_path=False,
    goal_height=0.06,
    waypoint_height=0.05,
    waypoint_width=0.1,
    n_vis_waypoints=10,
    reward_type="l2",
    termination_config=None,
    reward_config=None,
):
    """
    Point navigation task constructor. See the class docstring for argument descriptions.
    """
    # Basic task parameters
    self._robot_idn = robot_idn
    self._floor = floor

    # Remember which quantities were left unspecified -- those get re-sampled every episode
    self._randomize_initial_pos = initial_pos is None
    self._randomize_initial_quat = initial_quat is None
    self._randomize_goal_pos = goal_pos is None

    # Convert any fixed poses / positions into numpy arrays
    self._initial_pos = None if initial_pos is None else np.array(initial_pos)
    self._initial_quat = None if initial_quat is None else np.array(initial_quat)
    self._goal_pos = None if goal_pos is None else np.array(goal_pos)

    # Goal specification
    self._goal_tolerance = goal_tolerance
    self._goal_in_polar = goal_in_polar
    self._path_range = path_range

    # Visualization settings
    self._visualize_goal = visualize_goal
    self._visualize_path = visualize_path
    self._goal_height = goal_height
    self._waypoint_height = waypoint_height
    self._waypoint_width = waypoint_width
    self._n_vis_waypoints = n_vis_waypoints

    # Validate and store the reward type
    assert_valid_key(key=reward_type, valid_keys=POINT_NAVIGATION_REWARD_TYPES, name="reward type")
    self._reward_type = reward_type

    # Runtime state, filled in when the task is loaded / stepped
    self._initial_pos_marker = None
    self._goal_pos_marker = None
    self._waypoint_markers = None
    self._path_length = None
    self._current_robot_pos = None
    self._geodesic_dist = None

    # Run super
    super().__init__(termination_config=termination_config, reward_config=reward_config)
def _create_termination_conditions(self):
    # Terminate on excessive collisions, timeout, falling off the floor, or reaching the goal
    return {
        "max_collision": MaxCollision(max_collisions=self._termination_config["max_collisions"]),
        "timeout": Timeout(max_steps=self._termination_config["max_steps"]),
        "falling": Falling(robot_idn=self._robot_idn, fall_height=self._termination_config["fall_height"]),
        "pointgoal": PointGoal(
            robot_idn=self._robot_idn,
            distance_tol=self._goal_tolerance,
            distance_axes="xy",
        ),
    }
def _create_reward_functions(self):
    # Shaped potential reward, plus a collision penalty, plus a sparse goal reward
    rewards = {
        "potential": PotentialReward(
            potential_fcn=self.get_potential,
            r_potential=self._reward_config["r_potential"],
        ),
        "collision": CollisionReward(r_collision=self._reward_config["r_collision"]),
        "pointgoal": PointGoalReward(
            pointgoal=self._termination_conditions["pointgoal"],
            r_pointgoal=self._reward_config["r_pointgoal"],
        ),
    }
    return rewards
def _load(self, env):
    """
    Load this task: create the (optional) goal / path visualization markers and bake
    them into the scene's stored initial state.

    Args:
        env (Environment): Active environment instance
    """
    # Load visualization
    self._load_visualization_markers(env=env)

    # Auto-initialize all markers
    # NOTE(review): the sim is briefly played so the markers' state can be captured via
    # update_initial_state() before stopping again -- confirm this play/stop cycle is required
    og.sim.play()
    env.scene.reset()
    env.scene.update_initial_state()
    og.sim.stop()
    def _load_visualization_markers(self, env):
        """
        Load visualization, such as initial and target position, shortest path, etc

        Args:
            env (Environment): Active environment instance
        """
        if self._visualize_goal:
            # Red cylinder marking the episode's initial position
            self._initial_pos_marker = PrimitiveObject(
                prim_path="/World/task_initial_pos_marker",
                primitive_type="Cylinder",
                name="task_initial_pos_marker",
                radius=self._goal_tolerance,
                height=self._goal_height,
                visual_only=True,
                rgba=np.array([1, 0, 0, 0.3]),
            )
            # Blue cylinder marking the goal region; its radius equals the goal tolerance
            self._goal_pos_marker = PrimitiveObject(
                prim_path="/World/task_goal_pos_marker",
                primitive_type="Cylinder",
                name="task_goal_pos_marker",
                radius=self._goal_tolerance,
                height=self._goal_height,
                visual_only=True,
                rgba=np.array([0, 0, 1, 0.3]),
            )
            # Load the objects into the simulator
            og.sim.import_object(self._initial_pos_marker)
            og.sim.import_object(self._goal_pos_marker)
        # Additionally generate waypoints along the path if we're building the map in the environment
        if self._visualize_path:
            waypoints = []
            # Green cylinders visualizing up to self._n_vis_waypoints path waypoints
            for i in range(self._n_vis_waypoints):
                waypoint = PrimitiveObject(
                    prim_path=f"/World/task_waypoint_marker{i}",
                    primitive_type="Cylinder",
                    name=f"task_waypoint_marker{i}",
                    radius=self._waypoint_width,
                    height=self._waypoint_height,
                    visual_only=True,
                    rgba=np.array([0, 1, 0, 0.3]),
                )
                og.sim.import_object(waypoint)
                waypoints.append(waypoint)
            # Store waypoints
            self._waypoint_markers = waypoints
    def _sample_initial_pose_and_goal_pos(self, env, max_trials=100):
        """
        Potentially sample the robot initial pos / ori and target pos, based on whether we're using randomized
        initial and goal states. If not randomized, then this value will return the corresponding values inputted
        during this task initialization.

        Args:
            env (Environment): Environment instance
            max_trials (int): Number of trials to attempt to sample valid poses and positions

        Returns:
            3-tuple:
                - 3-array: (x,y,z) global sampled initial position
                - 4-array: (x,y,z,w) global sampled initial orientation in quaternion form
                - 3-array: (x,y,z) global sampled goal position
        """
        # Possibly sample initial pos
        if self._randomize_initial_pos:
            _, initial_pos = env.scene.get_random_point(floor=self._floor, robot=env.robots[self._robot_idn])
        else:
            initial_pos = self._initial_pos
        # Possibly sample initial ori -- only yaw is randomized; roll / pitch stay zero
        initial_quat = T.euler2quat(np.array([0, 0, np.random.uniform(0, np.pi * 2)])) if \
            self._randomize_initial_quat else self._initial_quat
        # Possibly sample goal pos
        if self._randomize_goal_pos:
            dist, in_range_dist = 0.0, False
            for _ in range(max_trials):
                _, goal_pos = env.scene.get_random_point(floor=self._floor,
                                                         reference_point=initial_pos,
                                                         robot=env.robots[self._robot_idn])
                _, dist = env.scene.get_shortest_path(self._floor, initial_pos[:2], goal_pos[:2], entire_path=False, robot=env.robots[self._robot_idn])
                # If a path range is specified, make sure distance is valid
                if dist is not None and (self._path_range is None or self._path_range[0] < dist < self._path_range[1]):
                    in_range_dist = True
                    break
            # Notify if we weren't able to get a valid start / end point sampled in the requested range
            # NOTE: the last sampled goal_pos is still returned in this case
            if not in_range_dist:
                log.warning("Failed to sample initial and target positions within requested path range")
        else:
            goal_pos = self._goal_pos
        # Add additional logging info
        log.info("Sampled initial pose: {}, {}".format(initial_pos, initial_quat))
        log.info("Sampled goal position: {}".format(goal_pos))
        return initial_pos, initial_quat, goal_pos
def _get_geodesic_potential(self, env):
"""
Get potential based on geodesic distance
Args:
env: environment instance
Returns:
float: geodesic distance to the target position
"""
_, geodesic_dist = self.get_shortest_path_to_goal(env=env)
return geodesic_dist
def _get_l2_potential(self, env):
"""
Get potential based on L2 distance
Args:
env: environment instance
Returns:
float: L2 distance to the target position
"""
return T.l2_distance(env.robots[self._robot_idn].states[Pose].get_value()[0][:2], self._goal_pos[:2])
def get_potential(self, env):
"""
Compute task-specific potential: distance to the goal
Args:
env (Environment): Environment instance
Returns:
float: Computed potential
"""
if self._reward_type == "l2":
reward = self._get_l2_potential(env)
elif self._reward_type == "geodesic":
reward = self._get_geodesic_potential(env)
else:
raise ValueError(f"Invalid reward type! {self._reward_type}")
return reward
    def _reset_agent(self, env):
        """
        Reset the robot and sample collision-free initial / goal poses for this episode.

        Args:
            env (Environment): Environment instance
        """
        # Reset agent
        env.robots[self._robot_idn].reset()
        # We attempt to sample valid initial poses and goal positions
        success, max_trials = False, 100
        initial_pos, initial_quat, goal_pos = None, None, None
        for i in range(max_trials):
            initial_pos, initial_quat, goal_pos = self._sample_initial_pose_and_goal_pos(env)
            # Make sure the sampled robot start pose and goal position are both collision-free
            success = test_valid_pose(
                env.robots[self._robot_idn], initial_pos, initial_quat, env.initial_pos_z_offset
            ) and test_valid_pose(env.robots[self._robot_idn], goal_pos, None, env.initial_pos_z_offset)
            # Don't need to continue iterating if we succeeded
            if success:
                break
        # Notify user if we failed to reset a collision-free sampled pose
        # NOTE: the last sampled (possibly colliding) pose is still used below
        if not success:
            log.warning("Failed to reset robot without collision")
        # Land the robot
        land_object(env.robots[self._robot_idn], initial_pos, initial_quat, env.initial_pos_z_offset)
        # Store the sampled values internally
        self._initial_pos = initial_pos
        self._initial_quat = initial_quat
        self._goal_pos = goal_pos
        # Update visuals if requested
        if self._visualize_goal:
            self._initial_pos_marker.set_position(self._initial_pos)
            self._goal_pos_marker.set_position(self._goal_pos)
    def _reset_variables(self, env):
        """
        Reset per-episode bookkeeping used for path-length / SPL tracking.

        Args:
            env (Environment): Environment instance
        """
        # Run super first
        super()._reset_variables(env=env)
        # Reset internal variables
        self._path_length = 0.0
        self._current_robot_pos = self._initial_pos
        # Cache the episode's start-to-goal geodesic distance (used for SPL in _step_termination)
        self._geodesic_dist = self._get_geodesic_potential(env)
    def _step_termination(self, env, action, info=None):
        """
        Step termination conditions and augment info with path length and SPL.

        Args:
            env (Environment): Environment instance
            action (n-array): Action executed this step
            info (None or dict): Optional info dict to extend

        Returns:
            2-tuple:
                - bool: Whether the episode is done
                - dict: Termination info, including "path_length" and "spl"
        """
        # Run super first
        done, info = super()._step_termination(env=env, action=action, info=info)
        # Add additional info
        info["path_length"] = self._path_length
        # SPL (success weighted by path length); 0 until done, or if no distance was traveled
        info["spl"] = float(info["success"]) * min(1.0, self._geodesic_dist / self._path_length) if done and self._path_length != 0.0 else 0.0
        return done, info
def _global_pos_to_robot_frame(self, env, pos):
"""
Convert a 3D point in global frame to agent's local frame
Args:
env (TraversableEnv): Environment instance
pos (3-array): global (x,y,z) position
Returns:
3-array: (x,y,z) position in self._robot_idn agent's local frame
"""
delta_pos_global = np.array(pos) - env.robots[self._robot_idn].states[Pose].get_value()[0]
return T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T @ delta_pos_global
    def _get_obs(self, env):
        """
        Compute task observations: the goal position expressed relative to the
        robot, plus the robot's linear / angular velocities in its local frame.

        Args:
            env (Environment): Environment instance

        Returns:
            2-tuple:
                - dict: Low-dimensional observations (xy_pos_to_goal, robot_lin_vel, robot_ang_vel)
                - dict: Empty dict (this task has no non-low-dim observations)
        """
        # Get relative position of goal with respect to the current agent position
        xy_pos_to_goal = self._global_pos_to_robot_frame(env, self._goal_pos)[:2]
        if self._goal_in_polar:
            # Optionally express the goal as polar coordinates instead of cartesian (x, y)
            xy_pos_to_goal = np.array(T.cartesian_to_polar(*xy_pos_to_goal))
        # linear velocity and angular velocity
        # Rotate world-frame velocities into the robot frame (rotation inverse == transpose)
        ori_t = T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T
        lin_vel = ori_t @ env.robots[self._robot_idn].get_linear_velocity()
        ang_vel = ori_t @ env.robots[self._robot_idn].get_angular_velocity()
        # Compose observation dict
        low_dim_obs = dict(
            xy_pos_to_goal=xy_pos_to_goal,
            robot_lin_vel=lin_vel,
            robot_ang_vel=ang_vel,
        )
        # We have no non-low-dim obs, so return empty dict for those
        return low_dim_obs, dict()
def _load_non_low_dim_observation_space(self):
# No non-low dim observations so we return an empty dict
return dict()
def get_goal_pos(self):
"""
Returns:
3-array: (x,y,z) global current goal position
"""
return self._goal_pos
    def get_current_pos(self, env):
        """
        Args:
            env (Environment): Environment instance

        Returns:
            3-array: (x,y,z) global current position representing the robot
        """
        return env.robots[self._robot_idn].states[Pose].get_value()[0]
    def get_shortest_path_to_goal(self, env, start_xy_pos=None, entire_path=False):
        """
        Get the shortest path and geodesic distance from @start_pos to the target position

        Args:
            env (TraversableEnv): Environment instance
            start_xy_pos (None or 2-array): If specified, should be the global (x,y) start position from which
                to calculate the shortest path to the goal position. If None (default), the robot's current xy position
                will be used
            entire_path (bool): Whether to return the entire shortest path

        Returns:
            2-tuple:
                - list of 2-array: List of (x,y) waypoints representing the path # TODO: is this true?
                - float: geodesic distance of the path to the goal position
        """
        # Default to the robot's current (x, y) position when no explicit start is given
        start_xy_pos = env.robots[self._robot_idn].states[Pose].get_value()[0][:2] if start_xy_pos is None else start_xy_pos
        return env.scene.get_shortest_path(self._floor, start_xy_pos, self._goal_pos[:2], entire_path=entire_path, robot=env.robots[self._robot_idn])
    def _step_visualization(self, env):
        """
        Step visualization

        Args:
            env (Environment): Environment instance
        """
        if self._visualize_path:
            shortest_path, _ = self.get_shortest_path_to_goal(env=env, entire_path=True)
            floor_height = env.scene.get_floor_height(self._floor)
            # Only place as many markers as there are path nodes (capped by marker count)
            num_nodes = min(self._n_vis_waypoints, shortest_path.shape[0])
            for i in range(num_nodes):
                self._waypoint_markers[i].set_position(
                    position=np.array([shortest_path[i][0], shortest_path[i][1], floor_height])
                )
            # Park unused markers far above the scene (z=100), presumably to hide them from view
            for i in range(num_nodes, self._n_vis_waypoints):
                self._waypoint_markers[i].set_position(position=np.array([0.0, 0.0, 100.0]))
    def step(self, env, action):
        """
        Run a task step: compute reward / done via super, update path
        visualization, and accumulate the robot's traveled path length.

        Args:
            env (Environment): Environment instance
            action (n-array): Action applied this step

        Returns:
            3-tuple:
                - float: Computed reward
                - bool: Whether the episode is done
                - dict: Auxiliary step info
        """
        # Run super method first
        reward, done, info = super().step(env=env, action=action)
        # Step visualization
        self._step_visualization(env=env)
        # Update other internal variables
        new_robot_pos = env.robots[self._robot_idn].states[Pose].get_value()[0]
        # Accumulate planar (x, y) distance traveled since the previous step
        self._path_length += T.l2_distance(self._current_robot_pos[:2], new_robot_pos[:2])
        self._current_robot_pos = new_robot_pos
        return reward, done, info
    @classproperty
    def valid_scene_types(cls):
        """
        Returns:
            set of type: Scene classes this task is compatible with
        """
        # Must be a traversable scene
        return {TraversableScene}
    @classproperty
    def default_termination_config(cls):
        """
        Returns:
            dict: Default termination configuration (max_collisions, max_steps, fall_height)
        """
        return {
            "max_collisions": 500,
            "max_steps": 500,
            "fall_height": 0.03,
        }
    @classproperty
    def default_reward_config(cls):
        """
        Returns:
            dict: Default reward configuration (r_potential, r_collision, r_pointgoal)
        """
        return {
            "r_potential": 1.0,
            "r_collision": 0.1,
            "r_pointgoal": 10.0,
        }
| 20,626 | Python | 42.06263 | 151 | 0.617667 |
StanfordVL/OmniGibson/omnigibson/tasks/__init__.py | from omnigibson.tasks.task_base import REGISTERED_TASKS
from omnigibson.tasks.dummy_task import DummyTask
from omnigibson.tasks.point_navigation_task import PointNavigationTask
from omnigibson.tasks.point_reaching_task import PointReachingTask
from omnigibson.tasks.behavior_task import BehaviorTask
| 300 | Python | 49.166658 | 70 | 0.873333 |
StanfordVL/OmniGibson/omnigibson/tasks/dummy_task.py | import numpy as np
from omnigibson.tasks.task_base import BaseTask
from omnigibson.scenes.scene_base import Scene
from omnigibson.utils.python_utils import classproperty
from omnigibson.utils.sim_utils import land_object
class DummyTask(BaseTask):
    """
    Trivial no-op task: no loading, no rewards, no termination conditions, and
    no task-specific observations.
    """

    def _load(self, env):
        # Nothing to load for a dummy task
        pass

    def _create_termination_conditions(self):
        # A dummy task defines no termination conditions of its own
        return {}

    def _create_reward_functions(self):
        # No rewards are ever generated
        return {}

    def _get_obs(self, env):
        # Neither low-dim nor non-low-dim task observations
        return {}, {}

    def _load_non_low_dim_observation_space(self):
        # Matches _get_obs: nothing to declare
        return {}

    @classproperty
    def valid_scene_types(cls):
        # Compatible with every scene type
        return {Scene}

    @classproperty
    def default_termination_config(cls):
        # No termination-related configuration
        return {}

    @classproperty
    def default_reward_config(cls):
        # No reward-related configuration
        return {}
| 1,060 | Python | 21.104166 | 64 | 0.627358 |
StanfordVL/OmniGibson/omnigibson/tasks/behavior_task.py | import numpy as np
import os
from bddl.activity import (
Conditions,
evaluate_goal_conditions,
get_goal_conditions,
get_ground_goal_state_options,
get_natural_initial_conditions,
get_initial_conditions,
get_natural_goal_conditions,
get_object_scope,
)
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.object_states import Pose
from omnigibson.reward_functions.potential_reward import PotentialReward
from omnigibson.robots.robot_base import BaseRobot
from omnigibson.systems.system_base import get_system, add_callback_on_system_init, add_callback_on_system_clear, \
REGISTERED_SYSTEMS
from omnigibson.scenes.scene_base import Scene
from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
from omnigibson.utils.bddl_utils import OmniGibsonBDDLBackend, BDDLEntity, BEHAVIOR_ACTIVITIES, BDDLSampler
from omnigibson.tasks.task_base import BaseTask
from omnigibson.termination_conditions.predicate_goal import PredicateGoal
from omnigibson.termination_conditions.timeout import Timeout
import omnigibson.utils.transform_utils as T
from omnigibson.utils.python_utils import classproperty, assert_valid_key
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
class BehaviorTask(BaseTask):
    """
    Task for BEHAVIOR

    Args:
        activity_name (None or str): Name of the Behavior Task to instantiate
        activity_definition_id (int): Specification to load for the desired task. For a given Behavior Task, multiple task
            specifications can be used (i.e.: differing goal conditions, or "ways" to complete a given task). This
            ID determines which specification to use
        activity_instance_id (int): Specific pre-configured instance of a scene to load for this BehaviorTask. This
            will be used only if @online_object_sampling is False.
        predefined_problem (None or str): If specified, specifies the raw string definition of the Behavior Task to
            load. This will automatically override @activity_name and @activity_definition_id.
        online_object_sampling (bool): whether to sample object locations online at runtime or not
        debug_object_sampling (bool): whether to debug placement functionality
        highlight_task_relevant_objects (bool): whether to overlay task-relevant objects in the scene with a colored mask
        termination_config (None or dict): Keyword-mapped configuration to use to generate termination conditions. This
            should be specific to the task class. Default is None, which corresponds to a default config being used.
            Note that any keyword required by a specific task class but not specified in the config will automatically
            be filled in with the default config. See cls.default_termination_config for default values used
        reward_config (None or dict): Keyword-mapped configuration to use to generate reward functions. This should be
            specific to the task class. Default is None, which corresponds to a default config being used. Note that
            any keyword required by a specific task class but not specified in the config will automatically be filled
            in with the default config. See cls.default_reward_config for default values used
    """
    def __init__(
        self,
        activity_name=None,
        activity_definition_id=0,
        activity_instance_id=0,
        predefined_problem=None,
        online_object_sampling=False,
        debug_object_sampling=False,
        highlight_task_relevant_objects=False,
        termination_config=None,
        reward_config=None,
    ):
        # Make sure object states are enabled
        assert gm.ENABLE_OBJECT_STATES, "Must set gm.ENABLE_OBJECT_STATES=True in order to use BehaviorTask!"
        # Make sure task name is valid if not specifying a predefined problem
        if predefined_problem is None:
            assert activity_name is not None, \
                "Activity name must be specified if no predefined_problem is specified for BehaviorTask!"
            assert_valid_key(key=activity_name, valid_keys=BEHAVIOR_ACTIVITIES, name="Behavior Task")
        else:
            # Infer activity name
            activity_name = predefined_problem.split("problem ")[-1].split("-")[0]
        # Initialize relevant variables
        # BDDL
        self.backend = OmniGibsonBDDLBackend()
        # Activity info (activity_name is populated by update_activity() below)
        self.activity_name = None
        self.activity_definition_id = activity_definition_id
        self.activity_instance_id = activity_instance_id
        self.activity_conditions = None
        self.activity_initial_conditions = None
        self.activity_goal_conditions = None
        self.ground_goal_state_options = None
        self.feedback = None  # None or str
        self.sampler = None  # BDDLSampler
        # Object info
        self.debug_object_sampling = debug_object_sampling  # bool
        self.online_object_sampling = online_object_sampling  # bool
        self.highlight_task_relevant_objs = highlight_task_relevant_objects  # bool
        self.object_scope = None  # Maps str to BDDLEntity
        self.object_instance_to_category = None  # Maps str to str
        self.future_obj_instances = None  # set of str
        # Info for demonstration collection
        self.instruction_order = None  # np.array of int
        self.currently_viewed_index = None  # int
        self.currently_viewed_instruction = None  # tuple of str
        self.activity_natural_language_goal_conditions = None  # str
        # Load the initial behavior configuration
        self.update_activity(activity_name=activity_name, activity_definition_id=activity_definition_id, predefined_problem=predefined_problem)
        # Run super init
        super().__init__(termination_config=termination_config, reward_config=reward_config)
    @classmethod
    def get_cached_activity_scene_filename(cls, scene_model, activity_name, activity_definition_id, activity_instance_id):
        """
        Helper method to programmatically construct the scene filename for a given pre-cached task configuration

        Args:
            scene_model (str): Name of the scene (e.g.: Rs_int)
            activity_name (str): Name of the task activity (e.g.: putting_away_halloween_decorations)
            activity_definition_id (int): ID of the task definition
            activity_instance_id (int): ID of the task instance

        Returns:
            str: Filename which, if exists, should include the cached activity scene
        """
        return f"{scene_model}_task_{activity_name}_{activity_definition_id}_{activity_instance_id}_template"
    @classmethod
    def verify_scene_and_task_config(cls, scene_cfg, task_cfg):
        """
        Verify the scene / task configs, and (when not sampling objects online) point the
        scene config at the pre-cached task-specific scene instance. Modifies @scene_cfg in-place.

        Args:
            scene_cfg (dict): Scene configuration (may be updated in-place)
            task_cfg (dict): Task configuration
        """
        # Run super first
        super().verify_scene_and_task_config(scene_cfg=scene_cfg, task_cfg=task_cfg)
        # Possibly modify the scene to load if we're using online_object_sampling
        scene_instance, scene_file = scene_cfg["scene_instance"], scene_cfg["scene_file"]
        activity_name = task_cfg["predefined_problem"].split("problem ")[-1].split("-")[0] if \
            task_cfg.get("predefined_problem", None) is not None else task_cfg["activity_name"]
        if scene_file is None and scene_instance is None and not task_cfg["online_object_sampling"]:
            scene_instance = cls.get_cached_activity_scene_filename(
                scene_model=scene_cfg.get("scene_model", "Scene"),
                activity_name=activity_name,
                activity_definition_id=task_cfg.get("activity_definition_id", 0),
                activity_instance_id=task_cfg.get("activity_instance_id", 0),
            )
        # Update the value in the scene config
        scene_cfg["scene_instance"] = scene_instance
    def write_task_metadata(self):
        """
        Store the mapping from BDDL object instance names to scene entity names as "task" metadata in the sim.
        """
        # Store mapping from entity name to its corresponding BDDL instance name
        metadata = dict(
            inst_to_name={inst: entity.name for inst, entity in self.object_scope.items() if entity.exists},
        )
        # Write to sim
        og.sim.write_metadata(key="task", data=metadata)
    def load_task_metadata(self):
        """
        Returns:
            dict: "task" metadata previously stored in the sim (see write_task_metadata)
        """
        # Load from sim
        return og.sim.get_metadata(key="task")
    def _create_termination_conditions(self):
        """
        Returns:
            dict: Maps condition name ("timeout", "predicate") to its TerminationCondition instance
        """
        # Initialize termination conditions dict and fill in with Timeout and PredicateGoal
        terminations = dict()
        terminations["timeout"] = Timeout(max_steps=self._termination_config["max_steps"])
        # Lambda defers to the latest goal conditions, which are regenerated in initialize_activity()
        terminations["predicate"] = PredicateGoal(goal_fcn=lambda: self.activity_goal_conditions)
        return terminations
    def _create_reward_functions(self):
        """
        Returns:
            dict: Maps reward name ("potential") to its RewardFunction instance
        """
        # Initialize reward functions dict and fill in with Potential reward
        rewards = dict()
        rewards["potential"] = PotentialReward(
            potential_fcn=self.get_potential,
            r_potential=self._reward_config["r_potential"],
        )
        return rewards
    def _load(self, env):
        """
        Initialize the activity (sampling or loading object scope), optionally highlight
        task-relevant objects, and register scene-change callbacks to keep the BDDL
        object scope in sync with the sim.

        Args:
            env (Environment): Active environment instance
        """
        # Initialize the current activity
        success, self.feedback = self.initialize_activity(env=env)
        # assert success, f"Failed to initialize Behavior Activity. Feedback:\n{self.feedback}"
        # Highlight any task relevant objects if requested
        if self.highlight_task_relevant_objs:
            for entity in self.object_scope.values():
                if entity.synset == "agent":
                    continue
                if not entity.is_system and entity.exists:
                    entity.highlighted = True
        # Add callbacks to handle internal processing when new systems / objects are added / removed to the scene
        callback_name = f"{self.activity_name}_refresh"
        og.sim.add_callback_on_import_obj(name=callback_name, callback=self._update_bddl_scope_from_added_obj)
        og.sim.add_callback_on_remove_obj(name=callback_name, callback=self._update_bddl_scope_from_removed_obj)
        add_callback_on_system_init(name=callback_name, callback=self._update_bddl_scope_from_system_init)
        add_callback_on_system_clear(name=callback_name, callback=self._update_bddl_scope_from_system_clear)
    def _load_non_low_dim_observation_space(self):
        # No non-low dim observations so we return an empty dict
        return dict()
    def update_activity(self, activity_name, activity_definition_id, predefined_problem=None):
        """
        Update the active Behavior activity being deployed

        Args:
            activity_name (None or str): Name of the Behavior Task to instantiate
            activity_definition_id (int): Specification to load for the desired task. For a given Behavior Task, multiple task
                specifications can be used (i.e.: differing goal conditions, or "ways" to complete a given task). This
                ID determines which specification to use
            predefined_problem (None or str): If specified, specifies the raw string definition of the Behavior Task to
                load. This will automatically override @activity_name and @activity_definition_id.
        """
        # Update internal variables based on values
        # Activity info
        self.activity_name = activity_name
        self.activity_definition_id = activity_definition_id
        self.activity_conditions = Conditions(
            activity_name,
            activity_definition_id,
            simulator_name="omnigibson",
            predefined_problem=predefined_problem,
        )
        # Get scope, making sure agent is the first entry
        self.object_scope = {"agent.n.01_1": None}
        self.object_scope.update(get_object_scope(self.activity_conditions))
        # Object info
        self.object_instance_to_category = {
            obj_inst: obj_cat
            for obj_cat in self.activity_conditions.parsed_objects
            for obj_inst in self.activity_conditions.parsed_objects[obj_cat]
        }
        # Generate initial and goal conditions
        self.activity_initial_conditions = get_initial_conditions(self.activity_conditions, self.backend, self.object_scope)
        self.activity_goal_conditions = get_goal_conditions(self.activity_conditions, self.backend, self.object_scope)
        self.ground_goal_state_options = get_ground_goal_state_options(
            self.activity_conditions, self.backend, self.object_scope, self.activity_goal_conditions
        )
        # Demo attributes -- instructions are shown to the user in a shuffled order
        self.instruction_order = np.arange(len(self.activity_conditions.parsed_goal_conditions))
        np.random.shuffle(self.instruction_order)
        self.currently_viewed_index = 0
        self.currently_viewed_instruction = self.instruction_order[self.currently_viewed_index]
        self.activity_natural_language_initial_conditions = get_natural_initial_conditions(self.activity_conditions)
        self.activity_natural_language_goal_conditions = get_natural_goal_conditions(self.activity_conditions)
    def get_potential(self, env):
        """
        Compute task-specific potential: distance to the goal

        Args:
            env (Environment): Current active environment instance

        Returns:
            float: Computed potential (negated fraction of satisfied goal predicates)
        """
        # Evaluate the first ground goal state option as the potential
        _, satisfied_predicates = evaluate_goal_conditions(self.ground_goal_state_options[0])
        success_score = len(satisfied_predicates["satisfied"]) / (
            len(satisfied_predicates["satisfied"]) + len(satisfied_predicates["unsatisfied"])
        )
        # Negative so that potential increases (toward 0) as more predicates are satisfied
        return -success_score
    def initialize_activity(self, env):
        """
        Initializes the desired activity in the current environment @env

        Args:
            env (Environment): Current active environment instance

        Returns:
            2-tuple:
                - bool: Whether the generated scene activity should be accepted or not
                - dict: Any feedback from the sampling / initialization process
        """
        accept_scene = True
        feedback = None
        # Generate sampler
        self.sampler = BDDLSampler(
            env=env,
            activity_conditions=self.activity_conditions,
            object_scope=self.object_scope,
            backend=self.backend,
            debug=self.debug_object_sampling,
        )
        # Compose future objects -- instances declared "future" do not exist at episode start
        self.future_obj_instances = \
            {init_cond.body[1] for init_cond in self.activity_initial_conditions if init_cond.body[0] == "future"}
        if self.online_object_sampling:
            # Sample online
            accept_scene, feedback = self.sampler.sample()
            if not accept_scene:
                return accept_scene, feedback
        else:
            # Load existing scene cache and assign object scope accordingly
            self.assign_object_scope_with_cache(env)
        # Generate goal condition with the fully populated self.object_scope
        self.activity_goal_conditions = get_goal_conditions(self.activity_conditions, self.backend, self.object_scope)
        self.ground_goal_state_options = get_ground_goal_state_options(
            self.activity_conditions, self.backend, self.object_scope, self.activity_goal_conditions
        )
        return accept_scene, feedback
    def get_agent(self, env):
        """
        Grab the 0th agent from @env

        Args:
            env (Environment): Current active environment instance

        Returns:
            BaseRobot: The 0th robot from the environment instance
        """
        # We assume the relevant agent is the first agent in the scene
        return env.robots[0]
    def assign_object_scope_with_cache(self, env):
        """
        Assigns objects within the current object scope

        Args:
            env (Environment): Current active environment instance
        """
        # Load task metadata
        inst_to_name = self.load_task_metadata()["inst_to_name"]
        # Assign object_scope based on a cached scene
        for obj_inst in self.object_scope:
            if obj_inst in self.future_obj_instances:
                # Future instances have no concrete entity yet
                entity = None
            else:
                assert obj_inst in inst_to_name, f"BDDL object instance {obj_inst} should exist in cached metadata " \
                                                 f"from loaded scene, but could not be found!"
                name = inst_to_name[obj_inst]
                is_system = name in REGISTERED_SYSTEMS
                entity = get_system(name) if is_system else og.sim.scene.object_registry("name", name)
            self.object_scope[obj_inst] = BDDLEntity(
                bddl_inst=obj_inst,
                entity=entity,
            )
    def _get_obs(self, env):
        """
        Compute per-BDDL-entity observations: existence flag, pose, and per-arm grasp flags.

        Args:
            env (Environment): Current active environment instance

        Returns:
            2-tuple:
                - dict: Low-dimensional observations keyed by BDDL instance name
                - dict: Empty dict (no non-low-dim observations)
        """
        low_dim_obs = dict()
        # Batch rpy calculations for much better efficiency
        objs_exist = {obj: obj.exists for obj in self.object_scope.values() if not obj.is_system}
        # Non-existent objects get an identity orientation placeholder
        objs_rpy = T.quat2euler(np.array([obj.states[Pose].get_value()[1] if obj_exist else np.array([0, 0, 0, 1.0])
                                          for obj, obj_exist in objs_exist.items()]))
        objs_rpy_cos = np.cos(objs_rpy)
        objs_rpy_sin = np.sin(objs_rpy)
        # Always add agent info first
        agent = self.get_agent(env=env)
        for (obj, obj_exist), obj_rpy, obj_rpy_cos, obj_rpy_sin in zip(objs_exist.items(), objs_rpy, objs_rpy_cos, objs_rpy_sin):
            # TODO: May need to update checking here to USDObject? Or even baseobject?
            # TODO: How to handle systems as part of obs?
            if obj_exist:
                low_dim_obs[f"{obj.bddl_inst}_real"] = np.array([1.0])
                low_dim_obs[f"{obj.bddl_inst}_pos"] = obj.states[Pose].get_value()[0]
                low_dim_obs[f"{obj.bddl_inst}_ori_cos"] = obj_rpy_cos
                low_dim_obs[f"{obj.bddl_inst}_ori_sin"] = obj_rpy_sin
                if obj.name != agent.name:
                    for arm in agent.arm_names:
                        grasping_object = agent.is_grasping(arm=arm, candidate_obj=obj.wrapped_obj)
                        low_dim_obs[f"{obj.bddl_inst}_in_gripper_{arm}"] = np.array([float(grasping_object)])
            else:
                # Zero-filled placeholders keep the observation layout fixed
                low_dim_obs[f"{obj.bddl_inst}_real"] = np.zeros(1)
                low_dim_obs[f"{obj.bddl_inst}_pos"] = np.zeros(3)
                low_dim_obs[f"{obj.bddl_inst}_ori_cos"] = np.zeros(3)
                low_dim_obs[f"{obj.bddl_inst}_ori_sin"] = np.zeros(3)
                for arm in agent.arm_names:
                    low_dim_obs[f"{obj.bddl_inst}_in_gripper_{arm}"] = np.zeros(1)
        return low_dim_obs, dict()
    def _step_termination(self, env, action, info=None):
        """
        Step termination conditions and augment info with per-predicate goal status.

        Args:
            env (Environment): Current active environment instance
            action (n-array): Action executed this step
            info (None or dict): Optional info dict to extend

        Returns:
            2-tuple:
                - bool: Whether the episode is done
                - dict: Termination info, including "goal_status"
        """
        # Run super first
        done, info = super()._step_termination(env=env, action=action, info=info)
        # Add additional info
        info["goal_status"] = self._termination_conditions["predicate"].goal_status
        return done, info
    def _update_bddl_scope_from_added_obj(self, obj):
        """
        Internal callback function to be called when sim.import_object() is called to potentially update internal
        bddl object scope

        Args:
            obj (BaseObject): Newly imported object
        """
        # Iterate over all entities, and if they don't exist, check if any category matches @obj's category, and set it
        # if it does, and immediately return
        for inst, entity in self.object_scope.items():
            if not entity.exists and not entity.is_system and obj.category in set(entity.og_categories):
                entity.set_entity(entity=obj)
                return
    def _update_bddl_scope_from_removed_obj(self, obj):
        """
        Internal callback function to be called when sim.remove_object() is called to potentially update internal
        bddl object scope

        Args:
            obj (BaseObject): Newly removed object
        """
        # Iterate over all entities, and if they exist, check if any name matches @obj's name, and remove it
        # if it does, and immediately return
        for entity in self.object_scope.values():
            if entity.exists and not entity.is_system and obj.name == entity.name:
                entity.clear_entity()
                return
    def _update_bddl_scope_from_system_init(self, system):
        """
        Internal callback function to be called when system.initialize() is called to potentially update internal
        bddl object scope

        Args:
            system (BaseSystem): Newly initialized system
        """
        # Iterate over all entities, and potentially match the system to the scope
        for inst, entity in self.object_scope.items():
            if not entity.exists and entity.is_system and entity.og_categories[0] == system.name:
                entity.set_entity(entity=system)
                return
    def _update_bddl_scope_from_system_clear(self, system):
        """
        Internal callback function to be called when system.clear() is called to potentially update internal
        bddl object scope

        Args:
            system (BaseSystem): Newly cleared system
        """
        # Iterate over all entities, and potentially remove the matched system from the scope
        for inst, entity in self.object_scope.items():
            if entity.exists and entity.is_system and system.name == entity.name:
                entity.clear_entity()
                return
    def show_instruction(self):
        """
        Get current instruction for user

        Returns:
            3-tuple:
                - str: Current goal condition in natural language
                - 3-tuple: (R,G,B) color to assign to text
                - list of BaseObject: Relevant objects for the current instruction
        """
        satisfied = self.currently_viewed_instruction in self._termination_conditions["predicate"].goal_status["satisfied"]
        natural_language_condition = self.activity_natural_language_goal_conditions[self.currently_viewed_instruction]
        objects = self.activity_goal_conditions[self.currently_viewed_instruction].get_relevant_objects()
        # Green text when the condition is satisfied, red otherwise
        text_color = (
            [83.0 / 255.0, 176.0 / 255.0, 72.0 / 255.0] if satisfied else [255.0 / 255.0, 51.0 / 255.0, 51.0 / 255.0]
        )
        return natural_language_condition, text_color, objects
    def iterate_instruction(self):
        """
        Increment the instruction
        """
        # Wraps around to the first instruction after the last one
        self.currently_viewed_index = (self.currently_viewed_index + 1) % len(self.activity_conditions.parsed_goal_conditions)
        self.currently_viewed_instruction = self.instruction_order[self.currently_viewed_index]
    def save_task(self, path=None, override=False):
        """
        Writes the current scene configuration to a .json file

        Args:
            path (None or str): If specified, absolute fpath to the desired path to write the .json. Default is
                <gm.DATASET_PATH/scenes/<SCENE_MODEL>/json/...>
            override (bool): Whether to override any files already found at the path to write the task .json
        """
        if path is None:
            fname = self.get_cached_activity_scene_filename(
                scene_model=og.sim.scene.scene_model,
                activity_name=self.activity_name,
                activity_definition_id=self.activity_definition_id,
                activity_instance_id=self.activity_instance_id,
            )
            path = os.path.join(gm.DATASET_PATH, "scenes", og.sim.scene.scene_model, "json", f"{fname}.json")
        if os.path.exists(path) and not override:
            log.warning(f"Scene json already exists at {path}. Use override=True to force writing of new json.")
            return
        # Write metadata and then save
        self.write_task_metadata()
        og.sim.save(json_path=path)
    @property
    def name(self):
        """
        Returns:
            str: Name of this task. Defaults to class name
        """
        name_base = super().name
        # Add activity name, def id, and inst id
        return f"{name_base}_{self.activity_name}_{self.activity_definition_id}_{self.activity_instance_id}"
    @classproperty
    def valid_scene_types(cls):
        """
        Returns:
            set of type: Scene classes this task is compatible with
        """
        # Any scene can be used
        return {Scene}
    @classproperty
    def default_termination_config(cls):
        """
        Returns:
            dict: Default termination configuration (max_steps)
        """
        return {
            "max_steps": 500,
        }
    @classproperty
    def default_reward_config(cls):
        """
        Returns:
            dict: Default reward configuration (r_potential)
        """
        return {
            "r_potential": 1.0,
        }
| 25,013 | Python | 45.151291 | 143 | 0.633431 |
StanfordVL/OmniGibson/omnigibson/maps/traversable_map.py | import os
import cv2
import numpy as np
from PIL import Image
# Accommodate large maps (e.g. 10k x 10k) while suppressing DecompressionBombError
Image.MAX_IMAGE_PIXELS = None
from omnigibson.maps.map_base import BaseMap
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.motion_planning_utils import astar
# Create module logger
log = create_module_logger(module_name=__name__)
class TraversableMap(BaseMap):
    """
    Traversable scene class.
    Contains the functionalities for navigation such as shortest path computation
    """

    def __init__(
        self,
        map_resolution=0.1,
        default_erosion_radius=0.0,
        trav_map_with_objects=True,
        num_waypoints=10,
        waypoint_resolution=0.2,
    ):
        """
        Args:
            map_resolution (float): map resolution in meters, each pixel represents this many meters;
                normally, this should be between 0.01 and 0.1
            default_erosion_radius (float): default map erosion radius in meters
            trav_map_with_objects (bool): whether to use objects or not when constructing graph
            num_waypoints (int): number of way points returned
            waypoint_resolution (float): resolution of adjacent way points
        """
        # Set internal values
        self.map_default_resolution = 0.01  # each pixel == 0.01m in the dataset representation
        self.default_erosion_radius = default_erosion_radius
        self.trav_map_with_objects = trav_map_with_objects
        self.num_waypoints = num_waypoints
        # Number of map pixels separating consecutive returned waypoints
        self.waypoint_interval = int(waypoint_resolution / map_resolution)

        # Values loaded at runtime
        self.trav_map_original_size = None
        self.trav_map_size = None
        self.mesh_body_id = None
        self.floor_heights = None
        self.floor_map = None

        # Run super method
        super().__init__(map_resolution=map_resolution)

    def _load_map(self, maps_path, floor_heights=(0.0,)):
        """
        Loads the traversability maps for all floors

        Args:
            maps_path (str): Path to the folder containing the traversability maps
            floor_heights (n-array): Height(s) of the floors for this map

        Returns:
            int: Size of the loaded map
        """
        # Bail out early (returning None as the map size) if the maps folder is missing
        if not os.path.exists(maps_path):
            log.warning("trav map does not exist: {}".format(maps_path))
            return
        self.floor_heights = floor_heights
        self.floor_map = []
        map_size = None
        for floor in range(len(self.floor_heights)):
            if self.trav_map_with_objects:
                # TODO: Shouldn't this be generated dynamically?
                trav_map = np.array(Image.open(os.path.join(maps_path, "floor_trav_{}.png".format(floor))))
            else:
                trav_map = np.array(Image.open(os.path.join(maps_path, "floor_trav_no_obj_{}.png".format(floor))))
            # If we do not initialize the original size of the traversability map, we obtain it from the image
            # Then, we compute the final map size as the factor of scaling (default_resolution/resolution) times the
            # original map size
            # NOTE(review): the size is inferred from the FIRST floor's image only — assumes all
            # floors share the same image dimensions; confirm against the dataset format
            if self.trav_map_original_size is None:
                height, width = trav_map.shape
                assert height == width, "trav map is not a square"
                self.trav_map_original_size = height
                map_size = int(
                    self.trav_map_original_size * self.map_default_resolution / self.map_resolution
                )
            # We resize the traversability map to the new size computed before
            trav_map = cv2.resize(trav_map, (map_size, map_size))
            # We make the pixels of the image to be either 0 or 255
            # (binarize: anything not fully white becomes non-traversable)
            trav_map[trav_map < 255] = 0
            self.floor_map.append(trav_map)
        return map_size

    @property
    def n_floors(self):
        """
        Returns:
            int: Number of floors belonging to this map's associated scene
        """
        return len(self.floor_heights)

    def _erode_trav_map(self, trav_map, robot=None):
        """
        Erode the traversability map to account for the robot's size, so that any remaining
        traversable (255) pixel leaves clearance for the robot's footprint.

        Args:
            trav_map ((N, N)-array): binarized traversability map (0 / 255)
            robot (None or BaseRobot): if given, erosion radius is half the diagonal of the
                robot's chassis AABB extent; otherwise self.default_erosion_radius is used

        Returns:
            (N, N)-array: eroded traversability map
        """
        # Erode the traversability map to account for the robot's size
        if robot:
            robot_chassis_extent = robot.reset_joint_pos_aabb_extent[:2]
            radius = np.linalg.norm(robot_chassis_extent) / 2.0
        else:
            radius = self.default_erosion_radius
        radius_pixel = int(np.ceil(radius / self.map_resolution))
        trav_map = cv2.erode(trav_map, np.ones((radius_pixel, radius_pixel)))
        return trav_map

    def get_random_point(self, floor=None, reference_point=None, robot=None):
        """
        Sample a random point on the given floor number. If not given, sample a random floor number.
        If @reference_point is given, sample a point in the same connected component as the previous point.

        Args:
            floor (None or int): floor number. None means the floor is randomly sampled
                Warning: if @reference_point is given, @floor must be given;
                otherwise, this would lead to undefined behavior
            reference_point (3-array): (x,y,z) if given, sample a point in the same connected component as this point
            robot (None or BaseRobot): if given, erode the traversability map to account for the robot's size

        Returns:
            2-tuple:
                - int: floor number. This is the sampled floor number if @floor is None
                - 3-array: (x,y,z) randomly sampled point
        """
        if reference_point is not None:
            assert floor is not None, "floor must be given if reference_point is given"

        # If nothing is given, sample a random floor and a random point on that floor
        if floor is None and reference_point is None:
            floor = np.random.randint(0, self.n_floors)

        # create a deep copy so that we don't erode the original map
        trav_map = self.floor_map[floor].copy()
        trav_map = self._erode_trav_map(trav_map, robot=robot)

        if reference_point is not None:
            # Find connected component
            _, component_labels = cv2.connectedComponents(trav_map, connectivity=4)

            # If previous point is given, sample a point in the same connected component
            prev_xy_map = self.world_to_map(reference_point[:2])
            prev_label = component_labels[prev_xy_map[0]][prev_xy_map[1]]
            trav_space = np.where(component_labels == prev_label)
        else:
            trav_space = np.where(trav_map == 255)

        # Uniformly sample one of the traversable pixels and lift it into world coordinates
        idx = np.random.randint(0, high=trav_space[0].shape[0])
        xy_map = np.array([trav_space[0][idx], trav_space[1][idx]])
        x, y = self.map_to_world(xy_map)
        z = self.floor_heights[floor]
        return floor, np.array([x, y, z])

    def get_shortest_path(self, floor, source_world, target_world, entire_path=False, robot=None):
        """
        Get the shortest path from one point to another point.
        If any of the given point is not in the graph, add it to the graph and
        create an edge between it to its closest node.

        Args:
            floor (int): floor number
            source_world (2-array): (x,y) 2D source location in world reference frame (metric)
            target_world (2-array): (x,y) 2D target location in world reference frame (metric)
            entire_path (bool): whether to return the entire path
            robot (None or BaseRobot): if given, erode the traversability map to account for the robot's size

        Returns:
            2-tuple:
                - (N, 2) array: array of path waypoints, where N is the number of generated waypoints
                - float: geodesic distance of the path
        """
        source_map = tuple(self.world_to_map(source_world))
        target_map = tuple(self.world_to_map(target_world))

        # create a deep copy so that we don't erode the original map
        trav_map = self.floor_map[floor].copy()
        trav_map = self._erode_trav_map(trav_map, robot=robot)

        path_map = astar(trav_map, source_map, target_map)

        if path_map is None:
            # No traversable path found
            return None, None

        # Geodesic distance is summed over the FULL-resolution path, before subsampling
        path_world = self.map_to_world(path_map)
        geodesic_distance = np.sum(np.linalg.norm(path_world[1:] - path_world[:-1], axis=1))
        # Subsample waypoints so adjacent returned points are ~waypoint_resolution apart
        path_world = path_world[:: self.waypoint_interval]

        if not entire_path:
            # Truncate to num_waypoints, padding with copies of the target if the path is shorter
            path_world = path_world[: self.num_waypoints]

            num_remaining_waypoints = self.num_waypoints - path_world.shape[0]
            if num_remaining_waypoints > 0:
                remaining_waypoints = np.tile(target_world, (num_remaining_waypoints, 1))
                path_world = np.concatenate((path_world, remaining_waypoints), axis=0)

        return path_world, geodesic_distance
| 8,862 | Python | 41.816425 | 117 | 0.61081 |
StanfordVL/OmniGibson/omnigibson/maps/map_base.py | import numpy as np
class BaseMap:
    """
    Base map class.
    Contains basic interface for converting from map to world frame, and vise-versa
    """

    def __init__(
        self,
        map_resolution=0.1,
    ):
        """
        Args:
            map_resolution (float): map resolution, in meters per pixel
        """
        # Set internal values
        self.map_resolution = map_resolution
        # Populated by load_map(); side length of the (square) map, in pixels
        self.map_size = None

    def load_map(self, *args, **kwargs):
        """
        Load's this map internally
        """
        # Run internal method and store map size
        self.map_size = self._load_map(*args, **kwargs)

    def _load_map(self, *args, **kwargs):
        """
        Arbitrary function to load this map. Should be implemented by subclass

        Returns:
            int: Size of the loaded map
        """
        raise NotImplementedError()

    def map_to_world(self, xy):
        """
        Transforms a 2D point in map reference frame into world (simulator) reference frame

        Args:
            xy (2-array or (N, 2)-array): 2D location(s) in map reference frame (in image pixel space)

        Returns:
            2-array or (N, 2)-array: 2D location(s) in world reference frame (in metric space)
        """
        # Flip along the last meaningful axis because image (row, col) ordering is the
        # reverse of world (x, y) ordering
        axis = 0 if len(xy.shape) == 1 else 1
        return np.flip((xy - self.map_size / 2.0) * self.map_resolution, axis=axis)

    def world_to_map(self, xy):
        """
        Transforms a 2D point in world (simulator) reference frame into map reference frame

        Args:
            xy (2-array): 2D location in world reference frame (in metric space)

        Returns:
            2-array: 2D integer pixel location in map reference frame (in image space)
        """
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
        # int is the documented replacement and yields the identical dtype here
        return np.flip((np.array(xy) / self.map_resolution + self.map_size / 2.0)).astype(int)
| 1,751 | Python | 28.694915 | 102 | 0.571102 |
StanfordVL/OmniGibson/omnigibson/maps/__init__.py | from omnigibson.maps.map_base import BaseMap
from omnigibson.maps.traversable_map import TraversableMap
from omnigibson.maps.segmentation_map import SegmentationMap
| 165 | Python | 40.49999 | 60 | 0.872727 |
StanfordVL/OmniGibson/omnigibson/maps/segmentation_map.py | import os
import numpy as np
from PIL import Image
# Accommodate large maps (e.g. 10k x 10k) while suppressing DecompressionBombError
Image.MAX_IMAGE_PIXELS = None
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.maps.map_base import BaseMap
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
class SegmentationMap(BaseMap):
    """
    Segmentation map for computing connectivity within the scene
    """

    def __init__(
        self,
        scene_dir,
        map_resolution=0.1,
        floor_heights=(0.0,),
    ):
        """
        Args:
            scene_dir (str): path to the scene directory from which segmentation info will be extracted
            map_resolution (float): map resolution
            floor_heights (list of float): heights of the floors for this segmentation map
        """
        # Store internal values
        self.scene_dir = scene_dir
        self.map_default_resolution = 0.01  # meters per pixel in the source dataset images
        self.floor_heights = floor_heights

        # Other values that will be loaded at runtime
        self.room_sem_name_to_sem_id = None
        self.room_sem_id_to_sem_name = None
        self.room_ins_name_to_ins_id = None
        self.room_ins_id_to_ins_name = None
        self.room_sem_name_to_ins_name = None
        self.room_ins_map = None
        self.room_sem_map = None

        # Run super call
        super().__init__(map_resolution=map_resolution)

        # Load the map
        self.load_map()

    def _load_map(self):
        """
        Loads the instance and semantic segmentation images for this scene and builds the
        bidirectional name <-> id mappings for room types and room instances.

        Returns:
            int: Size of the loaded map (pixels per side after rescaling to map_resolution)
        """
        layout_dir = os.path.join(self.scene_dir, "layout")
        room_seg_imgs = os.path.join(layout_dir, "floor_insseg_0.png")
        img_ins = Image.open(room_seg_imgs)
        room_seg_imgs = os.path.join(layout_dir, "floor_semseg_0.png")
        img_sem = Image.open(room_seg_imgs)
        height, width = img_ins.size
        assert height == width, "room seg map is not a square"
        assert img_ins.size == img_sem.size, "semantic and instance seg maps have different sizes"
        map_size = int(height * self.map_default_resolution / self.map_resolution)
        # NEAREST resampling keeps pixel values as valid segmentation ids (no interpolation)
        img_ins = np.array(img_ins.resize((map_size, map_size), Image.NEAREST))
        img_sem = np.array(img_sem.resize((map_size, map_size), Image.NEAREST))

        # Semantic ids are 1-indexed into the room categories file (see sem_id - 1 below)
        room_categories = os.path.join(gm.DATASET_PATH, "metadata", "room_categories.txt")
        with open(room_categories, "r") as fp:
            room_cats = [line.rstrip() for line in fp.readlines()]

        sem_id_to_ins_id = {}
        unique_ins_ids = np.unique(img_ins)
        # Drop the first (smallest) id — presumably the background/room-boundary label 0,
        # consistent with the id == 0 checks in get_room_*_by_point; TODO confirm maps
        # always contain a 0 pixel
        unique_ins_ids = np.delete(unique_ins_ids, 0)
        for ins_id in unique_ins_ids:
            # find one pixel for each ins id
            x, y = np.where(img_ins == ins_id)
            # retrieve the correspounding sem id
            sem_id = img_sem[x[0], y[0]]
            if sem_id not in sem_id_to_ins_id:
                sem_id_to_ins_id[sem_id] = []
            sem_id_to_ins_id[sem_id].append(ins_id)

        room_sem_name_to_sem_id = {}
        room_ins_name_to_ins_id = {}
        room_sem_name_to_ins_name = {}
        for sem_id, ins_ids in sem_id_to_ins_id.items():
            sem_name = room_cats[sem_id - 1]
            room_sem_name_to_sem_id[sem_name] = sem_id
            for i, ins_id in enumerate(ins_ids):
                # valid class start from 1
                # Instance names are "<sem_name>_<i>", e.g. "bathroom_0", "bathroom_1"
                ins_name = "{}_{}".format(sem_name, i)
                room_ins_name_to_ins_id[ins_name] = ins_id
                if sem_name not in room_sem_name_to_ins_name:
                    room_sem_name_to_ins_name[sem_name] = []
                room_sem_name_to_ins_name[sem_name].append(ins_name)

        # Store forward and inverse lookups plus the (rescaled) segmentation images
        self.room_sem_name_to_sem_id = room_sem_name_to_sem_id
        self.room_sem_id_to_sem_name = {value: key for key, value in room_sem_name_to_sem_id.items()}
        self.room_ins_name_to_ins_id = room_ins_name_to_ins_id
        self.room_ins_id_to_ins_name = {value: key for key, value in room_ins_name_to_ins_id.items()}
        self.room_sem_name_to_ins_name = room_sem_name_to_ins_name
        self.room_ins_map = img_ins
        self.room_sem_map = img_sem

        return map_size

    def get_random_point_by_room_type(self, room_type):
        """
        Sample a random point on the given a specific room type @room_type.

        Args:
            room_type (str): Room type to sample random point (e.g.: "bathroom")

        Returns:
            2-tuple:
                - int: floor number. This is always 0
                - 3-array: (x,y,z) randomly sampled point in a room of type @room_type
        """
        if room_type not in self.room_sem_name_to_sem_id:
            log.warning("room_type [{}] does not exist.".format(room_type))
            return None, None
        sem_id = self.room_sem_name_to_sem_id[room_type]
        # Uniformly sample one pixel carrying this semantic id, then map to world coords
        valid_idx = np.array(np.where(self.room_sem_map == sem_id))
        random_point_map = valid_idx[:, np.random.randint(valid_idx.shape[1])]
        x, y = self.map_to_world(random_point_map)
        # assume only 1 floor
        floor = 0
        z = self.floor_heights[floor]
        return floor, np.array([x, y, z])

    def get_random_point_by_room_instance(self, room_instance):
        """
        Sample a random point on the given a specific room instance @room_instance.

        Args:
            room_instance (str): Room instance to sample random point (e.g.: "bathroom_1")

        Returns:
            2-tuple:
                - int: floor number. This is always 0
                - 3-array: (x,y,z) randomly sampled point in room @room_instance
        """
        if room_instance not in self.room_ins_name_to_ins_id:
            log.warning("room_instance [{}] does not exist.".format(room_instance))
            return None, None
        ins_id = self.room_ins_name_to_ins_id[room_instance]
        # Uniformly sample one pixel carrying this instance id, then map to world coords
        valid_idx = np.array(np.where(self.room_ins_map == ins_id))
        random_point_map = valid_idx[:, np.random.randint(valid_idx.shape[1])]
        x, y = self.map_to_world(random_point_map)
        # assume only 1 floor
        floor = 0
        z = self.floor_heights[floor]
        return floor, np.array([x, y, z])

    def get_room_type_by_point(self, xy):
        """
        Return the room type given a point

        Args:
            xy (2-array): 2D location in world reference frame (in metric space)

        Returns:
            None or str: room type that this point is in or None, if this point is not on the room segmentation map
        """
        x, y = self.world_to_map(xy)
        # Out-of-bounds points are not in any room
        if x < 0 or x >= self.room_sem_map.shape[0] or y < 0 or y >= self.room_sem_map.shape[1]:
            return None
        sem_id = self.room_sem_map[x, y]
        # room boundary
        if sem_id == 0:
            return None
        else:
            return self.room_sem_id_to_sem_name[sem_id]

    def get_room_instance_by_point(self, xy):
        """
        Return the room instance given a point

        Args:
            xy (2-array): 2D location in world reference frame (in metric space)

        Returns:
            None or str: room instance that this point is in or None, if this point is not on the room segmentation map
        """
        x, y = self.world_to_map(xy)
        # Out-of-bounds points are not in any room
        if x < 0 or x >= self.room_ins_map.shape[0] or y < 0 or y >= self.room_ins_map.shape[1]:
            return None
        ins_id = self.room_ins_map[x, y]
        # room boundary
        if ins_id == 0:
            return None
        else:
            return self.room_ins_id_to_ins_name[ins_id]
| 7,535 | Python | 36.869347 | 119 | 0.580889 |
StanfordVL/OmniGibson/omnigibson/controllers/joint_controller.py | import numpy as np
from omnigibson.controllers import IsGraspingState, ControlType, LocomotionController, ManipulationController, \
GripperController
from omnigibson.utils.python_utils import assert_valid_key
import omnigibson.utils.transform_utils as T
from omnigibson.macros import create_module_macros
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEFAULT_JOINT_POS_KP = 50.0
m.DEFAULT_JOINT_POS_DAMPING_RATIO = 1.0 # critically damped
m.DEFAULT_JOINT_VEL_KP = 2.0
class JointController(LocomotionController, ManipulationController, GripperController):
    """
    Controller class for joint control. Because omniverse can handle direct position / velocity / effort
    control signals, this is merely a pass-through operation from command to control (with clipping / scaling built in).

    Each controller step consists of the following:
        1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
        2a. If using delta commands, then adds the command to the current joint state
        2b. Clips the resulting command by the motor limits
    """

    def __init__(
        self,
        control_freq,
        motor_type,
        control_limits,
        dof_idx,
        command_input_limits="default",
        command_output_limits="default",
        kp=None,
        damping_ratio=None,
        use_impedances=False,
        use_delta_commands=False,
        compute_delta_in_quat_space=None,
    ):
        """
        Args:
            control_freq (int): controller loop frequency
            motor_type (str): type of motor being controlled, one of {position, velocity, effort}
            control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
                control signal. Should specify per-dof type limits, i.e.:

                "position": [[min], [max]]
                "velocity": [[min], [max]]
                "effort": [[min], [max]]
                "has_limit": [...bool...]

                Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
            dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
                controller-relevant values during control computations
            command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
                If None, no clipping will be used. If "default", range will be set to (-1, 1)
            command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
                then all inputted command values will be scaled from the input range to the output range.
                If either is None, no scaling will be used. If "default", then this range will automatically be set
                to the @control_limits entry corresponding to self.control_type
            kp (None or float): If @motor_type is "position" or "velocity" and @use_impedances=True, this is the
                proportional gain applied to the joint controller. If None, a default value will be used.
            damping_ratio (None or float): If @motor_type is "position" and @use_impedances=True, this is the
                damping ratio applied to the joint controller. If None, a default value will be used.
            use_impedances (bool): If True, will use impedances via the mass matrix to modify the desired efforts
                applied
            use_delta_commands (bool): whether inputted commands should be interpreted as delta or absolute values
            compute_delta_in_quat_space (None or List[(rx_idx, ry_idx, rz_idx), ...]): if specified, groups of
                joints that need to be processed in quaternion space to avoid gimbal lock issues normally faced by
                3 DOF rotation joints. Each group needs to consist of three idxes corresponding to the indices in
                the input space. This is only used in the delta_commands mode.
        """
        # Store arguments
        assert_valid_key(key=motor_type.lower(), valid_keys=ControlType.VALID_TYPES_STR, name="motor_type")
        self._motor_type = motor_type.lower()
        self._use_delta_commands = use_delta_commands
        self._compute_delta_in_quat_space = [] if compute_delta_in_quat_space is None else compute_delta_in_quat_space

        # Store control gains; gains are only meaningful for position / velocity motors,
        # so explicitly reject them for the other motor types
        if self._motor_type == "position":
            kp = m.DEFAULT_JOINT_POS_KP if kp is None else kp
            damping_ratio = m.DEFAULT_JOINT_POS_DAMPING_RATIO if damping_ratio is None else damping_ratio
        elif self._motor_type == "velocity":
            kp = m.DEFAULT_JOINT_VEL_KP if kp is None else kp
            assert damping_ratio is None, "Cannot set damping_ratio for JointController with motor_type=velocity!"
        else:  # effort
            assert kp is None, "Cannot set kp for JointController with motor_type=effort!"
            assert damping_ratio is None, "Cannot set damping_ratio for JointController with motor_type=effort!"
        self.kp = kp
        # kd derived from the damping ratio: kd = 2 * sqrt(kp) * zeta (critically damped at zeta=1)
        self.kd = None if damping_ratio is None else 2 * np.sqrt(self.kp) * damping_ratio
        self._use_impedances = use_impedances

        # When in delta mode, it doesn't make sense to infer output range using the joint limits (since that's an
        # absolute range and our values are relative). So reject the default mode option in that case.
        assert not (self._use_delta_commands and command_output_limits == "default"), \
            "Cannot use 'default' command output limits in delta commands mode of JointController. Try None instead."

        # Run super init
        super().__init__(
            control_freq=control_freq,
            control_limits=control_limits,
            dof_idx=dof_idx,
            command_input_limits=command_input_limits,
            command_output_limits=command_output_limits,
        )

    def _update_goal(self, command, control_dict):
        # Compute the base value for the command
        base_value = control_dict[f"joint_{self._motor_type}"][self.dof_idx]

        # If we're using delta commands, add this value
        if self._use_delta_commands:
            # Apply the command to the base value.
            target = base_value + command

            # Correct any gimbal lock issues using the compute_delta_in_quat_space group.
            for rx_ind, ry_ind, rz_ind in self._compute_delta_in_quat_space:
                # Grab the starting rotations of these joints.
                start_rots = base_value[[rx_ind, ry_ind, rz_ind]]

                # Grab the delta rotations.
                delta_rots = command[[rx_ind, ry_ind, rz_ind]]

                # Compute the final rotations in the quaternion space.
                _, end_quat = T.pose_transform(np.zeros(3), T.euler2quat(delta_rots),
                                               np.zeros(3), T.euler2quat(start_rots))
                end_rots = T.quat2euler(end_quat)

                # Update the command
                target[[rx_ind, ry_ind, rz_ind]] = end_rots

        # Otherwise, goal is simply the command itself
        else:
            target = command

        # Clip the command based on the limits
        target = target.clip(
            self._control_limits[ControlType.get_type(self._motor_type)][0][self.dof_idx],
            self._control_limits[ControlType.get_type(self._motor_type)][1][self.dof_idx],
        )

        return dict(target=target)

    def compute_control(self, goal_dict, control_dict):
        """
        Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal

        Args:
            goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                goals necessary for controller computation. Must include the following keys:
                    target: desired N-dof absolute joint values used as setpoint
            control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                states necessary for controller computation. Must include the following keys:
                    joint_position: Array of current joint positions
                    joint_velocity: Array of current joint velocities
                    joint_effort: Array of current joint effort

        Returns:
            Array[float]: outputted (non-clipped!) control signal to deploy
        """
        base_value = control_dict[f"joint_{self._motor_type}"][self.dof_idx]
        target = goal_dict["target"]

        # Convert control into efforts
        if self._use_impedances:
            if self._motor_type == "position":
                # Run impedance controller -- effort = pos_err * kp + vel_err * kd
                position_error = target - base_value
                # Desired velocity is implicitly zero, so velocity error is just the negated current velocity
                vel_pos_error = -control_dict[f"joint_velocity"][self.dof_idx]
                u = position_error * self.kp + vel_pos_error * self.kd
            elif self._motor_type == "velocity":
                # Compute command torques via PI velocity controller plus gravity compensation torques
                velocity_error = target - base_value
                u = velocity_error * self.kp
            else:  # effort
                u = target

            # Project through the mass matrix (restricted to this controller's dofs) so the
            # commanded accelerations become joint-space efforts
            dof_idxs_mat = tuple(np.meshgrid(self.dof_idx, self.dof_idx))
            mm = control_dict["mass_matrix"][dof_idxs_mat]
            u = np.dot(mm, u)

            # Add gravity compensation
            u += control_dict["gravity_force"][self.dof_idx] + control_dict["cc_force"][self.dof_idx]
        else:
            # Desired is the exact goal
            u = target

        # Return control
        return u

    def compute_no_op_goal(self, control_dict):
        # Compute based on mode
        if self._motor_type == "position":
            # Maintain current qpos
            target = control_dict[f"joint_{self._motor_type}"][self.dof_idx]
        else:
            # For velocity / effort, directly set to 0
            target = np.zeros(self.control_dim)

        return dict(target=target)

    def _get_goal_shapes(self):
        return dict(target=(self.control_dim,))

    def is_grasping(self):
        # No good heuristic to determine grasping, so return UNKNOWN
        return IsGraspingState.UNKNOWN

    @property
    def use_delta_commands(self):
        """
        Returns:
            bool: Whether this controller is using delta commands or not
        """
        return self._use_delta_commands

    @property
    def motor_type(self):
        """
        Returns:
            str: The type of motor being simulated by this controller. One of {"position", "velocity", "effort"}
        """
        return self._motor_type

    @property
    def control_type(self):
        # Impedance control always outputs efforts regardless of the underlying motor type
        return ControlType.EFFORT if self._use_impedances else ControlType.get_type(type_str=self._motor_type)

    @property
    def command_dim(self):
        # One command entry per controlled dof
        return len(self.dof_idx)
| 11,270 | Python | 46.357143 | 120 | 0.624224 |
StanfordVL/OmniGibson/omnigibson/controllers/ik_controller.py | import numpy as np
from omnigibson.macros import gm, create_module_macros
import omnigibson.utils.transform_utils as T
from omnigibson.controllers import ControlType, ManipulationController
from omnigibson.controllers.joint_controller import JointController
from omnigibson.utils.processing_utils import MovingAverageFilter
from omnigibson.utils.control_utils import IKSolver
from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Set some macros
m = create_module_macros(module_path=__file__)
m.IK_POS_TOLERANCE = 0.002
m.IK_POS_WEIGHT = 20.0
m.IK_ORN_TOLERANCE = 0.01
m.IK_ORN_WEIGHT = 0.05
m.IK_MAX_ITERATIONS = 100
# Different modes
IK_MODE_COMMAND_DIMS = {
"absolute_pose": 6, # 6DOF (x,y,z,ax,ay,az) control of pose, whether both position and orientation is given in absolute coordinates
"pose_absolute_ori": 6, # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates
"pose_delta_ori": 6, # 6DOF (dx,dy,dz,dax,day,daz) control over pose
"position_fixed_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands being kept as fixed initial absolute orientation
"position_compliant_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands automatically being sent as 0s (so can drift over time)
}
IK_MODES = set(IK_MODE_COMMAND_DIMS.keys())
class InverseKinematicsController(JointController, ManipulationController):
"""
Controller class to convert (delta) EEF commands into joint velocities using Inverse Kinematics (IK).
Each controller step consists of the following:
1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
2. Run Inverse Kinematics to back out joint velocities for a desired task frame command
3. Clips the resulting command by the motor (velocity) limits
"""
def __init__(
    self,
    task_name,
    robot_description_path,
    robot_urdf_path,
    eef_name,
    control_freq,
    reset_joint_pos,
    control_limits,
    dof_idx,
    command_input_limits="default",
    command_output_limits=((-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), (0.2, 0.2, 0.2, 0.5, 0.5, 0.5)),
    kp=None,
    damping_ratio=None,
    use_impedances=True,
    mode="pose_delta_ori",
    smoothing_filter_size=None,
    workspace_pose_limiter=None,
    condition_on_current_position=True,
):
    """
    Args:
        task_name (str): name assigned to this task frame for computing IK control. During control calculations,
            the inputted control_dict should include entries named <@task_name>_pos_relative and
            <@task_name>_quat_relative. See self._command_to_control() for what these values should entail.
        robot_description_path (str): path to robot descriptor yaml file
        robot_urdf_path (str): path to robot urdf file
        eef_name (str): end effector frame name
        control_freq (int): controller loop frequency
        reset_joint_pos (Array[float]): reset joint positions, used as part of nullspace controller in IK.
            Note that this should correspond to ALL the joints; the exact indices will be extracted via @dof_idx
        control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
            control signal. Should specify per-dof type limits, i.e.:

            "position": [[min], [max]]
            "velocity": [[min], [max]]
            "effort": [[min], [max]]
            "has_limit": [...bool...]

            Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
        dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
            controller-relevant values during control computations
        command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
            if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
            If None, no clipping will be used. If "default", range will be set to (-1, 1)
        command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
            if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
            then all inputted command values will be scaled from the input range to the output range.
            If either is None, no scaling will be used. If "default", then this range will automatically be set
            to the @control_limits entry corresponding to self.control_type
        kp (None or float): The proportional gain applied to the joint controller. If None, a default value
            will be used. Only relevant if @use_impedances=True
        damping_ratio (None or float): The damping ratio applied to the joint controller. If None, a default
            value will be used. Only relevant if @use_impedances=True
        use_impedances (bool): If True, will use impedances via the mass matrix to modify the desired efforts
            applied
        mode (str): mode to use when computing IK. In all cases, position commands are 3DOF delta (dx,dy,dz)
            cartesian values, relative to the robot base frame. Valid options are:
                - "absolute_pose": 6DOF (dx,dy,dz,ax,ay,az) control over pose,
                    where both the position and the orientation is given in absolute axis-angle coordinates
                - "pose_absolute_ori": 6DOF (dx,dy,dz,ax,ay,az) control over pose,
                    where the orientation is given in absolute axis-angle coordinates
                - "pose_delta_ori": 6DOF (dx,dy,dz,dax,day,daz) control over pose
                - "position_fixed_ori": 3DOF (dx,dy,dz) control over position,
                    with orientation commands being kept as fixed initial absolute orientation
                - "position_compliant_ori": 3DOF (dx,dy,dz) control over position,
                    with orientation commands automatically being sent as 0s (so can drift over time)
        smoothing_filter_size (None or int): if specified, sets the size of a moving average filter to apply
            on all outputted IK joint positions.
        workspace_pose_limiter (None or function): if specified, callback method that should clip absolute
            target (x,y,z) cartesian position and absolute quaternion orientation (x,y,z,w) to a specific workspace
            range (i.e.: this can be unique to each robot, and implemented by each embodiment).
            Function signature should be:

                def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: Dict[str, Any]) --> Tuple[Array[float], Array[float]]

            where target_pos is (x,y,z) cartesian position values, target_quat is (x,y,z,w) quarternion orientation
            values, and the returned tuple is the processed (pos, quat) command.
        condition_on_current_position (bool): if True, will use the current joint position as the initial guess for the IK algorithm.
            Otherwise, will use the reset_joint_pos as the initial guess.
    """
    # Store arguments
    control_dim = len(dof_idx)
    # Optional moving-average smoothing applied to the IK joint-position outputs
    self.control_filter = (
        None
        if smoothing_filter_size in {None, 0}
        else MovingAverageFilter(obs_dim=control_dim, filter_width=smoothing_filter_size)
    )
    assert mode in IK_MODES, f"Invalid ik mode specified! Valid options are: {IK_MODES}, got: {mode}"
    self.mode = mode
    self.workspace_pose_limiter = workspace_pose_limiter
    self.task_name = task_name
    self.reset_joint_pos = reset_joint_pos[dof_idx]
    self.condition_on_current_position = condition_on_current_position

    # Create the lula IKSolver
    self.solver = IKSolver(
        robot_description_path=robot_description_path,
        robot_urdf_path=robot_urdf_path,
        eef_name=eef_name,
        reset_joint_pos=self.reset_joint_pos,
    )

    # Other variables that will be filled in at runtime
    self._fixed_quat_target = None

    # If the mode is set as absolute orientation and using default config,
    # change input and output limits accordingly.
    # By default, the input limits are set as 1, so we modify this to have a correct range.
    # The output orientation limits are also set to be values assuming delta commands, so those are updated too
    if self.mode == "pose_absolute_ori":
        if command_input_limits is not None:
            if command_input_limits == "default":
                command_input_limits = [
                    [-1.0, -1.0, -1.0, -np.pi, -np.pi, -np.pi],
                    [1.0, 1.0, 1.0, np.pi, np.pi, np.pi],
                ]
            else:
                # BUGFIX: copy the limits into mutable float arrays before overwriting the
                # orientation bounds. The incoming limits may be tuples (which don't support
                # item assignment), and assigning a scalar to a slice of a plain list raises
                # TypeError — both previously crashed here.
                command_input_limits = [np.array(lim, dtype=float) for lim in command_input_limits]
                command_input_limits[0][3:] = -np.pi
                command_input_limits[1][3:] = np.pi
        if command_output_limits is not None:
            if command_output_limits == "default":
                command_output_limits = [
                    [-1.0, -1.0, -1.0, -np.pi, -np.pi, -np.pi],
                    [1.0, 1.0, 1.0, np.pi, np.pi, np.pi],
                ]
            else:
                # Same mutability fix as for the input limits above
                command_output_limits = [np.array(lim, dtype=float) for lim in command_output_limits]
                command_output_limits[0][3:] = -np.pi
                command_output_limits[1][3:] = np.pi

    # Run super init
    super().__init__(
        control_freq=control_freq,
        control_limits=control_limits,
        dof_idx=dof_idx,
        kp=kp,
        damping_ratio=damping_ratio,
        motor_type="position",
        use_delta_commands=False,
        use_impedances=use_impedances,
        command_input_limits=command_input_limits,
        command_output_limits=command_output_limits,
    )
def reset(self):
# Call super first
super().reset()
# Reset the filter and clear internal control state
if self.control_filter is not None:
self.control_filter.reset()
self._fixed_quat_target = None
@property
def state_size(self):
# Add state size from the control filter
return super().state_size + self.control_filter.state_size
def _dump_state(self):
# Run super first
state = super()._dump_state()
# Add internal quaternion target and filter state
state["control_filter"] = self.control_filter.dump_state(serialized=False)
return state
def _load_state(self, state):
# Run super first
super()._load_state(state=state)
# If self._goal is populated, then set fixed_quat_target as well if the mode uses it
if self.mode == "position_fixed_ori" and self._goal is not None:
self._fixed_quat_target = self._goal["target_quat"]
# Load relevant info for this controller
self.control_filter.load_state(state["control_filter"], serialized=False)
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
# Serialize state for this controller
return np.concatenate([
state_flat,
self.control_filter.serialize(state=state["control_filter"]),
]).astype(float)
def _deserialize(self, state):
# Run super first
state_dict, idx = super()._deserialize(state=state)
# Deserialize state for this controller
state_dict["control_filter"] = self.control_filter.deserialize(state=state[idx: idx + self.control_filter.state_size])
return state_dict, idx + self.control_filter.state_size
def _update_goal(self, command, control_dict):
# Grab important info from control dict
pos_relative = np.array(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = np.array(control_dict[f"{self.task_name}_quat_relative"])
# Convert position command to absolute values if needed
if self.mode == "absolute_pose":
target_pos = command[:3]
else:
dpos = command[:3]
target_pos = pos_relative + dpos
# Compute orientation
if self.mode == "position_fixed_ori":
# We need to grab the current robot orientation as the commanded orientation if there is none saved
if self._fixed_quat_target is None:
self._fixed_quat_target = quat_relative.astype(np.float32) \
if (self._goal is None) else self._goal["target_quat"]
target_quat = self._fixed_quat_target
elif self.mode == "position_compliant_ori":
# Target quat is simply the current robot orientation
target_quat = quat_relative
elif self.mode == "pose_absolute_ori" or self.mode == "absolute_pose":
# Received "delta" ori is in fact the desired absolute orientation
target_quat = T.axisangle2quat(command[3:6])
else: # pose_delta_ori control
# Grab dori and compute target ori
dori = T.quat2mat(T.axisangle2quat(command[3:6]))
target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative))
# Possibly limit to workspace if specified
if self.workspace_pose_limiter is not None:
target_pos, target_quat = self.workspace_pose_limiter(target_pos, target_quat, control_dict)
goal_dict = dict(
target_pos=target_pos,
target_quat=target_quat,
)
return goal_dict
def compute_control(self, goal_dict, control_dict):
"""
Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal.
This processes the command based on self.mode, possibly clips the command based on self.workspace_pose_limiter,
Args:
goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
goals necessary for controller computation. Must include the following keys:
target_pos: robot-frame (x,y,z) desired end effector position
target_quat: robot-frame (x,y,z,w) desired end effector quaternion orientation
control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
states necessary for controller computation. Must include the following keys:
joint_position: Array of current joint positions
base_pos: (x,y,z) cartesian position of the robot's base relative to the static global frame
base_quat: (x,y,z,w) quaternion orientation of the robot's base relative to the static global frame
<@self.task_name>_pos_relative: (x,y,z) relative cartesian position of the desired task frame to
control, computed in its local frame (e.g.: robot base frame)
<@self.task_name>_quat_relative: (x,y,z,w) relative quaternion orientation of the desired task
frame to control, computed in its local frame (e.g.: robot base frame)
Returns:
Array[float]: outputted (non-clipped!) velocity control signal to deploy
"""
# Grab important info from control dict
pos_relative = np.array(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = np.array(control_dict[f"{self.task_name}_quat_relative"])
target_pos = goal_dict["target_pos"]
target_quat = goal_dict["target_quat"]
# Calculate and return IK-backed out joint angles
current_joint_pos = control_dict["joint_position"][self.dof_idx]
# If the delta is really small, we just keep the current joint position. This avoids joint
# drift caused by IK solver inaccuracy even when zero delta actions are provided.
if np.allclose(pos_relative, target_pos, atol=1e-4) and np.allclose(quat_relative, target_quat, atol=1e-4):
target_joint_pos = current_joint_pos
else:
# Otherwise we try to solve for the IK configuration.
if self.condition_on_current_position:
target_joint_pos = self.solver.solve(
target_pos=target_pos,
target_quat=target_quat,
tolerance_pos=m.IK_POS_TOLERANCE,
tolerance_quat=m.IK_ORN_TOLERANCE,
weight_pos=m.IK_POS_WEIGHT,
weight_quat=m.IK_ORN_WEIGHT,
max_iterations=m.IK_MAX_ITERATIONS,
initial_joint_pos=current_joint_pos,
)
else:
target_joint_pos = self.solver.solve(
target_pos=target_pos,
target_quat=target_quat,
tolerance_pos=m.IK_POS_TOLERANCE,
tolerance_quat=m.IK_ORN_TOLERANCE,
weight_pos=m.IK_POS_WEIGHT,
weight_quat=m.IK_ORN_WEIGHT,
max_iterations=m.IK_MAX_ITERATIONS,
)
if target_joint_pos is None:
# Print warning that we couldn't find a valid solution, and return the current joint configuration
# instead so that we execute a no-op control
if gm.DEBUG:
log.warning(f"Could not find valid IK configuration! Returning no-op control instead.")
target_joint_pos = current_joint_pos
# Optionally pass through smoothing filter for better stability
if self.control_filter is not None:
target_joint_pos = self.control_filter.estimate(target_joint_pos)
# Run super to reach desired position / velocity setpoint
return super().compute_control(goal_dict=dict(target=target_joint_pos), control_dict=control_dict)
def compute_no_op_goal(self, control_dict):
# No-op is maintaining current pose
return dict(
target_pos=np.array(control_dict[f"{self.task_name}_pos_relative"]),
target_quat=np.array(control_dict[f"{self.task_name}_quat_relative"]),
)
def _get_goal_shapes(self):
return dict(
target_pos=(3,),
target_quat=(4,),
)
@property
def command_dim(self):
return IK_MODE_COMMAND_DIMS[self.mode]
| 18,891 | Python | 49.784946 | 158 | 0.616802 |
Subsets and Splits