file_path
stringlengths 21
207
| content
stringlengths 5
1.02M
| size
int64 5
1.02M
| lang
stringclasses 9
values | avg_line_length
float64 1.33
100
| max_line_length
int64 4
993
| alphanum_fraction
float64 0.27
0.93
|
---|---|---|---|---|---|---|
StanfordVL/OmniGibson/omnigibson/examples/environments/navigation_env_demo.py | import os
import yaml
import omnigibson as og
from omnigibson.utils.ui_utils import choose_from_options
def main(random_selection=False, headless=False, short_exec=False):
    """
    Prompts the user to select a type of scene and loads a turtlebot into it, generating a Point-Goal navigation
    task within the environment.
    It steps the environment 100 times with random actions sampled from the action space,
    using the Gym interface, resetting it 10 times.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)

    # Load the config. Plain string literal: nothing is interpolated, so no f-string is needed.
    config_filename = os.path.join(og.example_config_path, "turtlebot_nav.yaml")
    # Context manager guarantees the config file handle is closed (the original leaked it).
    with open(config_filename, "r") as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    # Check if we want to quick load or full load the scene
    load_options = {
        "Quick": "Only load the building assets (i.e.: the floors, walls, ceilings)",
        "Full": "Load all interactive objects in the scene",
    }
    load_mode = choose_from_options(options=load_options, name="load mode", random_selection=random_selection)
    if load_mode == "Quick":
        config["scene"]["load_object_categories"] = ["floors", "walls", "ceilings"]

    # Load the environment
    env = og.Environment(configs=config)

    # Allow user to move camera more easily
    og.sim.enable_viewer_camera_teleoperation()

    # Run a simple loop and reset periodically (iteration index itself is unused)
    max_iterations = 10 if not short_exec else 1
    for _ in range(max_iterations):
        og.log.info("Resetting environment")
        env.reset()
        for i in range(100):
            action = env.action_space.sample()
            state, reward, done, info = env.step(action)
            if done:
                og.log.info("Episode finished after {} timesteps".format(i + 1))
                break

    # Always close the environment at the end
    env.close()


if __name__ == "__main__":
    main()
| 1,944 | Python | 33.732142 | 112 | 0.645062 |
StanfordVL/OmniGibson/omnigibson/examples/environments/behavior_env_demo.py | import os
import yaml
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils.ui_utils import choose_from_options
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
# GPU dynamics also on -- presumably required by the BEHAVIOR task's contents; TODO confirm
gm.USE_GPU_DYNAMICS = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Generates a BEHAVIOR Task environment in an online fashion.
    It steps the environment 100 times with random actions sampled from the action space,
    using the Gym interface, resetting it 10 times.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)

    # Ask the user whether they want online object sampling or not
    sampling_options = {
        False: "Use a pre-sampled cached BEHAVIOR activity scene",
        True: "Sample the BEHAVIOR activity in an online fashion",
    }
    should_sample = choose_from_options(options=sampling_options, name="online object sampling", random_selection=random_selection)

    # Load the pre-selected configuration and set the online_sampling flag
    config_filename = os.path.join(og.example_config_path, "fetch_behavior.yaml")
    # Context manager guarantees the YAML file handle is closed (the original leaked it).
    with open(config_filename, "r") as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)
    cfg["task"]["online_object_sampling"] = should_sample

    # Load the environment
    env = og.Environment(configs=cfg)

    # Allow user to move camera more easily
    og.sim.enable_viewer_camera_teleoperation()

    # Run a simple loop and reset periodically (iteration index itself is unused)
    max_iterations = 10 if not short_exec else 1
    for _ in range(max_iterations):
        og.log.info("Resetting environment")
        env.reset()
        for i in range(100):
            action = env.action_space.sample()
            state, reward, done, info = env.step(action)
            if done:
                og.log.info("Episode finished after {} timesteps".format(i + 1))
                break

    # Always close the environment at the end
    env.close()


if __name__ == "__main__":
    main()
| 2,003 | Python | 32.966101 | 131 | 0.667998 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/attachment_demo.py | import yaml
import numpy as np
import omnigibson as og
from omnigibson.macros import gm
# Make sure object states are enabled -- the "attachable" ability used below depends on them
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of attachment of different parts of a shelf
    """
    # Load the default config. Context manager guarantees the file handle is closed (the original leaked it).
    with open(f"{og.example_config_path}/default_cfg.yaml", "r") as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)

    # Add objects that we want to create
    obj_cfgs = []

    # A light so the scene is visible
    obj_cfgs.append(dict(
        type="LightObject",
        name="light",
        light_type="Sphere",
        radius=0.01,
        intensity=5000,
        position=[0, 0, 1.0],
    ))

    # Each subsequent part spawns slightly higher (delta_z per part) so parts don't start interpenetrating
    base_z = 0.2
    delta_z = 0.01
    idx = 0

    # Back panel is fixed to the world; the other attachable parts will attach to it
    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_back_panel",
        category="shelf_back_panel",
        model="gjsnrt",
        position=[0, 0, 0.01],
        fixed_base=True,
        abilities={"attachable": {}},
    ))
    idx += 1

    # Plain strings: these names are fixed, so no f-string is needed
    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_side_left",
        category="shelf_side",
        model="bxfkjj",
        position=[-0.4, 0, base_z + delta_z * idx],
        abilities={"attachable": {}},
    ))
    idx += 1

    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_side_right",
        category="shelf_side",
        model="yujrmw",
        position=[0.4, 0, base_z + delta_z * idx],
        abilities={"attachable": {}},
    ))
    idx += 1

    # Six shelf boards at pre-computed y offsets
    ys = [-0.93, -0.61, -0.29, 0.03, 0.35, 0.68]
    for i in range(6):
        obj_cfgs.append(dict(
            type="DatasetObject",
            name=f"shelf_shelf_{i}",
            category="shelf_shelf",
            model="ymtnqa",
            position=[0, ys[i], base_z + delta_z * idx],
            abilities={"attachable": {}},
        ))
        idx += 1

    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_top_0",
        category="shelf_top",
        model="pfiole",
        position=[0, 1.0, base_z + delta_z * idx],
        abilities={"attachable": {}},
    ))
    idx += 1

    obj_cfgs.append(dict(
        type="DatasetObject",
        name="shelf_baseboard",
        category="shelf_baseboard",
        model="hlhneo",
        position=[0, -0.97884506, base_z + delta_z * idx],
        abilities={"attachable": {}},
    ))
    idx += 1

    cfg["objects"] = obj_cfgs

    env = og.Environment(configs=cfg)

    # Set viewer camera pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-1.689292, -2.11718198, 0.93332228]),
        orientation=np.array([0.57687967, -0.22995655, -0.29022759, 0.72807814]),
    )

    # Let the parts settle
    for _ in range(10):
        env.step([])

    # Nudge the baseboard towards the back panel so it can auto-attach
    shelf_baseboard = og.sim.scene.object_registry("name", "shelf_baseboard")
    shelf_baseboard.set_position_orientation([0, -0.979, 0.26], [0, 0, 0, 1])
    shelf_baseboard.keep_still()
    shelf_baseboard.set_linear_velocity(np.array([-0.2, 0, 0]))

    shelf_side_left = og.sim.scene.object_registry("name", "shelf_side_left")
    shelf_side_left.set_position_orientation([-0.4, 0.0, 0.2], [0, 0, 0, 1])
    shelf_side_left.keep_still()

    input("\n\nShelf parts fall to their correct poses and get automatically attached to the back panel.\n"
          "You can try to drag (Shift + Left-CLICK + Drag) parts of the shelf to break it apart (you may need to zoom out and drag with a larger force).\n"
          "Press [ENTER] to continue.\n")

    # Let the user interact with the assembled shelf for a while
    for _ in range(5000):
        og.sim.step()

    og.shutdown()


if __name__ == "__main__":
    main()
| 3,567 | Python | 27.31746 | 155 | 0.563218 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/heat_source_or_sink_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
# Make sure object states are enabled -- HeatSourceOrSink / ToggledOn used below depend on them
gm.ENABLE_OBJECT_STATES = True
def main():
    """
    Demo of a heat source: loads a stove, toggles it on and off, moves it around,
    and checks the HeatSourceOrSink state after each change.
    """
    # Create the scene config to load -- empty scene with a stove object added
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [
            {
                "type": "DatasetObject",
                "name": "stove",
                "category": "stove",
                "model": "qbjiva",
                "bounding_box": [1.611, 0.769, 1.147],
                "abilities": {
                    # The heat source only emits heat while the stove is toggled on
                    "heatSource": {"requires_toggled_on": True},
                    "toggleable": {},
                },
                "position": [0, 0, 0.61],
            }
        ],
    }

    # Create the environment
    env = og.Environment(configs=cfg)

    # Get reference to stove object
    stove = env.scene.object_registry("name", "stove")

    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-0.0792399, -1.30104, 1.51981]),
        orientation=np.array([0.54897692, 0.00110359, 0.00168013, 0.83583509]),
    )

    # Make sure necessary object states are included with the stove
    assert object_states.HeatSourceOrSink in stove.states
    assert object_states.ToggledOn in stove.states

    # Take a few steps so that visibility propagates
    for _ in range(5):
        env.step(np.array([]))

    # Heat source is off.
    print("Heat source is OFF.")
    heat_source_state = stove.states[object_states.HeatSourceOrSink].get_value()
    assert not heat_source_state

    # Toggle on stove, notify user
    input("Heat source will now turn ON: Press ENTER to continue.")
    stove.states[object_states.ToggledOn].set_value(True)
    assert stove.states[object_states.ToggledOn].get_value()

    # Need to take a step to update the state.
    env.step(np.array([]))

    # Heat source is on
    heat_source_state = stove.states[object_states.HeatSourceOrSink].get_value()
    assert heat_source_state
    for _ in range(500):
        env.step(np.array([]))

    # Toggle off stove, notify user
    input("Heat source will now turn OFF: Press ENTER to continue.")
    stove.states[object_states.ToggledOn].set_value(False)
    assert not stove.states[object_states.ToggledOn].get_value()
    for _ in range(200):
        env.step(np.array([]))

    # Move stove, notify user (unused loop indices renamed to "_" per convention)
    input("Heat source is now moving: Press ENTER to continue.")
    stove.set_position(np.array([0, 1.0, 0.61]))
    for _ in range(100):
        env.step(np.array([]))

    # Toggle on stove again, notify user
    input("Heat source will now turn ON: Press ENTER to continue.")
    stove.states[object_states.ToggledOn].set_value(True)
    assert stove.states[object_states.ToggledOn].get_value()
    for _ in range(500):
        env.step(np.array([]))

    # Shutdown environment at end
    env.close()


if __name__ == "__main__":
    main()
| 3,025 | Python | 29.877551 | 80 | 0.611901 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/onfire_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
# Make sure object states are enabled -- the "flammable" ability and Temperature state used below depend on them
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of on fire state.
    Loads a stove (toggled on), and two apples.
    The first apple will be ignited by the stove first, then the second apple will be ignited by the first apple.
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)

    # Declare all objects up front: a light, a stove, and a pair of flammable apples
    light_cfg = {
        "type": "LightObject",
        "light_type": "Sphere",
        "name": "light",
        "radius": 0.01,
        "intensity": 1e8,
        "position": [-2.0, -2.0, 1.0],
    }
    stove_cfg = {
        "type": "DatasetObject",
        "name": "stove",
        "category": "stove",
        "model": "yhjzwg",
        "bounding_box": [1.185, 0.978, 1.387],
        "position": [0, 0, 0.69],
    }
    apple_cfgs = [
        {
            "type": "DatasetObject",
            "name": f"apple{idx}",
            "category": "apple",
            "model": "agveuv",
            "bounding_box": [0.065, 0.065, 0.077],
            "position": [0, idx * 0.07, 2.0],
            "abilities": {"flammable": {"ignition_temperature": 100, "distance_threshold": 0.5}},
        }
        for idx in range(2)
    ]

    # Empty scene populated with exactly the objects declared above
    cfg = {
        "scene": {"type": "Scene"},
        "objects": [light_cfg, stove_cfg] + apple_cfgs,
    }

    env = og.Environment(configs=cfg)

    # Look up the objects we will manipulate
    stove = env.scene.object_registry("name", "stove")
    apples = list(env.scene.object_registry("category", "apple"))

    # Point the viewer camera at the stove area
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-0.42246569, -0.34745704, 1.56810353]),
        orientation=np.array([0.50083786, -0.10407796, -0.17482619, 0.84128772]),
    )

    # Give everything a moment to settle
    for _ in range(10):
        env.step(np.array([]))

    # Fire up the stove
    stove.states[object_states.ToggledOn].set_value(True)

    # Park the first apple right on the heat source and the second one just out of its reach
    heat_link_pos = stove.states[object_states.HeatSourceOrSink].link.get_position()
    apples[0].set_position(heat_link_pos + np.array([0.11, 0, 0.1]))
    apples[1].set_position(heat_link_pos + np.array([0.32, 0, 0.1]))

    # Step until the iteration budget runs out (-1 == run forever), printing temperatures as we go
    max_steps = 1000 if short_exec else -1
    steps = 0
    while steps != max_steps:
        env.step(np.array([]))
        temps = [f"{apple.states[object_states.Temperature].get_value():>20.2f}" for apple in apples]
        print(f"{'Apple temperature:':<20}", *temps, end="\r")
        steps += 1

    # Always close env at the end
    env.close()


if __name__ == "__main__":
    main()
| 3,147 | Python | 29.26923 | 119 | 0.599619 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/temperature_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
# Make sure object states are enabled -- ToggledOn / Temperature / Inside / OnTop used below depend on them
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of temperature change
    Loads a stove, a microwave and an oven, all toggled on, and five frozen apples
    The user can move the apples to see them change from frozen, to normal temperature, to cooked and burnt
    This demo also shows how to load objects ToggledOn and how to set the initial temperature of an object
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)

    # Declare every object in the scene up front: light, appliances, tray, fridge, and five apples
    obj_configs = [
        {
            "type": "LightObject",
            "light_type": "Sphere",
            "name": "light",
            "radius": 0.01,
            "intensity": 1e8,
            "position": [-2.0, -2.0, 1.0],
        },
        {
            "type": "DatasetObject",
            "name": "stove",
            "category": "stove",
            "model": "yhjzwg",
            "bounding_box": [1.185, 0.978, 1.387],
            "position": [0, 0, 0.69],
        },
        {
            "type": "DatasetObject",
            "name": "microwave",
            "category": "microwave",
            "model": "hjjxmi",
            "bounding_box": [0.384, 0.256, 0.196],
            "position": [2.5, 0, 0.10],
        },
        {
            "type": "DatasetObject",
            "name": "oven",
            "category": "oven",
            "model": "wuinhm",
            "bounding_box": [1.075, 0.926, 1.552],
            "position": [-1.25, 0, 0.88],
        },
        {
            "type": "DatasetObject",
            "name": "tray",
            "category": "tray",
            "model": "xzcnjq",
            "bounding_box": [0.319, 0.478, 0.046],
            "position": [-0.25, -0.12, 1.26],
        },
        {
            "type": "DatasetObject",
            "name": "fridge",
            "category": "fridge",
            "model": "hivvdf",
            "bounding_box": [1.065, 1.149, 1.528],
            # The fridge acts as a cold source for anything placed inside it
            "abilities": {
                "coldSource": {
                    "temperature": -100.0,
                    "requires_inside": True,
                }
            },
            "position": [1.25, 0, 0.81],
        },
    ]
    # Five identical apples dropped from above
    obj_configs.extend(
        {
            "type": "DatasetObject",
            "name": f"apple{idx}",
            "category": "apple",
            "model": "agveuv",
            "bounding_box": [0.065, 0.065, 0.077],
            "position": [0, idx * 0.1, 5.0],
        }
        for idx in range(5)
    )

    # Empty scene populated with exactly the objects declared above
    cfg = {
        "scene": {"type": "Scene"},
        "objects": obj_configs,
    }

    env = og.Environment(configs=cfg)

    # Look up the objects we will manipulate
    registry = env.scene.object_registry
    stove = registry("name", "stove")
    microwave = registry("name", "microwave")
    oven = registry("name", "oven")
    tray = registry("name", "tray")
    fridge = registry("name", "fridge")
    apples = list(registry("category", "apple"))

    # Point the viewer camera at the scene
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.46938863, -3.97887141, 1.64106008]),
        orientation=np.array([0.63311689, 0.00127259, 0.00155577, 0.77405359]),
    )

    # Give everything a moment to settle
    for _ in range(25):
        env.step(np.array([]))

    # Switch on every appliance
    for appliance in (stove, microwave, oven):
        appliance.states[object_states.ToggledOn].set_value(True)

    # Chill all apples to -50 degrees Celsius, then distribute them around the scene
    for apple in apples:
        apple.states[object_states.Temperature].set_value(-50)
    apples[0].states[object_states.Inside].set_value(oven, True)
    apples[1].set_position(stove.states[object_states.HeatSourceOrSink].link.get_position() + np.array([0, 0, 0.1]))
    apples[2].states[object_states.OnTop].set_value(tray, True)
    apples[3].states[object_states.Inside].set_value(fridge, True)
    apples[4].states[object_states.Inside].set_value(microwave, True)

    # Step until the iteration budget runs out (-1 == run forever), printing temperatures as we go
    max_steps = 1000 if short_exec else -1
    steps = 0
    locations = [f'{loc:>20}' for loc in ["Inside oven", "On stove", "On tray", "Inside fridge", "Inside microwave"]]
    print()
    print(f"{'Apple location:':<20}", *locations)
    while steps != max_steps:
        env.step(np.array([]))
        temps = [f"{apple.states[object_states.Temperature].get_value():>20.2f}" for apple in apples]
        print(f"{'Apple temperature:':<20}", *temps, end="\r")
        steps += 1

    # Always close env at the end
    env.close()


if __name__ == "__main__":
    main()
| 4,976 | Python | 29.722222 | 117 | 0.590836 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/sample_kinematics_demo.py | import os
import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
from omnigibson.objects import DatasetObject
# Make sure object states are enabled -- the OnTop / Inside / Open sampling below depends on them
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo to use the raycasting-based sampler to load objects onTop and/or inside another
    Loads a cabinet, a microwave open on top of it, and two plates with apples on top, one inside and one on top of the cabinet
    Then loads a shelf and cracker boxes inside of it
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)

    # Create the scene config to load -- empty scene
    cfg = {
        "scene": {
            "type": "Scene",
        },
    }

    # Define objects we want to sample at runtime
    microwave_cfg = dict(
        type="DatasetObject",
        name="microwave",
        category="microwave",
        model="hjjxmi",
        bounding_box=[0.768, 0.512, 0.392],
    )
    cabinet_cfg = dict(
        type="DatasetObject",
        name="cabinet",
        category="bottom_cabinet",
        model="bamfsz",
        bounding_box=[1.075, 1.131, 1.355],
    )
    plate_cfgs = [dict(
        type="DatasetObject",
        name=f"plate{i}",
        category="plate",
        model="iawoof",
        bounding_box=np.array([0.20, 0.20, 0.05]),
    ) for i in range(2)]
    apple_cfgs = [dict(
        type="DatasetObject",
        name=f"apple{i}",
        category="apple",
        model="agveuv",
        bounding_box=[0.065, 0.065, 0.077],
    ) for i in range(4)]
    # Plain string: the shelf name is fixed, so no f-string is needed
    shelf_cfg = dict(
        type="DatasetObject",
        name="shelf",
        category="shelf",
        model="pkgbcp",
        bounding_box=np.array([1.0, 0.4, 2.0]),
    )
    box_cfgs = [dict(
        type="DatasetObject",
        name=f"box{i}",
        category="box_of_crackers",
        model="cmdigf",
        bounding_box=np.array([0.2, 0.05, 0.3]),
    ) for i in range(5)]

    # Compose objects cfg
    objects_cfg = [
        microwave_cfg,
        cabinet_cfg,
        *plate_cfgs,
        *apple_cfgs,
        shelf_cfg,
        *box_cfgs,
    ]

    # Update their spawn positions so they don't collide immediately
    for i, obj_cfg in enumerate(objects_cfg):
        obj_cfg["position"] = [100 + i, 100 + i, 100 + i]
    cfg["objects"] = objects_cfg

    # Create the environment
    env = og.Environment(configs=cfg)
    # Empty-action step; np.array([]) for consistency with the rest of this file
    env.step(np.array([]))

    # Sample microwave and boxes
    sample_boxes_on_shelf(env)
    sample_microwave_plates_apples(env)

    # Step until the iteration budget runs out (-1 == run forever)
    max_steps = 100 if short_exec else -1
    step = 0
    while step != max_steps:
        env.step(np.array([]))
        step += 1

    # Always close environment at the end
    env.close()
def sample_microwave_plates_apples(env):
    """
    Place the cabinet on the floor, sample the microwave OnTop of it (opened),
    sample one plate Inside and one OnTop of the microwave, and split the apples
    evenly OnTop of the plates.

    Args:
        env: active OmniGibson environment whose scene already contains the
            "microwave", "cabinet", plate, and apple objects
    """
    microwave = env.scene.object_registry("name", "microwave")
    cabinet = env.scene.object_registry("name", "cabinet")
    plates = list(env.scene.object_registry("category", "plate"))
    apples = list(env.scene.object_registry("category", "apple"))

    # Place the cabinet at a pre-determined location on the floor
    og.log.info("Placing cabinet on the floor...")
    cabinet.set_orientation([0, 0, 0, 1.0])
    env.step(np.array([]))
    # Offset compensates for the cabinet origin not coinciding with its AABB center
    offset = cabinet.get_position()[2] - cabinet.aabb_center[2]
    cabinet.set_position(np.array([1.0, 0, cabinet.aabb_extent[2] / 2]) + offset)
    env.step(np.array([]))

    # Set microwave on top of the cabinet, open it, and step 100 times
    og.log.info("Placing microwave OnTop of the cabinet...")
    assert microwave.states[object_states.OnTop].set_value(cabinet, True)
    assert microwave.states[object_states.Open].set_value(True)
    og.log.info("Microwave placed.")
    for _ in range(50):
        env.step(np.array([]))

    og.log.info("Placing plates")
    # Floor division: distribute apples evenly across plates without float round-trip
    n_apples_per_plate = len(apples) // len(plates)
    for i, plate in enumerate(plates):
        # Put the 1st plate in the microwave
        if i == 0:
            og.log.info(f"Placing plate {i} Inside the microwave...")
            assert plate.states[object_states.Inside].set_value(microwave, True)
        else:
            og.log.info(f"Placing plate {i} OnTop the microwave...")
            assert plate.states[object_states.OnTop].set_value(microwave, True)
        og.log.info(f"Plate {i} placed.")
        for _ in range(50):
            env.step(np.array([]))

        og.log.info(f"Placing {n_apples_per_plate} apples OnTop of the plate...")
        for j in range(n_apples_per_plate):
            apple_idx = i * n_apples_per_plate + j
            apple = apples[apple_idx]
            assert apple.states[object_states.OnTop].set_value(plate, True)
            og.log.info(f"Apple {apple_idx} placed.")
            for _ in range(50):
                env.step(np.array([]))
def sample_boxes_on_shelf(env):
    """
    Place the shelf at a fixed spot on the floor, then sample every cracker box Inside it.
    """
    shelf = env.scene.object_registry("name", "shelf")
    boxes = list(env.scene.object_registry("category", "box_of_crackers"))

    # Drop the shelf at a pre-determined spot on the floor
    og.log.info("Placing shelf on the floor...")
    shelf.set_orientation([0, 0, 0, 1.0])
    env.step(np.array([]))
    z_offset = shelf.get_position()[2] - shelf.aabb_center[2]
    shelf.set_position(np.array([-1.0, 0, shelf.aabb_extent[2] / 2]) + z_offset)
    env.step(np.array([]))  # One step is needed for the object to be fully initialized
    og.log.info("Shelf placed.")
    for _ in range(50):
        env.step(np.array([]))

    # Sample each box inside the shelf, letting physics settle in between
    og.log.info("Placing boxes...")
    for i, crate in enumerate(boxes):
        crate.states[object_states.Inside].set_value(shelf, True)
        og.log.info(f"Box {i} placed.")
        for _ in range(50):
            env.step(np.array([]))


if __name__ == "__main__":
    main()
| 5,940 | Python | 30.94086 | 127 | 0.584343 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/object_state_texture_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm, macros
from omnigibson.systems import get_system
from omnigibson.utils.constants import ParticleModifyMethod
# Make sure object states are enabled, we're using GPU dynamics, and HQ rendering is enabled
# (per the NOTE in the cabinet's particleRemover config below, the water-saturation texture
# change only shows up when both USE_GPU_DYNAMICS and ENABLE_HQ_RENDERING are on)
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_HQ_RENDERING = True
def main():
    """
    Demo of object-state-driven texture changes: freezes, cooks, burns, resets, and then
    saturates/unsaturates a cabinet with water, printing the object's states and textures
    after each transition.
    """
    # Create the scene config to load -- empty scene plus a cabinet
    cfg = {
        "scene": {
            "type": "Scene",
            "floor_plane_visible": True,
        },
        "objects": [
            {
                "type": "DatasetObject",
                "name": "cabinet",
                "category": "bottom_cabinet",
                "model": "zuwvdo",
                "bounding_box": [1.595, 0.537, 1.14],
                "abilities": {
                    "freezable": {},
                    "cookable": {},
                    "burnable": {},
                    "saturable": {},
                    "particleRemover": {
                        "method": ParticleModifyMethod.ADJACENCY,
                        "conditions": {
                            # For a specific particle system, this specifies what conditions are required in order for the
                            # particle applier / remover to apply / remover particles associated with that system
                            # The list should contain functions with signature condition() --> bool,
                            # where True means the condition is satisfied
                            # In this case, we only allow our cabinet to absorb water, with no conditions needed.
                            # This is needed for the Saturated ("saturable") state so that we can modify the texture
                            # according to the water.
                            # NOTE: This will only change color if gm.ENABLE_HQ_RENDERING and gm.USE_GPU_DYNAMICS is
                            # enabled!
                            "water": [],
                        },
                    },
                },
                "position": [0, 0, 0.59],
            },
        ],
    }

    # Create the environment
    env = og.Environment(configs=cfg)

    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 1.7789 , -1.68822, 1.13551]),
        orientation=np.array([0.57065614, 0.20331904, 0.267029  , 0.74947212]),
    )

    # Grab reference to object of interest
    obj = env.scene.object_registry("name", "cabinet")

    # Make sure all the appropriate states are in the object
    assert object_states.Frozen in obj.states
    assert object_states.Cooked in obj.states
    assert object_states.Burnt in obj.states
    assert object_states.Saturated in obj.states

    def report_states():
        """Step a few frames so state changes propagate, then print the object's states and textures."""
        # Make sure states are propagated before printing (loop index itself is unused)
        for _ in range(5):
            env.step(np.array([]))
        print("=" * 20)
        print("temperature:", obj.states[object_states.Temperature].get_value())
        print("obj is frozen:", obj.states[object_states.Frozen].get_value())
        print("obj is cooked:", obj.states[object_states.Cooked].get_value())
        print("obj is burnt:", obj.states[object_states.Burnt].get_value())
        print("obj is soaked:", obj.states[object_states.Saturated].get_value(get_system("water")))
        print("obj textures:", obj.get_textures())

    # Report default states
    print("==== Initial state ====")
    report_states()

    # Notify user that we're about to freeze the object, and then freeze the object
    input("\nObject will be frozen. Press ENTER to continue.")
    obj.states[object_states.Temperature].set_value(-50)
    report_states()

    # Notify user that we're about to cook the object, and then cook the object
    input("\nObject will be cooked. Press ENTER to continue.")
    obj.states[object_states.Temperature].set_value(100)
    report_states()

    # Notify user that we're about to burn the object, and then burn the object
    input("\nObject will be burned. Press ENTER to continue.")
    obj.states[object_states.Temperature].set_value(250)
    report_states()

    # Notify user that we're about to reset the object to its default state, and then reset state
    input("\nObject will be reset to default state. Press ENTER to continue.")
    obj.states[object_states.Temperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE)
    # MaxTemperature is reset as well -- presumably Cooked/Burnt track the historical maximum; TODO confirm
    obj.states[object_states.MaxTemperature].set_value(macros.object_states.temperature.DEFAULT_TEMPERATURE)
    report_states()

    # Notify user that we're about to soak the object, and then soak the object
    input("\nObject will be saturated with water. Press ENTER to continue.")
    obj.states[object_states.Saturated].set_value(get_system("water"), True)
    report_states()

    # Notify user that we're about to unsoak the object, and then unsoak the object
    input("\nObject will be unsaturated with water. Press ENTER to continue.")
    obj.states[object_states.Saturated].set_value(get_system("water"), False)
    report_states()

    # Close environment at the end
    input("Demo completed. Press ENTER to shutdown environment.")
    env.close()


if __name__ == "__main__":
    main()
StanfordVL/OmniGibson/omnigibson/examples/object_states/particle_applier_remover_demo.py | import numpy as np
import omnigibson as og
from omnigibson.object_states import Covered
from omnigibson.objects import DatasetObject
from omnigibson.macros import gm, macros
from omnigibson.systems import get_system
from omnigibson.utils.usd_utils import create_joint
from omnigibson.utils.ui_utils import choose_from_options
from omnigibson.utils.constants import ParticleModifyMethod
# Set macros for this example
# (tuning knobs declared in omnigibson.macros; the values here override the library defaults for this demo)
macros.object_states.particle_modifier.VISUAL_PARTICLES_REMOVAL_LIMIT = 1000
macros.object_states.particle_modifier.PHYSICAL_PARTICLES_REMOVAL_LIMIT = 8000
macros.object_states.particle_modifier.MAX_VISUAL_PARTICLES_APPLIED_PER_STEP = 4
macros.object_states.particle_modifier.MAX_PHYSICAL_PARTICLES_APPLIED_PER_STEP = 40
macros.object_states.covered.MAX_VISUAL_PARTICLES = 300

# Make sure object states and GPU dynamics are enabled (GPU dynamics needed for fluids)
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_HQ_RENDERING = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of ParticleApplier and ParticleRemover object states, which enable objects to either apply arbitrary
    particles and remove arbitrary particles from the simulator, respectively.
    Loads an empty scene with a table, and starts clean to allow particles to be applied or pre-covers the table
    with particles to be removed. The ParticleApplier / ParticleRemover state is applied to an imported cloth object
    and allowed to interact with the table, applying / removing particles from the table.
    NOTE: The key difference between ParticleApplier/Removers and ParticleSource/Sinks is that Applier/Removers
    requires contact (if using ParticleProjectionMethod.ADJACENCY) or overlap
    (if using ParticleProjectionMethod.PROJECTION) in order to spawn / remove particles, and generally only spawn
    particles at the contact points. ParticleSource/Sinks are special cases of ParticleApplier/Removers that
    always use ParticleProjectionMethod.PROJECTION and always spawn / remove particles within their projection volume,
    irregardless of overlap with other objects!
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)

    # Choose what configuration to load
    modifier_type = choose_from_options(
        options={
            "particleApplier": "Demo object's ability to apply particles in the simulator",
            "particleRemover": "Demo object's ability to remove particles from the simulator",
        },
        name="particle modifier type",
        random_selection=random_selection,
    )

    # Metalink prim name expected for each modifier type (only needed for the Projection method below)
    modification_metalink = {
        "particleApplier": "particleapplier_link",
        "particleRemover": "particleremover_link",
    }

    particle_types = ["stain", "water"]
    particle_type = choose_from_options(
        options={name: f"{name} particles will be applied or removed from the simulator" for name in particle_types},
        name="particle type",
        random_selection=random_selection,
    )

    modification_method = {
        "Adjacency": ParticleModifyMethod.ADJACENCY,
        "Projection": ParticleModifyMethod.PROJECTION,
    }
    projection_mesh_params = {
        "Adjacency": None,
        "Projection": {
            # Either Cone or Cylinder; shape of the projection where particles can be applied / removed
            "type": "Cone",
            # Size of the cone
            "extents": np.array([0.1875, 0.1875, 0.375]),
        },
    }
    method_type = choose_from_options(
        options={
            "Adjacency": "Close proximity to the object will be used to determine whether particles can be applied / removed",
            "Projection": "A Cone or Cylinder shape protruding from the object will be used to determine whether particles can be applied / removed",
        },
        name="modifier method type",
        random_selection=random_selection,
    )

    # Create the ability kwargs to pass to the object state
    abilities = {
        modifier_type: {
            "method": modification_method[method_type],
            "conditions": {
                # For a specific particle system, this specifies what conditions are required in order for the
                # particle applier / remover to apply / remover particles associated with that system
                # The list should contain functions with signature condition() --> bool,
                # where True means the condition is satisified
                particle_type: [],
            },
            "projection_mesh_params": projection_mesh_params[method_type],
        }
    }

    table_cfg = dict(
        type="DatasetObject",
        name="table",
        category="breakfast_table",
        model="kwmfdg",
        bounding_box=[3.402, 1.745, 1.175],
        position=[0, 0, 0.98],
    )

    # Create the scene config to load -- empty scene with a light and table
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [table_cfg],
    }

    # Sanity check inputs: Remover + Adjacency + Fluid will not work because we are using a visual_only
    # object, so contacts will not be triggered with this object

    # Load the environment, then immediately stop the simulator since we need to add in the modifier object
    env = og.Environment(configs=cfg)
    og.sim.stop()

    # Grab references to table
    table = env.scene.object_registry("name", "table")

    # Set the viewer camera appropriately
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-1.61340969, -1.79803028, 2.53167412]),
        orientation=np.array([ 0.46291845, -0.12381886, -0.22679218, 0.84790371]),
    )

    # If we're using a projection volume, we manually add in the required metalink required in order to use the volume
    modifier = DatasetObject(
        name="modifier",
        category="dishtowel",
        model="dtfspn",
        bounding_box=[0.34245, 0.46798, 0.07],
        visual_only=method_type == "Projection",  # Non-fluid adjacency requires the object to have collision geoms active
        abilities=abilities,
    )
    modifier_root_link_path = f"{modifier.prim_path}/base_link"
    # NOTE(review): manual load sequence -- _load() / _post_load() are presumably invoked internally by
    # og.sim.import_object() in the normal path; they are called explicitly here so the metalink prim and
    # fixed joint can be injected between the two phases. Order-sensitive; do not reorder.
    modifier._prim = modifier._load()
    if method_type == "Projection":
        metalink_path = f"{modifier.prim_path}/{modification_metalink[modifier_type]}"
        og.sim.stage.DefinePrim(metalink_path, "Xform")
        # Rigidly attach the metalink to the modifier's root link
        create_joint(
            prim_path=f"{modifier_root_link_path}/{modification_metalink[modifier_type]}_joint",
            body0=modifier_root_link_path,
            body1=metalink_path,
            joint_type="FixedJoint",
            enabled=True,
        )
    modifier._post_load()
    modifier._loaded = True
    og.sim.import_object(modifier)
    # Spawn well above the table; it is repositioned precisely after things settle
    modifier.set_position(np.array([0, 0, 5.0]))

    # Play the simulator and take some environment steps to let the objects settle
    og.sim.play()
    for _ in range(25):
        env.step(np.array([]))

    # If we're removing particles, set the table's covered state to be True
    if modifier_type == "particleRemover":
        table.states[Covered].set_value(get_system(particle_type), True)

        # Take a few steps to let particles settle
        for _ in range(25):
            env.step(np.array([]))

    # Enable camera teleoperation for convenience
    og.sim.enable_viewer_camera_teleoperation()

    # Set the modifier object to be in position to modify particles
    if method_type == "Projection":
        # Higher z to showcase projection volume at work
        z = 1.85
    elif particle_type == "stain":
        # Lower z needed to allow for adjacency bounding box to overlap properly
        z = 1.175
    else:
        # Higher z needed for actual physical interaction to accommodate non-negligible particle radius
        z = 1.22
    modifier.keep_still()
    modifier.set_position_orientation(
        position=np.array([0, 0.3, z]),
        orientation=np.array([0, 0, 0, 1.0]),
    )

    # Move object in square around table
    # Each entry is [number of steps, per-step position delta]
    deltas = [
        [130, np.array([-0.01, 0, 0])],
        [60, np.array([0, -0.01, 0])],
        [130, np.array([0.01, 0, 0])],
        [60, np.array([0, 0.01, 0])],
    ]
    for t, delta in deltas:
        for i in range(t):
            modifier.set_position(modifier.get_position() + delta)
            env.step(np.array([]))

    # Always shut down environment at the end
    env.close()


if __name__ == "__main__":
    main()
| 8,488 | Python | 38.85446 | 149 | 0.661051 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/folded_unfolded_state_demo.py | from omnigibson.utils.constants import PrimType
from omnigibson.object_states import Folded, Unfolded
from omnigibson.macros import gm
import numpy as np
import omnigibson as og
# Make sure object states and GPU dynamics are enabled (GPU dynamics needed for cloth)
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of cloth objects that can potentially be folded.

    Loads three cloth objects (a carpet, a dishtowel, and a t-shirt) into an empty scene,
    then scripts folding motions by directly teleporting subsets of each cloth's particles
    along start -> mid -> end waypoints, while continuously printing each object's
    Folded / Unfolded object-state values.

    Args:
        random_selection (bool): Accepted for API consistency with other examples; unused here
        headless (bool): Accepted for API consistency with other examples; unused here
        short_exec (bool): If True, skip the scripted folding + infinite viewing loop
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene + custom cloth object
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [
            {
                "type": "DatasetObject",
                "name": "carpet",
                "category": "carpet",
                "model": "ctclvd",
                "bounding_box": [0.897, 0.568, 0.012],
                "prim_type": PrimType.CLOTH,
                "abilities": {"cloth": {}},
                "position": [0, 0, 0.5],
            },
            {
                "type": "DatasetObject",
                "name": "dishtowel",
                "category": "dishtowel",
                "model": "dtfspn",
                "bounding_box": [0.852, 1.1165, 0.174],
                "prim_type": PrimType.CLOTH,
                "abilities": {"cloth": {}},
                "position": [1, 1, 0.5],
            },
            {
                "type": "DatasetObject",
                "name": "shirt",
                "category": "t_shirt",
                "model": "kvidcx",
                "bounding_box": [0.472, 1.243, 1.158],
                "prim_type": PrimType.CLOTH,
                "abilities": {"cloth": {}},
                "position": [-1, 1, 0.5],
                "orientation": [0.7071, 0., 0.7071, 0.],
            },
        ],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Grab object references
    carpet = env.scene.object_registry("name", "carpet")
    dishtowel = env.scene.object_registry("name", "dishtowel")
    shirt = env.scene.object_registry("name", "shirt")
    objs = [carpet, dishtowel, shirt]
    # Set viewer camera
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([0.46382895, -2.66703958, 1.22616824]),
        orientation=np.array([0.58779174, -0.00231237, -0.00318273, 0.80900271]),
    )
    def print_state():
        # Report the Folded / Unfolded states of all three cloths on one
        # carriage-return-refreshed status line
        folded = carpet.states[Folded].get_value()
        unfolded = carpet.states[Unfolded].get_value()
        info = "carpet: [folded] %d [unfolded] %d" % (folded, unfolded)
        folded = dishtowel.states[Folded].get_value()
        unfolded = dishtowel.states[Unfolded].get_value()
        info += " || dishtowel: [folded] %d [unfolded] %d" % (folded, unfolded)
        folded = shirt.states[Folded].get_value()
        unfolded = shirt.states[Unfolded].get_value()
        info += " || tshirt: [folded] %d [unfolded] %d" % (folded, unfolded)
        # Pad to a fixed width so shorter lines fully overwrite longer previous ones
        print(f"{info}{' ' * (110 - len(info))}", end="\r")
    # Let the cloths settle under physics before manipulating them
    for _ in range(100):
        og.sim.step()
    print("\nCloth state:\n")
    if not short_exec:
        # Fold all three cloths along the x-axis
        for i in range(3):
            obj = objs[i]
            pos = obj.root_link.compute_particle_positions()
            x_min, x_max = np.min(pos, axis=0)[0], np.max(pos, axis=0)[0]
            x_extent = x_max - x_min
            # Get indices for the bottom 10 percent vertices in the x-axis
            indices = np.argsort(pos, axis=0)[:, 0][:(pos.shape[0] // 10)]
            start = np.copy(pos[indices])
            # lift up a bit
            mid = np.copy(start)
            mid[:, 2] += x_extent * 0.2
            # move towards x_max
            end = np.copy(mid)
            end[:, 0] += x_extent * 0.9
            increments = 25
            # Teleport the controlled particles through interpolated start->mid->end waypoints
            for ctrl_pts in np.concatenate([np.linspace(start, mid, increments), np.linspace(mid, end, increments)]):
                obj.root_link.set_particle_positions(ctrl_pts, idxs=indices)
                og.sim.step()
                print_state()
        # Fold the t-shirt twice again along the y-axis
        for direction in [-1, 1]:
            obj = shirt
            pos = obj.root_link.compute_particle_positions()
            y_min, y_max = np.min(pos, axis=0)[1], np.max(pos, axis=0)[1]
            y_extent = y_max - y_min
            if direction == 1:
                indices = np.argsort(pos, axis=0)[:, 1][:(pos.shape[0] // 20)]
            else:
                indices = np.argsort(pos, axis=0)[:, 1][-(pos.shape[0] // 20):]
            start = np.copy(pos[indices])
            # lift up a bit
            mid = np.copy(start)
            mid[:, 2] += y_extent * 0.2
            # move towards y_max
            end = np.copy(mid)
            end[:, 1] += direction * y_extent * 0.4
            increments = 25
            for ctrl_pts in np.concatenate([np.linspace(start, mid, increments), np.linspace(mid, end, increments)]):
                obj.root_link.set_particle_positions(ctrl_pts, idxs=indices)
                env.step(np.array([]))
                print_state()
        # Keep stepping (and reporting state) indefinitely for interactive inspection
        while True:
            env.step(np.array([]))
            print_state()
    # Shut down env at the end
    print()
    env.close()
if __name__ == "__main__":
main()
| 5,379 | Python | 33.487179 | 117 | 0.5066 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/overlaid_demo.py | import numpy as np
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.utils.constants import PrimType
from omnigibson.object_states import Overlaid
# Make sure object states and GPU dynamics are enabled (GPU dynamics needed for cloth)
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of cloth objects that can be overlaid on rigid objects.
    Loads a carpet on top of a table. Initially Overlaid will be True because the carpet largely covers the table.
    If you drag the carpet off the table or even just fold it into half, Overlaid will become False.

    Args:
        random_selection (bool): Accepted for API consistency with other examples; unused here
        headless (bool): Accepted for API consistency with other examples; unused here
        short_exec (bool): If True, run only 100 steps instead of looping forever
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene + custom cloth object + custom rigid object
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [
            {
                "type": "DatasetObject",
                "name": "carpet",
                "category": "carpet",
                "model": "ctclvd",
                "bounding_box": [1.346, 0.852, 0.017],
                "prim_type": PrimType.CLOTH,
                "abilities": {"cloth": {}},
                "position": [0, 0, 1.0],
            },
            {
                "type": "DatasetObject",
                "name": "breakfast_table",
                "category": "breakfast_table",
                "model": "rjgmmy",
                "bounding_box": [1.36, 1.081, 0.84],
                "prim_type": PrimType.RIGID,
                "position": [0, 0, 0.58],
            },
        ],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Grab object references
    carpet = env.scene.object_registry("name", "carpet")
    breakfast_table = env.scene.object_registry("name", "breakfast_table")
    # Set camera pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.88215526, -1.40086216, 2.00311063]),
        orientation=np.array([0.42013364, 0.12342107, 0.25339685, 0.86258043]),
    )
    # -1 sentinel means "loop forever"; 100 steps when short_exec is requested
    max_steps = 100 if short_exec else -1
    steps = 0
    print("\nTry dragging cloth around with CTRL + Left-Click to see the Overlaid state change:\n")
    while steps != max_steps:
        print(f"Overlaid {carpet.states[Overlaid].get_value(breakfast_table)}      ", end="\r")
        env.step(np.array([]))
        # BUGFIX: the counter was never incremented, so the loop ran forever even with short_exec
        steps += 1
    # Shut down env at the end
    env.close()
if __name__ == "__main__":
main()
| 2,554 | Python | 31.341772 | 114 | 0.568912 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/particle_source_sink_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
from omnigibson.utils.constants import ParticleModifyCondition
# Make sure object states are enabled and GPU dynamics are used
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_HQ_RENDERING = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of ParticleSource and ParticleSink object states, which enable objects to either spawn arbitrary
    particles and remove arbitrary particles from the simulator, respectively.
    Loads an empty scene with a sink, which is enabled with both the ParticleSource and ParticleSink states.
    The sink's particle source is located at the faucet spout and spawns a continuous stream of water particles,
    which is then destroyed ("sunk") by the sink's particle sink located at the drain.
    NOTE: The key difference between ParticleApplier/Removers and ParticleSource/Sinks is that Applier/Removers
    requires contact (if using ParticleProjectionMethod.ADJACENCY) or overlap
    (if using ParticleProjectionMethod.PROJECTION) in order to spawn / remove particles, and generally only spawn
    particles at the contact points. ParticleSource/Sinks are special cases of ParticleApplier/Removers that
    always use ParticleProjectionMethod.PROJECTION and always spawn / remove particles within their projection volume,
    regardless of overlap with other objects!
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene
    cfg = {
        "scene": {
            "type": "Scene",
        }
    }
    # Define objects to load into the environment
    sink_cfg = dict(
        type="DatasetObject",
        name="sink",
        category="sink",
        model="egwapq",
        bounding_box=[2.427, 0.625, 1.2],
        abilities={
            "toggleable": {},
            "particleSource": {
                "conditions": {
                    "water": [(ParticleModifyCondition.TOGGLEDON, True)],  # Must be toggled on for water source to be active
                },
                "initial_speed": 0.0,  # Water merely falls out of the spout
            },
            "particleSink": {
                "conditions": {
                    "water": [],  # No conditions, always sinking nearby particles
                },
            },
        },
        position=[0.0, 0, 0.42],
    )
    cfg["objects"] = [sink_cfg]
    # Create the environment!
    env = og.Environment(configs=cfg)
    # Set camera to ideal angle for viewing objects
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.37860532, -0.65396566, 1.4067066 ]),
        orientation=np.array([0.49909498, 0.15201752, 0.24857062, 0.81609284]),
    )
    # Take a few steps to let the objects settle, and then turn on the sink
    for _ in range(10):
        env.step(np.array([]))  # Empty action since no robots are in the scene
    sink = env.scene.object_registry("name", "sink")
    # set_value returns success; the water stream only activates while ToggledOn is True
    assert sink.states[object_states.ToggledOn].set_value(True)
    # Take a step, and save the state
    env.step(np.array([]))
    initial_state = og.sim.dump_state()
    # Main simulation loop.
    max_steps = 1000
    max_iterations = -1 if not short_exec else 1
    iteration = 0
    try:
        while iteration != max_iterations:
            # Let the source spawn and the sink absorb water for max_steps steps
            steps = 0
            while steps != max_steps:
                steps += 1
                env.step(np.array([]))
            og.log.info("Max steps reached; resetting.")
            # Reset to the initial state
            og.sim.load_state(initial_state)
            iteration += 1
    finally:
        # Always shut down environment at the end
        env.close()
if __name__ == "__main__":
main()
| 3,985 | Python | 34.90991 | 126 | 0.627353 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/slicing_demo.py | import numpy as np
import omnigibson as og
from omnigibson.macros import gm
import omnigibson.utils.transform_utils as T
# Make sure object states and transition rules are enabled
gm.ENABLE_OBJECT_STATES = True
gm.ENABLE_TRANSITION_RULES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of slicing an apple into two apple slices

    Drops a table knife onto an apple resting on a table; the transition rule machinery
    replaces the apple with two apple-slice objects.

    Args:
        random_selection (bool): Accepted for API consistency with other examples; unused here
        headless (bool): If True, skip the blocking input() prompts so the demo can run unattended
        short_exec (bool): Accepted for API consistency with other examples; unused here
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene with table, knife, and apple
    table_cfg = dict(
        type="DatasetObject",
        name="table",
        category="breakfast_table",
        model="rjgmmy",
        bounding_box=[1.36, 1.081, 0.84],
        position=[0, 0, 0.58],
    )
    apple_cfg = dict(
        type="DatasetObject",
        name="apple",
        category="apple",
        model="agveuv",
        bounding_box=[0.098, 0.098, 0.115],
        position=[0.085, 0, 0.92],
    )
    knife_cfg = dict(
        type="DatasetObject",
        name="knife",
        category="table_knife",
        model="lrdmpf",
        bounding_box=[0.401, 0.044, 0.009],
        position=[0, 0, 20.0],
    )
    light0_cfg = dict(
        type="LightObject",
        name="light0",
        light_type="Sphere",
        radius=0.01,
        intensity=4000.0,
        position=[1.217, -0.848, 1.388],
    )
    light1_cfg = dict(
        type="LightObject",
        name="light1",
        light_type="Sphere",
        radius=0.01,
        intensity=4000.0,
        position=[-1.217, 0.848, 1.388],
    )
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [table_cfg, apple_cfg, knife_cfg, light0_cfg, light1_cfg]
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Grab reference to apple and knife
    apple = env.scene.object_registry("name", "apple")
    knife = env.scene.object_registry("name", "knife")
    # Update the simulator's viewer camera's pose so it points towards the table
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.544888, -0.412084, 1.11569 ]),
        orientation=np.array([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
    )
    # Let apple settle
    for _ in range(50):
        env.step(np.array([]))
    # Pose the knife directly above the apple, blade down, so gravity drops it onto the apple
    knife.keep_still()
    knife.set_position_orientation(
        position=apple.get_position() + np.array([-0.15, 0.0, 0.2]),
        orientation=T.euler2quat([-np.pi / 2, 0, 0]),
    )
    # BUGFIX: the blocking input() prompts ignored the headless flag and hung unattended runs
    if not headless:
        input("The knife will fall on the apple and slice it. Press [ENTER] to continue.")
    # Step simulation for a bit so that apple is sliced
    for _ in range(1000):
        env.step(np.array([]))
    if not headless:
        input("Apple has been sliced! Press [ENTER] to terminate the demo.")
    # Always close environment at the end
    env.close()
if __name__ == "__main__":
main()
| 2,901 | Python | 25.87037 | 103 | 0.577732 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/dicing_demo.py | import numpy as np
import omnigibson as og
from omnigibson.macros import gm
import omnigibson.utils.transform_utils as T
# Make sure object states, GPU dynamics, and transition rules are enabled
gm.ENABLE_OBJECT_STATES = True
gm.USE_GPU_DYNAMICS = True
gm.ENABLE_TRANSITION_RULES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of dicing an apple into apple dices

    Drops a table knife onto a "diceable" apple resting on a table; the transition rule
    machinery replaces the apple with a pile of diced-apple particles.

    Args:
        random_selection (bool): Accepted for API consistency with other examples; unused here
        headless (bool): If True, skip the blocking input() prompts so the demo can run unattended
        short_exec (bool): Accepted for API consistency with other examples; unused here
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create the scene config to load -- empty scene with table, knife, and apple
    table_cfg = dict(
        type="DatasetObject",
        name="table",
        category="breakfast_table",
        model="rjgmmy",
        bounding_box=[1.36, 1.08, 0.84],
        position=[0, 0, 0.58],
    )
    apple_cfg = dict(
        type="DatasetObject",
        name="apple",
        category="apple",
        model="agveuv",
        bounding_box=[0.098, 0.098, 0.115],
        position=[0.085, 0, 0.92],
        abilities={"diceable": {}}
    )
    knife_cfg = dict(
        type="DatasetObject",
        name="knife",
        category="table_knife",
        model="lrdmpf",
        bounding_box=[0.401, 0.044, 0.009],
        position=[0, 0, 20.0],
    )
    light0_cfg = dict(
        type="LightObject",
        name="light0",
        light_type="Sphere",
        radius=0.01,
        intensity=1e7,
        position=[1.217, -0.848, 1.388],
    )
    light1_cfg = dict(
        type="LightObject",
        name="light1",
        light_type="Sphere",
        radius=0.01,
        intensity=1e7,
        position=[-1.217, 0.848, 1.388],
    )
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [table_cfg, apple_cfg, knife_cfg, light0_cfg, light1_cfg]
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Grab reference to apple and knife
    apple = env.scene.object_registry("name", "apple")
    knife = env.scene.object_registry("name", "knife")
    # Update the simulator's viewer camera's pose so it points towards the table
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.544888, -0.412084, 1.11569 ]),
        orientation=np.array([0.54757518, 0.27792802, 0.35721896, 0.70378409]),
    )
    # Let apple settle
    for _ in range(50):
        env.step(np.array([]))
    # Pose the knife directly above the apple, blade down, so gravity drops it onto the apple
    knife.keep_still()
    knife.set_position_orientation(
        position=apple.get_position() + np.array([-0.15, 0.0, 0.2]),
        orientation=T.euler2quat([-np.pi / 2, 0, 0]),
    )
    # BUGFIX: the blocking input() prompts ignored the headless flag and hung unattended runs
    if not headless:
        input("The knife will fall on the apple and dice it. Press [ENTER] to continue.")
    # Step simulation for a bit so that apple is diced
    for _ in range(1000):
        env.step(np.array([]))
    if not headless:
        input("Apple has been diced! Press [ENTER] to terminate the demo.")
    # Always close environment at the end
    env.close()
if __name__ == "__main__":
main()
| 2,962 | Python | 25.936363 | 103 | 0.57765 |
StanfordVL/OmniGibson/omnigibson/examples/object_states/heated_state_demo.py | import numpy as np
import omnigibson as og
from omnigibson import object_states
from omnigibson.macros import gm
# Make sure object states are enabled
gm.ENABLE_OBJECT_STATES = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Demo of the Heated object state.

    Loads a sphere light plus three differently-scaled "heatable" bowls, raises each bowl's
    Temperature state, and visualizes the resulting steam effect while the bowls cool back
    below the heated threshold.

    Args:
        random_selection (bool): Accepted for API consistency with the other examples; unused here
        headless (bool): If True, skip the blocking input() prompts so the demo can run unattended
        short_exec (bool): Accepted for API consistency with the other examples; unused here
    """
    # NOTE: signature was `def main()` -- every sibling demo accepts these three kwargs, and a
    # shared runner passing them would have crashed; the defaulted params are backward-compatible.
    # Define object configurations for objects to load -- we want to load a light and three bowls
    obj_configs = []
    obj_configs.append(dict(
        type="LightObject",
        light_type="Sphere",
        name="light",
        radius=0.01,
        intensity=1e8,
        position=[-2.0, -2.0, 1.0],
    ))
    for i, (scale, x) in enumerate(zip([0.5, 1.0, 2.0], [-0.6, 0, 0.8])):
        obj_configs.append(dict(
            type="DatasetObject",
            name=f"bowl{i}",
            category="bowl",
            model="ajzltc",
            bounding_box=np.array([0.329, 0.293, 0.168]) * scale,
            abilities={"heatable": {}},
            position=[x, 0, 0.2],
        ))
    # Create the scene config to load -- empty scene with light object and bowls
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": obj_configs,
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([ 0.182103, -2.07295 , 0.14017 ]),
        orientation=np.array([0.77787037, 0.00267566, 0.00216149, 0.62841535]),
    )
    # Dim the skybox so we can see the bowls' steam effectively
    env.scene.skybox.intensity = 100.0
    # Grab reference to objects of relevance
    objs = list(env.scene.object_registry("category", "bowl"))
    def report_states(objs):
        # Print each object's current Temperature value and Heated state
        for obj in objs:
            print("=" * 20)
            print("object:", obj.name)
            print("temperature:", obj.states[object_states.Temperature].get_value())
            print("obj is heated:", obj.states[object_states.Heated].get_value())
    # Report default states
    print("==== Initial state ====")
    report_states(objs)
    # Notify user that we're about to heat the object (skip the blocking prompt when headless)
    if not headless:
        input("Objects will be heated, and steam will slowly rise. Press ENTER to continue.")
    # Heated.
    for obj in objs:
        obj.states[object_states.Temperature].set_value(50)
    env.step(np.array([]))
    report_states(objs)
    # Take a look at the steam effect.
    # After a while, objects will be below the Steam temperature threshold.
    print("==== Objects are now heated... ====")
    print()
    for _ in range(2000):
        env.step(np.array([]))
        # Also print temperatures (plain string -- the original f-string had no placeholders)
        temps = [f"{obj.states[object_states.Temperature].get_value():>7.2f}" for obj in objs]
        print("obj temps:", *temps, end="\r")
    print()
    # Objects are not heated anymore.
    print("==== Objects are no longer heated... ====")
    report_states(objs)
    # Close environment at the end
    if not headless:
        input("Demo completed. Press ENTER to shutdown environment.")
    env.close()
if __name__ == "__main__":
main()
| 2,977 | Python | 29.080808 | 97 | 0.595566 |
StanfordVL/OmniGibson/omnigibson/examples/objects/draw_bounding_box.py | import matplotlib.pyplot as plt
import numpy as np
import omnigibson as og
def main(random_selection=False, headless=False, short_exec=False):
    """
    Shows how to obtain the bounding box of an articulated object.
    Draws the bounding boxes around the loaded objects, a banana and a door, and writes the visualized 2D
    images to disk at the current directory named 'bbox_2d_[loose / tight]_img.png'.
    NOTE: In the GUI, bounding boxes can be natively viewed by clicking on the sensor ((*)) icon at the top,
    and then selecting the appropriate bounding box modalities, and clicking "Show". See:
    https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_replicator/visualization.html#the-visualizer
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Specify objects to load
    banana_cfg = dict(
        type="DatasetObject",
        name="banana",
        category="banana",
        model="vvyyyv",
        bounding_box=[0.643, 0.224, 0.269],
        position=[-0.906661, -0.545106, 0.136824],
        orientation=[0, 0, 0.76040583, -0.6494482],
    )
    door_cfg = dict(
        type="DatasetObject",
        name="door",
        category="door",
        model="ohagsq",
        bounding_box=[1.528, 0.064, 1.299],
        position=[-2.0, 0, 0.70000001],
        orientation=[0, 0, -0.38268343, 0.92387953],
    )
    # Create the scene config to load -- empty scene with a few objects
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [banana_cfg, door_cfg],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set camera to appropriate viewing pose
    cam = og.sim.viewer_camera
    cam.set_position_orientation(
        position=np.array([-4.62785 , -0.418575, 0.933943]),
        orientation=np.array([ 0.52196595, -0.4231939 , -0.46640436, 0.5752612 ]),
    )
    # Add bounding boxes to camera sensor
    bbox_modalities = ["bbox_3d", "bbox_2d_loose", "bbox_2d_tight"]
    for bbox_modality in bbox_modalities:
        cam.add_modality(bbox_modality)
    # Take a few steps to let objects settle
    for i in range(100):
        env.step(np.array([]))
    # Grab observations from viewer camera and write them to disk
    obs, _ = cam.get_obs()
    for bbox_modality in bbox_modalities:
        # Print out each of the modalities
        og.log.info(f"Observation modality {bbox_modality}:\n{obs[bbox_modality]}")
        # Also write the 2d bounding box images (loose and tight; 3d has no image form) to disk
        if "3d" not in bbox_modality:
            from omnigibson.utils.deprecated_utils import colorize_bboxes
            colorized_img = colorize_bboxes(bboxes_2d_data=obs[bbox_modality], bboxes_2d_rgb=obs["rgb"], num_channels=4)
            fpath = f"{bbox_modality}_img.png"
            plt.imsave(fpath, colorized_img)
            og.log.info(f"Saving modality [{bbox_modality}] image to: {fpath}")
    # Always close environment down at end
    env.close()
if __name__ == "__main__":
main()
| 3,048 | Python | 33.647727 | 120 | 0.621391 |
StanfordVL/OmniGibson/omnigibson/examples/objects/highlight_objects.py | import numpy as np
import omnigibson as og
def main(random_selection=False, headless=False, short_exec=False):
    """
    Highlights visually all object instances of windows and then removes the highlighting
    It also demonstrates how to apply an action on all instances of objects of a given category
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Scene configuration: the fully-loaded interactive Rs_int scene
    scene_cfg = {
        "type": "InteractiveTraversableScene",
        "scene_model": "Rs_int",
    }
    # Create the environment
    env = og.Environment(configs={"scene": scene_cfg})
    # Fetch every object in the scene whose category is "window"
    windows = og.sim.scene.object_registry("category", "window")
    # Step the simulation, flipping the highlight state of all windows every 50 steps.
    # A limit of -1 means "run forever"; short_exec caps the run at 1000 steps.
    step_limit = 1000 if short_exec else -1
    highlight_on = False
    step_idx = 0
    while step_idx != step_limit:
        env.step(np.array([]))
        if step_idx % 50 == 0:
            highlight_on = not highlight_on
            og.log.info(f"Toggling window highlight to: {highlight_on}")
            for window in windows:
                # Note that this property is R/W!
                window.highlighted = highlight_on
        step_idx += 1
    # Always close the environment at the end
    env.close()
if __name__ == "__main__":
main()
| 1,359 | Python | 28.565217 | 103 | 0.595291 |
StanfordVL/OmniGibson/omnigibson/examples/objects/load_object_selector.py | import numpy as np
import omnigibson as og
from omnigibson.utils.asset_utils import (
get_all_object_categories,
get_og_avg_category_specs,
get_all_object_category_models,
)
from omnigibson.utils.ui_utils import choose_from_options
def main(random_selection=False, headless=False, short_exec=False):
    """
    This demo shows how to load any scaled objects from the OG object model dataset
    The user selects an object model to load
    The objects can be loaded into an empty scene or an interactive scene (OG)
    The example also shows how to use the Environment API or directly the Simulator API, loading objects and robots
    and executing actions

    Args:
        random_selection (bool): If True, menu choices are made randomly instead of prompting the user
        headless (bool): Accepted for API consistency with other examples; unused here
        short_exec (bool): If True, run only 100 steps instead of 10000
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    scene_options = ["Scene", "InteractiveTraversableScene"]
    scene_type = choose_from_options(options=scene_options, name="scene type", random_selection=random_selection)
    # -- Choose the object to load --
    # Select a category to load
    available_obj_categories = get_all_object_categories()
    obj_category = choose_from_options(options=available_obj_categories, name="object category", random_selection=random_selection)
    # Select a model to load
    available_obj_models = get_all_object_category_models(obj_category)
    obj_model = choose_from_options(options=available_obj_models, name="object model", random_selection=random_selection)
    # (Removed: a dead `avg_category_spec = get_og_avg_category_specs()` assignment whose result was never used)
    # Create and load this object into the simulator
    obj_cfg = dict(
        type="DatasetObject",
        name="obj",
        category=obj_category,
        model=obj_model,
        position=[0, 0, 50.0],  # Spawn high up so loading cannot collide with the scene
    )
    cfg = {
        "scene": {
            "type": scene_type,
        },
        "objects": [obj_cfg],
    }
    if scene_type == "InteractiveTraversableScene":
        cfg["scene"]["scene_model"] = "Rs_int"
    # Create the environment
    env = og.Environment(configs=cfg)
    # Place the object so it rests on the floor
    obj = env.scene.object_registry("name", "obj")
    center_offset = obj.get_position() - obj.aabb_center + np.array([0, 0, obj.aabb_extent[2] / 2.0])
    obj.set_position(center_offset)
    # Step through the environment
    max_steps = 100 if short_exec else 10000
    for _ in range(max_steps):
        env.step(np.array([]))
    # Always close the environment at the end
    env.close()
if __name__ == "__main__":
main()
| 2,553 | Python | 33.986301 | 131 | 0.66275 |
StanfordVL/OmniGibson/omnigibson/examples/objects/visualize_object.py | import argparse
import numpy as np
import omnigibson as og
from omnigibson.utils.asset_utils import (
get_all_object_categories,
get_all_object_category_models,
)
from omnigibson.utils.ui_utils import choose_from_options
import omnigibson.utils.transform_utils as T
def main(random_selection=False, headless=False, short_exec=False):
    """
    Visualizes an object as specified by its USD path, @usd_path. If none is specified, will instead
    result in an object selection from OmniGibson's object dataset

    Args:
        random_selection (bool): If True, dataset menu choices are made randomly
        headless (bool): Accepted for API consistency with other examples; unused here
        short_exec (bool): If True, run only 100 steps instead of 10000
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Assuming that if random_selection=True, headless=True, short_exec=True, we are calling it from tests and we
    # do not want to parse args (it would fail because the calling function is pytest "testfile.py")
    usd_path = None
    if not (random_selection and headless and short_exec):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--usd_path",
            default=None,
            help="USD Model to load",
        )
        args = parser.parse_args()
        usd_path = args.usd_path
    # Define objects to load
    light0_cfg = dict(
        type="LightObject",
        light_type="Sphere",
        name="sphere_light0",
        radius=0.01,
        intensity=1e5,
        position=[-2.0, -2.0, 2.0],
    )
    light1_cfg = dict(
        type="LightObject",
        light_type="Sphere",
        name="sphere_light1",
        radius=0.01,
        intensity=1e5,
        position=[-2.0, 2.0, 2.0],
    )
    # Make sure we have a valid usd path
    if usd_path is None:
        # Select a category to load
        available_obj_categories = get_all_object_categories()
        obj_category = choose_from_options(options=available_obj_categories, name="object category",
                                           random_selection=random_selection)
        # Select a model to load
        available_obj_models = get_all_object_category_models(obj_category)
        obj_model = choose_from_options(options=available_obj_models, name="object model",
                                        random_selection=random_selection)
        kwargs = {
            "type": "DatasetObject",
            "category": obj_category,
            "model": obj_model,
        }
    else:
        kwargs = {
            "type": "USDObject",
            "usd_path": usd_path,
        }
    # Import the desired object
    # BUGFIX: do NOT pass usd_path again here. In the USDObject branch, kwargs already contains
    # "usd_path", so duplicating the keyword raised TypeError; in the DatasetObject branch it
    # injected a spurious usd_path=None argument.
    obj_cfg = dict(
        **kwargs,
        name="obj",
        visual_only=True,
        position=[0, 0, 10.0],
    )
    # Create the scene config to load -- empty scene
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [light0_cfg, light1_cfg, obj_cfg],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set camera to appropriate viewing pose
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-0.00913503, -1.95750906, 1.36407314]),
        orientation=np.array([0.6350064 , 0. , 0. , 0.77250687]),
    )
    # Grab the object references
    obj = env.scene.object_registry("name", "obj")
    # Standardize the scale of the object so it fits in a [1,1,1] box -- note that we have to stop the simulator
    # in order to set the scale
    extents = obj.aabb_extent
    og.sim.stop()
    obj.scale = (np.ones(3) / extents).min()
    og.sim.play()
    env.step(np.array([]))
    # Move the object so that its center is at [0, 0, 1]
    center_offset = obj.get_position() - obj.aabb_center + np.array([0, 0, 1.0])
    obj.set_position(center_offset)
    # Allow the user to easily move the camera around
    og.sim.enable_viewer_camera_teleoperation()
    # Rotate the object in place
    steps_per_rotate = 360
    steps_per_joint = steps_per_rotate // 10  # integer division: this is a whole-step count
    max_steps = 100 if short_exec else 10000
    for i in range(max_steps):
        z_angle = (2 * np.pi * (i % steps_per_rotate) / steps_per_rotate)
        quat = T.euler2quat(np.array([0, 0, z_angle]))
        pos = T.quat2mat(quat) @ center_offset
        if obj.n_dof > 0:
            # Sweep all joints back and forth across their normalized range
            frac = (i % steps_per_joint) / steps_per_joint
            j_frac = -1.0 + 2.0 * frac if (i // steps_per_joint) % 2 == 0 else 1.0 - 2.0 * frac
            obj.set_joint_positions(positions=j_frac * np.ones(obj.n_dof), normalized=True, drive=False)
            obj.keep_still()
        obj.set_position_orientation(position=pos, orientation=quat)
        env.step(np.array([]))
    # Shut down at the end
    og.shutdown()
if __name__ == "__main__":
main()
| 4,641 | Python | 31.921986 | 113 | 0.587373 |
StanfordVL/OmniGibson/omnigibson/examples/renderer_settings/renderer_settings_example.py | import numpy as np
import omnigibson as og
from omnigibson.renderer_settings.renderer_settings import RendererSettings
def main(random_selection=False, headless=False, short_exec=False):
    """
    Shows how to use RendererSettings class

    Loads a banana and a door into an empty scene, then walks through the RendererSettings
    singleton API: switching renderers, and setting / getting / resetting individual
    renderer options (including the error cases for invalid values and paths).

    Args:
        random_selection (bool): Accepted for API consistency with other examples; unused here
        headless (bool): Accepted for API consistency with other examples; unused here
        short_exec (bool): Accepted for API consistency with other examples; unused here
    """
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Specify objects to load
    banana_cfg = dict(
        type="DatasetObject",
        name="banana",
        category="banana",
        model="vvyyyv",
        scale=[3.0, 5.0, 2.0],
        position=[-0.906661, -0.545106, 0.136824],
        orientation=[0, 0, 0.76040583, -0.6494482 ],
    )
    door_cfg = dict(
        type="DatasetObject",
        name="door",
        category="door",
        model="ohagsq",
        position=[-2.0, 0, 0.70000001],
        orientation=[0, 0, -0.38268343, 0.92387953],
    )
    # Create the scene config to load -- empty scene with a few objects
    cfg = {
        "scene": {
            "type": "Scene",
        },
        "objects": [banana_cfg, door_cfg],
    }
    # Create the environment
    env = og.Environment(configs=cfg)
    # Set camera to appropriate viewing pose
    cam = og.sim.viewer_camera
    cam.set_position_orientation(
        position=np.array([-4.62785 , -0.418575, 0.933943]),
        orientation=np.array([ 0.52196595, -0.4231939 , -0.46640436, 0.5752612 ]),
    )
    def steps(n):
        # Advance the (robot-free) simulation by n environment steps
        for _ in range(n):
            env.step(np.array([]))
    # Take a few steps to let objects settle
    steps(25)
    # Create renderer settings object.
    renderer_setting = RendererSettings()
    # RendererSettings is a singleton.
    renderer_setting2 = RendererSettings()
    assert renderer_setting == renderer_setting2
    # Set current renderer.
    input("Setting renderer to Real-Time. Press [ENTER] to continue.")
    renderer_setting.set_current_renderer("Real-Time")
    assert renderer_setting.get_current_renderer() == "Real-Time"
    steps(5)
    input("Setting renderer to Interactive (Path Tracing). Press [ENTER] to continue.")
    renderer_setting.set_current_renderer("Interactive (Path Tracing)")
    assert renderer_setting.get_current_renderer() == "Interactive (Path Tracing)"
    steps(5)
    # Get all available settings.
    print(renderer_setting.settings.keys())
    input("Showcasing how to use RendererSetting APIs. Please see example script for more information. "
          "Press [ENTER] to continue.")
    # Set setting (2 lines below are equivalent).
    renderer_setting.set_setting(path="/app/renderer/skipMaterialLoading", value=True)
    renderer_setting.common_settings.materials_settings.skip_material_loading.set(True)
    # Get setting (3 lines below are equivalent).
    # NOTE: use identity (`is True`) rather than `== True` per PEP 8 singleton-comparison guidance
    assert renderer_setting.get_setting_from_path(path="/app/renderer/skipMaterialLoading") is True
    assert renderer_setting.common_settings.materials_settings.skip_material_loading.value is True
    assert renderer_setting.common_settings.materials_settings.skip_material_loading.get() is True
    # Reset setting (2 lines below are equivalent).
    renderer_setting.reset_setting(path="/app/renderer/skipMaterialLoading")
    renderer_setting.common_settings.materials_settings.skip_material_loading.reset()
    assert renderer_setting.get_setting_from_path(path="/app/renderer/skipMaterialLoading") is False
    # Set setting to an unallowed value using top-level method.
    # Examples below will use the "top-level" setting method.
    try:
        renderer_setting.set_setting(path="/app/renderer/skipMaterialLoading", value="foo")
    except AssertionError as e:
        print(e)  # All good. We got an AssertionError.
    # Set setting to a value out-of-range.
    try:
        renderer_setting.set_setting(path="/rtx/fog/fogColorIntensity", value=0.0)
    except AssertionError as e:
        print(e)  # All good. We got an AssertionError.
    # Set unallowed setting.
    try:
        renderer_setting.set_setting(path="foo", value="bar")
    except NotImplementedError as e:
        print(e)  # All good. We got a NotImplementedError.
    # Set setting but the setting group is not enabled.
    # Setting is successful but there will be a warning message printed.
    renderer_setting.set_setting(path="/rtx/fog/fogColorIntensity", value=1.0)
    # Shutdown sim
    input("Completed demo. Press [ENTER] to shutdown simulation.")
    og.shutdown()
if __name__ == "__main__":
main()
| 4,453 | Python | 34.919355 | 104 | 0.662924 |
StanfordVL/OmniGibson/omnigibson/examples/robots/grasping_mode_example.py | """
Example script demo'ing robot manipulation control with grasping.
"""
import numpy as np
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.sensors import VisionSensor
from omnigibson.utils.ui_utils import choose_from_options, KeyboardRobotController
GRASPING_MODES = dict(
sticky="Sticky Mitten - Objects are magnetized when they touch the fingers and a CLOSE command is given",
assisted="Assisted Grasping - Objects are magnetized when they touch the fingers, are within the hand, and a CLOSE command is given",
physical="Physical Grasping - No additional grasping assistance applied",
)
# Don't use GPU dynamics and Use flatcache for performance boost
gm.USE_GPU_DYNAMICS = False
gm.ENABLE_FLATCACHE = True
def main(random_selection=False, headless=False, short_exec=False):
    """
    Robot grasping mode demo with selection
    Queries the user to select a type of grasping mode

    Args:
        random_selection (bool): If True, options are picked automatically instead of prompting the user
        headless (bool): Accepted for a uniform demo signature; NOTE(review): not used in this demo — confirm intended
        short_exec (bool): If True, caps the control loop at 100 teleop/random actions instead of running forever
    """
    # NOTE: main.__doc__ is deliberately printed as part of the startup banner below
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Choose type of grasping (sticky / assisted / physical, see GRASPING_MODES)
    grasping_mode = choose_from_options(options=GRASPING_MODES, name="grasping mode", random_selection=random_selection)
    # Create environment configuration to use
    scene_cfg = dict(type="Scene")
    robot0_cfg = dict(
        type="Fetch",
        obs_modalities=["rgb"],  # we're just doing a grasping demo so we don't need all observation modalities
        action_type="continuous",
        action_normalize=True,
        grasping_mode=grasping_mode,
    )
    # Define objects to load: a fixed table, a free-standing chair, and a small graspable cube
    table_cfg = dict(
        type="DatasetObject",
        name="table",
        category="breakfast_table",
        model="lcsizg",
        bounding_box=[0.5, 0.5, 0.8],
        fixed_base=True,
        position=[0.7, -0.1, 0.6],
        orientation=[0, 0, 0.707, 0.707],
    )
    chair_cfg = dict(
        type="DatasetObject",
        name="chair",
        category="straight_chair",
        model="amgwaw",
        bounding_box=None,
        fixed_base=False,
        position=[0.45, 0.65, 0.425],
        orientation=[0, 0, -0.9990215, -0.0442276],
    )
    box_cfg = dict(
        type="PrimitiveObject",
        name="box",
        primitive_type="Cube",
        rgba=[1.0, 0, 0, 1.0],
        size=0.05,
        position=[0.53, -0.1, 0.97],  # placed on the tabletop, within the robot's reach
    )
    # Compile config
    cfg = dict(scene=scene_cfg, robots=[robot0_cfg], objects=[table_cfg, chair_cfg, box_cfg])
    # Create the environment
    env = og.Environment(configs=cfg)
    # Reset the robot: place at origin, reset joints, and zero out any residual velocities
    robot = env.robots[0]
    robot.set_position([0, 0, 0])
    robot.reset()
    robot.keep_still()
    # Make the robot's camera(s) high-res
    for sensor in robot.sensors.values():
        if isinstance(sensor, VisionSensor):
            sensor.image_height = 720
            sensor.image_width = 720
    # Update the simulator's viewer camera's pose so it points towards the robot
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([-2.39951, 2.26469, 2.66227]),
        orientation=np.array([-0.23898481, 0.48475231, 0.75464013, -0.37204802]),
    )
    # Create teleop controller
    action_generator = KeyboardRobotController(robot=robot)
    # Print out relevant keyboard info if using keyboard teleop
    action_generator.print_keyboard_teleop_info()
    # Other helpful user info
    print("Running demo with grasping mode {}.".format(grasping_mode))
    print("Press ESC to quit")
    # Loop control until user quits; max_steps == -1 means run indefinitely
    max_steps = -1 if not short_exec else 100
    step = 0
    while step != max_steps:
        action = action_generator.get_random_action() if random_selection else action_generator.get_teleop_action()
        # Each sampled action is held for 10 physics steps before re-querying the controller
        for _ in range(10):
            env.step(action)
        step += 1
    # Always shut down the environment cleanly at the end
    env.close()
if __name__ == "__main__":
    main()
| 3,902 | Python | 30.731707 | 137 | 0.639672 |
StanfordVL/OmniGibson/omnigibson/examples/robots/all_robots_visualizer.py | import numpy as np
import omnigibson as og
from omnigibson.robots import REGISTERED_ROBOTS
def main(random_selection=False, headless=False, short_exec=False):
    """
    Robot demo
    Loads all robots in an empty scene, generate random actions

    Args:
        random_selection (bool): Accepted for a uniform demo signature; not used here
        headless (bool): If True, skips viewer-camera placement and camera teleoperation
        short_exec (bool): Accepted for a uniform demo signature; not used here
    """
    # NOTE: main.__doc__ is deliberately printed as part of the startup banner below
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Create empty scene with no robots in it initially
    cfg = {
        "scene": {
            "type": "Scene",
        }
    }
    env = og.Environment(configs=cfg)
    # Iterate over all robots and demo their motion; robots are imported one at a time
    for robot_name, robot_cls in REGISTERED_ROBOTS.items():
        # Create and import robot
        robot = robot_cls(
            prim_path=f"/World/{robot_name}",
            name=robot_name,
            obs_modalities=[],  # We're just moving robots around so don't load any observation modalities
        )
        og.sim.import_object(robot)
        # At least one step is always needed while sim is playing for any imported object to be fully initialized
        og.sim.play()
        og.sim.step()
        # Reset robot and make sure it's not moving
        robot.reset()
        robot.keep_still()
        # Log information
        og.log.info(f"Loaded {robot_name}")
        og.log.info(f"Moving {robot_name}")
        if not headless:
            # Set viewer in front facing robot
            og.sim.viewer_camera.set_position_orientation(
                position=np.array([ 2.69918369, -3.63686664, 4.57894564]),
                orientation=np.array([0.39592411, 0.1348514 , 0.29286304, 0.85982 ]),
            )
            og.sim.enable_viewer_camera_teleoperation()
        # Hold still briefly so viewer can see robot
        for _ in range(100):
            og.sim.step()
        # Then apply random actions for a bit; each action is held for 10 physics steps
        for _ in range(30):
            action = np.random.uniform(-1, 1, robot.action_dim)
            if robot_name == "Tiago":
                # Smaller base commands for Tiago — presumably to keep its holonomic base from
                # drifting out of view; TODO confirm rationale
                action[robot.base_action_idx] = np.random.uniform(-0.1, 0.1, len(robot.base_action_idx))
            for _ in range(10):
                env.step(action)
        # Stop the simulator and remove the robot before importing the next one
        og.sim.stop()
        og.sim.remove_object(obj=robot)
    # Always shut down the environment cleanly at the end
    env.close()
if __name__ == "__main__":
    main()
| 2,369 | Python | 30.6 | 118 | 0.57577 |
StanfordVL/OmniGibson/omnigibson/examples/robots/robot_control_example.py | """
Example script demo'ing robot control.
Options for random actions, as well as selection of robot action space
"""
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm
from omnigibson.robots import REGISTERED_ROBOTS
from omnigibson.utils.ui_utils import choose_from_options, KeyboardRobotController
CONTROL_MODES = dict(
random="Use autonomous random actions (default)",
teleop="Use keyboard control",
)
SCENES = dict(
Rs_int="Realistic interactive home environment (default)",
empty="Empty environment with no objects",
)
# Don't use GPU dynamics and use flatcache for performance boost
gm.USE_GPU_DYNAMICS = False
gm.ENABLE_FLATCACHE = True
def choose_controllers(robot, random_selection=False):
    """
    Ask (or randomly pick, if @random_selection) which controller to use for every controllable
    component of @robot.

    Args:
        robot (BaseRobot): robot whose default controller config provides the available options
        random_selection (bool): if True, choices are made automatically (for automatic demo
            execution) instead of prompting the user. Default False

    Returns:
        dict: mapping from robot component name (e.g.: base, arm, etc.) to the selected
            controller name for that component
    """
    def _pick(component_name, available_names):
        # Sort the registered controller names so the prompt ordering is deterministic
        return choose_from_options(
            options=sorted(available_names),
            name="{} controller".format(component_name),
            random_selection=random_selection,
        )
    # The default controller config doubles as a registry of every valid controller per component
    return {
        component: _pick(component, controller_options.keys())
        for component, controller_options in robot._default_controller_config.items()
    }
def main(random_selection=False, headless=False, short_exec=False):
    """
    Robot control demo with selection
    Queries the user to select a robot, the controllers, a scene and a type of input (random actions or teleop)

    Args:
        random_selection (bool): If True, all choices are made automatically and control mode is forced to "random"
        headless (bool): Accepted for a uniform demo signature; NOTE(review): not used in this demo — confirm intended
        short_exec (bool): If True, caps the control loop at 100 steps instead of running forever
    """
    # NOTE: main.__doc__ is deliberately printed as part of the startup banner below
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Choose scene to load
    scene_model = choose_from_options(options=SCENES, name="scene", random_selection=random_selection)
    # Choose robot to create
    robot_name = choose_from_options(
        options=list(sorted(REGISTERED_ROBOTS.keys())), name="robot", random_selection=random_selection
    )
    scene_cfg = dict()
    if scene_model == "empty":
        scene_cfg["type"] = "Scene"
    else:
        scene_cfg["type"] = "InteractiveTraversableScene"
        scene_cfg["scene_model"] = scene_model
    # Add the robot we want to load
    robot0_cfg = dict()
    robot0_cfg["type"] = robot_name
    robot0_cfg["obs_modalities"] = ["rgb", "depth", "seg_instance", "normal", "scan", "occupancy_grid"]
    robot0_cfg["action_type"] = "continuous"
    robot0_cfg["action_normalize"] = True
    # Compile config
    cfg = dict(scene=scene_cfg, robots=[robot0_cfg])
    # Create the environment
    env = og.Environment(configs=cfg)
    # Choose robot controller to use (one controller choice per robot component)
    robot = env.robots[0]
    controller_choices = choose_controllers(robot=robot, random_selection=random_selection)
    # Choose control mode
    if random_selection:
        control_mode = "random"
    else:
        control_mode = choose_from_options(options=CONTROL_MODES, name="control mode")
    # Update the control mode of the robot
    controller_config = {component: {"name": name} for component, name in controller_choices.items()}
    robot.reload_controllers(controller_config=controller_config)
    # Because the controllers have been updated, we need to update the initial state so the correct controller state
    # is preserved
    env.scene.update_initial_state()
    # Update the simulator's viewer camera's pose so it points towards the robot
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([1.46949, -3.97358, 2.21529]),
        orientation=np.array([0.56829048, 0.09569975, 0.13571846, 0.80589577]),
    )
    # Reset environment and robot
    env.reset()
    robot.reset()
    # Create teleop controller
    action_generator = KeyboardRobotController(robot=robot)
    # Register custom binding to reset the environment
    action_generator.register_custom_keymapping(
        key=lazy.carb.input.KeyboardInput.R,
        description="Reset the robot",
        callback_fn=lambda: env.reset(),
    )
    # Print out relevant keyboard info if using keyboard teleop
    if control_mode == "teleop":
        action_generator.print_keyboard_teleop_info()
    # Other helpful user info
    print("Running demo.")
    print("Press ESC to quit")
    # Loop control until user quits; max_steps == -1 means run indefinitely
    max_steps = -1 if not short_exec else 100
    step = 0
    while step != max_steps:
        action = action_generator.get_random_action() if control_mode == "random" else action_generator.get_teleop_action()
        env.step(action=action)
        step += 1
    # Always shut down the environment cleanly at the end
    env.close()
if __name__ == "__main__":
    main()
| 5,275 | Python | 33.038709 | 123 | 0.68872 |
StanfordVL/OmniGibson/omnigibson/examples/robots/advanced/ik_example.py | import argparse
import time
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.objects import PrimitiveObject
from omnigibson.robots import Fetch
from omnigibson.scenes import Scene
from omnigibson.utils.control_utils import IKSolver
def main(random_selection=False, headless=False, short_exec=False):
    """
    Minimal example of usage of inverse kinematics solver
    This example showcases how to construct your own IK functionality using omniverse's native lula library
    without explicitly utilizing all of OmniGibson's class abstractions, and also showcases how to manipulate
    the simulator at a lower-level than the main Environment entry point.
    """
    # NOTE: main.__doc__ is deliberately printed as part of the startup banner below
    og.log.info(f"Demo {__file__}\n " + "*" * 80 + "\n Description:\n" + main.__doc__ + "*" * 80)
    # Assuming that if random_selection=True, headless=True, short_exec=True, we are calling it from tests and we
    # do not want to parse args (it would fail because the calling function is pytest "testfile.py")
    if not (random_selection and headless and short_exec):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--programmatic",
            "-p",
            dest="programmatic_pos",
            action="store_true",
            help="if the IK solvers should be used with the GUI or programmatically",
        )
        args = parser.parse_args()
        programmatic_pos = args.programmatic_pos
    else:
        programmatic_pos = True
    # Import scene and robot (Fetch)
    scene_cfg = {"type": "Scene"}
    # Create Fetch robot
    # Note that since we only care about IK functionality, we fix the base (this also makes the robot more stable)
    # (any object can also have its fixed_base attribute set to True!)
    # Note that since we're going to be setting joint position targets, we also need to make sure the robot's arm joints
    # (which includes the trunk) are being controlled using joint positions
    robot_cfg = {
        "type": "Fetch",
        "fixed_base": True,
        "controller_config": {
            "arm_0": {
                "name": "NullJointController",
                "motor_type": "position",
            }
        }
    }
    cfg = dict(scene=scene_cfg, robots=[robot_cfg])
    env = og.Environment(configs=cfg)
    # Update the viewer camera's pose so that it points towards the robot
    og.sim.viewer_camera.set_position_orientation(
        position=np.array([4.32248, -5.74338, 6.85436]),
        orientation=np.array([0.39592, 0.13485, 0.29286, 0.85982]),
    )
    robot = env.robots[0]
    # Set robot base at the origin
    robot.set_position_orientation(np.array([0, 0, 0]), np.array([0, 0, 0, 1]))
    # At least one simulation step while the simulator is playing must occur for the robot (or in general, any object)
    # to be fully initialized after it is imported into the simulator
    og.sim.play()
    og.sim.step()
    # Make sure none of the joints are moving
    robot.keep_still()
    # Since this demo aims to showcase how users can directly control the robot with IK,
    # we will need to disable the built-in controllers in OmniGibson
    robot.control_enabled = False
    # Create the IK solver -- note that we are controlling both the trunk and the arm since both are part of the
    # controllable kinematic chain for the end-effector!
    control_idx = np.concatenate([robot.trunk_control_idx, robot.arm_control_idx[robot.default_arm]])
    ik_solver = IKSolver(
        robot_description_path=robot.robot_arm_descriptor_yamls[robot.default_arm],
        robot_urdf_path=robot.urdf_path,
        reset_joint_pos=robot.get_joint_positions()[control_idx],
        eef_name=robot.eef_link_names[robot.default_arm],
    )
    # Define a helper function for executing specific end-effector commands using the ik solver
    def execute_ik(pos, quat=None, max_iter=100):
        """Solve IK for end-effector target @pos (and optional @quat) and drive the arm there."""
        og.log.info("Querying joint configuration to current marker position")
        # Grab the joint positions in order to reach the desired pose target
        joint_pos = ik_solver.solve(
            target_pos=pos,
            target_quat=quat,
            tolerance_pos=0.002,
            tolerance_quat=0.01,
            weight_pos=20.0,
            weight_quat=0.05,
            max_iterations=max_iter,
            initial_joint_pos=robot.get_joint_positions()[control_idx],
        )
        if joint_pos is not None:
            og.log.info("Solution found. Setting new arm configuration.")
            # drive=True sets position targets for the joint drives rather than teleporting the joints
            robot.set_joint_positions(joint_pos, indices=control_idx, drive=True)
        else:
            og.log.info("EE position not reachable.")
        og.sim.step()
    if programmatic_pos or headless:
        # Sanity check IK using pre-defined hardcoded positions
        query_positions = [[1, 0, 0.8], [1, 1, 1], [0.5, 0.5, 0], [0.5, 0.5, 0.5]]
        for query_pos in query_positions:
            execute_ik(query_pos)
            time.sleep(2)
    else:
        # Create a visual marker to be moved by the user, representing desired end-effector position
        marker = PrimitiveObject(
            prim_path=f"/World/marker",
            name="marker",
            primitive_type="Sphere",
            radius=0.03,
            visual_only=True,
            rgba=[1.0, 0, 0, 1.0],
        )
        og.sim.import_object(marker)
        # Get initial EE position and set marker to that location
        command = robot.get_eef_position()
        marker.set_position(command)
        og.sim.step()
        # Setup callbacks for grabbing keyboard inputs from omni
        exit_now = False
        def keyboard_event_handler(event, *args, **kwargs):
            # Mutates the enclosing scope's target command and exit flag
            nonlocal command, exit_now
            # Check if we've received a key press or repeat
            if event.type == lazy.carb.input.KeyboardEventType.KEY_PRESS \
                    or event.type == lazy.carb.input.KeyboardEventType.KEY_REPEAT:
                if event.input == lazy.carb.input.KeyboardInput.ENTER:
                    # Execute the command
                    execute_ik(pos=command)
                elif event.input == lazy.carb.input.KeyboardInput.ESCAPE:
                    # Quit
                    og.log.info("Quit.")
                    exit_now = True
                else:
                    # We see if we received a valid delta command, and if so, we update our command and visualized
                    # marker position
                    delta_cmd = input_to_xyz_delta_command(inp=event.input)
                    if delta_cmd is not None:
                        command = command + delta_cmd
                        marker.set_position(command)
                        og.sim.step()
            # Callback must return True if valid
            return True
        # Hook up the callback function with omni's user interface
        appwindow = lazy.omni.appwindow.get_default_app_window()
        input_interface = lazy.carb.input.acquire_input_interface()
        keyboard = appwindow.get_keyboard()
        # NOTE(review): sub_keyboard is unused but presumably must be kept alive so the
        # subscription isn't dropped — confirm against carb.input semantics
        sub_keyboard = input_interface.subscribe_to_keyboard_events(keyboard, keyboard_event_handler)
        # Print out helpful information to the user
        print_message()
        # Loop until the user requests an exit
        while not exit_now:
            og.sim.step()
    # Always shut the simulation down cleanly at the end
    og.app.close()
def input_to_xyz_delta_command(inp, delta=0.01):
    """
    Translate a keyboard key into an incremental (x, y, z) end-effector command.

    Args:
        inp (carb.input.KeyboardInput): key that was pressed
        delta (float): magnitude of the per-press positional increment

    Returns:
        None or np.ndarray: 3-array [dx, dy, dz] for a recognized key, None otherwise
    """
    keys = lazy.carb.input.KeyboardInput
    # Each recognized key maps to (axis index, direction sign)
    axis_and_sign = {
        keys.W: (0, 1.0),
        keys.S: (0, -1.0),
        keys.A: (1, 1.0),
        keys.D: (1, -1.0),
        keys.UP: (2, 1.0),
        keys.DOWN: (2, -1.0),
    }
    if inp not in axis_and_sign:
        return None
    axis, sign = axis_and_sign[inp]
    command = np.zeros(3)
    command[axis] = sign * delta
    return command
def print_message():
    """Print usage instructions for the interactive IK marker teleop."""
    instructions = (
        "*" * 80,
        "Move the marker to a desired position to query IK and press ENTER",
        "W/S: move marker further away or closer to the robot",
        "A/D: move marker to the left or the right of the robot",
        "UP/DOWN: move marker up and down",
        "ESC: quit",
    )
    for line in instructions:
        print(line)
if __name__ == "__main__":
    main()
| 8,259 | Python | 39.891089 | 120 | 0.623683 |
StanfordVL/OmniGibson/omnigibson/sensors/sensor_noise_base.py | from abc import ABCMeta, abstractmethod
from omnigibson.utils.python_utils import classproperty, Registerable
# Registered sensor noises
REGISTERED_SENSOR_NOISES = dict()
class BaseSensorNoise(Registerable, metaclass=ABCMeta):
    """
    Base SensorNoise class.
    Sensor noise-specific add_noise method is implemented in subclasses
    Args:
        enabled (bool): Whether this sensor should be enabled by default
    """
    def __init__(self, enabled=True):
        # Store whether this noise model is enabled or not
        self._enabled = enabled
    def __call__(self, obs):
        """
        If this noise is enabled, corrupts observation @obs by adding sensor noise to sensor reading. This is an
        identical call to self.corrupt(...)
        Args:
            obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
        Returns:
            np.array: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
        """
        # Pure convenience alias so noise objects can be used like functions
        return self.corrupt(obs=obs)
    def corrupt(self, obs):
        """
        If this noise is enabled, corrupts observation @obs by adding sensor noise to sensor reading.
        Args:
            obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
        Returns:
            np.array: Corrupted observation numpy array if self.enabled is True, otherwise this is a pass-through
        """
        # Run sanity check to make sure obs is in acceptable range
        # (boolean-mask indexing assumes @obs is a numpy array, per the documented contract;
        # note this check runs even when the noise model is disabled)
        assert len(obs[(obs < 0.0) | (obs > 1.0)]) == 0, "sensor reading has to be between [0.0, 1.0]"
        return self._corrupt(obs=obs) if self._enabled else obs
    @abstractmethod
    def _corrupt(self, obs):
        """
        Corrupts observation @obs by adding sensor noise to sensor reading
        Args:
            obs (np.array): observation numpy array of values of arbitrary dimension normalized to range [0.0, 1.0]
        Returns:
            np.array: Corrupted observation numpy array
        """
        raise NotImplementedError()
    @property
    def enabled(self):
        """
        Returns:
            bool: Whether this noise model is enabled or not
        """
        return self._enabled
    @enabled.setter
    def enabled(self, enabled):
        """
        En/disables this noise model
        Args:
            enabled (bool): Whether this noise model should be enabled or not
        """
        self._enabled = enabled
    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("BaseSensorNoise")
        return classes
    @classproperty
    def _cls_registry(cls):
        # Global registry of concrete sensor-noise subclasses (populated via Registerable)
        global REGISTERED_SENSOR_NOISES
        return REGISTERED_SENSOR_NOISES
| 2,917 | Python | 30.376344 | 115 | 0.633528 |
StanfordVL/OmniGibson/omnigibson/sensors/dropout_sensor_noise.py | import numpy as np
from omnigibson.sensors.sensor_noise_base import BaseSensorNoise
class DropoutSensorNoise(BaseSensorNoise):
    """
    Naive dropout sensor noise model
    Args:
        dropout_prob (float): Value in [0.0, 1.0] representing fraction of a single observation to be replaced
            with @dropout_value
        dropout_value (float): Value in [0.0, 1.0] to replace observations selected to be dropped out
        enabled (bool): Whether this sensor should be enabled by default
    """
    def __init__(
        self,
        dropout_prob=0.05,
        dropout_value=1.0,
        enabled=True,
    ):
        # Assign through the property setters so the [0.0, 1.0] range validation lives in
        # exactly one place (it was previously duplicated here and in the setters); the
        # assertion messages raised are identical to before
        self.dropout_prob = dropout_prob
        self.dropout_value = dropout_value
        # Run super method
        super().__init__(enabled=enabled)
    def _corrupt(self, obs):
        """
        Randomly replace a ~self.dropout_prob fraction of entries of @obs with self.dropout_value.
        Note: @obs is modified in place (and also returned).
        """
        # If our noise rate is 0, we just return the obs
        if self._dropout_prob == 0.0:
            return obs
        # Corrupt with randomized dropout: each entry independently survives with
        # probability (1 - dropout_prob); entries whose mask sample is 0 are dropped out.
        # NOTE: this mutates the input array in place.
        valid_mask = np.random.choice(2, obs.shape, p=[self._dropout_prob, 1.0 - self._dropout_prob])
        obs[valid_mask == 0] = self._dropout_value
        return obs
    @property
    def dropout_prob(self):
        """
        Returns:
            float: Value in [0.0, 1.0] representing fraction of a single observation to be replaced
                with self.dropout_value
        """
        return self._dropout_prob
    @dropout_prob.setter
    def dropout_prob(self, p):
        """
        Set the dropout probability for this noise model.
        Args:
            p (float): Value in [0.0, 1.0] representing fraction of a single observation to be replaced
                with self.dropout_value
        """
        assert 0.0 <= p <= 1.0, f"dropout_prob should be in range [0.0, 1.0], got: {p}"
        self._dropout_prob = p
    @property
    def dropout_value(self):
        """
        Returns:
            float: Value in [0.0, 1.0] to replace observations selected to be dropped out
        """
        return self._dropout_value
    @dropout_value.setter
    def dropout_value(self, val):
        """
        Set the dropout value for this noise model.
        Args:
            val (float): Value in [0.0, 1.0] to replace observations selected to be dropped out
        """
        assert 0.0 <= val <= 1.0, f"dropout_value should be in range [0.0, 1.0], got: {val}"
        self._dropout_value = val
| 2,676 | Python | 32.049382 | 110 | 0.583707 |
StanfordVL/OmniGibson/omnigibson/sensors/__init__.py | from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.sensors.sensor_base import BaseSensor, REGISTERED_SENSORS, ALL_SENSOR_MODALITIES
from omnigibson.sensors.scan_sensor import ScanSensor
from omnigibson.sensors.vision_sensor import VisionSensor
from omnigibson.sensors.sensor_noise_base import BaseSensorNoise, REGISTERED_SENSOR_NOISES
from omnigibson.sensors.dropout_sensor_noise import DropoutSensorNoise
# Map sensor prim names to corresponding sensor classes
SENSOR_PRIMS_TO_SENSOR_CLS = {
"Lidar": ScanSensor,
"Camera": VisionSensor,
}
def create_sensor(
    sensor_type,
    prim_path,
    name,
    modalities="all",
    enabled=True,
    sensor_kwargs=None,
    noise_type=None,
    noise_kwargs=None
):
    """
    Factory for sensors: resolves @sensor_type to a sensor class, optionally builds a noise model
    of @noise_type (with @noise_kwargs), and returns the instantiated sensor.
    Args:
        sensor_type (str): Type of sensor to create. Should be either one of SENSOR_PRIM_TO_SENSOR.keys() or
            one of REGISTERED_SENSORS (i.e.: the string name of the desired class to create)
        prim_path (str): prim path of the Sensor to encapsulate or create.
        name (str): Name for the sensor. Names need to be unique per scene.
        modalities (str or list of str): Modality(s) supported by this sensor. Valid options are part of
            sensor.all_modalities. Default is "all", which corresponds to all modalities being used
        enabled (bool): Whether this sensor should be enabled or not
        sensor_kwargs (dict): Any keyword kwargs to pass to the sensor constructor
        noise_type (str): Type of noise to create. Should be one of REGISTERED_SENSOR_NOISES
            (i.e.: the string name of the desired class to create)
        noise_kwargs (dict): Any keyword kwargs to pass to the noise constructor
    Returns:
        BaseSensor: Created sensor with specified params
    """
    # Run basic sanity check
    assert isinstance(sensor_type, str), "Inputted sensor_type must be a string!"
    # Resolve the sensor class: prim-name aliases (e.g. "Lidar", "Camera") take precedence
    # over the generic class-name registry
    sensor_cls = SENSOR_PRIMS_TO_SENSOR_CLS.get(sensor_type, REGISTERED_SENSORS.get(sensor_type))
    if sensor_cls is None:
        # This is an error, we didn't find the requested sensor ):
        raise ValueError(f"No sensor found with corresponding sensor_type: {sensor_type}")
    # Optionally build the noise model, validating the requested type first
    noise = None
    if noise_type is not None:
        assert_valid_key(key=noise_type, valid_keys=REGISTERED_SENSOR_NOISES, name="sensor noise type")
        noise = REGISTERED_SENSOR_NOISES[noise_type](**(noise_kwargs or dict()))
    # Instantiate and return the sensor itself
    return sensor_cls(
        prim_path=prim_path,
        name=name,
        modalities=modalities,
        enabled=enabled,
        noise=noise,
        **(sensor_kwargs or dict()),
    )
| 3,282 | Python | 40.556962 | 112 | 0.696222 |
StanfordVL/OmniGibson/omnigibson/sensors/sensor_base.py | from abc import ABCMeta
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.utils.python_utils import classproperty, assert_valid_key, Registerable
from omnigibson.utils.gym_utils import GymObservable
from gym.spaces import Space
# Registered sensors
REGISTERED_SENSORS = dict()
# All possible modalities across all sensors
ALL_SENSOR_MODALITIES = set()
class BaseSensor(XFormPrim, GymObservable, Registerable, metaclass=ABCMeta):
    """
    Base Sensor class.
    Sensor-specific get_obs method is implemented in subclasses
    Args:
        prim_path (str): prim path of the Sensor to encapsulate or create.
        name (str): Name for the sensor. Names need to be unique per scene.
        modalities (str or list of str): Modality(s) supported by this sensor. Default is "all", which corresponds
            to all modalities being used. Otherwise, valid options should be part of cls.all_modalities.
        enabled (bool): Whether this sensor should be enabled by default
        noise (None or BaseSensorNoise): If specified, sensor noise model to apply to this sensor.
        load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
            loading this sensor's prim at runtime.
    """
    def __init__(
        self,
        prim_path,
        name,
        modalities="all",
        enabled=True,
        noise=None,
        load_config=None,
    ):
        # Store inputs (and sanity check modalities along the way)
        if modalities == "all":
            modalities = self.all_modalities
        else:
            modalities = [modalities] if isinstance(modalities, str) else modalities
            for modality in modalities:
                assert_valid_key(key=modality, valid_keys=self.all_modalities, name="modality")
        self._modalities = set(modalities)
        self._enabled = enabled
        self._noise = noise
        # Run super method
        super().__init__(
            prim_path=prim_path,
            name=name,
            load_config=load_config,
        )
    def _load(self):
        # Sub-sensors must implement this class directly! Cannot use parent XForm class by default
        raise NotImplementedError("Sensor class must implement _load!")
    def _post_load(self):
        # Run super first
        super()._post_load()
        # Set the enabled property based on the internal value
        # This is done so that any subclassed sensors which require simulator specific enabling can handle this now
        self.enabled = self._enabled
    def get_obs(self):
        # Get sensor reading, and optionally corrupt the readings with noise using self.noise if
        # self.noise.enabled is True.
        # Note that the returned dictionary will only be filled in if this sensor is enabled!
        if not self._enabled:
            # BUGFIX: return an empty (obs, info) pair instead of a single dict so callers that
            # unpack two values work identically whether or not the sensor is enabled
            return dict(), dict()
        obs, info = self._get_obs()
        if self._noise is not None:
            # Apply noise only to numerical modalities; others (e.g. non-array outputs) pass through
            for k, v in obs.items():
                if k not in self.no_noise_modalities:
                    obs[k] = self._noise(v)
        return obs, info
    def _get_obs(self):
        """
        Get sensor reading. Should generally be extended by subclass.
        Returns:
            2-tuple:
                dict: Keyword-mapped observations mapping modality names to numpy arrays of arbitrary dimension
                dict: Additional information about the observations.
        """
        # Default is returning an empty dict
        return dict(), dict()
    def _load_observation_space(self):
        # Fill in observation space based on mapping and active modalities
        obs_space = dict()
        for modality, space in self._obs_space_mapping.items():
            if modality in self._modalities:
                if isinstance(space, Space):
                    # Directly add this space
                    obs_space[modality] = space
                else:
                    # Assume we are procedurally generating a box space
                    shape, low, high, dtype = space
                    obs_space[modality] = self._build_obs_box_space(shape=shape, low=low, high=high, dtype=dtype)
        return obs_space
    def add_modality(self, modality):
        """
        Add a modality to this sensor. Must be a valid modality (one of self.all_modalities)
        Args:
            modality (str): Name of the modality to add to this sensor
        """
        assert_valid_key(key=modality, valid_keys=self.all_modalities, name="modality")
        if modality not in self._modalities:
            self._modalities.add(modality)
            # Update observation space
            self.load_observation_space()
    def remove_modality(self, modality):
        """
        Remove a modality from this sensor. Must be a valid modality that is active (one of self.modalities)
        Args:
            modality (str): Name of the modality to remove from this sensor
        """
        assert_valid_key(key=modality, valid_keys=self._modalities, name="modality")
        if modality in self._modalities:
            self._modalities.remove(modality)
            # Update observation space
            self.load_observation_space()
    @property
    def modalities(self):
        """
        Returns:
            set: Name of modalities provided by this sensor. This should correspond to all the keys provided
                in self.get_obs()
        """
        return self._modalities
    @property
    def _obs_space_mapping(self):
        """
        Returns:
            dict: Keyword-mapped observation space settings for each modality. For each modality in
                cls.all_modalities, its name should map directly to the corresponding gym space Space for that modality
                or a 4-tuple entry (shape, low, high, dtype) for procedurally generating the appropriate Box Space
                for that modality
        """
        raise NotImplementedError()
    @classproperty
    def all_modalities(cls):
        """
        Returns:
            set: All possible valid modalities for this sensor. Should be implemented by subclass.
        """
        raise NotImplementedError()
    @property
    def noise(self):
        """
        Returns:
            None or BaseSensorNoise: Noise model to use for this sensor
        """
        return self._noise
    @classproperty
    def no_noise_modalities(cls):
        """
        Returns:
            set: Modalities that should NOT be passed through noise, irregardless of whether noise is enabled or not.
                This is useful for some modalities which are not exclusively numerical arrays.
        """
        raise NotImplementedError()
    @property
    def enabled(self):
        """
        Returns:
            bool: Whether this sensor is enabled or not
        """
        # By default, we simply return the internal value. Subclasses may need to extend this functionality,
        # e.g. by disabling actual sim functionality for better computational efficiency
        return self._enabled
    @enabled.setter
    def enabled(self, enabled):
        """
        Args:
            enabled (bool): Whether this sensor should be enabled or not
        """
        # By default, we simply store the value internally. Subclasses may need to extend this functionality,
        # e.g. by disabling actual sim functionality for better computational efficiency
        self._enabled = enabled
    @classproperty
    def sensor_type(cls):
        """
        Returns:
            str: Type of this sensor. By default, this is the sensor class name
        """
        return cls.__name__
    @classmethod
    def _register_cls(cls):
        global ALL_SENSOR_MODALITIES
        # Run super first
        super()._register_cls()
        # Also store modalities from this sensor class if we're registering it
        if cls.__name__ not in cls._do_not_register_classes:
            # BUGFIX: set.update() mutates the global set in place; the previous
            # set.union() call returned a new set and silently discarded it, so
            # ALL_SENSOR_MODALITIES was never actually populated
            ALL_SENSOR_MODALITIES.update(cls.all_modalities)
    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("BaseSensor")
        return classes
    @classproperty
    def _cls_registry(cls):
        # Global registry
        global REGISTERED_SENSORS
        return REGISTERED_SENSORS
| 8,385 | Python | 34.837607 | 119 | 0.620155 |
StanfordVL/OmniGibson/omnigibson/sensors/scan_sensor.py | import cv2
import numpy as np
from collections.abc import Iterable
from transforms3d.quaternions import quat2mat
import omnigibson.lazy as lazy
from omnigibson.sensors.sensor_base import BaseSensor
from omnigibson.utils.constants import OccupancyGridState
from omnigibson.utils.python_utils import classproperty
class ScanSensor(BaseSensor):
    """
    General 2D LiDAR range sensor and occupancy grid sensor.
    Args:
        prim_path (str): prim path of the Prim to encapsulate or create.
        name (str): Name for the object. Names need to be unique per scene.
        modalities (str or list of str): Modality(s) supported by this sensor. Default is "all", which corresponds
            to all modalities being used. Otherwise, valid options should be part of cls.all_modalities.
            For this scan sensor, this includes any of:
                {scan, occupancy_grid}
            Note that in order for "occupancy_grid" to be used, "scan" must also be included.
        enabled (bool): Whether this sensor should be enabled by default
        noise (None or BaseSensorNoise): If specified, sensor noise model to apply to this sensor.
        load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
            loading this sensor's prim at runtime.
        min_range (float): Minimum range to sense in meters
        max_range (float): Maximum range to sense in meters
        horizontal_fov (float): Field of view of sensor, in degrees
        vertical_fov (float): Field of view of sensor, in degrees
        yaw_offset (float): Degrees for offsetting this sensors horizontal FOV.
            Useful in cases where this sensor's forward direction is different than expected
        horizontal_resolution (float): Degrees in between each horizontal scan hit
        vertical_resolution (float): Degrees in between each vertical scan hit
        rotation_rate (float): How fast the range sensor is rotating, in rotations per sec. Set to 0 for all scans
            be to hit at once
        draw_points (bool): Whether to draw the points hit by this sensor
        draw_lines (bool): Whether to draw the lines representing the scans from this sensor
        occupancy_grid_resolution (int): How many discretized nodes in the occupancy grid. This will specify the
            height == width of the map
        occupancy_grid_range (float): Range of the occupancy grid, in meters
        occupancy_grid_inner_radius (float): Inner range of the occupancy grid that will assumed to be empty, in meters
        occupancy_grid_local_link (None or XFormPrim): XForm prim that represents the "origin" of any generated
            occupancy grid, e.g.: if this scan sensor is attached to a robot, then this should possibly be the base link
            for that robot. If None is specified, then this will default to this own sensor's frame as the origin.
    """
    def __init__(
        self,
        prim_path,
        name,
        modalities="all",
        enabled=True,
        noise=None,
        load_config=None,
        # Basic LIDAR kwargs
        min_range=0.05,
        max_range=10.0,
        horizontal_fov=360.0,
        vertical_fov=1.0,
        yaw_offset=0.0,
        horizontal_resolution=1.0,
        vertical_resolution=1.0,
        rotation_rate=0.0,
        draw_points=False,
        draw_lines=False,
        # Occupancy Grid kwargs
        occupancy_grid_resolution=128,
        occupancy_grid_range=5.0,
        occupancy_grid_inner_radius=0.5,
        occupancy_grid_local_link=None,
    ):
        # Store settings
        self.occupancy_grid_resolution = occupancy_grid_resolution
        self.occupancy_grid_range = occupancy_grid_range
        # Convert the inner radius from meters into units of occupancy-grid cells
        self.occupancy_grid_inner_radius = int(occupancy_grid_inner_radius * occupancy_grid_resolution
                                               / occupancy_grid_range)
        self.occupancy_grid_local_link = self if occupancy_grid_local_link is None else occupancy_grid_local_link
        # Create variables that will be filled in at runtime
        self._rs = None  # Range sensor interface, analogous to others, e.g.: dynamic control interface
        # Create load config from inputs
        load_config = dict() if load_config is None else load_config
        load_config["min_range"] = min_range
        load_config["max_range"] = max_range
        load_config["horizontal_fov"] = horizontal_fov
        load_config["vertical_fov"] = vertical_fov
        load_config["yaw_offset"] = yaw_offset
        load_config["horizontal_resolution"] = horizontal_resolution
        load_config["vertical_resolution"] = vertical_resolution
        load_config["rotation_rate"] = rotation_rate
        load_config["draw_points"] = draw_points
        load_config["draw_lines"] = draw_lines
        # Sanity check modalities -- if we're using occupancy_grid without scan modality, raise an error
        if isinstance(modalities, Iterable) and not isinstance(modalities, str) and "occupancy_grid" in modalities:
            assert "scan" in modalities, f"'scan' modality must be included in order to get occupancy_grid modality!"
        # Run super method
        super().__init__(
            prim_path=prim_path,
            name=name,
            modalities=modalities,
            enabled=enabled,
            noise=noise,
            load_config=load_config,
        )
    def _load(self):
        # Define a LIDAR prim at the current stage
        result, lidar = lazy.omni.kit.commands.execute("RangeSensorCreateLidar", path=self._prim_path)
        return lidar.GetPrim()
    def _post_load(self):
        # run super first
        super()._post_load()
        # Set all the lidar kwargs
        self.min_range = self._load_config["min_range"]
        self.max_range = self._load_config["max_range"]
        self.horizontal_fov = self._load_config["horizontal_fov"]
        self.vertical_fov = self._load_config["vertical_fov"]
        self.yaw_offset = self._load_config["yaw_offset"]
        self.horizontal_resolution = self._load_config["horizontal_resolution"]
        self.vertical_resolution = self._load_config["vertical_resolution"]
        self.rotation_rate = self._load_config["rotation_rate"]
        self.draw_points = self._load_config["draw_points"]
        self.draw_lines = self._load_config["draw_lines"]
    def _initialize(self):
        # run super first
        super()._initialize()
        # Initialize lidar sensor interface
        self._rs = lazy.omni.isaac.range_sensor._range_sensor.acquire_lidar_sensor_interface()
    @property
    def _obs_space_mapping(self):
        # Set the remaining modalities' values
        # (obs modality, shape, low, high)
        obs_space_mapping = dict(
            scan=((self.n_horizontal_rays, self.n_vertical_rays), 0.0, 1.0, np.float32),
            occupancy_grid=((self.occupancy_grid_resolution, self.occupancy_grid_resolution, 1), 0.0, 1.0, np.float32),
        )
        return obs_space_mapping
    def get_local_occupancy_grid(self, scan):
        """
        Get local occupancy grid based on current 1D scan
        Args:
            scan (n-array): 1D LiDAR scan, normalized to the range [0.0, 1.0]
        Returns:
            2D-array: (occupancy_grid_resolution, occupancy_grid_resolution)-sized numpy array of the local occupancy grid
        """
        # Run sanity checks first
        assert "occupancy_grid" in self._modalities, "Occupancy grid is not enabled for this range sensor!"
        assert self.n_vertical_rays == 1, "Occupancy grid is only valid for a 1D range sensor (n_vertical_rays = 1)!"
        # Grab vector of corresponding angles for each scan line
        angles = np.arange(
            -np.radians(self.horizontal_fov / 2),
            np.radians(self.horizontal_fov / 2),
            np.radians(self.horizontal_resolution),
        )
        # Convert into 3D unit vectors for each angle
        unit_vector_laser = np.array([[np.cos(ang), np.sin(ang), 0.0] for ang in angles])
        # Scale unit vectors by corresponding laser scan distances
        assert ((scan >= 0.0) & (scan <= 1.0)).all(), "scan out of valid range [0, 1]"
        scan_laser = unit_vector_laser * (scan * (self.max_range - self.min_range) + self.min_range)
        # Convert scans from laser frame to world frame
        pos, ori = self.get_position_orientation()
        scan_world = quat2mat(ori).dot(scan_laser.T).T + pos
        # Convert scans from world frame to local base frame
        base_pos, base_ori = self.occupancy_grid_local_link.get_position_orientation()
        scan_local = quat2mat(base_ori).T.dot((scan_world - base_pos).T).T
        scan_local = scan_local[:, :2]
        scan_local = np.concatenate([np.array([[0, 0]]), scan_local, np.array([[0, 0]])], axis=0)
        # flip y axis
        scan_local[:, 1] *= -1
        # Initialize occupancy grid -- default is unknown values
        occupancy_grid = np.zeros((self.occupancy_grid_resolution, self.occupancy_grid_resolution)).astype(np.uint8)
        occupancy_grid.fill(int(OccupancyGridState.UNKNOWN * 2.0))
        # Convert local scans into the corresponding OG square it should belong to (note now all values are > 0, since
        # OG ranges from [0, resolution] x [0, resolution])
        scan_local_in_map = scan_local / self.occupancy_grid_range * self.occupancy_grid_resolution + \
                            (self.occupancy_grid_resolution / 2)
        scan_local_in_map = scan_local_in_map.reshape((1, -1, 1, 2)).astype(np.int32)
        # For each scan hit,
        for i in range(scan_local_in_map.shape[1]):
            cv2.circle(
                img=occupancy_grid,
                center=(scan_local_in_map[0, i, 0, 0], scan_local_in_map[0, i, 0, 1]),
                radius=2,
                color=int(OccupancyGridState.OBSTACLES * 2.0),
                thickness=-1,
            )
        cv2.fillPoly(
            img=occupancy_grid, pts=scan_local_in_map, color=int(OccupancyGridState.FREESPACE * 2.0), lineType=1
        )
        cv2.circle(
            img=occupancy_grid,
            center=(self.occupancy_grid_resolution // 2, self.occupancy_grid_resolution // 2),
            radius=self.occupancy_grid_inner_radius,
            color=int(OccupancyGridState.FREESPACE * 2.0),
            thickness=-1,
        )
        # Grid states were stored doubled as uint8; scale back to floats here
        return occupancy_grid[:, :, None].astype(np.float32) / 2.0
    def _get_obs(self):
        # Run super first to grab any upstream obs
        obs, info = super()._get_obs()
        # Add scan info (normalized to [0.0, 1.0])
        if "scan" in self._modalities:
            raw_scan = self._rs.get_linear_depth_data(self._prim_path)
            # Sometimes get_linear_depth_data will return values that are slightly out of range, needs clipping
            raw_scan = np.clip(raw_scan, self.min_range, self.max_range)
            obs["scan"] = (raw_scan - self.min_range) / (self.max_range - self.min_range)
        # Optionally add occupancy grid info
        if "occupancy_grid" in self._modalities:
            obs["occupancy_grid"] = self.get_local_occupancy_grid(scan=obs["scan"])
        return obs, info
    @property
    def n_horizontal_rays(self):
        """
        Returns:
            int: Number of horizontal rays for this range sensor
        """
        return int(self.horizontal_fov // self.horizontal_resolution)
    @property
    def n_vertical_rays(self):
        """
        Returns:
            int: Number of vertical rays for this range sensor
        """
        return int(self.vertical_fov // self.vertical_resolution)
    @property
    def min_range(self):
        """
        Gets this range sensor's min_range (minimum distance in meters which will register a hit)
        Returns:
            float: minimum range for this range sensor, in meters
        """
        return self.get_attribute("minRange")
    @min_range.setter
    def min_range(self, val):
        """
        Sets this range sensor's min_range (minimum distance in meters which will register a hit)
        Args:
            val (float): minimum range for this range sensor, in meters
        """
        self.set_attribute("minRange", val)
    @property
    def max_range(self):
        """
        Gets this range sensor's max_range (maximum distance in meters which will register a hit)
        Returns:
            float: maximum range for this range sensor, in meters
        """
        return self.get_attribute("maxRange")
    @max_range.setter
    def max_range(self, val):
        """
        Sets this range sensor's max_range (maximum distance in meters which will register a hit)
        Args:
            val (float): maximum range for this range sensor, in meters
        """
        self.set_attribute("maxRange", val)
    @property
    def draw_lines(self):
        """
        Gets whether range lines are drawn for this sensor
        Returns:
            bool: Whether range lines are drawn for this sensor
        """
        return self.get_attribute("drawLines")
    @draw_lines.setter
    def draw_lines(self, draw):
        """
        Sets whether range lines are drawn for this sensor
        Args:
            draw (bool): Whether range lines are drawn for this sensor
        """
        self.set_attribute("drawLines", draw)
    @property
    def draw_points(self):
        """
        Gets whether range points are drawn for this sensor
        Returns:
            bool: Whether range points are drawn for this sensor
        """
        return self.get_attribute("drawPoints")
    @draw_points.setter
    def draw_points(self, draw):
        """
        Sets whether range points are drawn for this sensor
        Args:
            draw (bool): Whether range points are drawn for this sensor
        """
        self.set_attribute("drawPoints", draw)
    @property
    def horizontal_fov(self):
        """
        Gets this range sensor's horizontal_fov
        Returns:
            float: horizontal field of view for this range sensor
        """
        return self.get_attribute("horizontalFov")
    @horizontal_fov.setter
    def horizontal_fov(self, fov):
        """
        Sets this range sensor's horizontal_fov
        Args:
            fov (float): horizontal field of view to set
        """
        self.set_attribute("horizontalFov", fov)
    @property
    def horizontal_resolution(self):
        """
        Gets this range sensor's horizontal_resolution (degrees in between each horizontal hit)
        Returns:
            float: horizontal resolution for this range sensor, in degrees
        """
        return self.get_attribute("horizontalResolution")
    @horizontal_resolution.setter
    def horizontal_resolution(self, resolution):
        """
        Sets this range sensor's horizontal_resolution (degrees in between each horizontal hit)
        Args:
            resolution (float): horizontal resolution to set, in degrees
        """
        self.set_attribute("horizontalResolution", resolution)
    @property
    def vertical_fov(self):
        """
        Gets this range sensor's vertical_fov
        Returns:
            float: vertical field of view for this range sensor
        """
        return self.get_attribute("verticalFov")
    @vertical_fov.setter
    def vertical_fov(self, fov):
        """
        Sets this range sensor's vertical_fov
        Args:
            fov (float): vertical field of view to set
        """
        self.set_attribute("verticalFov", fov)
    @property
    def vertical_resolution(self):
        """
        Gets this range sensor's vertical_resolution (degrees in between each vertical hit)
        Returns:
            float: vertical resolution for this range sensor, in degrees
        """
        return self.get_attribute("verticalResolution")
    @vertical_resolution.setter
    def vertical_resolution(self, resolution):
        """
        Sets this range sensor's vertical_resolution (degrees in between each vertical hit)
        Args:
            resolution (float): vertical resolution to set, in degrees
        """
        self.set_attribute("verticalResolution", resolution)
    @property
    def yaw_offset(self):
        """
        Gets this range sensor's yaw_offset (used in cases where this sensor's forward direction is different than expected)
        Returns:
            float: yaw offset for this range sensor in degrees
        """
        return self.get_attribute("yawOffset")
    @yaw_offset.setter
    def yaw_offset(self, offset):
        """
        Sets this range sensor's yaw_offset (used in cases where this sensor's forward direction is different than expected)
        Args:
            offset (float): yaw offset to set in degrees.
        """
        self.set_attribute("yawOffset", offset)
    @property
    def rotation_rate(self):
        """
        Gets this range sensor's rotation_rate, in degrees per second. Note that a 0 value corresponds to no rotation,
        and all range hits are assumed to be received at the exact same time.
        Returns:
            float: rotation rate for this range sensor in degrees per second
        """
        return self.get_attribute("rotationRate")
    @rotation_rate.setter
    def rotation_rate(self, rate):
        """
        Sets this range sensor's rotation_rate, in degrees per second. Note that a 0 value corresponds to no rotation,
        and all range hits are assumed to be received at the exact same time.
        Args:
            rate (float): rotation rate for this range sensor in degrees per second
        """
        self.set_attribute("rotationRate", rate)
    @classproperty
    def all_modalities(cls):
        return {"scan", "occupancy_grid"}
    @classproperty
    def no_noise_modalities(cls):
        # Occupancy grid should have no noise
        return {"occupancy_grid"}
    @property
    def enabled(self):
        # Just use super
        return super().enabled
    @enabled.setter
    def enabled(self, enabled):
        # We must use super and additionally directly en/disable the sensor in the simulation
        # Note: weird syntax below required to "extend" super class's implementation, see:
        # https://stackoverflow.com/a/37663266
        super(ScanSensor, self.__class__).enabled.fset(self, enabled)
        self.set_attribute("enabled", enabled)
| 18,414 | Python | 37.205394 | 124 | 0.624796 |
StanfordVL/OmniGibson/omnigibson/sensors/vision_sensor.py | import numpy as np
import time
import gym
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.sensors.sensor_base import BaseSensor
from omnigibson.systems.system_base import REGISTERED_SYSTEMS
from omnigibson.utils.constants import MAX_CLASS_COUNT, MAX_INSTANCE_COUNT, MAX_VIEWER_SIZE, semantic_class_name_to_id, semantic_class_id_to_name
from omnigibson.utils.python_utils import assert_valid_key, classproperty
from omnigibson.utils.sim_utils import set_carb_setting
from omnigibson.utils.ui_utils import dock_window
from omnigibson.utils.vision_utils import Remapper
# Duplicate of simulator's render method, used so that this can be done before simulator is created!
def render():
    """
    Refreshes the Isaac Sim app rendering components including UI elements and viewports, etc.
    """
    # Temporarily disable physics stepping so the app update only advances rendering, then restore it
    set_carb_setting(og.app._carb_settings, "/app/player/playSimulations", False)
    og.app.update()
    set_carb_setting(og.app._carb_settings, "/app/player/playSimulations", True)
class VisionSensor(BaseSensor):
    """
    Vision sensor that handles a variety of modalities, including:
        - RGB (normal)
        - Depth (normal, linear)
        - Normals
        - Segmentation (semantic, instance)
        - Optical flow
        - 2D Bounding boxes (tight, loose)
        - 3D Bounding boxes
        - Camera state
    Args:
        prim_path (str): prim path of the Prim to encapsulate or create.
        name (str): Name for the object. Names need to be unique per scene.
        modalities (str or list of str): Modality(s) supported by this sensor. Default is "all", which corresponds
            to all modalities being used. Otherwise, valid options should be part of cls.all_modalities.
            For this vision sensor, this includes any of:
                {rgb, depth, depth_linear, normal, seg_semantic, seg_instance, seg_instance_id, flow,
                bbox_2d_tight, bbox_2d_loose, bbox_3d}
        enabled (bool): Whether this sensor should be enabled by default
        noise (None or BaseSensorNoise): If specified, sensor noise model to apply to this sensor.
        load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
            loading this sensor's prim at runtime.
        image_height (int): Height of generated images, in pixels
        image_width (int): Width of generated images, in pixels
        focal_length (float): Focal length to set
        clipping_range (2-tuple): (min, max) viewing range of this vision sensor
        viewport_name (None or str): If specified, will link this camera to the specified viewport, overriding its
            current camera. Otherwise, creates a new viewport
    """
    # All raw sensor names this sensor can generate; "camera_params" is auxiliary metadata, not a modality
    ALL_MODALITIES = (
        "rgb",
        "depth",
        "depth_linear",
        "normal",
        "seg_semantic",  # Semantic segmentation shows the category each pixel belongs to
        "seg_instance",  # Instance segmentation shows the name of the object each pixel belongs to
        "seg_instance_id",  # Instance ID segmentation shows the prim path of the visual mesh each pixel belongs to
        "flow",
        "bbox_2d_tight",
        "bbox_2d_loose",
        "bbox_3d",
        "camera_params",
    )
    # Documentation for the different types of segmentation for particle systems:
    # - Cloth (e.g. `dishtowel`):
    #   - semantic: all shows up under one semantic label (e.g. `"4207839377": "dishtowel"`)
    #   - instance: entire cloth shows up under one label (e.g. `"87": "dishtowel_0"`)
    #   - instance id: entire cloth shows up under one label (e.g. `"31": "/World/dishtowel_0/base_link_cloth"`)
    # - MicroPhysicalParticleSystem - FluidSystem (e.g. `water`):
    #   - semantic: all shows up under one semantic label (e.g. `"3330677804": "water"`)
    #   - instance: all shows up under one instance label (e.g. `"21": "water"`)
    #   - instance id: all shows up under one instance ID label (e.g. `"36": "water"`)
    # - MicroPhysicalParticleSystem - GranularSystem (e.g. `sesame seed`):
    #   - semantic: all shows up under one semantic label (e.g. `"2975304485": "sesame_seed"`)
    #   - instance: all shows up under one instance label (e.g. `"21": "sesame_seed"`)
    #   - instance id: all shows up under one instance ID label (e.g. `"36": "sesame_seed"`)
    # - MacroPhysicalParticleSystem (e.g. `diced__carrot`):
    #   - semantic: all shows up under one semantic label (e.g. `"2419487146": "diced__carrot"`)
    #   - instance: all shows up under one instance label (e.g. `"21": "diced__carrot"`)
    #   - instance id: all shows up under one instance ID label (e.g. `"36": "diced__carrot"`)
    # - MacroVisualParticleSystem (e.g. `stain`):
    #   - semantic: all shows up under one semantic label (e.g. `"884110082": "stain"`)
    #   - instance: all shows up under one instance label (e.g. `"21": "stain"`)
    #   - instance id: all shows up under one instance ID label (e.g. `"36": "stain"`)
    # Persistent dictionary of sensors, mapped from prim_path to sensor
    SENSORS = dict()
    # Remappers that translate replicator's raw segmentation IDs into OmniGibson's consistent ID spaces
    SEMANTIC_REMAPPER = Remapper()
    INSTANCE_REMAPPER = Remapper()
    INSTANCE_ID_REMAPPER = Remapper()
    # Registries mapping our instance (ID) segmentation integer labels to human-readable names
    INSTANCE_REGISTRY = {0: "background", 1: "unlabelled"}
    INSTANCE_ID_REGISTRY = {0: "background"}
def __init__(
self,
prim_path,
name,
modalities="all",
enabled=True,
noise=None,
load_config=None,
image_height=128,
image_width=128,
focal_length=17.0, # Default 17.0 since this is roughly the human eye focal length
clipping_range=(0.001, 10000000.0),
viewport_name=None,
):
# Create load config from inputs
load_config = dict() if load_config is None else load_config
load_config["image_height"] = image_height
load_config["image_width"] = image_width
load_config["focal_length"] = focal_length
load_config["clipping_range"] = clipping_range
load_config["viewport_name"] = viewport_name
# Create variables that will be filled in later at runtime
self._viewport = None # Viewport from which to grab data
self._annotators = None
self._render_product = None
self._RAW_SENSOR_TYPES = dict(
rgb="rgb",
depth="distance_to_camera",
depth_linear="distance_to_image_plane",
normal="normals",
# Semantic segmentation shows the category each pixel belongs to
seg_semantic="semantic_segmentation",
# Instance segmentation shows the name of the object each pixel belongs to
seg_instance="instance_segmentation",
# Instance ID segmentation shows the prim path of the visual mesh each pixel belongs to
seg_instance_id="instance_id_segmentation",
flow="motion_vectors",
bbox_2d_tight="bounding_box_2d_tight",
bbox_2d_loose="bounding_box_2d_loose",
bbox_3d="bounding_box_3d",
camera_params="camera_params",
)
assert {key for key in self._RAW_SENSOR_TYPES.keys() if key != "camera_params"} == set(self.all_modalities), \
"VisionSensor._RAW_SENSOR_TYPES must have the same keys as VisionSensor.all_modalities!"
modalities = set([modalities]) if isinstance(modalities, str) else modalities
# 1) seg_instance and seg_instance_id require seg_semantic to be enabled (for rendering particle systems)
# 2) bounding box observations require seg_semantic to be enabled (for remapping bounding box semantic IDs)
semantic_dependent_modalities = {"seg_instance", "seg_instance_id", "bbox_2d_loose", "bbox_2d_tight", "bbox_3d"}
# if any of the semantic dependent modalities are enabled, then seg_semantic must be enabled
if semantic_dependent_modalities.intersection(modalities) and "seg_semantic" not in modalities:
modalities.add("seg_semantic")
# Run super method
super().__init__(
prim_path=prim_path,
name=name,
modalities=modalities,
enabled=enabled,
noise=noise,
load_config=load_config,
)
def _load(self):
# Define a new camera prim at the current stage
# Note that we can't use og.sim.stage here because the vision sensors get loaded first
return lazy.pxr.UsdGeom.Camera.Define(lazy.omni.isaac.core.utils.stage.get_current_stage(), self._prim_path).GetPrim()
    def _post_load(self):
        """
        Post-load setup: registers this sensor in the global SENSORS dict, creates its replicator
        render product, links or creates a viewport for the camera, docks that viewport in the GUI,
        and applies resolution / focal length / clipping range from the load config.
        """
        # run super first
        super()._post_load()
        # Add this sensor to the list of global sensors
        self.SENSORS[self._prim_path] = self
        resolution = (self._load_config["image_width"], self._load_config["image_height"])
        self._render_product = lazy.omni.replicator.core.create.render_product(self._prim_path, resolution)
        # Create a new viewport to link to this camera or link to a pre-existing one
        viewport_name = self._load_config["viewport_name"]
        if viewport_name is not None:
            vp_names_to_handles = {vp.name: vp for vp in lazy.omni.kit.viewport.window.get_viewport_window_instances()}
            assert_valid_key(key=viewport_name, valid_keys=vp_names_to_handles, name="viewport name")
            viewport = vp_names_to_handles[viewport_name]
        else:
            viewport = lazy.omni.kit.viewport.utility.create_viewport_window()
            # Take a render step to make sure the viewport is generated before docking it
            render()
            # Grab the newly created viewport and dock it to the GUI
            # The first viewport is always the "main" global camera, and any additional cameras are auxiliary views
            # These auxiliary views will be stacked in a single column
            # Thus, the first auxiliary viewport should be generated to the left of the main dockspace, and any
            # subsequent viewports should be equally spaced according to the number of pre-existing auxiliary views
            n_auxiliary_sensors = len(self.SENSORS) - 1
            if n_auxiliary_sensors == 1:
                # This is the first auxiliary viewport, dock to the left of the main dockspace
                dock_window(space=lazy.omni.ui.Workspace.get_window("DockSpace"), name=viewport.name,
                            location=lazy.omni.ui.DockPosition.LEFT, ratio=0.25)
            elif n_auxiliary_sensors > 1:
                # This is any additional auxiliary viewports, dock equally-spaced in the auxiliary column
                # We also need to re-dock any prior viewports!
                # NOTE(review): the ratio formula appears intended to split the column evenly among the
                # stacked auxiliary views -- confirm against the GUI layout if changing this
                for i in range(2, n_auxiliary_sensors + 1):
                    dock_window(space=lazy.omni.ui.Workspace.get_window(f"Viewport {i - 1}"), name=f"Viewport {i}",
                                location=lazy.omni.ui.DockPosition.BOTTOM, ratio=(1 + n_auxiliary_sensors - i) / (2 + n_auxiliary_sensors - i))
        self._viewport = viewport
        # Link the camera and viewport together
        self._viewport.viewport_api.set_active_camera(self._prim_path)
        # Requires 3 render updates to propagate changes
        for i in range(3):
            render()
        # Set the viewer size (requires taking one render step afterwards)
        self._viewport.viewport_api.set_texture_resolution(resolution)
        # Also update focal length and clipping range
        self.focal_length = self._load_config["focal_length"]
        self.clipping_range = self._load_config["clipping_range"]
        # Requires 3 render updates to propagate changes
        for i in range(3):
            render()
def _initialize(self):
# Run super first
super()._initialize()
self._annotators = {modality: None for modality in self._modalities}
# Initialize sensors
self.initialize_sensors(names=self._modalities)
for _ in range(3):
render()
def initialize_sensors(self, names):
"""Initializes a raw sensor in the simulation.
Args:
names (str or list of str): Name of the raw sensor(s) to initialize.
If they are not part of self._RAW_SENSOR_TYPES' keys, we will simply pass over them
"""
names = {names} if isinstance(names, str) else set(names)
for name in names:
self._add_modality_to_backend(modality=name)
    def _get_obs(self):
        """
        Grabs observations for every active modality from its replicator annotator.
        Returns:
            2-tuple:
                dict: Keyword-mapped observation data per modality
                dict: Keyword-mapped info (e.g. remapped id-to-label mappings for segmentation modalities)
        """
        # Make sure we're initialized
        assert self.initialized, "Cannot grab vision observations without first initializing this VisionSensor!"
        # Run super first to grab any upstream obs
        obs, info = super()._get_obs()
        # Reorder modalities to ensure that seg_semantic is always ran before seg_instance or seg_instance_id
        # (the instance remappers and bbox remapping below consume seg_semantic's output)
        if "seg_semantic" in self._modalities:
            reordered_modalities = ["seg_semantic"] + [modality for modality in self._modalities if modality != "seg_semantic"]
        else:
            reordered_modalities = self._modalities
        for modality in reordered_modalities:
            raw_obs = self._annotators[modality].get_data()
            # Obs is either a dictionary of {"data":, ..., "info": ...} or a direct array
            obs[modality] = raw_obs["data"] if isinstance(raw_obs, dict) else raw_obs
            if modality == "seg_semantic":
                id_to_labels = raw_obs["info"]["idToLabels"]
                obs[modality], info[modality] = self._remap_semantic_segmentation(obs[modality], id_to_labels)
            elif modality == "seg_instance":
                id_to_labels = raw_obs["info"]["idToLabels"]
                obs[modality], info[modality] = self._remap_instance_segmentation(
                    obs[modality], id_to_labels, obs["seg_semantic"], info["seg_semantic"], id=False)
            elif modality == "seg_instance_id":
                id_to_labels = raw_obs["info"]["idToLabels"]
                obs[modality], info[modality] = self._remap_instance_segmentation(
                    obs[modality], id_to_labels, obs["seg_semantic"], info["seg_semantic"], id=True)
            elif "bbox" in modality:
                obs[modality] = self._remap_bounding_box_semantic_ids(obs[modality])
        return obs, info
    def _remap_semantic_segmentation(self, img, id_to_labels):
        """
        Remap the semantic segmentation image to the class IDs defined in semantic_class_name_to_id().
        Also, correct the id_to_labels input with the labels from semantic_class_name_to_id() and return it.
        Args:
            img (np.ndarray): Semantic segmentation image to remap
            id_to_labels (dict): Dictionary of semantic IDs to class labels
        Returns:
            np.ndarray: Remapped semantic segmentation image
            dict: Corrected id_to_labels dictionary
        """
        # Preprocess id_to_labels to feed into the remapper
        replicator_mapping = {}
        for key, val in id_to_labels.items():
            key = int(key)
            # Class names are normalized to lowercase before remapping
            replicator_mapping[key] = val["class"].lower()
            if "," in replicator_mapping[key]:
                # If there are multiple class names, grab the one that is a registered system
                # This happens with MacroVisual particles, e.g. {"11": {"class": "breakfast_table,stain"}}
                categories = [cat for cat in replicator_mapping[key].split(",") if cat in REGISTERED_SYSTEMS]
                assert len(categories) == 1, "There should be exactly one category that belongs to REGISTERED_SYSTEMS"
                replicator_mapping[key] = categories[0]
            assert replicator_mapping[key] in semantic_class_id_to_name().values(), f"Class {val['class']} does not exist in the semantic class name to id mapping!"
        # Every pixel value must have a corresponding label before remapping
        assert set(np.unique(img)).issubset(set(replicator_mapping.keys())), "Semantic segmentation image does not match the original id_to_labels mapping."
        return VisionSensor.SEMANTIC_REMAPPER.remap(replicator_mapping, semantic_class_id_to_name(), img)
    def _remap_instance_segmentation(self, img, id_to_labels, semantic_img, semantic_labels, id=False):
        """
        Remap the instance segmentation image to our own instance IDs.
        Also, correct the id_to_labels input with our new labels and return it.
        Args:
            img (np.ndarray): Instance segmentation image to remap
            id_to_labels (dict): Dictionary of instance IDs to class labels
            semantic_img (np.ndarray): Semantic segmentation image to use for instance registry
            semantic_labels (dict): Dictionary of semantic IDs to class labels
            id (bool): Whether to remap for instance ID segmentation
        Returns:
            np.ndarray: Remapped instance segmentation image
            dict: Corrected id_to_labels dictionary
        """
        # Sometimes 0 and 1 show up in the image, but they are not in the id_to_labels mapping
        id_to_labels.update({"0": "BACKGROUND"})
        if not id:
            id_to_labels.update({"1": "UNLABELLED"})
        # Preprocess id_to_labels and update instance registry
        replicator_mapping = {}
        for key, value in id_to_labels.items():
            key = int(key)
            if value in ["BACKGROUND", "UNLABELLED"]:
                value = value.lower()
            else:
                assert "/" in value, f"Instance segmentation (ID) label {value} is not a valid prim path!"
                prim_name = value.split("/")[-1]
                # Hacky way to get the particles of MacroVisual/PhysicalParticleSystem
                # Remap instance segmentation and instance segmentation ID labels to system name
                if "Particle" in prim_name:
                    category_name = prim_name.split("Particle")[0]
                    assert category_name in REGISTERED_SYSTEMS, f"System name {category_name} is not in the registered systems!"
                    value = category_name
                else:
                    # Remap instance segmentation labels to object name
                    if not id:
                        # value is the prim path of the object
                        if value == "/World/groundPlane":
                            value = "groundPlane"
                        else:
                            obj = og.sim.scene.object_registry("prim_path", value)
                            # Remap instance segmentation labels from prim path to object name
                            assert obj is not None, f"Object with prim path {value} cannot be found in objct registry!"
                            value = obj.name
                    # Keep the instance segmentation ID labels intact (prim paths of visual meshes)
                    else:
                        pass
            self._register_instance(value, id=id)
            replicator_mapping[key] = value
        # Handle the cases for MicroPhysicalParticleSystem (FluidSystem, GranularSystem).
        # They show up in the image, but not in the info (id_to_labels).
        # We identify these values, find the corresponding semantic label (system name), and add the mapping.
        # NOTE: np.unique(..., return_index=True) yields, per unique pixel value, the flat index of its
        # first occurrence in img, which we use to look up the co-located semantic pixel.
        for key, img_idx in zip(*np.unique(img, return_index=True)):
            if str(key) not in id_to_labels:
                semantic_label = semantic_img.flatten()[img_idx]
                assert semantic_label in semantic_labels, f"Semantic map value {semantic_label} is not in the semantic labels!"
                category_name = semantic_labels[semantic_label]
                if category_name in REGISTERED_SYSTEMS:
                    value = category_name
                    self._register_instance(value, id=id)
                # If the category name is not in the registered systems,
                # which happens because replicator sometimes returns segmentation map and id_to_labels that are not in sync,
                # we will label this as "unlabelled" for now
                else:
                    value = "unlabelled"
                replicator_mapping[key] = value
        registry = VisionSensor.INSTANCE_ID_REGISTRY if id else VisionSensor.INSTANCE_REGISTRY
        remapper = VisionSensor.INSTANCE_ID_REMAPPER if id else VisionSensor.INSTANCE_REMAPPER
        assert set(np.unique(img)).issubset(set(replicator_mapping.keys())), "Instance segmentation image does not match the original id_to_labels mapping."
        return remapper.remap(replicator_mapping, registry, img)
    def _register_instance(self, instance_name, id=False):
        """
        Registers @instance_name in the appropriate class-level instance registry if it is not already present.

        Args:
            instance_name (str): Label to register (an object name, system name, or prim path)
            id (bool): If True, register in INSTANCE_ID_REGISTRY; otherwise register in INSTANCE_REGISTRY
        """
        registry = VisionSensor.INSTANCE_ID_REGISTRY if id else VisionSensor.INSTANCE_REGISTRY
        # Registry keys are sequential integers, so the next free key is simply the current size
        if instance_name not in registry.values():
            registry[len(registry)] = instance_name
def _remap_bounding_box_semantic_ids(self, bboxes):
"""
Remap the semantic IDs of the bounding boxes to our own semantic IDs.
Args:
bboxes (list of dict): List of bounding boxes to remap
Returns:
list of dict: Remapped list of bounding boxes
"""
for bbox in bboxes:
bbox["semanticId"] = VisionSensor.SEMANTIC_REMAPPER.remap_bbox(bbox["semanticId"])
return bboxes
def add_modality(self, modality):
# Check if we already have this modality (if so, no need to initialize it explicitly)
should_initialize = modality not in self._modalities
# Run super
super().add_modality(modality=modality)
# We also need to initialize this new modality
if should_initialize:
self.initialize_sensors(names=modality)
def remove_modality(self, modality):
# Check if we don't have this modality (if not, no need to remove it explicitly)
should_remove = modality in self._modalities
# Run super
super().remove_modality(modality=modality)
if should_remove:
self._remove_modality_from_backend(modality=modality)
    def _add_modality_to_backend(self, modality):
        """
        Helper function to add specified modality @modality to the omniverse Replicator backend so that its data is
        generated during get_obs()

        Args:
            modality (str): Name of the modality to add to the Replicator backend
        """
        # Only create and attach an annotator if one doesn't already exist for this modality
        if self._annotators.get(modality, None) is None:
            self._annotators[modality] = lazy.omni.replicator.core.AnnotatorRegistry.get_annotator(self._RAW_SENSOR_TYPES[modality])
            self._annotators[modality].attach([self._render_product])
    def _remove_modality_from_backend(self, modality):
        """
        Helper function to remove specified modality @modality from the omniverse Replicator backend so that its data is
        no longer generated during get_obs()

        Args:
            modality (str): Name of the modality to remove from the Replicator backend
        """
        # Only detach if an annotator actually exists for this modality; clear the slot afterwards
        if self._annotators.get(modality, None) is not None:
            self._annotators[modality].detach([self._render_product])
            self._annotators[modality] = None
    def remove(self):
        """
        Removes this vision sensor: deregisters it from the class-level SENSORS map, destroys its viewport
        window, then runs the superclass removal.
        """
        # Remove from global sensors dictionary
        self.SENSORS.pop(self._prim_path)

        # Remove viewport
        self._viewport.destroy()

        # Run super
        super().remove()
    @property
    def camera_parameters(self):
        """
        Returns a dictionary of keyword-mapped relevant intrinsic and extrinsic camera parameters for this vision sensor.
        The returned dictionary includes the following keys and their corresponding data types:

        - "cameraAperture": np.ndarray (float32) - Camera aperture dimensions.
        - "cameraApertureOffset": np.ndarray (float32) - Offset of the camera aperture.
        - "cameraFisheyeLensP": np.ndarray (float32) - Fisheye lens P parameter.
        - "cameraFisheyeLensS": np.ndarray (float32) - Fisheye lens S parameter.
        - "cameraFisheyeMaxFOV": float - Maximum field of view for fisheye lens.
        - "cameraFisheyeNominalHeight": int - Nominal height for fisheye lens.
        - "cameraFisheyeNominalWidth": int - Nominal width for fisheye lens.
        - "cameraFisheyeOpticalCentre": np.ndarray (float32) - Optical center for fisheye lens.
        - "cameraFisheyePolynomial": np.ndarray (float32) - Polynomial parameters for fisheye lens distortion.
        - "cameraFocalLength": float - Focal length of the camera.
        - "cameraFocusDistance": float - Focus distance of the camera.
        - "cameraFStop": float - F-stop value of the camera.
        - "cameraModel": str - Camera model identifier.
        - "cameraNearFar": np.ndarray (float32) - Near and far plane distances.
        - "cameraProjection": np.ndarray (float32) - Camera projection matrix.
        - "cameraViewTransform": np.ndarray (float32) - Camera view transformation matrix.
        - "metersPerSceneUnit": float - Scale factor from scene units to meters.
        - "renderProductResolution": np.ndarray (int32) - Resolution of the rendered product.

        Returns:
            dict: Keyword-mapped relevant intrinsic and extrinsic camera parameters for this vision sensor.
        """
        # Add the camera params modality if it doesn't already exist
        if "camera_params" not in self._annotators:
            self.initialize_sensors(names="camera_params")
            # Requires 3 render updates for camera params annotator to become active
            for _ in range(3):
                render()

        # Grab and return the parameters
        return self._annotators["camera_params"].get_data()
    @property
    def viewer_visibility(self):
        """
        Returns:
            bool: Whether the viewer is visible or not
        """
        return self._viewport.visible
    @viewer_visibility.setter
    def viewer_visibility(self, visible):
        """
        Sets whether the viewer should be visible or not in the Omni UI

        Args:
            visible (bool): Whether the viewer should be visible or not
        """
        self._viewport.visible = visible
        # Requires 1 render update to propagate changes
        render()
    @property
    def image_height(self):
        """
        Returns:
            int: Image height of this sensor, in pixels
        """
        # Texture resolution is (width, height); height is the second element
        return self._viewport.viewport_api.get_texture_resolution()[1]
@image_height.setter
def image_height(self, height):
"""
Sets the image height @height for this sensor
Args:
height (int): Image height of this sensor, in pixels
"""
width, _ = self._viewport.viewport_api.get_texture_resolution()
self._viewport.viewport_api.set_texture_resolution((width, height))
# Requires 3 updates to propagate changes
for i in range(3):
render()
    @property
    def image_width(self):
        """
        Returns:
            int: Image width of this sensor, in pixels
        """
        # Texture resolution is (width, height); width is the first element
        return self._viewport.viewport_api.get_texture_resolution()[0]
@image_width.setter
def image_width(self, width):
"""
Sets the image width @width for this sensor
Args:
width (int): Image width of this sensor, in pixels
"""
_, height = self._viewport.viewport_api.get_texture_resolution()
self._viewport.viewport_api.set_texture_resolution((width, height))
# Requires 3 updates to propagate changes
for i in range(3):
render()
    @property
    def clipping_range(self):
        """
        Returns:
            2-tuple: [min, max] value of the sensor's clipping range, in meters
        """
        return np.array(self.get_attribute("clippingRange"))
    @clipping_range.setter
    def clipping_range(self, limits):
        """
        Sets the clipping range @limits for this sensor

        Args:
            limits (2-tuple): [min, max] value of the sensor's clipping range, in meters
        """
        self.set_attribute(attr="clippingRange", val=lazy.pxr.Gf.Vec2f(*limits))
        # In order for sensor changes to propagate, we must toggle its visibility
        self.visible = False
        # A single update step has to happen here before we toggle visibility for changes to propagate
        render()
        self.visible = True
    @property
    def horizontal_aperture(self):
        """
        Returns:
            float: horizontal aperture of this sensor, in mm
        """
        return self.get_attribute("horizontalAperture")
    @horizontal_aperture.setter
    def horizontal_aperture(self, length):
        """
        Sets the horizontal aperture @length for this sensor

        Args:
            length (float): horizontal aperture of this sensor, in mm
        """
        self.set_attribute("horizontalAperture", length)
    @property
    def focal_length(self):
        """
        Returns:
            float: focal length of this sensor, in mm
        """
        return self.get_attribute("focalLength")
    @focal_length.setter
    def focal_length(self, length):
        """
        Sets the focal length @length for this sensor

        Args:
            length (float): focal length of this sensor, in mm
        """
        self.set_attribute("focalLength", length)
@property
def intrinsic_matrix(self):
"""
Returns:
n-array: (3, 3) camera intrinsic matrix. Transforming point p (x,y,z) in the camera frame via K * p will
produce p' (x', y', w) - the point in the image plane. To get pixel coordiantes, divide x' and y' by w
"""
projection_matrix = self.camera_parameters["cameraProjection"]
projection_matrix = np.array(projection_matrix).reshape(4, 4)
fx = projection_matrix[0, 0]
fy = projection_matrix[1, 1]
cx = projection_matrix[0, 2]
cy = projection_matrix[1, 2]
s = projection_matrix[0, 1] # Skew factor
intrinsic_matrix = np.array([[fx, s, cx],
[0.0, fy, cy],
[0.0, 0.0, 1.0]])
return intrinsic_matrix
    @property
    def _obs_space_mapping(self):
        """
        Returns:
            dict: Mapping from modality name to either a (shape, low, high, dtype) tuple describing a Box space,
                or a pre-built gym space for the bounding-box modalities
        """
        # Generate the complex space types for special modalities:
        # {"bbox_2d_tight", "bbox_2d_loose", "bbox_3d"}
        bbox_3d_space = gym.spaces.Sequence(space=gym.spaces.Tuple((
            gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=np.uint32), # semanticId
            gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # x_min
            gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # y_min
            gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # z_min
            gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # x_max
            gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # y_max
            gym.spaces.Box(low=-np.inf, high=np.inf, shape=(), dtype=np.float32), # z_max
            gym.spaces.Box(low=-np.inf, high=np.inf, shape=(4, 4), dtype=np.float32), # transform
            gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32), # occlusion ratio
        )))

        bbox_2d_space = gym.spaces.Sequence(space=gym.spaces.Tuple((
            gym.spaces.Box(low=0, high=MAX_CLASS_COUNT, shape=(), dtype=np.uint32), # semanticId
            gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # x_min
            gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # y_min
            gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # x_max
            gym.spaces.Box(low=0, high=MAX_VIEWER_SIZE, shape=(), dtype=np.int32), # y_max
            gym.spaces.Box(low=-1.0, high=1.0, shape=(), dtype=np.float32), # occlusion ratio
        )))

        obs_space_mapping = dict(
            rgb=((self.image_height, self.image_width, 4), 0, 255, np.uint8),
            depth=((self.image_height, self.image_width), 0.0, np.inf, np.float32),
            depth_linear=((self.image_height, self.image_width), 0.0, np.inf, np.float32),
            normal=((self.image_height, self.image_width, 4), -1.0, 1.0, np.float32),
            seg_semantic=((self.image_height, self.image_width), 0, MAX_CLASS_COUNT, np.uint32),
            seg_instance=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, np.uint32),
            seg_instance_id=((self.image_height, self.image_width), 0, MAX_INSTANCE_COUNT, np.uint32),
            flow=((self.image_height, self.image_width, 4), -np.inf, np.inf, np.float32),
            bbox_2d_tight=bbox_2d_space,
            bbox_2d_loose=bbox_2d_space,
            bbox_3d=bbox_3d_space,
        )

        return obs_space_mapping
    @classmethod
    def clear(cls):
        """
        Clears all cached sensors that have been generated. Should be used when the simulator is completely reset; i.e.:
        all objects on the stage are destroyed
        """
        for sensor in cls.SENSORS.values():
            # Destroy any sensor that is not attached to the main viewport window
            if sensor._viewport.name != "Viewport":
                sensor._viewport.destroy()

        # Render to update
        render()

        # Reset all class-level bookkeeping to its pristine state
        cls.SENSORS = dict()
        cls.KNOWN_SEMANTIC_IDS = set()
        cls.KEY_ARRAY = None
        cls.INSTANCE_REGISTRY = {0: "background", 1: "unlabelled"}
        cls.INSTANCE_ID_REGISTRY = {0: "background"}
@classproperty
def all_modalities(cls):
return {modality for modality in cls.ALL_MODALITIES if modality != "camera_params"}
    @classproperty
    def no_noise_modalities(cls):
        """
        Returns:
            set: Modalities that should never have noise applied to them
        """
        # bounding boxes and camera state should not have noise
        return {"bbox_2d_tight", "bbox_2d_loose", "bbox_3d"}
| 33,871 | Python | 46.175487 | 164 | 0.62059 |
StanfordVL/OmniGibson/omnigibson/envs/__init__.py | from omnigibson.envs.env_base import Environment
from omnigibson.envs.env_wrapper import EnvironmentWrapper, create_wrapper, REGISTERED_ENV_WRAPPERS
| 149 | Python | 48.999984 | 99 | 0.85906 |
StanfordVL/OmniGibson/omnigibson/envs/env_base.py | import gym
import numpy as np
from copy import deepcopy
import omnigibson as og
from omnigibson.objects import REGISTERED_OBJECTS
from omnigibson.robots import REGISTERED_ROBOTS
from omnigibson.scene_graphs.graph_builder import SceneGraphBuilder
from omnigibson.simulator import launch_simulator
from omnigibson.tasks import REGISTERED_TASKS
from omnigibson.scenes import REGISTERED_SCENES
from omnigibson.sensors import create_sensor
from omnigibson.utils.gym_utils import GymObservable, recursively_generate_flat_dict, recursively_generate_compatible_dict
from omnigibson.utils.config_utils import parse_config
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.python_utils import assert_valid_key, merge_nested_dicts, create_class_from_registry_and_config,\
Recreatable
from omnigibson.macros import gm
# Create module logger
log = create_module_logger(module_name=__name__)
class Environment(gym.Env, GymObservable, Recreatable):
"""
Core environment class that handles loading scene, robot(s), and task, following OpenAI Gym interface.
"""
    def __init__(self, configs):
        """
        Constructs the environment: launches the simulator, merges the given config(s) over the defaults,
        caches frequently-used settings, then loads the scene / robots / objects / task.

        Args:
            configs (str or dict or list of str or dict): config_file path(s) or raw config dictionaries.
                If multiple configs are specified, they will be merged sequentially in the order specified.
                This allows procedural generation of a "full" config from small sub-configs. For valid keys, please
                see @default_config below
        """
        # Call super first
        super().__init__()

        # Launch Isaac Sim
        launch_simulator()

        # Initialize other placeholders that will be filled in later
        self._task = None
        self._external_sensors = None
        self._loaded = None
        self._current_episode = 0

        # Variables reset at the beginning of each episode
        self._current_step = 0

        # Convert config file(s) into a single parsed dict
        configs = configs if isinstance(configs, list) or isinstance(configs, tuple) else [configs]

        # Initial default config
        self.config = self.default_config

        # Merge in specified configs (later configs take precedence over earlier ones)
        for config in configs:
            merge_nested_dicts(base_dict=self.config, extra_dict=parse_config(config), inplace=True)

        # Store settings and other initialized values
        self._automatic_reset = self.env_config["automatic_reset"]
        self._flatten_action_space = self.env_config["flatten_action_space"]
        self._flatten_obs_space = self.env_config["flatten_obs_space"]
        self.physics_frequency = self.env_config["physics_frequency"]
        self.action_frequency = self.env_config["action_frequency"]
        self.device = self.env_config["device"]
        self._initial_pos_z_offset = self.env_config["initial_pos_z_offset"] # how high to offset object placement to account for one action step of dropping

        # Create the scene graph builder
        self._scene_graph_builder = None
        if "scene_graph" in self.config and self.config["scene_graph"] is not None:
            self._scene_graph_builder = SceneGraphBuilder(**self.config["scene_graph"])

        # Load this environment
        self.load()
    def reload(self, configs, overwrite_old=True):
        """
        Reload using another set of config file(s).

        This allows one to change the configuration and hot-reload the environment on the fly.

        Args:
            configs (dict or str or list of dict or list of str): config_file dict(s) or path(s).
                If multiple configs are specified, they will be merged sequentially in the order specified.
                This allows procedural generation of a "full" config from small sub-configs.
            overwrite_old (bool): If True, will overwrite the internal self.config with @configs. Otherwise, will
                merge in the new config(s) into the pre-existing one. Setting this to False allows for minor
                modifications to be made without having to specify entire configs during each reload.
        """
        # Convert config file(s) into a single parsed dict
        configs = [configs] if isinstance(configs, dict) or isinstance(configs, str) else configs

        # Initial default config
        new_config = self.default_config

        # Merge in specified configs (later configs take precedence over earlier ones)
        for config in configs:
            merge_nested_dicts(base_dict=new_config, extra_dict=parse_config(config), inplace=True)

        # Either merge in or overwrite the old config
        if overwrite_old:
            self.config = new_config
        else:
            merge_nested_dicts(base_dict=self.config, extra_dict=new_config, inplace=True)

        # Load this environment again
        self.load()
    def reload_model(self, scene_model):
        """
        Reload another scene model.

        This allows one to change the scene on the fly.

        Args:
            scene_model (str): new scene model to load (eg.: Rs_int)
        """
        # Swap out the scene model in the config, then re-run the full load pipeline
        self.scene_config["model"] = scene_model
        self.load()
    def _load_variables(self):
        """
        Load variables from config, validate the scene / task combination, and sanity-check settings.
        """
        # Store additional variables after config has been loaded fully
        self._initial_pos_z_offset = self.env_config["initial_pos_z_offset"]

        # Reset bookkeeping variables
        self._reset_variables()
        self._current_episode = 0           # Manually set this to 0 since resetting actually increments this

        # - Potentially overwrite the USD entry for the scene if none is specified and we're online sampling -

        # Make sure the requested scene is valid
        scene_type = self.scene_config["type"]
        assert_valid_key(key=scene_type, valid_keys=REGISTERED_SCENES, name="scene type")

        # Verify scene and task configs are valid for the given task type
        REGISTERED_TASKS[self.task_config["type"]].verify_scene_and_task_config(
            scene_cfg=self.scene_config,
            task_cfg=self.task_config,
        )

        # - Additionally run some sanity checks on these values -

        # Check to make sure our z offset is valid -- check that the distance travelled over 1 action timestep is
        # less than the offset we set (dist = 0.5 * gravity * (t^2))
        drop_distance = 0.5 * 9.8 * ((1. / self.action_frequency) ** 2)
        assert drop_distance < self._initial_pos_z_offset, "initial_pos_z_offset is too small for collision checking"
    def _load_task(self, task_config=None):
        """
        Load task

        Args:
            task_config (None or dict): If specified, custom task configuration to use. Otherwise, will use
                self.task_config. Note that if a custom task configuration is specified, the internal task config
                will be updated as well
        """
        # Update internal config if specified
        if task_config is not None:
            # Copy task config, in case self.task_config and task_config are the same!
            task_config = deepcopy(task_config)
            self.task_config.clear()
            self.task_config.update(task_config)

        # Sanity check task to make sure it's valid
        task_type = self.task_config["type"]
        assert_valid_key(key=task_type, valid_keys=REGISTERED_TASKS, name="task type")

        # Grab the kwargs relevant for the specific task and create the task
        self._task = create_class_from_registry_and_config(
            cls_name=self.task_config["type"],
            cls_registry=REGISTERED_TASKS,
            cfg=self.task_config,
            cls_type_descriptor="task",
        )
        assert og.sim.is_stopped(), "Simulator must be stopped before loading tasks!"

        # Load task. Should load additional task-relevant objects and configure the scene into its default initial state
        self._task.load(env=self)

        assert og.sim.is_stopped(), "Simulator must be stopped after loading tasks!"
    def _load_scene(self):
        """
        Load the scene and robot specified in the config file.
        """
        assert og.sim.is_stopped(), "Simulator must be stopped before loading scene!"

        # Set the simulator settings
        # NOTE: This must be done BEFORE the scene is loaded, or else all vision sensors can't retrieve observations
        og.sim.set_simulation_dt(physics_dt=(1. / self.physics_frequency), rendering_dt=(1. / self.action_frequency))

        # Create the scene from our scene config
        scene = create_class_from_registry_and_config(
            cls_name=self.scene_config["type"],
            cls_registry=REGISTERED_SCENES,
            cfg=self.scene_config,
            cls_type_descriptor="scene",
        )
        og.sim.import_scene(scene)

        # Set the rendering settings
        if gm.RENDER_VIEWER_CAMERA:
            og.sim.viewer_width = self.render_config["viewer_width"]
            og.sim.viewer_height = self.render_config["viewer_height"]
        og.sim.device = self.device

        assert og.sim.is_stopped(), "Simulator must be stopped after loading scene!"
    def _load_robots(self):
        """
        Load robots into the scene
        """
        # Only actually load robots if no robot has been imported from the scene loading directly yet
        if len(self.scene.robots) == 0:
            assert og.sim.is_stopped(), "Simulator must be stopped before loading robots!"

            # Iterate over all robots to generate in the robot config
            for i, robot_config in enumerate(self.robots_config):
                # Add a name for the robot if necessary
                if "name" not in robot_config:
                    robot_config["name"] = f"robot{i}"

                # Pop the desired position and orientation so they aren't passed to the constructor
                position, orientation = robot_config.pop("position", None), robot_config.pop("orientation", None)
                # Make sure robot exists, grab its corresponding kwargs, and create / import the robot
                robot = create_class_from_registry_and_config(
                    cls_name=robot_config["type"],
                    cls_registry=REGISTERED_ROBOTS,
                    cfg=robot_config,
                    cls_type_descriptor="robot",
                )
                # Import the robot into the simulator
                og.sim.import_object(robot)
                robot.set_position_orientation(position=position, orientation=orientation)

            if len(self.robots_config) > 0:
                # Auto-initialize all robots
                og.sim.play()
                self.scene.reset()
                self.scene.update_initial_state()
                og.sim.stop()

        assert og.sim.is_stopped(), "Simulator must be stopped after loading robots!"
    def _load_objects(self):
        """
        Load any additional custom objects into the scene
        """
        assert og.sim.is_stopped(), "Simulator must be stopped before loading objects!"
        for i, obj_config in enumerate(self.objects_config):
            # Add a name for the object if necessary
            if "name" not in obj_config:
                obj_config["name"] = f"obj{i}"
            # Pop the desired position and orientation
            position, orientation = obj_config.pop("position", None), obj_config.pop("orientation", None)
            # Make sure object exists, grab its corresponding kwargs, and create / import the object
            obj = create_class_from_registry_and_config(
                cls_name=obj_config["type"],
                cls_registry=REGISTERED_OBJECTS,
                cfg=obj_config,
                cls_type_descriptor="object",
            )
            # Import the object into the simulator and set the pose
            og.sim.import_object(obj)
            obj.set_position_orientation(position=position, orientation=orientation)

        if len(self.objects_config) > 0:
            # Auto-initialize all objects
            og.sim.play()
            self.scene.reset()
            self.scene.update_initial_state()
            og.sim.stop()

        assert og.sim.is_stopped(), "Simulator must be stopped after loading objects!"
    def _load_external_sensors(self):
        """
        Load any additional custom external sensors into the scene
        """
        assert og.sim.is_stopped(), "Simulator must be stopped before loading external sensors!"
        sensors_config = self.env_config["external_sensors"]
        if sensors_config is not None:
            self._external_sensors = dict()
            for i, sensor_config in enumerate(sensors_config):
                # Add a name for the object if necessary
                if "name" not in sensor_config:
                    sensor_config["name"] = f"external_sensor{i}"
                # Determine prim path if not specified
                if "prim_path" not in sensor_config:
                    sensor_config["prim_path"] = f"/World/{sensor_config['name']}"
                # Pop the desired position and orientation
                local_position, local_orientation = sensor_config.pop("local_position", None), sensor_config.pop("local_orientation", None)
                # Make sure sensor exists, grab its corresponding kwargs, and create the sensor
                sensor = create_sensor(**sensor_config)
                # Load and initialize this sensor, then set its local pose
                sensor.load()
                sensor.initialize()
                sensor.set_local_pose(local_position, local_orientation)
                self._external_sensors[sensor.name] = sensor

        assert og.sim.is_stopped(), "Simulator must be stopped after loading external sensors!"
    def _load_observation_space(self):
        """
        Assembles the observation space from all robots, the task, and any external sensors.

        Returns:
            dict: Keyword-mapped observation spaces, keyed by robot name, "task", and optionally "external"
        """
        # Grab robot(s) and task obs spaces
        obs_space = dict()

        for robot in self.robots:
            # Load the observation space for the robot
            obs_space[robot.name] = robot.load_observation_space()

        # Also load the task obs space
        obs_space["task"] = self._task.load_observation_space()

        # Also load any external sensors
        if self._external_sensors is not None:
            external_obs_space = dict()
            for sensor_name, sensor in self._external_sensors.items():
                # Load the sensor observation space
                external_obs_space[sensor_name] = sensor.load_observation_space()
            obs_space["external"] = gym.spaces.Dict(external_obs_space)

        return obs_space
    def load_observation_space(self):
        """
        Loads the full observation space, optionally flattening the nested dict structure.

        Returns:
            gym.spaces.Dict: The loaded (possibly flattened) observation space
        """
        # Call super first
        obs_space = super().load_observation_space()

        # If we want to flatten it, modify the observation space by recursively searching through all
        if self._flatten_obs_space:
            self.observation_space = gym.spaces.Dict(recursively_generate_flat_dict(dic=obs_space))

        return self.observation_space
    def _load_action_space(self):
        """
        Load action space for each robot
        """
        action_space = gym.spaces.Dict({robot.name: robot.action_space for robot in self.robots})

        # Convert into flattened 1D Box space if requested
        if self._flatten_action_space:
            lows = []
            highs = []
            for space in action_space.values():
                assert isinstance(space, gym.spaces.Box), \
                    "Can only flatten action space where all individual spaces are gym.space.Box instances!"
                assert len(space.shape) == 1, \
                    "Can only flatten action space where all individual spaces are 1D instances!"
                lows.append(space.low)
                highs.append(space.high)
            # Concatenate per-robot bounds into one flat Box
            action_space = gym.spaces.Box(np.concatenate(lows), np.concatenate(highs), dtype=np.float32)

        # Store action space
        self.action_space = action_space
    def load(self):
        """
        Load the scene and robot specified in the config file.
        """
        # This environment is not loaded
        self._loaded = False

        # Load config variables
        self._load_variables()

        # Load the scene, robots, and task
        # NOTE: order matters -- the scene must exist before robots/objects are imported, and the task last
        self._load_scene()
        self._load_robots()
        self._load_objects()
        self._load_task()
        self._load_external_sensors()

        og.sim.play()
        self.reset()

        # Load the obs / action spaces
        self.load_observation_space()
        self._load_action_space()

        # Start the scene graph builder
        if self._scene_graph_builder:
            self._scene_graph_builder.start(self.scene)

        # Denote that the scene is loaded
        self._loaded = True
    def update_task(self, task_config):
        """
        Updates the internal task using @task_config. NOTE: This will internally reset the environment as well!

        Args:
            task_config (dict): Task configuration for updating the new task
        """
        # Make sure sim is playing
        assert og.sim.is_playing(), "Update task should occur while sim is playing!"

        # Denote scene as not loaded yet
        self._loaded = False
        og.sim.stop()
        self._load_task(task_config=task_config)
        og.sim.play()
        self.reset()

        # Load obs / action spaces (the new task may change both)
        self.load_observation_space()
        self._load_action_space()

        # Scene is now loaded again
        self._loaded = True
    def close(self):
        """
        Clean up the environment and shut down the simulation.
        """
        og.shutdown()
    def get_obs(self):
        """
        Get the current environment observation.

        Returns:
            2-tuple:
                dict: Keyword-mapped observations, which are possibly nested
                dict: Additional information about the observations
        """
        obs = dict()
        info = dict()

        # Grab all observations from each robot
        for robot in self.robots:
            obs[robot.name], info[robot.name] = robot.get_obs()

        # Add task observations
        obs["task"] = self._task.get_obs(env=self)

        # Add external sensor observations if they exist
        if self._external_sensors is not None:
            external_obs = dict()
            external_info = dict()
            for sensor_name, sensor in self._external_sensors.items():
                external_obs[sensor_name], external_info[sensor_name] = sensor.get_obs()
            obs["external"] = external_obs
            info["external"] = external_info

        # Possibly flatten obs if requested
        if self._flatten_obs_space:
            obs = recursively_generate_flat_dict(dic=obs)

        return obs, info
    def get_scene_graph(self):
        """
        Get the current scene graph.

        Returns:
            SceneGraph: Current scene graph
        """
        assert self._scene_graph_builder is not None, "Scene graph builder must be specified in config!"
        return self._scene_graph_builder.get_scene_graph()
    def _populate_info(self, info):
        """
        Populate info dictionary with any useful information.

        Args:
            info (dict): Information dictionary to populate. NOTE: @info is modified in place; this method
                returns None rather than the dictionary.
        """
        info["episode_length"] = self._current_step

        if self._scene_graph_builder is not None:
            info["scene_graph"] = self.get_scene_graph()
def step(self, action):
"""
Apply robot's action and return the next state, reward, done and info,
following OpenAI Gym's convention
Args:
action (gym.spaces.Dict or dict or np.array): robot actions. If a dict is specified, each entry should
map robot name to corresponding action. If a np.array, it should be the flattened, concatenated set
of actions
Returns:
4-tuple:
- dict: state, i.e. next observation
- float: reward, i.e. reward at this current timestep
- bool: done, i.e. whether this episode is terminated
- dict: info, i.e. dictionary with any useful information
"""
try:
# If the action is not a dictionary, convert into a dictionary
if not isinstance(action, dict) and not isinstance(action, gym.spaces.Dict):
action_dict = dict()
idx = 0
for robot in self.robots:
action_dim = robot.action_dim
action_dict[robot.name] = action[idx: idx + action_dim]
idx += action_dim
else:
# Our inputted action is the action dictionary
action_dict = action
# Iterate over all robots and apply actions
for robot in self.robots:
robot.apply_action(action_dict[robot.name])
# Run simulation step
og.sim.step()
# Grab observations
obs, obs_info = self.get_obs()
# Step the scene graph builder if necessary
if self._scene_graph_builder is not None:
self._scene_graph_builder.step(self.scene)
# Grab reward, done, and info, and populate with internal info
reward, done, info = self.task.step(self, action)
self._populate_info(info)
info["obs_info"] = obs_info
if done and self._automatic_reset:
# Add lost observation to our information dict, and reset
info["last_observation"] = obs
obs = self.reset()
# Increment step
self._current_step += 1
return obs, reward, done, info
except:
raise ValueError(f"Failed to execute environment step {self._current_step} in episode {self._current_episode}")
    def _reset_variables(self):
        """
        Reset bookkeeping variables for the next new episode.
        """
        # Starting a new episode increments the episode counter and zeroes the step counter
        self._current_episode += 1
        self._current_step = 0
    # TODO: Match super class signature?
    def reset(self):
        """
        Reset episode.

        Returns:
            dict: Initial observation after the reset
        """
        # Reset the task
        self.task.reset(self)

        # Reset internal variables
        self._reset_variables()

        # Run a single simulator step to make sure we can grab updated observations
        og.sim.step()

        # Grab and return observations
        obs, _ = self.get_obs()

        if self._loaded:
            # Sanity check to make sure received observations match expected observation space
            check_obs = recursively_generate_compatible_dict(dic=obs)
            if not self.observation_space.contains(check_obs):
                # Build flat expected-vs-received views so the mismatch report is readable
                exp_obs = dict()
                for key, value in recursively_generate_flat_dict(dic=self.observation_space).items():
                    exp_obs[key] = ("obs_space", key, value.dtype, value.shape)
                real_obs = dict()
                for key, value in recursively_generate_flat_dict(dic=check_obs).items():
                    if isinstance(value, np.ndarray):
                        real_obs[key] = ("obs", key, value.dtype, value.shape)
                    else:
                        real_obs[key] = ("obs", key, type(value), "()")

                exp_keys = set(exp_obs.keys())
                real_keys = set(real_obs.keys())
                shared_keys = exp_keys.intersection(real_keys)
                missing_keys = exp_keys - real_keys
                extra_keys = real_keys - exp_keys

                if missing_keys:
                    log.error("MISSING OBSERVATION KEYS:")
                    log.error(missing_keys)
                if extra_keys:
                    log.error("EXTRA OBSERVATION KEYS:")
                    log.error(extra_keys)

                mismatched_keys = []
                for k in shared_keys:
                    if exp_obs[k][2:] != real_obs[k][2:]:  # Compare dtypes and shapes
                        mismatched_keys.append(k)
                        log.error(f"MISMATCHED OBSERVATION FOR KEY '{k}':")
                        log.error(f"Expected: {exp_obs[k]}")
                        log.error(f"Received: {real_obs[k]}")

                raise ValueError("Observation space does not match returned observations!")

        return obs
    @property
    def episode_steps(self):
        """
        Returns:
            int: Current number of steps in episode
        """
        return self._current_step
    @property
    def initial_pos_z_offset(self):
        """
        Returns:
            float: how high to offset object placement to test valid pose & account for one action step of dropping
        """
        return self._initial_pos_z_offset
    @property
    def task(self):
        """
        Returns:
            BaseTask: Active task instance
        """
        return self._task
    @property
    def scene(self):
        """
        Returns:
            Scene: Active scene in this environment
        """
        return og.sim.scene
    @property
    def robots(self):
        """
        Returns:
            list of BaseRobot: Robots in the current scene
        """
        return self.scene.robots
    @property
    def external_sensors(self):
        """
        Returns:
            None or dict: If self.env_config["external_sensors"] is specified, returns the dict mapping sensor name to
                instantiated sensor. Otherwise, returns None
        """
        return self._external_sensors
    @property
    def env_config(self):
        """
        Returns:
            dict: Environment-specific configuration kwargs
        """
        return self.config["env"]
    @property
    def render_config(self):
        """
        Returns:
            dict: Render-specific configuration kwargs
        """
        return self.config["render"]
    @property
    def scene_config(self):
        """
        Returns:
            dict: Scene-specific configuration kwargs
        """
        return self.config["scene"]
    @property
    def robots_config(self):
        """
        Returns:
            list of dict: Robot-specific configuration kwargs, one entry per robot to load
        """
        return self.config["robots"]
    @property
    def objects_config(self):
        """
        Returns:
            list of dict: Object-specific configuration kwargs, one entry per object to load
        """
        return self.config["objects"]
    @property
    def task_config(self):
        """
        Returns:
            dict: Task-specific configuration kwargs
        """
        return self.config["task"]
    @property
    def wrapper_config(self):
        """
        Returns:
            dict: Wrapper-specific configuration kwargs
        """
        return self.config["wrapper"]
@property
def default_config(self):
"""
Returns:
dict: Default configuration for this environment. May not be fully specified (i.e.: still requires @config
to be specified during environment creation)
"""
return {
# Environment kwargs
"env": {
"action_frequency": 30,
"physics_frequency": 120,
"device": None,
"automatic_reset": False,
"flatten_action_space": False,
"flatten_obs_space": False,
"initial_pos_z_offset": 0.1,
"external_sensors": None,
},
# Rendering kwargs
"render": {
"viewer_width": 1280,
"viewer_height": 720,
},
# Scene kwargs
"scene": {
# Traversibility map kwargs
"waypoint_resolution": 0.2,
"num_waypoints": 10,
"trav_map_resolution": 0.1,
"default_erosion_radius": 0.0,
"trav_map_with_objects": True,
"scene_instance": None,
"scene_file": None,
},
# Robot kwargs
"robots": [], # no robots by default
# Object kwargs
"objects": [], # no objects by default
# Task kwargs
"task": {
"type": "DummyTask",
},
# Wrapper kwargs
"wrapper": {
"type": None,
},
}
| 28,380 | Python | 36.147906 | 160 | 0.583298 |
StanfordVL/OmniGibson/omnigibson/envs/env_wrapper.py | from omnigibson.utils.python_utils import Wrapper
from omnigibson.utils.python_utils import Registerable, classproperty, create_class_from_registry_and_config
from omnigibson.utils.ui_utils import create_module_logger
from copy import deepcopy
# Global dicts that will contain mappings
REGISTERED_ENV_WRAPPERS = dict()
# Create module logger
log = create_module_logger(module_name=__name__)
def create_wrapper(env):
    """
    Instantiate the wrapper specified by @env's wrapper_config and wrap @env with it.

    Args:
        env (Environment): Environment instance to wrap

    Returns:
        EnvironmentWrapper: @env wrapped by the configured wrapper class
    """
    # Deep-copy so popping / injecting keys below never mutates the env's stored config
    cfg = deepcopy(env.wrapper_config)
    cls_name = cfg.pop("type")
    cfg["env"] = env

    return create_class_from_registry_and_config(
        cls_name=cls_name,
        cls_registry=REGISTERED_ENV_WRAPPERS,
        cfg=cfg,
        cls_type_descriptor="wrapper",
    )
class EnvironmentWrapper(Wrapper, Registerable):
    """
    Base class for all environment wrappers in OmniGibson. Subclasses are generally expected to
    override reset(), step(), and observation_spec(); the defaults here simply delegate to the
    wrapped environment.

    Args:
        env (OmniGibsonEnv): The environment to wrap.
    """
    def __init__(self, env):
        self.env = env

        # Run super
        super().__init__(obj=env)

    def step(self, action):
        """
        Delegates to the wrapped environment's step() by default

        Args:
            action (np.array): action to take in environment

        Returns:
            4-tuple:
                - (dict) observations from the environment
                - (float) reward from the environment
                - (bool) whether the current episode is completed or not
                - (dict) misc information
        """
        return self.env.step(action)

    def reset(self):
        """
        Delegates to the wrapped environment's reset() by default

        Returns:
            dict: Environment observation space after reset occurs
        """
        return self.env.reset()

    def observation_spec(self):
        """
        Delegates to the wrapped environment's observation_spec() by default

        Returns:
            dict: Observations from the environment
        """
        return self.env.observation_spec()

    @classproperty
    def _do_not_register_classes(cls):
        # This class is an abstract template, so exclude it from registration
        skip = super()._do_not_register_classes
        skip.add("EnvironmentWrapper")
        return skip

    @classproperty
    def _cls_registry(cls):
        # All concrete wrappers register into the global wrapper registry
        global REGISTERED_ENV_WRAPPERS
        return REGISTERED_ENV_WRAPPERS
| 2,560 | Python | 27.455555 | 117 | 0.633984 |
StanfordVL/OmniGibson/omnigibson/reward_functions/potential_reward.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction
class PotentialReward(BaseRewardFunction):
    """
    Potential reward

    Assumes the task has get_potential implemented; lower potential is preferred
    (e.g. a common potential for a goal-directed task is the distance to the goal)

    Args:
        potential_fcn (method): function for calculating potential. Function signature should be:

            potential = potential_fcn(env)

            where @env is a Environment instance, and @potential is a float value representing the calculated potential
        r_potential (float): Reward weighting to give proportional to the potential difference calculated
            in between env timesteps
    """
    def __init__(self, potential_fcn, r_potential=1.0):
        # Store internal vars
        self._potential_fcn = potential_fcn
        self._r_potential = r_potential

        # Potential is populated at reset time
        self._potential = None

        # Run super
        super().__init__()

    def reset(self, task, env):
        """
        Compute the initial potential after episode reset

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
        """
        self._potential = self._potential_fcn(env)

    def _step(self, task, env, action):
        # Reward is the weighted decrease in potential since the previous timestep
        current_potential = self._potential_fcn(env)
        reward = (self._potential - current_potential) * self._r_potential

        # Cache the new potential for the next step
        self._potential = current_potential

        return reward, {}
| 1,683 | Python | 32.019607 | 119 | 0.660131 |
StanfordVL/OmniGibson/omnigibson/reward_functions/__init__.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction, REGISTERED_REWARD_FUNCTIONS
from omnigibson.reward_functions.collision_reward import CollisionReward
from omnigibson.reward_functions.point_goal_reward import PointGoalReward
from omnigibson.reward_functions.potential_reward import PotentialReward
from omnigibson.reward_functions.reaching_goal_reward import ReachingGoalReward
| 409 | Python | 67.333322 | 108 | 0.885086 |
StanfordVL/OmniGibson/omnigibson/reward_functions/reward_function_base.py | from abc import ABCMeta, abstractmethod
from copy import deepcopy
from omnigibson.utils.python_utils import classproperty, Registerable
REGISTERED_REWARD_FUNCTIONS = dict()
class BaseRewardFunction(Registerable, metaclass=ABCMeta):
    """
    Base RewardFunction class
    Reward-specific reset and get_reward methods are implemented in subclasses
    """
    def __init__(self):
        # Store internal vars that will be filled in at runtime
        self._reward = None
        self._info = None

    @abstractmethod
    def _step(self, task, env, action):
        """
        Step the reward function and compute the reward at the current timestep. Overwritten by subclasses.

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment

        Returns:
            2-tuple:
                - float: computed reward
                - dict: any reward-related information for this specific reward
        """
        raise NotImplementedError()

    def step(self, task, env, action):
        """
        Step the reward function and compute the reward at the current timestep.

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment

        Returns:
            2-tuple:
                - float: computed reward
                - dict: any reward-related information for this specific reward
        """
        # Step internally and store output
        self._reward, self._info = self._step(task=task, env=env, action=action)

        # Return reward and a copy of the info
        return self._reward, deepcopy(self._info)

    def reset(self, task, env):
        """
        Reward function-specific reset

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
        """
        # Reset internal vars
        self._reward = None
        self._info = None

    @property
    def reward(self):
        """
        Returns:
            float: Current reward for this reward function
        """
        assert self._reward is not None, "At least one step() must occur before reward can be calculated!"
        return self._reward

    @property
    def info(self):
        """
        Returns:
            dict: Current info for this reward function
        """
        assert self._info is not None, "At least one step() must occur before info can be calculated!"
        return self._info

    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("BaseRewardFunction")
        return classes

    @classproperty
    def _cls_registry(cls):
        # Global registry
        global REGISTERED_REWARD_FUNCTIONS
        return REGISTERED_REWARD_FUNCTIONS
| 3,066 | Python | 30.618556 | 107 | 0.61546 |
StanfordVL/OmniGibson/omnigibson/reward_functions/point_goal_reward.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction
class PointGoalReward(BaseRewardFunction):
    """
    Point goal reward

    Sparse success reward granted when the robot's base reaches the point goal

    Args:
        pointgoal (PointGoal): Termination condition for checking whether a point goal is reached
        r_pointgoal (float): Reward for reaching the point goal
    """
    def __init__(self, pointgoal, r_pointgoal=10.0):
        # Store internal vars
        self._pointgoal = pointgoal
        self._r_pointgoal = r_pointgoal

        # Run super
        super().__init__()

    def _step(self, task, env, action):
        # Reward is granted only when the pointgoal termination condition reports success
        if self._pointgoal.success:
            reward = self._r_pointgoal
        else:
            reward = 0.0
        return reward, {}
| 811 | Python | 29.074073 | 97 | 0.664612 |
StanfordVL/OmniGibson/omnigibson/reward_functions/collision_reward.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction
from omnigibson.object_states.contact_bodies import ContactBodies
class CollisionReward(BaseRewardFunction):
    """
    Collision reward

    Penalize robot collision. Typically collision_reward_weight is negative. Note that we ignore collisions with any
    floor objects.

    Args:
        robot_idn (int): robot identifier to evaluate collision penalty with. Default is 0, corresponding to the first
            robot added to the scene
        ignore_self_collisions (bool): Whether to ignore robot self-collisions or not
        r_collision (float): Penalty value (>0) to penalize collisions
    """
    def __init__(self, robot_idn=0, ignore_self_collisions=True, r_collision=0.1):
        # Store internal vars
        assert r_collision > 0, f"r_collision must be positive, got: {r_collision}!"
        self._robot_idn = robot_idn
        self._ignore_self_collisions = ignore_self_collisions
        self._r_collision = r_collision

        # Run super
        super().__init__()

    def _step(self, task, env, action):
        # Reward is -self._r_collision if there were any non-ignored collisions in the last timestep
        robot = env.robots[self._robot_idn]
        # Always ignore floors; additionally ignore the robot's own prims if requested
        floors = list(env.scene.object_registry("category", "floors", []))
        # BUGFIX: @ignore_self_collisions is a bool, but was previously compared against None
        # ("floors if self._ignore_self_collisions is None else floors + [robot]"), which always
        # evaluated False and therefore ignored self-collisions regardless of the flag's value
        ignore_objs = floors + [robot] if self._ignore_self_collisions else floors
        in_contact = len(robot.states[ContactBodies].get_value(ignore_objs=tuple(ignore_objs))) > 0
        reward = float(in_contact) * -self._r_collision
        return reward, {}
| 1,694 | Python | 44.81081 | 121 | 0.681818 |
StanfordVL/OmniGibson/omnigibson/reward_functions/reaching_goal_reward.py | from omnigibson.reward_functions.reward_function_base import BaseRewardFunction
import omnigibson.utils.transform_utils as T
class ReachingGoalReward(BaseRewardFunction):
    """
    Reaching goal reward

    Sparse success reward granted when the robot's end-effector reaches the goal

    Args:
        robot_idn (int): robot identifier to evaluate point goal with. Default is 0, corresponding to the first
            robot added to the scene
        r_reach (float): reward for succeeding to reach the goal
        distance_tol (float): Distance (m) tolerance between goal position and @robot_idn's robot eef position
            that is accepted as a success
    """
    def __init__(self, robot_idn=0, r_reach=10.0, distance_tol=0.1):
        # Store internal vars
        self._robot_idn = robot_idn
        self._r_reach = r_reach
        self._distance_tol = distance_tol

        # Run super
        super().__init__()

    def _step(self, task, env, action):
        # Sparse reward granted only when the eef is within the distance tolerance of the goal
        eef_pos = env.robots[self._robot_idn].get_eef_position()
        if T.l2_distance(eef_pos, task.goal_pos) < self._distance_tol:
            reward = self._r_reach
        else:
            reward = 0.0
        return reward, {}
| 1,289 | Python | 36.941175 | 118 | 0.660978 |
StanfordVL/OmniGibson/omnigibson/tasks/point_reaching_task.py | import numpy as np
from omnigibson.tasks.point_navigation_task import PointNavigationTask
from omnigibson.termination_conditions.point_goal import PointGoal
import omnigibson.utils.transform_utils as T
# Valid point navigation reward types
POINT_NAVIGATION_REWARD_TYPES = {"l2", "geodesic"}
class PointReachingTask(PointNavigationTask):
    """
    Point Reaching Task
    The goal is to reach a random goal position with the robot's end effector

    Args:
        robot_idn (int): Which robot that this task corresponds to
        floor (int): Which floor to navigate on
        initial_pos (None or 3-array): If specified, the global (x,y,z) position the robot is placed at when each
            task episode begins. If None, a collision-free value is randomly sampled
        initial_quat (None or 3-array): If specified, the global (r,p,y) euler orientation the robot is placed at
            when each task episode begins. If None, a value is randomly sampled about the z-axis
        goal_pos (None or 3-array): If specified, the global (x,y,z) goal position for the given task episode.
            If None, a collision-free value is randomly sampled
        goal_tolerance (float): Distance between the goal position and the current position below which the task
            is considered a success
        goal_in_polar (bool): Whether to represent the goal in polar coordinates in the task observations
        path_range (None or 2-array): If specified, (min, max) bounds on the valid total path length when sampling
            initial / goal positions
        height_range (None or 2-array): If specified, (min, max) bounds on the valid additional height randomly
            added to sampled goal positions
        visualize_goal (bool): Whether to visualize the initial / goal locations
        visualize_path (bool): Whether to visualize the waypoint path from the initial to the goal location
        goal_height (float): If visualizing, specifies the height of the visual goals (m)
        waypoint_height (float): If visualizing, specifies the height of the visual waypoints (m)
        waypoint_width (float): If visualizing, specifies the width of the visual waypoints (m)
        n_vis_waypoints (int): If visualizing, specifies the number of waypoints to generate
        termination_config (None or dict): Keyword-mapped overrides for the default termination config. Any keyword
            required by the task class but not specified is filled in from cls.default_termination_config
        reward_config (None or dict): Keyword-mapped overrides for the default reward config. Any keyword
            required by the task class but not specified is filled in from cls.default_reward_config
    """
    def __init__(
        self,
        robot_idn=0,
        floor=0,
        initial_pos=None,
        initial_quat=None,
        goal_pos=None,
        goal_tolerance=0.1,
        goal_in_polar=False,
        path_range=None,
        height_range=None,
        visualize_goal=False,
        visualize_path=False,
        goal_height=0.06,
        waypoint_height=0.05,
        waypoint_width=0.1,
        n_vis_waypoints=10,
        reward_config=None,
        termination_config=None,
    ):
        # Only the goal-height sampling range is specific to this task
        self._height_range = height_range

        # Everything else is handled by the navigation task; the reward type is forced to l2 since
        # the eef goal is measured in 3D
        super().__init__(
            robot_idn=robot_idn,
            floor=floor,
            initial_pos=initial_pos,
            initial_quat=initial_quat,
            goal_pos=goal_pos,
            goal_tolerance=goal_tolerance,
            goal_in_polar=goal_in_polar,
            path_range=path_range,
            visualize_goal=visualize_goal,
            visualize_path=visualize_path,
            goal_height=goal_height,
            waypoint_height=waypoint_height,
            waypoint_width=waypoint_width,
            n_vis_waypoints=n_vis_waypoints,
            reward_type="l2",  # Must use l2 for reaching task
            reward_config=reward_config,
            termination_config=termination_config,
        )

    def _create_termination_conditions(self):
        # Start from the navigation task's conditions
        conditions = super()._create_termination_conditions()

        # Swap in a PointGoal that measures distance along all three (xyz) axes instead of only xy,
        # since the goal is reached with the end effector rather than the base
        conditions["pointgoal"] = PointGoal(
            robot_idn=self._robot_idn,
            distance_tol=self._goal_tolerance,
            distance_axes="xyz",
        )

        return conditions

    def _sample_initial_pose_and_goal_pos(self, env, max_trials=100):
        # Sample a (pose, goal) pair exactly as the navigation task does
        initial_pos, initial_ori, goal_pos = super()._sample_initial_pose_and_goal_pos(env=env, max_trials=max_trials)

        # Optionally raise the goal by a random amount within the requested height range
        if self._height_range is not None:
            low, high = self._height_range
            goal_pos[2] += np.random.uniform(low, high)

        return initial_pos, initial_ori, goal_pos

    def _get_l2_potential(self, env):
        # Potential is measured from the robot's EEF, not its base!
        return T.l2_distance(env.robots[self._robot_idn].get_eef_position(), self._goal_pos)

    def _get_obs(self, env):
        # Grab the navigation task's observations first
        low_dim_obs, obs = super()._get_obs(env=env)

        # Replace the base-frame xy distance with the full xyz vector from the current pose to the goal
        low_dim_obs.pop("xy_pos_to_goal")
        low_dim_obs["eef_to_goal"] = self._global_pos_to_robot_frame(env=env, pos=self._goal_pos)

        # Also expose the eef position expressed in the robot frame
        eef_pos = env.robots[self._robot_idn].get_eef_position()
        low_dim_obs["eef_local_pos"] = self._global_pos_to_robot_frame(env=env, pos=eef_pos)

        return low_dim_obs, obs

    def get_current_pos(self, env):
        # "Current position" for this task is the robot's EEF, not its base!
        return env.robots[self._robot_idn].get_eef_position()
| 6,615 | Python | 46.942029 | 131 | 0.654271 |
StanfordVL/OmniGibson/omnigibson/tasks/task_base.py | from abc import ABCMeta, abstractmethod
from copy import deepcopy
import numpy as np
from omnigibson.utils.python_utils import classproperty, Registerable
from omnigibson.utils.gym_utils import GymObservable
REGISTERED_TASKS = dict()
class BaseTask(GymObservable, Registerable, metaclass=ABCMeta):
    """
    Base Task class.
    Task-specific reset_scene, reset_agent, step methods are implemented in subclasses

    Args:
        termination_config (None or dict): Keyword-mapped configuration to use to generate termination conditions. This
            should be specific to the task class. Default is None, which corresponds to a default config being used.
            Note that any keyword required by a specific task class but not specified in the config will automatically
            be filled in with the default config. See cls.default_termination_config for default values used
        reward_config (None or dict): Keyword-mapped configuration to use to generate reward functions. This should be
            specific to the task class. Default is None, which corresponds to a default config being used. Note that
            any keyword required by a specific task class but not specified in the config will automatically be filled
            in with the default config. See cls.default_reward_config for default values used
    """
    def __init__(self, termination_config=None, reward_config=None):
        # Make sure configs are dictionaries
        termination_config = dict() if termination_config is None else termination_config
        reward_config = dict() if reward_config is None else reward_config

        # Sanity check termination and reward conditions -- any keys found in the inputted config but NOT
        # found in the default config should raise an error
        unknown_termination_keys = set(termination_config.keys()) - set(self.default_termination_config.keys())
        assert len(unknown_termination_keys) == 0, \
            f"Got unknown termination config keys inputted: {unknown_termination_keys}"
        unknown_reward_keys = set(reward_config.keys()) - set(self.default_reward_config.keys())
        assert len(unknown_reward_keys) == 0, f"Got unknown reward config keys inputted: {unknown_reward_keys}"

        # Combine with defaults and store internally
        # NOTE(review): assumes default_*_config returns a fresh dict each call, since it is mutated
        # in place by update() below -- confirm in subclasses
        self._termination_config = self.default_termination_config
        self._termination_config.update(termination_config)
        self._reward_config = self.default_reward_config
        self._reward_config.update(reward_config)

        # Generate reward and termination functions (requires the merged configs above)
        self._termination_conditions = self._create_termination_conditions()
        self._reward_functions = self._create_reward_functions()

        # Store other internal vars that will be populated at runtime
        self._loaded = False
        self._reward = None
        self._done = None
        self._success = None
        self._info = None
        self._low_dim_obs_dim = None

        # Run super init
        super().__init__()

    @abstractmethod
    def _load(self, env):
        """
        Load this task. Should be implemented by subclass. Can include functionality, e.g.: loading dynamic objects
        into the environment
        """
        raise NotImplementedError()

    @abstractmethod
    def _load_non_low_dim_observation_space(self):
        """
        Loads any non-low dim observation spaces for this task.

        Returns:
            dict: Keyword-mapped observation space for this object mapping non low dim task observation name to
                observation space
        """
        raise NotImplementedError()

    @classmethod
    def verify_scene_and_task_config(cls, scene_cfg, task_cfg):
        """
        Runs any necessary sanity checks on the scene and task configs passed; and possibly modifies them in-place

        Args:
            scene_cfg (dict): Scene configuration
            task_cfg (dict): Task configuration
        """
        # Default is no-op
        pass

    def _load_observation_space(self):
        # Create the non low dim obs space
        obs_space = self._load_non_low_dim_observation_space()

        # Create the low dim obs space and add to the main obs space dict -- make sure we're flattening low dim obs
        # (self._low_dim_obs_dim is filled in during reset(), which must run before this)
        obs_space["low_dim"] = self._build_obs_box_space(shape=(self._low_dim_obs_dim,), low=-np.inf, high=np.inf, dtype=np.float64)

        return obs_space

    def load(self, env):
        """
        Load this task

        Args:
            env (Environment): environment instance to load this task into
        """
        # Make sure the scene is of the correct type!
        assert any([issubclass(env.scene.__class__, valid_cls) for valid_cls in self.valid_scene_types]), \
            f"Got incompatible scene type {env.scene.__class__.__name__} for task {self.__class__.__name__}! " \
            f"Scene class must be a subclass of at least one of: " \
            f"{[cls_type.__name__ for cls_type in self.valid_scene_types]}"

        # Run internal method
        self._load(env=env)

        # We're now initialized
        self._loaded = True

    @abstractmethod
    def _create_termination_conditions(self):
        """
        Creates the termination functions in the environment

        Returns:
            dict of BaseTerminationCondition: Termination functions created for this task
        """
        raise NotImplementedError()

    @abstractmethod
    def _create_reward_functions(self):
        """
        Creates the reward functions in the environment

        Returns:
            dict of BaseRewardFunction: Reward functions created for this task
        """
        raise NotImplementedError()

    def _reset_scene(self, env):
        """
        Task-specific scene reset. Default is the normal scene reset

        Args:
            env (Environment): environment instance
        """
        env.scene.reset()

    def _reset_agent(self, env):
        """
        Task-specific agent reset

        Args:
            env (Environment): environment instance
        """
        # Default is no-op
        pass

    def _reset_variables(self, env):
        """
        Task-specific internal variable reset

        Args:
            env (Environment): environment instance
        """
        # By default, reset reward, done, and info
        self._reward = None
        self._done = False
        self._success = False
        self._info = None

    def reset(self, env):
        """
        Resets this task in the environment

        Args:
            env (Environment): environment instance to reset
        """
        # Reset the scene, agent, and variables
        self._reset_scene(env)
        self._reset_agent(env)
        self._reset_variables(env)

        # Also reset all termination conditions and reward functions
        for termination_condition in self._termination_conditions.values():
            termination_condition.reset(self, env)
        for reward_function in self._reward_functions.values():
            reward_function.reset(self, env)

        # Fill in low dim obs dim so we can use this to create the observation space later
        self._low_dim_obs_dim = len(self.get_obs(env=env, flatten_low_dim=True)["low_dim"])

    def _step_termination(self, env, action, info=None):
        """
        Step and aggregate termination conditions

        Args:
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment
            info (None or dict): Any info to return

        Returns:
            2-tuple:
                - bool: whether any termination condition was triggered at the current timestep
                - dict: any information passed through this function or generated by this function
        """
        # Get all dones and successes from individual termination conditions
        dones = []
        successes = []
        for termination_condition in self._termination_conditions.values():
            d, s = termination_condition.step(self, env, action)
            dones.append(d)
            successes.append(s)

        # Any True found corresponds to a done / success
        done = sum(dones) > 0
        success = sum(successes) > 0

        # Populate info
        info = dict() if info is None else info
        info["success"] = success
        return done, info

    def _step_reward(self, env, action, info=None):
        """
        Step and aggregate reward functions

        Args:
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment
            info (None or dict): Any info to return

        Returns:
            2-tuple:
                - float: aggregated reward at the current timestep
                - dict: any information passed through this function or generated by this function
        """
        # Make sure info is a dict
        total_info = dict() if info is None else info

        # We'll also store individual reward split as well
        breakdown_dict = dict()

        # Aggregate rewards over all reward functions
        total_reward = 0.0
        for reward_name, reward_function in self._reward_functions.items():
            reward, reward_info = reward_function.step(self, env, action)
            total_reward += reward
            breakdown_dict[reward_name] = reward
            total_info[reward_name] = reward_info

        # Store breakdown dict
        total_info["reward_breakdown"] = breakdown_dict

        return total_reward, total_info

    @abstractmethod
    def _get_obs(self, env):
        """
        Get task-specific observation

        Args:
            env (Environment): Environment instance

        Returns:
            2-tuple:
                - dict: Keyword-mapped low dimensional observations from this task
                - dict: All other keyword-mapped observations from this task
        """
        raise NotImplementedError()

    def _flatten_low_dim_obs(self, obs):
        """
        Flattens dictionary containing low-dimensional observations @obs and converts it from a dictionary into a
        1D numpy array

        Args:
            obs (dict): Low-dim observation dictionary where each value is a 1D array

        Returns:
            n-array: 1D-numpy array of flattened low-dim observations
        """
        # By default, we simply concatenate all values in our obs dict
        return np.concatenate([ob for ob in obs.values()]) if len(obs.values()) > 0 else np.array([])

    def get_obs(self, env, flatten_low_dim=True):
        """
        Get task observations

        Args:
            env (Environment): environment instance
            flatten_low_dim (bool): Whether to flatten the low-dimensional observations into a single 1D array

        Returns:
            dict: Keyword-mapped task observations, with the (possibly flattened) low-dimensional observations
                stored under the "low_dim" key
        """
        # Grab obs internally
        low_dim_obs, obs = self._get_obs(env=env)

        # Possibly flatten low dim and add to main observation dictionary
        obs["low_dim"] = self._flatten_low_dim_obs(obs=low_dim_obs) if flatten_low_dim else low_dim_obs

        return obs

    def step(self, env, action):
        """
        Perform task-specific step for every timestep

        Args:
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment

        Returns:
            3-tuple:
                - float: reward calculated after this step
                - bool: whether task is done or not
                - dict: nested dictionary of reward- and done-related info
        """
        # Make sure we're initialized
        assert self._loaded, "Task must be loaded using load() before calling step()!"

        # We calculate termination conditions first and then rewards
        # (since some rewards can rely on termination conditions to update)
        done, done_info = self._step_termination(env=env, action=action)
        reward, reward_info = self._step_reward(env=env, action=action)

        # Update the internal state of this task
        self._reward = reward
        self._done = done
        self._success = done_info["success"]
        self._info = {
            "reward": reward_info,
            "done": done_info,
        }

        return self._reward, self._done, deepcopy(self._info)

    @property
    def name(self):
        """
        Returns:
            str: Name of this task. Defaults to class name
        """
        return self.__class__.__name__

    @property
    def reward(self):
        """
        Returns:
            float: Current reward for this task
        """
        assert self._reward is not None, "At least one step() must occur before reward can be calculated!"
        return self._reward

    @property
    def done(self):
        """
        Returns:
            bool: Whether this task is done or not
        """
        assert self._done is not None, "At least one step() must occur before done can be calculated!"
        return self._done

    @property
    def success(self):
        """
        Returns:
            bool: Whether this task has succeeded or not
        """
        assert self._success is not None, "At least one step() must occur before success can be calculated!"
        return self._success

    @property
    def info(self):
        """
        Returns:
            dict: Nested dictionary of information for this task, including reward- and done-specific information
        """
        assert self._info is not None, "At least one step() must occur before info can be calculated!"
        return self._info

    @classproperty
    def valid_scene_types(cls):
        """
        Returns:
            set of Scene: Scene type(s) that are valid (i.e.: compatible) with this specific task. This will be
                used to sanity check the task + scene combination at runtime
        """
        raise NotImplementedError()

    @classproperty
    def default_reward_config(cls):
        """
        Returns:
            dict: Default reward configuration for this class. Should include any kwargs necessary for
                any of the reward classes generated in self._create_rewards(). Note: this default config
                should be fully verbose -- any keys inputted in the constructor but NOT found in this default config
                will raise an error!
        """
        raise NotImplementedError()

    @classproperty
    def default_termination_config(cls):
        """
        Returns:
            dict: Default termination configuration for this class. Should include any kwargs necessary for
                any of the termination classes generated in self._create_terminations(). Note: this default config
                should be fully verbose -- any keys inputted in the constructor but NOT found in this default config
                will raise an error!
        """
        raise NotImplementedError()

    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("BaseTask")
        return classes

    @classproperty
    def _cls_registry(cls):
        # Global registry
        global REGISTERED_TASKS
        return REGISTERED_TASKS
| 15,225 | Python | 36.046229 | 132 | 0.622266 |
StanfordVL/OmniGibson/omnigibson/tasks/point_navigation_task.py | import numpy as np
import omnigibson as og
from omnigibson.object_states import Pose
from omnigibson.objects.primitive_object import PrimitiveObject
from omnigibson.reward_functions.collision_reward import CollisionReward
from omnigibson.reward_functions.point_goal_reward import PointGoalReward
from omnigibson.reward_functions.potential_reward import PotentialReward
from omnigibson.scenes.traversable_scene import TraversableScene
from omnigibson.tasks.task_base import BaseTask
from omnigibson.termination_conditions.max_collision import MaxCollision
from omnigibson.termination_conditions.falling import Falling
from omnigibson.termination_conditions.point_goal import PointGoal
from omnigibson.termination_conditions.timeout import Timeout
from omnigibson.utils.python_utils import classproperty, assert_valid_key
from omnigibson.utils.sim_utils import land_object, test_valid_pose
import omnigibson.utils.transform_utils as T
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Valid point navigation reward types
POINT_NAVIGATION_REWARD_TYPES = {"l2", "geodesic"}
class PointNavigationTask(BaseTask):
    """
    Point Navigation Task
    The task is to navigate to a goal position
    Args:
        robot_idn (int): Which robot that this task corresponds to
        floor (int): Which floor to navigate on
        initial_pos (None or 3-array): If specified, should be (x,y,z) global initial position to place the robot
            at the start of each task episode. If None, a collision-free value will be randomly sampled
        initial_quat (None or 4-array): If specified, should be (x,y,z,w) global quaternion orientation to place the
            robot at the start of each task episode. If None, a value will be randomly sampled about the z-axis
        goal_pos (None or 3-array): If specified, should be (x,y,z) global goal position to reach for the given task
            episode. If None, a collision-free value will be randomly sampled
        goal_tolerance (float): Distance between goal position and current position below which is considered a task
            success
        goal_in_polar (bool): Whether to represent the goal in polar coordinates or not when capturing task observations
        path_range (None or 2-array): If specified, should be (min, max) values representing the range of valid
            total path lengths that are valid when sampling initial / goal positions
        visualize_goal (bool): Whether to visualize the initial / goal locations
        visualize_path (bool): Whether to visualize the path from initial to goal location, as represented by
            discrete waypoints
        goal_height (float): If visualizing, specifies the height of the visual goals (m)
        waypoint_height (float): If visualizing, specifies the height of the visual waypoints (m)
        waypoint_width (float): If visualizing, specifies the width of the visual waypoints (m)
        n_vis_waypoints (int): If visualizing, specifies the number of waypoints to generate
        reward_type (str): Type of reward to use. Valid options are: {"l2", "geodesic"}
        termination_config (None or dict): Keyword-mapped configuration to use to generate termination conditions. This
            should be specific to the task class. Default is None, which corresponds to a default config being used.
            Note that any keyword required by a specific task class but not specified in the config will automatically
            be filled in with the default config. See cls.default_termination_config for default values used
        reward_config (None or dict): Keyword-mapped configuration to use to generate reward functions. This should be
            specific to the task class. Default is None, which corresponds to a default config being used. Note that
            any keyword required by a specific task class but not specified in the config will automatically be filled
            in with the default config. See cls.default_reward_config for default values used
    """
    def __init__(
        self,
        robot_idn=0,
        floor=0,
        initial_pos=None,
        initial_quat=None,
        goal_pos=None,
        goal_tolerance=0.5,
        goal_in_polar=False,
        path_range=None,
        visualize_goal=False,
        visualize_path=False,
        goal_height=0.06,
        waypoint_height=0.05,
        waypoint_width=0.1,
        n_vis_waypoints=10,
        reward_type="l2",
        termination_config=None,
        reward_config=None,
    ):
        # Store inputs
        self._robot_idn = robot_idn
        self._floor = floor
        self._initial_pos = initial_pos if initial_pos is None else np.array(initial_pos)
        self._initial_quat = initial_quat if initial_quat is None else np.array(initial_quat)
        self._goal_pos = goal_pos if goal_pos is None else np.array(goal_pos)
        self._goal_tolerance = goal_tolerance
        self._goal_in_polar = goal_in_polar
        self._path_range = path_range
        # Remember which values must be re-sampled at every episode reset (a None input means "randomize")
        self._randomize_initial_pos = initial_pos is None
        self._randomize_initial_quat = initial_quat is None
        self._randomize_goal_pos = goal_pos is None
        self._visualize_goal = visualize_goal
        self._visualize_path = visualize_path
        self._goal_height = goal_height
        self._waypoint_height = waypoint_height
        self._waypoint_width = waypoint_width
        self._n_vis_waypoints = n_vis_waypoints
        assert_valid_key(key=reward_type, valid_keys=POINT_NAVIGATION_REWARD_TYPES, name="reward type")
        self._reward_type = reward_type
        # Create other attributes that will be filled in at runtime
        self._initial_pos_marker = None
        self._goal_pos_marker = None
        self._waypoint_markers = None
        self._path_length = None
        self._current_robot_pos = None
        self._geodesic_dist = None
        # Run super
        super().__init__(termination_config=termination_config, reward_config=reward_config)
    def _create_termination_conditions(self):
        """Create the termination conditions used by this task, keyed by condition name."""
        # Initialize termination conditions dict and fill in with MaxCollision, Timeout, Falling, and PointGoal
        terminations = dict()
        terminations["max_collision"] = MaxCollision(max_collisions=self._termination_config["max_collisions"])
        terminations["timeout"] = Timeout(max_steps=self._termination_config["max_steps"])
        terminations["falling"] = Falling(robot_idn=self._robot_idn, fall_height=self._termination_config["fall_height"])
        terminations["pointgoal"] = PointGoal(
            robot_idn=self._robot_idn,
            distance_tol=self._goal_tolerance,
            distance_axes="xy",
        )
        return terminations
    def _create_reward_functions(self):
        """Create the reward functions used by this task, keyed by reward name."""
        # Initialize reward functions dict and fill in with Potential, Collision, and PointGoal rewards
        rewards = dict()
        rewards["potential"] = PotentialReward(
            potential_fcn=self.get_potential,
            r_potential=self._reward_config["r_potential"],
        )
        rewards["collision"] = CollisionReward(r_collision=self._reward_config["r_collision"])
        rewards["pointgoal"] = PointGoalReward(
            pointgoal=self._termination_conditions["pointgoal"],
            r_pointgoal=self._reward_config["r_pointgoal"],
        )
        return rewards
    def _load(self, env):
        """Load task-specific assets (visualization markers) and bake them into the scene's initial state."""
        # Load visualization
        self._load_visualization_markers(env=env)
        # Auto-initialize all markers: briefly play the sim so the markers register, snapshot that
        # state as the scene's initial state, then stop again
        og.sim.play()
        env.scene.reset()
        env.scene.update_initial_state()
        og.sim.stop()
    def _load_visualization_markers(self, env):
        """
        Load visualization, such as initial and target position, shortest path, etc
        Args:
            env (Environment): Active environment instance
        """
        if self._visualize_goal:
            self._initial_pos_marker = PrimitiveObject(
                prim_path="/World/task_initial_pos_marker",
                primitive_type="Cylinder",
                name="task_initial_pos_marker",
                radius=self._goal_tolerance,
                height=self._goal_height,
                visual_only=True,
                rgba=np.array([1, 0, 0, 0.3]),
            )
            self._goal_pos_marker = PrimitiveObject(
                prim_path="/World/task_goal_pos_marker",
                primitive_type="Cylinder",
                name="task_goal_pos_marker",
                radius=self._goal_tolerance,
                height=self._goal_height,
                visual_only=True,
                rgba=np.array([0, 0, 1, 0.3]),
            )
            # Load the objects into the simulator
            og.sim.import_object(self._initial_pos_marker)
            og.sim.import_object(self._goal_pos_marker)
        # Additionally generate waypoints along the path if we're building the map in the environment
        if self._visualize_path:
            waypoints = []
            for i in range(self._n_vis_waypoints):
                waypoint = PrimitiveObject(
                    prim_path=f"/World/task_waypoint_marker{i}",
                    primitive_type="Cylinder",
                    name=f"task_waypoint_marker{i}",
                    radius=self._waypoint_width,
                    height=self._waypoint_height,
                    visual_only=True,
                    rgba=np.array([0, 1, 0, 0.3]),
                )
                og.sim.import_object(waypoint)
                waypoints.append(waypoint)
            # Store waypoints
            self._waypoint_markers = waypoints
    def _sample_initial_pose_and_goal_pos(self, env, max_trials=100):
        """
        Potentially sample the robot initial pos / ori and target pos, based on whether we're using randomized
        initial and goal states. If not randomized, then this value will return the corresponding values inputted
        during this task initialization.
        Args:
            env (Environment): Environment instance
            max_trials (int): Number of trials to attempt to sample valid poses and positions
        Returns:
            3-tuple:
                - 3-array: (x,y,z) global sampled initial position
                - 4-array: (x,y,z,w) global sampled initial orientation in quaternion form
                - 3-array: (x,y,z) global sampled goal position
        """
        # Possibly sample initial pos
        if self._randomize_initial_pos:
            _, initial_pos = env.scene.get_random_point(floor=self._floor, robot=env.robots[self._robot_idn])
        else:
            initial_pos = self._initial_pos
        # Possibly sample initial ori -- randomization only varies yaw (rotation about the z-axis)
        initial_quat = T.euler2quat(np.array([0, 0, np.random.uniform(0, np.pi * 2)])) if \
            self._randomize_initial_quat else self._initial_quat
        # Possibly sample goal pos
        if self._randomize_goal_pos:
            dist, in_range_dist = 0.0, False
            for _ in range(max_trials):
                _, goal_pos = env.scene.get_random_point(floor=self._floor,
                                                         reference_point=initial_pos,
                                                         robot=env.robots[self._robot_idn])
                _, dist = env.scene.get_shortest_path(self._floor, initial_pos[:2], goal_pos[:2], entire_path=False, robot=env.robots[self._robot_idn])
                # If a path range is specified, make sure distance is valid
                if dist is not None and (self._path_range is None or self._path_range[0] < dist < self._path_range[1]):
                    in_range_dist = True
                    break
            # Notify if we weren't able to get a valid start / end point sampled in the requested range
            # NOTE: the last sampled goal_pos is still used in this case
            if not in_range_dist:
                log.warning("Failed to sample initial and target positions within requested path range")
        else:
            goal_pos = self._goal_pos
        # Add additional logging info
        log.info("Sampled initial pose: {}, {}".format(initial_pos, initial_quat))
        log.info("Sampled goal position: {}".format(goal_pos))
        return initial_pos, initial_quat, goal_pos
    def _get_geodesic_potential(self, env):
        """
        Get potential based on geodesic distance
        Args:
            env: environment instance
        Returns:
            float: geodesic distance to the target position
        """
        _, geodesic_dist = self.get_shortest_path_to_goal(env=env)
        return geodesic_dist
    def _get_l2_potential(self, env):
        """
        Get potential based on L2 distance
        Args:
            env: environment instance
        Returns:
            float: L2 distance to the target position
        """
        # Only the (x,y) plane distance matters for navigation
        return T.l2_distance(env.robots[self._robot_idn].states[Pose].get_value()[0][:2], self._goal_pos[:2])
    def get_potential(self, env):
        """
        Compute task-specific potential: distance to the goal
        Args:
            env (Environment): Environment instance
        Returns:
            float: Computed potential
        """
        if self._reward_type == "l2":
            reward = self._get_l2_potential(env)
        elif self._reward_type == "geodesic":
            reward = self._get_geodesic_potential(env)
        else:
            raise ValueError(f"Invalid reward type! {self._reward_type}")
        return reward
    def _reset_agent(self, env):
        """Reset the robot, sample (or reuse) collision-free initial / goal states, and place the robot."""
        # Reset agent
        env.robots[self._robot_idn].reset()
        # We attempt to sample valid initial poses and goal positions
        success, max_trials = False, 100
        initial_pos, initial_quat, goal_pos = None, None, None
        for i in range(max_trials):
            initial_pos, initial_quat, goal_pos = self._sample_initial_pose_and_goal_pos(env)
            # Make sure the sampled robot start pose and goal position are both collision-free
            success = test_valid_pose(
                env.robots[self._robot_idn], initial_pos, initial_quat, env.initial_pos_z_offset
            ) and test_valid_pose(env.robots[self._robot_idn], goal_pos, None, env.initial_pos_z_offset)
            # Don't need to continue iterating if we succeeded
            if success:
                break
        # Notify user if we failed to reset a collision-free sampled pose
        if not success:
            log.warning("Failed to reset robot without collision")
        # Land the robot (drop it onto the floor at the sampled pose)
        land_object(env.robots[self._robot_idn], initial_pos, initial_quat, env.initial_pos_z_offset)
        # Store the sampled values internally
        self._initial_pos = initial_pos
        self._initial_quat = initial_quat
        self._goal_pos = goal_pos
        # Update visuals if requested
        if self._visualize_goal:
            self._initial_pos_marker.set_position(self._initial_pos)
            self._goal_pos_marker.set_position(self._goal_pos)
    def _reset_variables(self, env):
        """Reset per-episode bookkeeping: accumulated path length, robot position, and geodesic distance."""
        # Run super first
        super()._reset_variables(env=env)
        # Reset internal variables
        self._path_length = 0.0
        self._current_robot_pos = self._initial_pos
        self._geodesic_dist = self._get_geodesic_potential(env)
    def _step_termination(self, env, action, info=None):
        """Evaluate termination conditions and augment @info with path length and SPL metrics."""
        # Run super first
        done, info = super()._step_termination(env=env, action=action, info=info)
        # Add additional info: SPL (Success weighted by Path Length) is only defined at episode end
        info["path_length"] = self._path_length
        info["spl"] = float(info["success"]) * min(1.0, self._geodesic_dist / self._path_length) if done and self._path_length != 0.0 else 0.0
        return done, info
    def _global_pos_to_robot_frame(self, env, pos):
        """
        Convert a 3D point in global frame to agent's local frame
        Args:
            env (TraversableEnv): Environment instance
            pos (3-array): global (x,y,z) position
        Returns:
            3-array: (x,y,z) position in self._robot_idn agent's local frame
        """
        delta_pos_global = np.array(pos) - env.robots[self._robot_idn].states[Pose].get_value()[0]
        # Rotate the global delta into the robot frame (R^T == R^-1 for rotation matrices)
        return T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T @ delta_pos_global
    def _get_obs(self, env):
        """Compute task observations: goal position in the robot frame plus robot-frame velocities."""
        # Get relative position of goal with respect to the current agent position
        xy_pos_to_goal = self._global_pos_to_robot_frame(env, self._goal_pos)[:2]
        if self._goal_in_polar:
            xy_pos_to_goal = np.array(T.cartesian_to_polar(*xy_pos_to_goal))
        # linear velocity and angular velocity, expressed in the robot frame
        ori_t = T.quat2mat(env.robots[self._robot_idn].states[Pose].get_value()[1]).T
        lin_vel = ori_t @ env.robots[self._robot_idn].get_linear_velocity()
        ang_vel = ori_t @ env.robots[self._robot_idn].get_angular_velocity()
        # Compose observation dict
        low_dim_obs = dict(
            xy_pos_to_goal=xy_pos_to_goal,
            robot_lin_vel=lin_vel,
            robot_ang_vel=ang_vel,
        )
        # We have no non-low-dim obs, so return empty dict for those
        return low_dim_obs, dict()
    def _load_non_low_dim_observation_space(self):
        # No non-low dim observations so we return an empty dict
        return dict()
    def get_goal_pos(self):
        """
        Returns:
            3-array: (x,y,z) global current goal position
        """
        return self._goal_pos
    def get_current_pos(self, env):
        """
        Returns:
            3-array: (x,y,z) global current position representing the robot
        """
        return env.robots[self._robot_idn].states[Pose].get_value()[0]
    def get_shortest_path_to_goal(self, env, start_xy_pos=None, entire_path=False):
        """
        Get the shortest path and geodesic distance from @start_pos to the target position
        Args:
            env (TraversableEnv): Environment instance
            start_xy_pos (None or 2-array): If specified, should be the global (x,y) start position from which
                to calculate the shortest path to the goal position. If None (default), the robot's current xy position
                will be used
            entire_path (bool): Whether to return the entire shortest path
        Returns:
            2-tuple:
                - list of 2-array: List of (x,y) waypoints representing the path # TODO: is this true?
                - float: geodesic distance of the path to the goal position
        """
        start_xy_pos = env.robots[self._robot_idn].states[Pose].get_value()[0][:2] if start_xy_pos is None else start_xy_pos
        return env.scene.get_shortest_path(self._floor, start_xy_pos, self._goal_pos[:2], entire_path=entire_path, robot=env.robots[self._robot_idn])
    def _step_visualization(self, env):
        """
        Step visualization
        Args:
            env (Environment): Environment instance
        """
        if self._visualize_path:
            shortest_path, _ = self.get_shortest_path_to_goal(env=env, entire_path=True)
            floor_height = env.scene.get_floor_height(self._floor)
            num_nodes = min(self._n_vis_waypoints, shortest_path.shape[0])
            for i in range(num_nodes):
                self._waypoint_markers[i].set_position(
                    position=np.array([shortest_path[i][0], shortest_path[i][1], floor_height])
                )
            # Park any unused waypoint markers far above the scene so they are out of view
            for i in range(num_nodes, self._n_vis_waypoints):
                self._waypoint_markers[i].set_position(position=np.array([0.0, 0.0, 100.0]))
    def step(self, env, action):
        """Step the task: compute reward / termination, refresh visuals, and accumulate traveled path length."""
        # Run super method first
        reward, done, info = super().step(env=env, action=action)
        # Step visualization
        self._step_visualization(env=env)
        # Update other internal variables
        new_robot_pos = env.robots[self._robot_idn].states[Pose].get_value()[0]
        self._path_length += T.l2_distance(self._current_robot_pos[:2], new_robot_pos[:2])
        self._current_robot_pos = new_robot_pos
        return reward, done, info
    @classproperty
    def valid_scene_types(cls):
        # Must be a traversable scene
        return {TraversableScene}
    @classproperty
    def default_termination_config(cls):
        # Default kwargs consumed by _create_termination_conditions()
        return {
            "max_collisions": 500,
            "max_steps": 500,
            "fall_height": 0.03,
        }
    @classproperty
    def default_reward_config(cls):
        # Default kwargs consumed by _create_reward_functions()
        return {
            "r_potential": 1.0,
            "r_collision": 0.1,
            "r_pointgoal": 10.0,
        }
| 20,626 | Python | 42.06263 | 151 | 0.617667 |
StanfordVL/OmniGibson/omnigibson/tasks/__init__.py | from omnigibson.tasks.task_base import REGISTERED_TASKS
from omnigibson.tasks.dummy_task import DummyTask
from omnigibson.tasks.point_navigation_task import PointNavigationTask
from omnigibson.tasks.point_reaching_task import PointReachingTask
from omnigibson.tasks.behavior_task import BehaviorTask
| 300 | Python | 49.166658 | 70 | 0.873333 |
StanfordVL/OmniGibson/omnigibson/tasks/dummy_task.py | import numpy as np
from omnigibson.tasks.task_base import BaseTask
from omnigibson.scenes.scene_base import Scene
from omnigibson.utils.python_utils import classproperty
from omnigibson.utils.sim_utils import land_object
class DummyTask(BaseTask):
    """
    No-op task with no rewards, no termination conditions, and no task observations.
    Useful as a placeholder when an environment is needed without any task logic.
    """
    def _load(self, env):
        # Nothing to load for a dummy task
        pass
    def _create_termination_conditions(self):
        # A dummy task defines no termination conditions
        return {}
    def _create_reward_functions(self):
        # A dummy task generates no rewards
        return {}
    def _get_obs(self, env):
        # Neither low-dim nor non-low-dim task observations exist
        return {}, {}
    def _load_non_low_dim_observation_space(self):
        # Empty observation space since there are no non-low-dim observations
        return {}
    @classproperty
    def valid_scene_types(cls):
        # Compatible with every scene type
        return {Scene}
    @classproperty
    def default_termination_config(cls):
        # No termination kwargs are needed
        return {}
    @classproperty
    def default_reward_config(cls):
        # No reward kwargs are needed
        return {}
StanfordVL/OmniGibson/omnigibson/tasks/behavior_task.py | import numpy as np
import os
from bddl.activity import (
Conditions,
evaluate_goal_conditions,
get_goal_conditions,
get_ground_goal_state_options,
get_natural_initial_conditions,
get_initial_conditions,
get_natural_goal_conditions,
get_object_scope,
)
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.object_states import Pose
from omnigibson.reward_functions.potential_reward import PotentialReward
from omnigibson.robots.robot_base import BaseRobot
from omnigibson.systems.system_base import get_system, add_callback_on_system_init, add_callback_on_system_clear, \
REGISTERED_SYSTEMS
from omnigibson.scenes.scene_base import Scene
from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
from omnigibson.utils.bddl_utils import OmniGibsonBDDLBackend, BDDLEntity, BEHAVIOR_ACTIVITIES, BDDLSampler
from omnigibson.tasks.task_base import BaseTask
from omnigibson.termination_conditions.predicate_goal import PredicateGoal
from omnigibson.termination_conditions.timeout import Timeout
import omnigibson.utils.transform_utils as T
from omnigibson.utils.python_utils import classproperty, assert_valid_key
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
class BehaviorTask(BaseTask):
"""
Task for BEHAVIOR
Args:
activity_name (None or str): Name of the Behavior Task to instantiate
activity_definition_id (int): Specification to load for the desired task. For a given Behavior Task, multiple task
specifications can be used (i.e.: differing goal conditions, or "ways" to complete a given task). This
ID determines which specification to use
activity_instance_id (int): Specific pre-configured instance of a scene to load for this BehaviorTask. This
will be used only if @online_object_sampling is False.
predefined_problem (None or str): If specified, specifies the raw string definition of the Behavior Task to
load. This will automatically override @activity_name and @activity_definition_id.
online_object_sampling (bool): whether to sample object locations online at runtime or not
debug_object_sampling (bool): whether to debug placement functionality
highlight_task_relevant_objects (bool): whether to overlay task-relevant objects in the scene with a colored mask
termination_config (None or dict): Keyword-mapped configuration to use to generate termination conditions. This
            should be specific to the task class. Default is None, which corresponds to a default config being used.
Note that any keyword required by a specific task class but not specified in the config will automatically
be filled in with the default config. See cls.default_termination_config for default values used
reward_config (None or dict): Keyword-mapped configuration to use to generate reward functions. This should be
            specific to the task class. Default is None, which corresponds to a default config being used. Note that
any keyword required by a specific task class but not specified in the config will automatically be filled
in with the default config. See cls.default_reward_config for default values used
"""
    def __init__(
        self,
        activity_name=None,
        activity_definition_id=0,
        activity_instance_id=0,
        predefined_problem=None,
        online_object_sampling=False,
        debug_object_sampling=False,
        highlight_task_relevant_objects=False,
        termination_config=None,
        reward_config=None,
    ):
        """Initialize the BehaviorTask. See the class docstring for argument semantics."""
        # Make sure object states are enabled
        assert gm.ENABLE_OBJECT_STATES, "Must set gm.ENABLE_OBJECT_STATES=True in order to use BehaviorTask!"
        # Make sure task name is valid if not specifying a predefined problem
        if predefined_problem is None:
            assert activity_name is not None, \
                "Activity name must be specified if no predefined_problem is specified for BehaviorTask!"
            assert_valid_key(key=activity_name, valid_keys=BEHAVIOR_ACTIVITIES, name="Behavior Task")
        else:
            # Infer activity name from the BDDL problem header, e.g. "problem <name>-<id>"
            activity_name = predefined_problem.split("problem ")[-1].split("-")[0]
        # Initialize relevant variables
        # BDDL
        self.backend = OmniGibsonBDDLBackend()
        # Activity info -- activity_name is set to None here and populated by update_activity() below
        self.activity_name = None
        self.activity_definition_id = activity_definition_id
        self.activity_instance_id = activity_instance_id
        self.activity_conditions = None
        self.activity_initial_conditions = None
        self.activity_goal_conditions = None
        self.ground_goal_state_options = None
        self.feedback = None  # None or str
        self.sampler = None  # BDDLSampler
        # Object info
        self.debug_object_sampling = debug_object_sampling  # bool
        self.online_object_sampling = online_object_sampling  # bool
        self.highlight_task_relevant_objs = highlight_task_relevant_objects  # bool
        self.object_scope = None  # Maps str to BDDLEntity
        self.object_instance_to_category = None  # Maps str to str
        self.future_obj_instances = None  # set of str
        # Info for demonstration collection
        self.instruction_order = None  # np.array of int
        self.currently_viewed_index = None  # int
        self.currently_viewed_instruction = None  # tuple of str -- NOTE(review): update_activity() assigns an int index here; confirm intended type
        self.activity_natural_language_goal_conditions = None  # str
        # Load the initial behavior configuration -- must run before super().__init__() so that
        # goal conditions exist when termination conditions are constructed
        self.update_activity(activity_name=activity_name, activity_definition_id=activity_definition_id, predefined_problem=predefined_problem)
        # Run super init
        super().__init__(termination_config=termination_config, reward_config=reward_config)
@classmethod
def get_cached_activity_scene_filename(cls, scene_model, activity_name, activity_definition_id, activity_instance_id):
"""
Helper method to programmatically construct the scene filename for a given pre-cached task configuration
Args:
scene_model (str): Name of the scene (e.g.: Rs_int)
activity_name (str): Name of the task activity (e.g.: putting_away_halloween_decorations)
activity_definition_id (int): ID of the task definition
activity_instance_id (int): ID of the task instance
Returns:
str: Filename which, if exists, should include the cached activity scene
"""
return f"{scene_model}_task_{activity_name}_{activity_definition_id}_{activity_instance_id}_template"
    @classmethod
    def verify_scene_and_task_config(cls, scene_cfg, task_cfg):
        """
        Verify the scene and task configs are compatible, and point the scene config at the pre-cached
        activity scene instance when not sampling objects online.
        Args:
            scene_cfg (dict): Scene configuration; may be mutated in place (its "scene_instance" key is updated)
            task_cfg (dict): Task configuration for this BehaviorTask
        """
        # Run super first
        super().verify_scene_and_task_config(scene_cfg=scene_cfg, task_cfg=task_cfg)
        # Possibly modify the scene to load if we're using online_object_sampling
        scene_instance, scene_file = scene_cfg["scene_instance"], scene_cfg["scene_file"]
        # Activity name either comes from the predefined problem header ("problem <name>-<id>") or is given directly
        activity_name = task_cfg["predefined_problem"].split("problem ")[-1].split("-")[0] if \
            task_cfg.get("predefined_problem", None) is not None else task_cfg["activity_name"]
        if scene_file is None and scene_instance is None and not task_cfg["online_object_sampling"]:
            scene_instance = cls.get_cached_activity_scene_filename(
                scene_model=scene_cfg.get("scene_model", "Scene"),
                activity_name=activity_name,
                activity_definition_id=task_cfg.get("activity_definition_id", 0),
                activity_instance_id=task_cfg.get("activity_instance_id", 0),
            )
        # Update the value in the scene config
        scene_cfg["scene_instance"] = scene_instance
def write_task_metadata(self):
# Store mapping from entity name to its corresponding BDDL instance name
metadata = dict(
inst_to_name={inst: entity.name for inst, entity in self.object_scope.items() if entity.exists},
)
# Write to sim
og.sim.write_metadata(key="task", data=metadata)
    def load_task_metadata(self):
        """
        Returns:
            dict: Task metadata previously stored in the simulator under the "task" key
                (see write_task_metadata)
        """
        # Load from sim
        return og.sim.get_metadata(key="task")
def _create_termination_conditions(self):
# Initialize termination conditions dict and fill in with Timeout and PredicateGoal
terminations = dict()
terminations["timeout"] = Timeout(max_steps=self._termination_config["max_steps"])
terminations["predicate"] = PredicateGoal(goal_fcn=lambda: self.activity_goal_conditions)
return terminations
def _create_reward_functions(self):
# Initialize reward functions dict and fill in with Potential reward
rewards = dict()
rewards["potential"] = PotentialReward(
potential_fcn=self.get_potential,
r_potential=self._reward_config["r_potential"],
)
return rewards
    def _load(self, env):
        """Initialize the activity in @env, optionally highlight task-relevant objects, and register scene callbacks."""
        # Initialize the current activity
        success, self.feedback = self.initialize_activity(env=env)
        # assert success, f"Failed to initialize Behavior Activity. Feedback:\n{self.feedback}"
        # Highlight any task relevant objects if requested
        if self.highlight_task_relevant_objs:
            for entity in self.object_scope.values():
                # Never highlight the agent itself
                if entity.synset == "agent":
                    continue
                if not entity.is_system and entity.exists:
                    entity.highlighted = True
        # Add callbacks to handle internal processing when new systems / objects are added to / removed from the scene
        callback_name = f"{self.activity_name}_refresh"
        og.sim.add_callback_on_import_obj(name=callback_name, callback=self._update_bddl_scope_from_added_obj)
        og.sim.add_callback_on_remove_obj(name=callback_name, callback=self._update_bddl_scope_from_removed_obj)
        add_callback_on_system_init(name=callback_name, callback=self._update_bddl_scope_from_system_init)
        add_callback_on_system_clear(name=callback_name, callback=self._update_bddl_scope_from_system_clear)
def _load_non_low_dim_observation_space(self):
# No non-low dim observations so we return an empty dict
return dict()
    def update_activity(self, activity_name, activity_definition_id, predefined_problem=None):
        """
        Update the active Behavior activity being deployed
        Args:
            activity_name (None or str): Name of the Behavior Task to instantiate
            activity_definition_id (int): Specification to load for the desired task. For a given Behavior Task, multiple task
                specifications can be used (i.e.: differing goal conditions, or "ways" to complete a given task). This
                ID determines which specification to use
            predefined_problem (None or str): If specified, specifies the raw string definition of the Behavior Task to
                load. This will automatically override @activity_name and @activity_definition_id.
        """
        # Update internal variables based on values
        # Activity info
        self.activity_name = activity_name
        self.activity_definition_id = activity_definition_id
        self.activity_conditions = Conditions(
            activity_name,
            activity_definition_id,
            simulator_name="omnigibson",
            predefined_problem=predefined_problem,
        )
        # Get scope, making sure agent is the first entry
        self.object_scope = {"agent.n.01_1": None}
        self.object_scope.update(get_object_scope(self.activity_conditions))
        # Object info -- invert the category -> instances mapping into instance -> category
        self.object_instance_to_category = {
            obj_inst: obj_cat
            for obj_cat in self.activity_conditions.parsed_objects
            for obj_inst in self.activity_conditions.parsed_objects[obj_cat]
        }
        # Generate initial and goal conditions
        self.activity_initial_conditions = get_initial_conditions(self.activity_conditions, self.backend, self.object_scope)
        self.activity_goal_conditions = get_goal_conditions(self.activity_conditions, self.backend, self.object_scope)
        self.ground_goal_state_options = get_ground_goal_state_options(
            self.activity_conditions, self.backend, self.object_scope, self.activity_goal_conditions
        )
        # Demo attributes -- goal conditions are presented to the demonstrator in shuffled order
        self.instruction_order = np.arange(len(self.activity_conditions.parsed_goal_conditions))
        np.random.shuffle(self.instruction_order)
        self.currently_viewed_index = 0
        # NOTE(review): this stores an int index, though __init__ documents the attribute as "tuple of str" -- confirm
        self.currently_viewed_instruction = self.instruction_order[self.currently_viewed_index]
        self.activity_natural_language_initial_conditions = get_natural_initial_conditions(self.activity_conditions)
        self.activity_natural_language_goal_conditions = get_natural_goal_conditions(self.activity_conditions)
def get_potential(self, env):
"""
Compute task-specific potential: distance to the goal
Args:
env (Environment): Current active environment instance
Returns:
float: Computed potential
"""
# Evaluate the first ground goal state option as the potential
_, satisfied_predicates = evaluate_goal_conditions(self.ground_goal_state_options[0])
success_score = len(satisfied_predicates["satisfied"]) / (
len(satisfied_predicates["satisfied"]) + len(satisfied_predicates["unsatisfied"])
)
return -success_score
    def initialize_activity(self, env):
        """
        Initializes the desired activity in the current environment @env
        Args:
            env (Environment): Current active environment instance
        Returns:
            2-tuple:
                - bool: Whether the generated scene activity should be accepted or not
                - dict: Any feedback from the sampling / initialization process
        """
        accept_scene = True
        feedback = None
        # Generate sampler
        self.sampler = BDDLSampler(
            env=env,
            activity_conditions=self.activity_conditions,
            object_scope=self.object_scope,
            backend=self.backend,
            debug=self.debug_object_sampling,
        )
        # Compose future objects -- instances flagged "future" in the initial conditions don't exist yet
        self.future_obj_instances = \
            {init_cond.body[1] for init_cond in self.activity_initial_conditions if init_cond.body[0] == "future"}
        if self.online_object_sampling:
            # Sample online
            accept_scene, feedback = self.sampler.sample()
            if not accept_scene:
                return accept_scene, feedback
        else:
            # Load existing scene cache and assign object scope accordingly
            self.assign_object_scope_with_cache(env)
        # Generate goal condition with the fully populated self.object_scope
        self.activity_goal_conditions = get_goal_conditions(self.activity_conditions, self.backend, self.object_scope)
        self.ground_goal_state_options = get_ground_goal_state_options(
            self.activity_conditions, self.backend, self.object_scope, self.activity_goal_conditions
        )
        return accept_scene, feedback
def get_agent(self, env):
"""
Grab the 0th agent from @env
Args:
env (Environment): Current active environment instance
Returns:
BaseRobot: The 0th robot from the environment instance
"""
# We assume the relevant agent is the first agent in the scene
return env.robots[0]
    def assign_object_scope_with_cache(self, env):
        """
        Assigns objects within the current object scope
        Args:
            env (Environment): Current active environment instance
        """
        # Load task metadata
        inst_to_name = self.load_task_metadata()["inst_to_name"]
        # Assign object_scope based on a cached scene
        for obj_inst in self.object_scope:
            if obj_inst in self.future_obj_instances:
                # Future objects don't exist yet; wrap an empty entity
                entity = None
            else:
                assert obj_inst in inst_to_name, f"BDDL object instance {obj_inst} should exist in cached metadata " \
                                                 f"from loaded scene, but could not be found!"
                name = inst_to_name[obj_inst]
                # The cached name refers either to a particle system or to a scene object
                is_system = name in REGISTERED_SYSTEMS
                entity = get_system(name) if is_system else og.sim.scene.object_registry("name", name)
            self.object_scope[obj_inst] = BDDLEntity(
                bddl_inst=obj_inst,
                entity=entity,
            )
    def _get_obs(self, env):
        """Compute per-entity low-dim observations: existence flag, position, orientation, and gripper contact."""
        low_dim_obs = dict()
        # Batch rpy calculations for much better efficiency
        objs_exist = {obj: obj.exists for obj in self.object_scope.values() if not obj.is_system}
        # Non-existent objects get an identity quaternion placeholder so the batch stays rectangular
        objs_rpy = T.quat2euler(np.array([obj.states[Pose].get_value()[1] if obj_exist else np.array([0, 0, 0, 1.0])
                                          for obj, obj_exist in objs_exist.items()]))
        objs_rpy_cos = np.cos(objs_rpy)
        objs_rpy_sin = np.sin(objs_rpy)
        # Always add agent info first
        agent = self.get_agent(env=env)
        for (obj, obj_exist), obj_rpy, obj_rpy_cos, obj_rpy_sin in zip(objs_exist.items(), objs_rpy, objs_rpy_cos, objs_rpy_sin):
            # TODO: May need to update checking here to USDObject? Or even baseobject?
            # TODO: How to handle systems as part of obs?
            if obj_exist:
                low_dim_obs[f"{obj.bddl_inst}_real"] = np.array([1.0])
                low_dim_obs[f"{obj.bddl_inst}_pos"] = obj.states[Pose].get_value()[0]
                low_dim_obs[f"{obj.bddl_inst}_ori_cos"] = obj_rpy_cos
                low_dim_obs[f"{obj.bddl_inst}_ori_sin"] = obj_rpy_sin
                # Per-arm grasp flags (skipped for the agent itself)
                if obj.name != agent.name:
                    for arm in agent.arm_names:
                        grasping_object = agent.is_grasping(arm=arm, candidate_obj=obj.wrapped_obj)
                        low_dim_obs[f"{obj.bddl_inst}_in_gripper_{arm}"] = np.array([float(grasping_object)])
            else:
                # Zero-filled placeholders keep the observation dict's keys and shapes stable
                low_dim_obs[f"{obj.bddl_inst}_real"] = np.zeros(1)
                low_dim_obs[f"{obj.bddl_inst}_pos"] = np.zeros(3)
                low_dim_obs[f"{obj.bddl_inst}_ori_cos"] = np.zeros(3)
                low_dim_obs[f"{obj.bddl_inst}_ori_sin"] = np.zeros(3)
                for arm in agent.arm_names:
                    low_dim_obs[f"{obj.bddl_inst}_in_gripper_{arm}"] = np.zeros(1)
        return low_dim_obs, dict()
    def _step_termination(self, env, action, info=None):
        """
        Step termination conditions, additionally exposing the predicate goal status in the info dict.

        Args:
            env (Environment): environment instance
            action (n-array): action applied this step
            info (None or dict): info dict to pass through to the superclass

        Returns:
            2-tuple:
                - bool: whether the episode is terminated
                - dict: info dict, augmented with "goal_status" from the predicate termination condition
        """
        # Run super first
        done, info = super()._step_termination(env=env, action=action, info=info)
        # Add additional info
        info["goal_status"] = self._termination_conditions["predicate"].goal_status
        return done, info
def _update_bddl_scope_from_added_obj(self, obj):
"""
Internal callback function to be called when sim.import_object() is called to potentially update internal
bddl object scope
Args:
obj (BaseObject): Newly imported object
"""
# Iterate over all entities, and if they don't exist, check if any category matches @obj's category, and set it
# if it does, and immediately return
for inst, entity in self.object_scope.items():
if not entity.exists and not entity.is_system and obj.category in set(entity.og_categories):
entity.set_entity(entity=obj)
return
def _update_bddl_scope_from_removed_obj(self, obj):
"""
Internal callback function to be called when sim.remove_object() is called to potentially update internal
bddl object scope
Args:
obj (BaseObject): Newly removed object
"""
# Iterate over all entities, and if they exist, check if any name matches @obj's name, and remove it
# if it does, and immediately return
for entity in self.object_scope.values():
if entity.exists and not entity.is_system and obj.name == entity.name:
entity.clear_entity()
return
def _update_bddl_scope_from_system_init(self, system):
"""
Internal callback function to be called when system.initialize() is called to potentially update internal
bddl object scope
Args:
system (BaseSystem): Newly initialized system
"""
# Iterate over all entities, and potentially match the system to the scope
for inst, entity in self.object_scope.items():
if not entity.exists and entity.is_system and entity.og_categories[0] == system.name:
entity.set_entity(entity=system)
return
def _update_bddl_scope_from_system_clear(self, system):
"""
Internal callback function to be called when system.clear() is called to potentially update internal
bddl object scope
Args:
system (BaseSystem): Newly cleared system
"""
# Iterate over all entities, and potentially remove the matched system from the scope
for inst, entity in self.object_scope.items():
if entity.exists and entity.is_system and system.name == entity.name:
entity.clear_entity()
return
def show_instruction(self):
"""
Get current instruction for user
Returns:
3-tuple:
- str: Current goal condition in natural language
- 3-tuple: (R,G,B) color to assign to text
- list of BaseObject: Relevant objects for the current instruction
"""
satisfied = self.currently_viewed_instruction in self._termination_conditions["predicate"].goal_status["satisfied"]
natural_language_condition = self.activity_natural_language_goal_conditions[self.currently_viewed_instruction]
objects = self.activity_goal_conditions[self.currently_viewed_instruction].get_relevant_objects()
text_color = (
[83.0 / 255.0, 176.0 / 255.0, 72.0 / 255.0] if satisfied else [255.0 / 255.0, 51.0 / 255.0, 51.0 / 255.0]
)
return natural_language_condition, text_color, objects
def iterate_instruction(self):
"""
Increment the instruction
"""
self.currently_viewed_index = (self.currently_viewed_index + 1) % len(self.activity_conditions.parsed_goal_conditions)
self.currently_viewed_instruction = self.instruction_order[self.currently_viewed_index]
    def save_task(self, path=None, override=False):
        """
        Writes the current scene configuration to a .json file

        Args:
            path (None or str): If specified, absolute fpath to the desired path to write the .json. Default is
                <gm.DATASET_PATH/scenes/<SCENE_MODEL>/json/...>
            override (bool): Whether to override any files already found at the path to write the task .json
        """
        if path is None:
            # Default to the canonical cached-task filename inside the dataset's scene folder
            fname = self.get_cached_activity_scene_filename(
                scene_model=og.sim.scene.scene_model,
                activity_name=self.activity_name,
                activity_definition_id=self.activity_definition_id,
                activity_instance_id=self.activity_instance_id,
            )
            path = os.path.join(gm.DATASET_PATH, "scenes", og.sim.scene.scene_model, "json", f"{fname}.json")
        # Refuse to clobber an existing file unless explicitly overridden
        if os.path.exists(path) and not override:
            log.warning(f"Scene json already exists at {path}. Use override=True to force writing of new json.")
            return
        # Write metadata and then save
        self.write_task_metadata()
        og.sim.save(json_path=path)
@property
def name(self):
"""
Returns:
str: Name of this task. Defaults to class name
"""
name_base = super().name
# Add activity name, def id, and inst id
return f"{name_base}_{self.activity_name}_{self.activity_definition_id}_{self.activity_instance_id}"
    @classproperty
    def valid_scene_types(cls):
        """
        Returns:
            set: Scene classes compatible with this task -- no restriction here
        """
        # Any scene can be used
        return {Scene}
    @classproperty
    def default_termination_config(cls):
        """
        Returns:
            dict: Default termination configuration; episodes time out after "max_steps" env steps
        """
        return {
            "max_steps": 500,
        }
    @classproperty
    def default_reward_config(cls):
        """
        Returns:
            dict: Default reward configuration; "r_potential" scales the potential-based reward
        """
        return {
            "r_potential": 1.0,
        }
| 25,013 | Python | 45.151291 | 143 | 0.633431 |
StanfordVL/OmniGibson/omnigibson/maps/traversable_map.py | import os
import cv2
import numpy as np
from PIL import Image
# Accommodate large maps (e.g. 10k x 10k) while suppressing DecompressionBombError
# (PIL raises on images above this pixel-count threshold; None disables the check entirely)
Image.MAX_IMAGE_PIXELS = None
from omnigibson.maps.map_base import BaseMap
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.utils.motion_planning_utils import astar
# Create module logger, named after this module
log = create_module_logger(module_name=__name__)
class TraversableMap(BaseMap):
    """
    Traversable scene class.

    Stores one traversability image per floor and exposes the navigation functionalities built on
    top of them: random free-space sampling and shortest-path computation.
    """

    def __init__(
        self,
        map_resolution=0.1,
        default_erosion_radius=0.0,
        trav_map_with_objects=True,
        num_waypoints=10,
        waypoint_resolution=0.2,
    ):
        """
        Args:
            map_resolution (float): map resolution in meters, each pixel represents this many meters;
                normally, this should be between 0.01 and 0.1
            default_erosion_radius (float): default map erosion radius in meters
            trav_map_with_objects (bool): whether to use objects or not when constructing graph
            num_waypoints (int): number of way points returned
            waypoint_resolution (float): resolution of adjacent way points
        """
        # Dataset images are stored at 0.01 m / pixel
        self.map_default_resolution = 0.01
        self.default_erosion_radius = default_erosion_radius
        self.trav_map_with_objects = trav_map_with_objects
        self.num_waypoints = num_waypoints
        self.waypoint_interval = int(waypoint_resolution / map_resolution)

        # Populated lazily once the map image(s) are loaded
        self.trav_map_original_size = None
        self.trav_map_size = None
        self.mesh_body_id = None
        self.floor_heights = None
        self.floor_map = None

        super().__init__(map_resolution=map_resolution)

    def _load_map(self, maps_path, floor_heights=(0.0,)):
        """
        Loads the traversability maps for all floors

        Args:
            maps_path (str): Path to the folder containing the traversability maps
            floor_heights (n-array): Height(s) of the floors for this map

        Returns:
            int: Size of the loaded map
        """
        if not os.path.exists(maps_path):
            log.warning("trav map does not exist: {}".format(maps_path))
            return

        self.floor_heights = floor_heights
        self.floor_map = []
        map_size = None
        # Choose the map variant with or without objects baked in
        fname_template = "floor_trav_{}.png" if self.trav_map_with_objects else "floor_trav_no_obj_{}.png"
        for floor in range(len(floor_heights)):
            img = Image.open(os.path.join(maps_path, fname_template.format(floor)))
            trav_map = np.array(img)
            # Infer the original map size from the first image, then compute the rescaled size from
            # the ratio between the dataset resolution and this map's resolution
            if self.trav_map_original_size is None:
                height, width = trav_map.shape
                assert height == width, "trav map is not a square"
                self.trav_map_original_size = height
                map_size = int(self.trav_map_original_size * self.map_default_resolution / self.map_resolution)
            # Resize to the target size and binarize pixels to {0, 255}
            trav_map = cv2.resize(trav_map, (map_size, map_size))
            trav_map[trav_map < 255] = 0
            self.floor_map.append(trav_map)
        return map_size

    @property
    def n_floors(self):
        """
        Returns:
            int: Number of floors belonging to this map's associated scene
        """
        return len(self.floor_heights)

    def _erode_trav_map(self, trav_map, robot=None):
        # Erode the traversability map to account for the robot's footprint; fall back to the
        # default erosion radius when no robot is provided
        if robot:
            chassis_extent = robot.reset_joint_pos_aabb_extent[:2]
            radius = np.linalg.norm(chassis_extent) / 2.0
        else:
            radius = self.default_erosion_radius
        radius_pixel = int(np.ceil(radius / self.map_resolution))
        return cv2.erode(trav_map, np.ones((radius_pixel, radius_pixel)))

    def get_random_point(self, floor=None, reference_point=None, robot=None):
        """
        Sample a random point on the given floor number. If not given, sample a random floor number.
        If @reference_point is given, sample a point in the same connected component as the previous point.

        Args:
            floor (None or int): floor number. None means the floor is randomly sampled
                Warning: if @reference_point is given, @floor must be given;
                otherwise, this would lead to undefined behavior
            reference_point (3-array): (x,y,z) if given, sample a point in the same connected component as this point
            robot (None or BaseRobot): if given, erode the traversability map to account for the robot's size

        Returns:
            2-tuple:
                - int: floor number. This is the sampled floor number if @floor is None
                - 3-array: (x,y,z) randomly sampled point
        """
        if reference_point is not None:
            assert floor is not None, "floor must be given if reference_point is given"
        if floor is None and reference_point is None:
            floor = np.random.randint(0, self.n_floors)

        # Work on an eroded copy so the stored map stays intact
        eroded = self._erode_trav_map(self.floor_map[floor].copy(), robot=robot)

        if reference_point is None:
            free_pixels = np.where(eroded == 255)
        else:
            # Restrict sampling to the connected component containing the reference point
            _, labels = cv2.connectedComponents(eroded, connectivity=4)
            ref_px = self.world_to_map(reference_point[:2])
            free_pixels = np.where(labels == labels[ref_px[0]][ref_px[1]])

        idx = np.random.randint(0, high=free_pixels[0].shape[0])
        xy_map = np.array([free_pixels[0][idx], free_pixels[1][idx]])
        x, y = self.map_to_world(xy_map)
        return floor, np.array([x, y, self.floor_heights[floor]])

    def get_shortest_path(self, floor, source_world, target_world, entire_path=False, robot=None):
        """
        Get the shortest path from one point to another point.

        Args:
            floor (int): floor number
            source_world (2-array): (x,y) 2D source location in world reference frame (metric)
            target_world (2-array): (x,y) 2D target location in world reference frame (metric)
            entire_path (bool): whether to return the entire path
            robot (None or BaseRobot): if given, erode the traversability map to account for the robot's size

        Returns:
            2-tuple:
                - (N, 2) array: array of path waypoints, where N is the number of generated waypoints
                - float: geodesic distance of the path
        """
        src_px = tuple(self.world_to_map(source_world))
        tgt_px = tuple(self.world_to_map(target_world))
        # Work on an eroded copy so the stored map stays intact
        eroded = self._erode_trav_map(self.floor_map[floor].copy(), robot=robot)

        path_px = astar(eroded, src_px, tgt_px)
        if path_px is None:
            # No traversable path found
            return None, None

        path_world = self.map_to_world(path_px)
        geodesic_distance = np.sum(np.linalg.norm(path_world[1:] - path_world[:-1], axis=1))
        # Subsample the dense pixel path down to the configured waypoint spacing
        path_world = path_world[:: self.waypoint_interval]
        if not entire_path:
            # Truncate to a fixed waypoint count, padding with the target if the path is short
            path_world = path_world[: self.num_waypoints]
            n_missing = self.num_waypoints - path_world.shape[0]
            if n_missing > 0:
                padding = np.tile(target_world, (n_missing, 1))
                path_world = np.concatenate((path_world, padding), axis=0)
        return path_world, geodesic_distance
| 8,862 | Python | 41.816425 | 117 | 0.61081 |
StanfordVL/OmniGibson/omnigibson/maps/map_base.py | import numpy as np
class BaseMap:
    """
    Base map class.

    Contains basic interface for converting from map (pixel) reference frame to world (metric)
    reference frame, and vice-versa.
    """

    def __init__(
        self,
        map_resolution=0.1,
    ):
        """
        Args:
            map_resolution (float): map resolution in meters per pixel
        """
        # Set internal values
        self.map_resolution = map_resolution
        self.map_size = None  # populated by load_map()

    def load_map(self, *args, **kwargs):
        """
        Loads this map internally and caches the resulting map size
        """
        # Run internal method and store map size
        self.map_size = self._load_map(*args, **kwargs)

    def _load_map(self, *args, **kwargs):
        """
        Arbitrary function to load this map. Should be implemented by subclass

        Returns:
            int: Size of the loaded map
        """
        raise NotImplementedError()

    def map_to_world(self, xy):
        """
        Transforms a 2D point in map reference frame into world (simulator) reference frame

        Args:
            xy (2-array or (N, 2)-array): 2D location(s) in map reference frame (in image pixel space)

        Returns:
            2-array or (N, 2)-array: 2D location(s) in world reference frame (in metric space)
        """
        # For a batch of points, flip along the per-point axis rather than the batch axis
        axis = 0 if len(xy.shape) == 1 else 1
        return np.flip((xy - self.map_size / 2.0) * self.map_resolution, axis=axis)

    def world_to_map(self, xy):
        """
        Transforms a 2D point in world (simulator) reference frame into map reference frame

        Args:
            xy (2-array): 2D location in world reference frame (in metric space)

        Returns:
            2-array: 2D location in map reference frame (in image pixel space)
        """
        # NOTE: the np.int alias was removed in NumPy 1.24 -- use the builtin int as the dtype instead
        return np.flip((np.array(xy) / self.map_resolution + self.map_size / 2.0)).astype(int)
| 1,751 | Python | 28.694915 | 102 | 0.571102 |
StanfordVL/OmniGibson/omnigibson/maps/__init__.py | from omnigibson.maps.map_base import BaseMap
from omnigibson.maps.traversable_map import TraversableMap
from omnigibson.maps.segmentation_map import SegmentationMap
| 165 | Python | 40.49999 | 60 | 0.872727 |
StanfordVL/OmniGibson/omnigibson/maps/segmentation_map.py | import os
import numpy as np
from PIL import Image
# Accommodate large maps (e.g. 10k x 10k) while suppressing DecompressionBombError
# (PIL raises on images above this pixel-count threshold; None disables the check entirely)
Image.MAX_IMAGE_PIXELS = None
import omnigibson as og
from omnigibson.macros import gm
from omnigibson.maps.map_base import BaseMap
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger, named after this module
log = create_module_logger(module_name=__name__)
class SegmentationMap(BaseMap):
    """
    Segmentation map for computing connectivity within the scene.

    Loads the per-room instance / semantic segmentation label images for a scene and builds
    bidirectional lookup tables between room names (e.g. "bathroom", "bathroom_0") and label ids,
    supporting room-aware point sampling and point-to-room queries.
    """

    def __init__(
        self,
        scene_dir,
        map_resolution=0.1,
        floor_heights=(0.0,),
    ):
        """
        Args:
            scene_dir (str): path to the scene directory from which segmentation info will be extracted
            map_resolution (float): map resolution
            floor_heights (list of float): heights of the floors for this segmentation map
        """
        # Store internal values
        self.scene_dir = scene_dir
        self.map_default_resolution = 0.01  # dataset images are stored at 0.01 m / pixel
        self.floor_heights = floor_heights

        # Other values that will be loaded at runtime
        self.room_sem_name_to_sem_id = None
        self.room_sem_id_to_sem_name = None
        self.room_ins_name_to_ins_id = None
        self.room_ins_id_to_ins_name = None
        self.room_sem_name_to_ins_name = None
        self.room_ins_map = None
        self.room_sem_map = None

        # Run super call
        super().__init__(map_resolution=map_resolution)

        # Load the map
        self.load_map()

    def _load_map(self):
        """
        Loads the instance and semantic segmentation images and builds the name <-> id lookup tables.

        Returns:
            int: Size (in pixels) of the loaded (resized) segmentation maps
        """
        layout_dir = os.path.join(self.scene_dir, "layout")
        room_seg_imgs = os.path.join(layout_dir, "floor_insseg_0.png")
        img_ins = Image.open(room_seg_imgs)
        room_seg_imgs = os.path.join(layout_dir, "floor_semseg_0.png")
        img_sem = Image.open(room_seg_imgs)
        # NOTE(review): PIL's Image.size is (width, height), so these names are swapped;
        # harmless here because the map is asserted square -- confirm before relaxing that assert
        height, width = img_ins.size
        assert height == width, "room seg map is not a square"
        assert img_ins.size == img_sem.size, "semantic and instance seg maps have different sizes"
        # Rescale from the dataset's native resolution to this map's resolution; nearest-neighbor
        # resize so label ids are never interpolated into invalid values
        map_size = int(height * self.map_default_resolution / self.map_resolution)
        img_ins = np.array(img_ins.resize((map_size, map_size), Image.NEAREST))
        img_sem = np.array(img_sem.resize((map_size, map_size), Image.NEAREST))
        room_categories = os.path.join(gm.DATASET_PATH, "metadata", "room_categories.txt")
        with open(room_categories, "r") as fp:
            room_cats = [line.rstrip() for line in fp.readlines()]

        # Map each semantic id to the list of instance ids belonging to it
        sem_id_to_ins_id = {}
        unique_ins_ids = np.unique(img_ins)
        # Drop the first unique id -- presumably the 0 "room boundary" label; see the == 0 checks below
        unique_ins_ids = np.delete(unique_ins_ids, 0)
        for ins_id in unique_ins_ids:
            # find one pixel for each ins id
            x, y = np.where(img_ins == ins_id)
            # retrieve the corresponding sem id
            sem_id = img_sem[x[0], y[0]]
            if sem_id not in sem_id_to_ins_id:
                sem_id_to_ins_id[sem_id] = []
            sem_id_to_ins_id[sem_id].append(ins_id)

        room_sem_name_to_sem_id = {}
        room_ins_name_to_ins_id = {}
        room_sem_name_to_ins_name = {}
        for sem_id, ins_ids in sem_id_to_ins_id.items():
            # sem ids are 1-indexed into the category list
            sem_name = room_cats[sem_id - 1]
            room_sem_name_to_sem_id[sem_name] = sem_id
            for i, ins_id in enumerate(ins_ids):
                # valid class start from 1
                ins_name = "{}_{}".format(sem_name, i)
                room_ins_name_to_ins_id[ins_name] = ins_id
                if sem_name not in room_sem_name_to_ins_name:
                    room_sem_name_to_ins_name[sem_name] = []
                room_sem_name_to_ins_name[sem_name].append(ins_name)

        # Cache forward and reverse lookup tables plus the raw label maps
        self.room_sem_name_to_sem_id = room_sem_name_to_sem_id
        self.room_sem_id_to_sem_name = {value: key for key, value in room_sem_name_to_sem_id.items()}
        self.room_ins_name_to_ins_id = room_ins_name_to_ins_id
        self.room_ins_id_to_ins_name = {value: key for key, value in room_ins_name_to_ins_id.items()}
        self.room_sem_name_to_ins_name = room_sem_name_to_ins_name
        self.room_ins_map = img_ins
        self.room_sem_map = img_sem
        return map_size

    def get_random_point_by_room_type(self, room_type):
        """
        Sample a random point on the given a specific room type @room_type.

        Args:
            room_type (str): Room type to sample random point (e.g.: "bathroom")

        Returns:
            2-tuple:
                - int: floor number. This is always 0
                - 3-array: (x,y,z) randomly sampled point in a room of type @room_type
        """
        if room_type not in self.room_sem_name_to_sem_id:
            log.warning("room_type [{}] does not exist.".format(room_type))
            return None, None
        sem_id = self.room_sem_name_to_sem_id[room_type]
        # Collect all pixels labeled with this semantic id and pick one uniformly at random
        valid_idx = np.array(np.where(self.room_sem_map == sem_id))
        random_point_map = valid_idx[:, np.random.randint(valid_idx.shape[1])]
        x, y = self.map_to_world(random_point_map)
        # assume only 1 floor
        floor = 0
        z = self.floor_heights[floor]
        return floor, np.array([x, y, z])

    def get_random_point_by_room_instance(self, room_instance):
        """
        Sample a random point on the given a specific room instance @room_instance.

        Args:
            room_instance (str): Room instance to sample random point (e.g.: "bathroom_1")

        Returns:
            2-tuple:
                - int: floor number. This is always 0
                - 3-array: (x,y,z) randomly sampled point in room @room_instance
        """
        if room_instance not in self.room_ins_name_to_ins_id:
            log.warning("room_instance [{}] does not exist.".format(room_instance))
            return None, None
        ins_id = self.room_ins_name_to_ins_id[room_instance]
        # Collect all pixels labeled with this instance id and pick one uniformly at random
        valid_idx = np.array(np.where(self.room_ins_map == ins_id))
        random_point_map = valid_idx[:, np.random.randint(valid_idx.shape[1])]
        x, y = self.map_to_world(random_point_map)
        # assume only 1 floor
        floor = 0
        z = self.floor_heights[floor]
        return floor, np.array([x, y, z])

    def get_room_type_by_point(self, xy):
        """
        Return the room type given a point

        Args:
            xy (2-array): 2D location in world reference frame (in metric space)

        Returns:
            None or str: room type that this point is in or None, if this point is not on the room segmentation map
        """
        x, y = self.world_to_map(xy)
        # Out-of-bounds points are not on any room
        if x < 0 or x >= self.room_sem_map.shape[0] or y < 0 or y >= self.room_sem_map.shape[1]:
            return None
        sem_id = self.room_sem_map[x, y]
        # room boundary
        if sem_id == 0:
            return None
        else:
            return self.room_sem_id_to_sem_name[sem_id]

    def get_room_instance_by_point(self, xy):
        """
        Return the room instance given a point

        Args:
            xy (2-array): 2D location in world reference frame (in metric space)

        Returns:
            None or str: room instance that this point is in or None, if this point is not on the room segmentation map
        """
        x, y = self.world_to_map(xy)
        # Out-of-bounds points are not on any room
        if x < 0 or x >= self.room_ins_map.shape[0] or y < 0 or y >= self.room_ins_map.shape[1]:
            return None
        ins_id = self.room_ins_map[x, y]
        # room boundary
        if ins_id == 0:
            return None
        else:
            return self.room_ins_id_to_ins_name[ins_id]
| 7,535 | Python | 36.869347 | 119 | 0.580889 |
StanfordVL/OmniGibson/omnigibson/controllers/joint_controller.py | import numpy as np
from omnigibson.controllers import IsGraspingState, ControlType, LocomotionController, ManipulationController, \
GripperController
from omnigibson.utils.python_utils import assert_valid_key
import omnigibson.utils.transform_utils as T
from omnigibson.macros import create_module_macros
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Default proportional gain used when motor_type == "position" and impedances are enabled
m.DEFAULT_JOINT_POS_KP = 50.0
m.DEFAULT_JOINT_POS_DAMPING_RATIO = 1.0  # critically damped
# Default proportional gain used when motor_type == "velocity" and impedances are enabled
m.DEFAULT_JOINT_VEL_KP = 2.0
class JointController(LocomotionController, ManipulationController, GripperController):
    """
    Controller class for joint control. Because omniverse can handle direct position / velocity / effort
    control signals, this is merely a pass-through operation from command to control (with clipping / scaling built in).

    Each controller step consists of the following:
        1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
        2a. If using delta commands, then adds the command to the current joint state
        2b. Clips the resulting command by the motor limits
    """

    def __init__(
        self,
        control_freq,
        motor_type,
        control_limits,
        dof_idx,
        command_input_limits="default",
        command_output_limits="default",
        kp=None,
        damping_ratio=None,
        use_impedances=False,
        use_delta_commands=False,
        compute_delta_in_quat_space=None,
    ):
        """
        Args:
            control_freq (int): controller loop frequency
            motor_type (str): type of motor being controlled, one of {position, velocity, effort}
            control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
                control signal. Should specify per-dof type limits, i.e.:

                "position": [[min], [max]]
                "velocity": [[min], [max]]
                "effort": [[min], [max]]
                "has_limit": [...bool...]

                Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
            dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
                controller-relevant values during control computations
            command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
                If None, no clipping will be used. If "default", range will be set to (-1, 1)
            command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
                then all inputted command values will be scaled from the input range to the output range.
                If either is None, no scaling will be used. If "default", then this range will automatically be set
                to the @control_limits entry corresponding to self.control_type
            kp (None or float): If @motor_type is "position" or "velocity" and @use_impedances=True, this is the
                proportional gain applied to the joint controller. If None, a default value will be used.
            damping_ratio (None or float): If @motor_type is "position" and @use_impedances=True, this is the
                damping ratio applied to the joint controller. If None, a default value will be used.
            use_impedances (bool): If True, will use impedances via the mass matrix to modify the desired efforts
                applied
            use_delta_commands (bool): whether inputted commands should be interpreted as delta or absolute values
            compute_delta_in_quat_space (None or List[(rx_idx, ry_idx, rz_idx), ...]): if specified, groups of
                joints that need to be processed in quaternion space to avoid gimbal lock issues normally faced by
                3 DOF rotation joints. Each group needs to consist of three idxes corresponding to the indices in
                the input space. This is only used in the delta_commands mode.
        """
        # Store arguments
        assert_valid_key(key=motor_type.lower(), valid_keys=ControlType.VALID_TYPES_STR, name="motor_type")
        self._motor_type = motor_type.lower()
        self._use_delta_commands = use_delta_commands
        self._compute_delta_in_quat_space = [] if compute_delta_in_quat_space is None else compute_delta_in_quat_space

        # Store control gains -- gains are only meaningful for impedance control, so reject them otherwise
        if self._motor_type == "position":
            kp = m.DEFAULT_JOINT_POS_KP if kp is None else kp
            damping_ratio = m.DEFAULT_JOINT_POS_DAMPING_RATIO if damping_ratio is None else damping_ratio
        elif self._motor_type == "velocity":
            kp = m.DEFAULT_JOINT_VEL_KP if kp is None else kp
            assert damping_ratio is None, "Cannot set damping_ratio for JointController with motor_type=velocity!"
        else:  # effort
            assert kp is None, "Cannot set kp for JointController with motor_type=effort!"
            assert damping_ratio is None, "Cannot set damping_ratio for JointController with motor_type=effort!"
        self.kp = kp
        # Derivative gain from the damping ratio (ratio == 1 --> critically damped)
        self.kd = None if damping_ratio is None else 2 * np.sqrt(self.kp) * damping_ratio
        self._use_impedances = use_impedances

        # When in delta mode, it doesn't make sense to infer output range using the joint limits (since that's an
        # absolute range and our values are relative). So reject the default mode option in that case.
        assert not (self._use_delta_commands and command_output_limits == "default"), \
            "Cannot use 'default' command output limits in delta commands mode of JointController. Try None instead."

        # Run super init
        super().__init__(
            control_freq=control_freq,
            control_limits=control_limits,
            dof_idx=dof_idx,
            command_input_limits=command_input_limits,
            command_output_limits=command_output_limits,
        )

    def _update_goal(self, command, control_dict):
        """
        Convert the (pre-processed) command into an absolute joint target, handling delta commands
        and quaternion-space composition for 3-DOF rotation joint groups.
        """
        # Compute the base value for the command
        base_value = control_dict[f"joint_{self._motor_type}"][self.dof_idx]

        # If we're using delta commands, add this value
        if self._use_delta_commands:
            # Apply the command to the base value.
            target = base_value + command

            # Correct any gimbal lock issues using the compute_delta_in_quat_space group.
            for rx_ind, ry_ind, rz_ind in self._compute_delta_in_quat_space:
                # Grab the starting rotations of these joints.
                start_rots = base_value[[rx_ind, ry_ind, rz_ind]]

                # Grab the delta rotations.
                delta_rots = command[[rx_ind, ry_ind, rz_ind]]

                # Compute the final rotations in the quaternion space.
                _, end_quat = T.pose_transform(np.zeros(3), T.euler2quat(delta_rots),
                                               np.zeros(3), T.euler2quat(start_rots))
                end_rots = T.quat2euler(end_quat)

                # Update the command
                target[[rx_ind, ry_ind, rz_ind]] = end_rots

        # Otherwise, goal is simply the command itself
        else:
            target = command

        # Clip the command based on the limits
        target = target.clip(
            self._control_limits[ControlType.get_type(self._motor_type)][0][self.dof_idx],
            self._control_limits[ControlType.get_type(self._motor_type)][1][self.dof_idx],
        )

        return dict(target=target)

    def compute_control(self, goal_dict, control_dict):
        """
        Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal

        Args:
            goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                goals necessary for controller computation. Must include the following keys:
                    target: desired N-dof absolute joint values used as setpoint
            control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                states necessary for controller computation. Must include the following keys:
                    joint_position: Array of current joint positions
                    joint_velocity: Array of current joint velocities
                    joint_effort: Array of current joint effort

        Returns:
            Array[float]: outputted (non-clipped!) control signal to deploy
        """
        base_value = control_dict[f"joint_{self._motor_type}"][self.dof_idx]
        target = goal_dict["target"]

        # Convert control into efforts
        if self._use_impedances:
            if self._motor_type == "position":
                # Run impedance controller -- effort = pos_err * kp + vel_err * kd
                position_error = target - base_value
                # FIX: "joint_velocity" is a fixed key -- drop the needless f-string prefix (F541)
                vel_pos_error = -control_dict["joint_velocity"][self.dof_idx]
                u = position_error * self.kp + vel_pos_error * self.kd
            elif self._motor_type == "velocity":
                # Compute command torques via P velocity controller (gravity compensation is added below)
                velocity_error = target - base_value
                u = velocity_error * self.kp
            else:  # effort
                u = target

            # Project through the mass matrix restricted to the controlled dofs
            dof_idxs_mat = tuple(np.meshgrid(self.dof_idx, self.dof_idx))
            mm = control_dict["mass_matrix"][dof_idxs_mat]
            u = np.dot(mm, u)

            # Add gravity compensation
            u += control_dict["gravity_force"][self.dof_idx] + control_dict["cc_force"][self.dof_idx]
        else:
            # Desired is the exact goal
            u = target

        # Return control
        return u

    def compute_no_op_goal(self, control_dict):
        """
        Compute a goal that holds the robot in place: current position for position control,
        zeros for velocity / effort control.
        """
        # Compute based on mode
        if self._motor_type == "position":
            # Maintain current qpos
            target = control_dict[f"joint_{self._motor_type}"][self.dof_idx]
        else:
            # For velocity / effort, directly set to 0
            target = np.zeros(self.control_dim)

        return dict(target=target)

    def _get_goal_shapes(self):
        return dict(target=(self.control_dim,))

    def is_grasping(self):
        # No good heuristic to determine grasping, so return UNKNOWN
        return IsGraspingState.UNKNOWN

    @property
    def use_delta_commands(self):
        """
        Returns:
            bool: Whether this controller is using delta commands or not
        """
        return self._use_delta_commands

    @property
    def motor_type(self):
        """
        Returns:
            str: The type of motor being simulated by this controller. One of {"position", "velocity", "effort"}
        """
        return self._motor_type

    @property
    def control_type(self):
        # Impedance control always outputs efforts; otherwise pass through the motor type
        return ControlType.EFFORT if self._use_impedances else ControlType.get_type(type_str=self._motor_type)

    @property
    def command_dim(self):
        return len(self.dof_idx)
| 11,270 | Python | 46.357143 | 120 | 0.624224 |
StanfordVL/OmniGibson/omnigibson/controllers/ik_controller.py | import numpy as np
from omnigibson.macros import gm, create_module_macros
import omnigibson.utils.transform_utils as T
from omnigibson.controllers import ControlType, ManipulationController
from omnigibson.controllers.joint_controller import JointController
from omnigibson.utils.processing_utils import MovingAverageFilter
from omnigibson.utils.control_utils import IKSolver
from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Set some macros
m = create_module_macros(module_path=__file__)
m.IK_POS_TOLERANCE = 0.002
m.IK_POS_WEIGHT = 20.0
m.IK_ORN_TOLERANCE = 0.01
m.IK_ORN_WEIGHT = 0.05
m.IK_MAX_ITERATIONS = 100
# Different modes
IK_MODE_COMMAND_DIMS = {
"absolute_pose": 6, # 6DOF (x,y,z,ax,ay,az) control of pose, whether both position and orientation is given in absolute coordinates
"pose_absolute_ori": 6, # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates
"pose_delta_ori": 6, # 6DOF (dx,dy,dz,dax,day,daz) control over pose
"position_fixed_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands being kept as fixed initial absolute orientation
"position_compliant_ori": 3, # 3DOF (dx,dy,dz) control over position, with orientation commands automatically being sent as 0s (so can drift over time)
}
IK_MODES = set(IK_MODE_COMMAND_DIMS.keys())
class InverseKinematicsController(JointController, ManipulationController):
"""
Controller class to convert (delta) EEF commands into joint velocities using Inverse Kinematics (IK).
Each controller step consists of the following:
1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
2. Run Inverse Kinematics to back out joint velocities for a desired task frame command
3. Clips the resulting command by the motor (velocity) limits
"""
def __init__(
self,
task_name,
robot_description_path,
robot_urdf_path,
eef_name,
control_freq,
reset_joint_pos,
control_limits,
dof_idx,
command_input_limits="default",
command_output_limits=((-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), (0.2, 0.2, 0.2, 0.5, 0.5, 0.5)),
kp=None,
damping_ratio=None,
use_impedances=True,
mode="pose_delta_ori",
smoothing_filter_size=None,
workspace_pose_limiter=None,
condition_on_current_position=True,
):
"""
Args:
task_name (str): name assigned to this task frame for computing IK control. During control calculations,
the inputted control_dict should include entries named <@task_name>_pos_relative and
<@task_name>_quat_relative. See self._command_to_control() for what these values should entail.
robot_description_path (str): path to robot descriptor yaml file
robot_urdf_path (str): path to robot urdf file
eef_name (str): end effector frame name
control_freq (int): controller loop frequency
reset_joint_pos (Array[float]): reset joint positions, used as part of nullspace controller in IK.
Note that this should correspond to ALL the joints; the exact indices will be extracted via @dof_idx
control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
control signal. Should specify per-dof type limits, i.e.:
"position": [[min], [max]]
"velocity": [[min], [max]]
"effort": [[min], [max]]
"has_limit": [...bool...]
Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
controller-relevant values during control computations
command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
If None, no clipping will be used. If "default", range will be set to (-1, 1)
command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
then all inputted command values will be scaled from the input range to the output range.
If either is None, no scaling will be used. If "default", then this range will automatically be set
to the @control_limits entry corresponding to self.control_type
kp (None or float): The proportional gain applied to the joint controller. If None, a default value
will be used. Only relevant if @use_impedances=True
damping_ratio (None or float): The damping ratio applied to the joint controller. If None, a default
value will be used. Only relevant if @use_impedances=True
use_impedances (bool): If True, will use impedances via the mass matrix to modify the desired efforts
applied
mode (str): mode to use when computing IK. In all cases, position commands are 3DOF delta (dx,dy,dz)
cartesian values, relative to the robot base frame. Valid options are:
- "absolute_pose": 6DOF (dx,dy,dz,ax,ay,az) control over pose,
where both the position and the orientation is given in absolute axis-angle coordinates
- "pose_absolute_ori": 6DOF (dx,dy,dz,ax,ay,az) control over pose,
where the orientation is given in absolute axis-angle coordinates
- "pose_delta_ori": 6DOF (dx,dy,dz,dax,day,daz) control over pose
- "position_fixed_ori": 3DOF (dx,dy,dz) control over position,
with orientation commands being kept as fixed initial absolute orientation
- "position_compliant_ori": 3DOF (dx,dy,dz) control over position,
with orientation commands automatically being sent as 0s (so can drift over time)
smoothing_filter_size (None or int): if specified, sets the size of a moving average filter to apply
on all outputted IK joint positions.
workspace_pose_limiter (None or function): if specified, callback method that should clip absolute
target (x,y,z) cartesian position and absolute quaternion orientation (x,y,z,w) to a specific workspace
range (i.e.: this can be unique to each robot, and implemented by each embodiment).
Function signature should be:
def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: Dict[str, Any]) --> Tuple[Array[float], Array[float]]
where target_pos is (x,y,z) cartesian position values, target_quat is (x,y,z,w) quarternion orientation
values, and the returned tuple is the processed (pos, quat) command.
condition_on_current_position (bool): if True, will use the current joint position as the initial guess for the IK algorithm.
Otherwise, will use the reset_joint_pos as the initial guess.
"""
# Store arguments
control_dim = len(dof_idx)
self.control_filter = (
None
if smoothing_filter_size in {None, 0}
else MovingAverageFilter(obs_dim=control_dim, filter_width=smoothing_filter_size)
)
assert mode in IK_MODES, f"Invalid ik mode specified! Valid options are: {IK_MODES}, got: {mode}"
self.mode = mode
self.workspace_pose_limiter = workspace_pose_limiter
self.task_name = task_name
self.reset_joint_pos = reset_joint_pos[dof_idx]
self.condition_on_current_position = condition_on_current_position
# Create the lula IKSolver
self.solver = IKSolver(
robot_description_path=robot_description_path,
robot_urdf_path=robot_urdf_path,
eef_name=eef_name,
reset_joint_pos=self.reset_joint_pos,
)
# Other variables that will be filled in at runtime
self._fixed_quat_target = None
# If the mode is set as absolute orientation and using default config,
# change input and output limits accordingly.
# By default, the input limits are set as 1, so we modify this to have a correct range.
# The output orientation limits are also set to be values assuming delta commands, so those are updated too
if self.mode == "pose_absolute_ori":
if command_input_limits is not None:
if command_input_limits == "default":
command_input_limits = [
[-1.0, -1.0, -1.0, -np.pi, -np.pi, -np.pi],
[1.0, 1.0, 1.0, np.pi, np.pi, np.pi],
]
else:
command_input_limits[0][3:] = -np.pi
command_input_limits[1][3:] = np.pi
if command_output_limits is not None:
if command_output_limits == "default":
command_output_limits = [
[-1.0, -1.0, -1.0, -np.pi, -np.pi, -np.pi],
[1.0, 1.0, 1.0, np.pi, np.pi, np.pi],
]
else:
command_output_limits[0][3:] = -np.pi
command_output_limits[1][3:] = np.pi
# Run super init
super().__init__(
control_freq=control_freq,
control_limits=control_limits,
dof_idx=dof_idx,
kp=kp,
damping_ratio=damping_ratio,
motor_type="position",
use_delta_commands=False,
use_impedances=use_impedances,
command_input_limits=command_input_limits,
command_output_limits=command_output_limits,
)
def reset(self):
# Call super first
super().reset()
# Reset the filter and clear internal control state
if self.control_filter is not None:
self.control_filter.reset()
self._fixed_quat_target = None
@property
def state_size(self):
# Add state size from the control filter
return super().state_size + self.control_filter.state_size
def _dump_state(self):
# Run super first
state = super()._dump_state()
# Add internal quaternion target and filter state
state["control_filter"] = self.control_filter.dump_state(serialized=False)
return state
def _load_state(self, state):
# Run super first
super()._load_state(state=state)
# If self._goal is populated, then set fixed_quat_target as well if the mode uses it
if self.mode == "position_fixed_ori" and self._goal is not None:
self._fixed_quat_target = self._goal["target_quat"]
# Load relevant info for this controller
self.control_filter.load_state(state["control_filter"], serialized=False)
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
# Serialize state for this controller
return np.concatenate([
state_flat,
self.control_filter.serialize(state=state["control_filter"]),
]).astype(float)
def _deserialize(self, state):
# Run super first
state_dict, idx = super()._deserialize(state=state)
# Deserialize state for this controller
state_dict["control_filter"] = self.control_filter.deserialize(state=state[idx: idx + self.control_filter.state_size])
return state_dict, idx + self.control_filter.state_size
def _update_goal(self, command, control_dict):
# Grab important info from control dict
pos_relative = np.array(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = np.array(control_dict[f"{self.task_name}_quat_relative"])
# Convert position command to absolute values if needed
if self.mode == "absolute_pose":
target_pos = command[:3]
else:
dpos = command[:3]
target_pos = pos_relative + dpos
# Compute orientation
if self.mode == "position_fixed_ori":
# We need to grab the current robot orientation as the commanded orientation if there is none saved
if self._fixed_quat_target is None:
self._fixed_quat_target = quat_relative.astype(np.float32) \
if (self._goal is None) else self._goal["target_quat"]
target_quat = self._fixed_quat_target
elif self.mode == "position_compliant_ori":
# Target quat is simply the current robot orientation
target_quat = quat_relative
elif self.mode == "pose_absolute_ori" or self.mode == "absolute_pose":
# Received "delta" ori is in fact the desired absolute orientation
target_quat = T.axisangle2quat(command[3:6])
else: # pose_delta_ori control
# Grab dori and compute target ori
dori = T.quat2mat(T.axisangle2quat(command[3:6]))
target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative))
# Possibly limit to workspace if specified
if self.workspace_pose_limiter is not None:
target_pos, target_quat = self.workspace_pose_limiter(target_pos, target_quat, control_dict)
goal_dict = dict(
target_pos=target_pos,
target_quat=target_quat,
)
return goal_dict
def compute_control(self, goal_dict, control_dict):
"""
Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal.
This processes the command based on self.mode, possibly clips the command based on self.workspace_pose_limiter,
Args:
goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
goals necessary for controller computation. Must include the following keys:
target_pos: robot-frame (x,y,z) desired end effector position
target_quat: robot-frame (x,y,z,w) desired end effector quaternion orientation
control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
states necessary for controller computation. Must include the following keys:
joint_position: Array of current joint positions
base_pos: (x,y,z) cartesian position of the robot's base relative to the static global frame
base_quat: (x,y,z,w) quaternion orientation of the robot's base relative to the static global frame
<@self.task_name>_pos_relative: (x,y,z) relative cartesian position of the desired task frame to
control, computed in its local frame (e.g.: robot base frame)
<@self.task_name>_quat_relative: (x,y,z,w) relative quaternion orientation of the desired task
frame to control, computed in its local frame (e.g.: robot base frame)
Returns:
Array[float]: outputted (non-clipped!) velocity control signal to deploy
"""
# Grab important info from control dict
pos_relative = np.array(control_dict[f"{self.task_name}_pos_relative"])
quat_relative = np.array(control_dict[f"{self.task_name}_quat_relative"])
target_pos = goal_dict["target_pos"]
target_quat = goal_dict["target_quat"]
# Calculate and return IK-backed out joint angles
current_joint_pos = control_dict["joint_position"][self.dof_idx]
# If the delta is really small, we just keep the current joint position. This avoids joint
# drift caused by IK solver inaccuracy even when zero delta actions are provided.
if np.allclose(pos_relative, target_pos, atol=1e-4) and np.allclose(quat_relative, target_quat, atol=1e-4):
target_joint_pos = current_joint_pos
else:
# Otherwise we try to solve for the IK configuration.
if self.condition_on_current_position:
target_joint_pos = self.solver.solve(
target_pos=target_pos,
target_quat=target_quat,
tolerance_pos=m.IK_POS_TOLERANCE,
tolerance_quat=m.IK_ORN_TOLERANCE,
weight_pos=m.IK_POS_WEIGHT,
weight_quat=m.IK_ORN_WEIGHT,
max_iterations=m.IK_MAX_ITERATIONS,
initial_joint_pos=current_joint_pos,
)
else:
target_joint_pos = self.solver.solve(
target_pos=target_pos,
target_quat=target_quat,
tolerance_pos=m.IK_POS_TOLERANCE,
tolerance_quat=m.IK_ORN_TOLERANCE,
weight_pos=m.IK_POS_WEIGHT,
weight_quat=m.IK_ORN_WEIGHT,
max_iterations=m.IK_MAX_ITERATIONS,
)
if target_joint_pos is None:
# Print warning that we couldn't find a valid solution, and return the current joint configuration
# instead so that we execute a no-op control
if gm.DEBUG:
log.warning(f"Could not find valid IK configuration! Returning no-op control instead.")
target_joint_pos = current_joint_pos
# Optionally pass through smoothing filter for better stability
if self.control_filter is not None:
target_joint_pos = self.control_filter.estimate(target_joint_pos)
# Run super to reach desired position / velocity setpoint
return super().compute_control(goal_dict=dict(target=target_joint_pos), control_dict=control_dict)
def compute_no_op_goal(self, control_dict):
# No-op is maintaining current pose
return dict(
target_pos=np.array(control_dict[f"{self.task_name}_pos_relative"]),
target_quat=np.array(control_dict[f"{self.task_name}_quat_relative"]),
)
def _get_goal_shapes(self):
return dict(
target_pos=(3,),
target_quat=(4,),
)
    @property
    def command_dim(self):
        # Command dimension (3 or 6) is fully determined by the active IK mode
        return IK_MODE_COMMAND_DIMS[self.mode]
| 18,891 | Python | 49.784946 | 158 | 0.616802 |
StanfordVL/OmniGibson/omnigibson/controllers/osc_controller.py | import numpy as np
from numba import jit
import omnigibson.utils.transform_utils as T
from omnigibson.controllers import ControlType, ManipulationController
from omnigibson.utils.control_utils import orientation_error
from omnigibson.utils.processing_utils import MovingAverageFilter
from omnigibson.utils.python_utils import nums2array, assert_valid_key
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Different modes: maps each supported OSC command mode to the command dimension it expects
OSC_MODE_COMMAND_DIMS = {
    "absolute_pose": 6,  # 6DOF (x,y,z,ax,ay,az) control of pose, whether both position and orientation is given in absolute coordinates
    "pose_absolute_ori": 6,  # 6DOF (dx,dy,dz,ax,ay,az) control over pose, where the orientation is given in absolute axis-angle coordinates
    "pose_delta_ori": 6,  # 6DOF (dx,dy,dz,dax,day,daz) control over pose
    "position_fixed_ori": 3,  # 3DOF (dx,dy,dz) control over position, with orientation commands being kept as fixed initial absolute orientation
    "position_compliant_ori": 3,  # 3DOF (dx,dy,dz) control over position, with orientation commands automatically being sent as 0s (so can drift over time)
}
# Set of all valid mode names, used for validating the `mode` constructor argument
OSC_MODES = set(OSC_MODE_COMMAND_DIMS.keys())
class OperationalSpaceController(ManipulationController):
"""
Controller class to convert (delta or absolute) EEF commands into joint efforts using Operational Space Control
This controller expects 6DOF delta commands (dx, dy, dz, dax, day, daz), where the delta orientation
commands are in axis-angle form, and outputs low-level torque commands.
Gains may also be considered part of the action space as well. In this case, the action space would be:
(
dx, dy, dz, dax, day, daz <-- 6DOF delta eef commands
[, kpx, kpy, kpz, kpax, kpay, kpaz] <-- kp gains
[, drx dry, drz, drax, dray, draz] <-- damping ratio gains
[, kpnx, kpny, kpnz, kpnax, kpnay, kpnaz] <-- kp null gains
)
Note that in this case, we ASSUME that the inputted gains are normalized to be in the range [-1, 1], and will
be mapped appropriately to their respective ranges, as defined by XX_limits
Alternatively, parameters (in this case, kp or damping_ratio) can either be set during initialization or provided
from an external source; if the latter, the control_dict should include the respective parameter(s) as
a part of its keys
Each controller step consists of the following:
1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
2. Run OSC to back out joint efforts for a desired task frame command
3. Clips the resulting command by the motor (effort) limits
"""
def __init__(
self,
task_name,
control_freq,
reset_joint_pos,
control_limits,
dof_idx,
command_input_limits="default",
command_output_limits=((-0.2, -0.2, -0.2, -0.5, -0.5, -0.5), (0.2, 0.2, 0.2, 0.5, 0.5, 0.5)),
kp=150.0,
kp_limits=(10.0, 300.),
damping_ratio=1.0,
damping_ratio_limits=(0.0, 2.0),
kp_null=10.0,
kp_null_limits=(0.0, 50.0),
mode="pose_delta_ori",
decouple_pos_ori=False,
workspace_pose_limiter=None,
):
"""
Args:
task_name (str): name assigned to this task frame for computing OSC control. During control calculations,
the inputted control_dict should include entries named <@task_name>_pos_relative and
<@task_name>_quat_relative. See self._command_to_control() for what these values should entail.
control_freq (int): controller loop frequency
reset_joint_pos (Array[float]): reset joint positions, used as part of nullspace controller in IK.
Note that this should correspond to ALL the joints; the exact indices will be extracted via @dof_idx
control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
control signal. Should specify per-dof type limits, i.e.:
"position": [[min], [max]]
"velocity": [[min], [max]]
"effort": [[min], [max]]
"has_limit": [...bool...]
Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
controller-relevant values during control computations
command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
If None, no clipping will be used. If "default", range will be set to (-1, 1)
command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
then all inputted command values will be scaled from the input range to the output range.
If either is None, no scaling will be used. If "default", then this range will automatically be set
to the @control_limits entry corresponding to self.control_type
kp (None, int, float, or array): Gain values to apply to 6DOF error.
If None, will be variable (part of action space)
kp_limits (2-array): (min, max) values of kp
damping_ratio (None, int, float, or array): Damping ratio to apply to 6DOF error controller gain
If None, will be variable (part of action space)
damping_ratio_limits (2-array): (min, max) values of damping ratio
kp_null (None, int, float, or array): Gain applied when calculating null torques
If None, will be variable (part of action space)
kp_null_limits (2-array): (min, max) values of kp_null
mode (str): mode to use when computing IK. In all cases, position commands are 3DOF delta (dx,dy,dz)
cartesian values, relative to the robot base frame. Valid options are:
- "pose_absolute_ori": 6DOF (dx,dy,dz,ax,ay,az) control over pose,
where the orientation is given in absolute axis-angle coordinates
- "pose_delta_ori": 6DOF (dx,dy,dz,dax,day,daz) control over pose
- "position_fixed_ori": 3DOF (dx,dy,dz) control over position,
with orientation commands being kept as fixed initial absolute orientation
- "position_compliant_ori": 3DOF (dx,dy,dz) control over position,
with orientation commands automatically being sent as 0s (so can drift over time)
decouple_pos_ori (bool): Whether to decouple position and orientation control or not
workspace_pose_limiter (None or function): if specified, callback method that should clip absolute
target (x,y,z) cartesian position and absolute quaternion orientation (x,y,z,w) to a specific workspace
range (i.e.: this can be unique to each robot, and implemented by each embodiment).
Function signature should be:
def limiter(target_pos: Array[float], target_quat: Array[float], control_dict: Dict[str, Any]) --> Tuple[Array[float], Array[float]]
where target_pos is (x,y,z) cartesian position values, target_quat is (x,y,z,w) quarternion orientation
values, and the returned tuple is the processed (pos, quat) command.
"""
# Store arguments
control_dim = len(dof_idx)
# Store gains
self.kp = nums2array(nums=kp, dim=6, dtype=np.float32) if kp is not None else None
self.damping_ratio = damping_ratio
self.kp_null = nums2array(nums=kp_null, dim=control_dim, dtype=np.float32) if kp_null is not None else None
self.kd_null = 2 * np.sqrt(self.kp_null) if kp_null is not None else None # critically damped
self.kp_limits = np.array(kp_limits, dtype=np.float32)
self.damping_ratio_limits = np.array(damping_ratio_limits, dtype=np.float32)
self.kp_null_limits = np.array(kp_null_limits, dtype=np.float32)
# Store settings for whether we're learning gains or not
self.variable_kp = self.kp is None
self.variable_damping_ratio = self.damping_ratio is None
self.variable_kp_null = self.kp_null is None
# TODO: Add support for variable gains -- for now, just raise an error
assert True not in {self.variable_kp, self.variable_damping_ratio, self.variable_kp_null}, \
"Variable gains with OSC is not supported yet!"
# If the mode is set as absolute orientation and using default config,
# change input and output limits accordingly.
# By default, the input limits are set as 1, so we modify this to have a correct range.
# The output orientation limits are also set to be values assuming delta commands, so those are updated too
assert_valid_key(key=mode, valid_keys=OSC_MODES, name="OSC mode")
self.mode = mode
if self.mode == "pose_absolute_ori":
if command_input_limits is not None:
if command_input_limits == "default":
command_input_limits = [
[-1.0, -1.0, -1.0, -np.pi, -np.pi, -np.pi],
[1.0, 1.0, 1.0, np.pi, np.pi, np.pi],
]
else:
command_input_limits[0][3:] = -np.pi
command_input_limits[1][3:] = np.pi
if command_output_limits is not None:
if command_output_limits == "default":
command_output_limits = [
[-1.0, -1.0, -1.0, -np.pi, -np.pi, -np.pi],
[1.0, 1.0, 1.0, np.pi, np.pi, np.pi],
]
else:
command_output_limits[0][3:] = -np.pi
command_output_limits[1][3:] = np.pi
is_input_limits_numeric = not (command_input_limits is None or isinstance(command_input_limits, str))
is_output_limits_numeric = not (command_output_limits is None or isinstance(command_output_limits, str))
command_input_limits = [nums2array(lim, dim=6, dtype=np.float32) for lim in command_input_limits] if is_input_limits_numeric else command_input_limits
command_output_limits = [nums2array(lim, dim=6, dtype=np.float32) for lim in command_output_limits] if is_output_limits_numeric else command_output_limits
# Modify input / output scaling based on whether we expect gains to be part of the action space
self._command_dim = OSC_MODE_COMMAND_DIMS[self.mode]
for variable_gain, gain_limits, dim in zip(
(self.variable_kp, self.variable_damping_ratio, self.variable_kp_null),
(self.kp_limits, self.damping_ratio_limits, self.kp_null_limits),
(6, 6, control_dim),
):
if variable_gain:
# Add this to input / output limits
if is_input_limits_numeric:
command_input_limits = [np.concatenate([lim, nums2array(nums=val, dim=dim, dtype=np.float32)]) for lim, val in zip(command_input_limits, (-1, 1))]
if is_output_limits_numeric:
command_output_limits = [np.concatenate([lim, nums2array(nums=val, dim=dim, dtype=np.float32)]) for lim, val in zip(command_output_limits, gain_limits)]
# Update command dim
self._command_dim += dim
# Other values
self.decouple_pos_ori = decouple_pos_ori
self.workspace_pose_limiter = workspace_pose_limiter
self.task_name = task_name
self.reset_joint_pos = reset_joint_pos[dof_idx].astype(np.float32)
# Other variables that will be filled in at runtime
self._fixed_quat_target = None
# Run super init
super().__init__(
control_freq=control_freq,
control_limits=control_limits,
dof_idx=dof_idx,
command_input_limits=command_input_limits,
command_output_limits=command_output_limits,
)
def reset(self):
# Call super first
super().reset()
# Clear internal variables
self._fixed_quat_target = None
self._clear_variable_gains()
def _load_state(self, state):
# Run super first
super()._load_state(state=state)
# If self._goal is populated, then set fixed_quat_target as well if the mode uses it
if self.mode == "position_fixed_ori" and self._goal is not None:
self._fixed_quat_target = self._goal["target_quat"]
def _clear_variable_gains(self):
"""
Helper function to clear any gains that are variable and considered part of actions
"""
if self.variable_kp:
self.kp = None
if self.variable_damping_ratio:
self.damping_ratio = None
if self.variable_kp_null:
self.kp_null = None
self.kd_null = None
def _update_variable_gains(self, gains):
"""
Helper function to update any gains that are variable and considered part of actions
Args:
gains (n-array): array where n dim is parsed based on which gains are being learned
"""
idx = 0
if self.variable_kp:
self.kp = gains[:, idx:idx + 6].astype(np.float32)
idx += 6
if self.variable_damping_ratio:
self.damping_ratio = gains[:, idx:idx + 6].astype(np.float32)
idx += 6
if self.variable_kp_null:
self.kp_null = gains[:, idx:idx + self.control_dim].astype(np.float32)
self.kd_null = 2 * np.sqrt(self.kp_null) # critically damped
idx += self.control_dim
    def _update_goal(self, command, control_dict):
        """
        Updates the internal goal (ee pos and ee ori mat) based on the inputted delta command
        Args:
            command (n-array): Preprocessed command
            control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                states necessary for controller computation. Must include the following keys:
                joint_position: Array of current joint positions
                <@self.task_name>_pos_relative: (x,y,z) relative cartesian position of the desired task frame to
                    control, computed in its local frame (e.g.: robot base frame)
                <@self.task_name>_quat_relative: (x,y,z,w) relative quaternion orientation of the desired task
                    frame to control, computed in its local frame (e.g.: robot base frame)
                <@self.task_name>_lin_vel_relative: (x,y,z) relative linear velocity of the desired task frame to
                    control, computed in its local frame (e.g.: robot base frame)
                <@self.task_name>_ang_vel_relative: (ax, ay, az) relative angular velocity of the desired task
                    frame to control, computed in its local frame (e.g.: robot base frame)
        """
        # Grab important info from control dict
        pos_relative = np.array(control_dict[f"{self.task_name}_pos_relative"])
        quat_relative = np.array(control_dict[f"{self.task_name}_quat_relative"])
        # Convert position command to absolute values if needed
        if self.mode == "absolute_pose":
            target_pos = command[:3]
        else:
            dpos = command[:3]
            target_pos = pos_relative + dpos
        # Compute orientation
        if self.mode == "position_fixed_ori":
            # We need to grab the current robot orientation as the commanded orientation if there is none saved
            # (the target is latched once and then reused for all subsequent commands until reset)
            if self._fixed_quat_target is None:
                self._fixed_quat_target = quat_relative.astype(np.float32) \
                    if (self._goal is None) else self._goal["target_quat"]
            target_quat = self._fixed_quat_target
        elif self.mode == "position_compliant_ori":
            # Target quat is simply the current robot orientation
            target_quat = quat_relative
        elif self.mode == "pose_absolute_ori" or self.mode == "absolute_pose":
            # Received "delta" ori is in fact the desired absolute orientation
            target_quat = T.axisangle2quat(command[3:6])
        else:  # pose_delta_ori control
            # Grab dori and compute target ori
            dori = T.quat2mat(T.axisangle2quat(command[3:6]))
            target_quat = T.mat2quat(dori @ T.quat2mat(quat_relative))
        # Possibly limit to workspace if specified
        if self.workspace_pose_limiter is not None:
            target_pos, target_quat = self.workspace_pose_limiter(target_pos, target_quat, control_dict)
        # Variable-gain parsing is not wired up yet (see the assertion in __init__), so gains stays None
        gains = None  # TODO! command[OSC_MODE_COMMAND_DIMS[self.mode]:]
        if gains is not None:
            self._update_variable_gains(gains=gains)
        # Set goals and return
        # Orientation is stored as a rotation matrix because compute_control passes it
        # straight to the torque computation as goal_ori_mat
        return dict(
            target_pos=target_pos.astype(np.float32),
            target_ori_mat=T.quat2mat(target_quat).astype(np.float32),
        )
    def compute_control(self, goal_dict, control_dict):
        """
        Computes low-level torque controls using internal eef goal pos / ori.
        Args:
            goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                goals necessary for controller computation. Must include the following keys:
                target_pos: robot-frame (x,y,z) desired end effector position
                target_quat: robot-frame (x,y,z,w) desired end effector quaternion orientation
            control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                states necessary for controller computation. Must include the following keys:
                joint_position: Array of current joint positions
                joint_velocity: Array of current joint velocities
                mass_matrix: (N_dof, N_dof) Current mass matrix
                <@self.task_name>_jacobian_relative: (6, N_dof) Current jacobian matrix for desired task frame
                <@self.task_name>_pos_relative: (x,y,z) relative cartesian position of the desired task frame to
                    control, computed in its local frame (e.g.: robot base frame)
                <@self.task_name>_quat_relative: (x,y,z,w) relative quaternion orientation of the desired task
                    frame to control, computed in its local frame (e.g.: robot base frame)
                <@self.task_name>_lin_vel_relative: (x,y,z) relative linear velocity of the desired task frame to
                    control, computed in its local frame (e.g.: robot base frame)
                <@self.task_name>_ang_vel_relative: (ax, ay, az) relative angular velocity of the desired task
                    frame to control, computed in its local frame (e.g.: robot base frame)
            control_dict (dict): Dictionary of state tensors including relevant info for controller computation
        Returns:
            n-array: low-level effort control actions, NOT post-processed
        """
        # TODO: Update to possibly grab parameters from dict
        # For now, always use internal values
        kp = self.kp
        damping_ratio = self.damping_ratio
        # Derivative gain derived from kp and the damping ratio (critically damped when ratio == 1)
        kd = 2 * np.sqrt(kp) * damping_ratio
        # Extract relevant values from the control dict
        # meshgrid builds the 2D index pair used to slice the controlled-dof submatrix out of the full mass matrix
        dof_idxs_mat = tuple(np.meshgrid(self.dof_idx, self.dof_idx))
        q = control_dict["joint_position"][self.dof_idx]
        qd = control_dict["joint_velocity"][self.dof_idx]
        mm = control_dict["mass_matrix"][dof_idxs_mat]
        j_eef = control_dict[f"{self.task_name}_jacobian_relative"][:, self.dof_idx]
        ee_pos = control_dict[f"{self.task_name}_pos_relative"]
        ee_quat = control_dict[f"{self.task_name}_quat_relative"]
        ee_vel = np.concatenate([control_dict[f"{self.task_name}_lin_vel_relative"], control_dict[f"{self.task_name}_ang_vel_relative"]])
        base_lin_vel = control_dict["root_rel_lin_vel"]
        base_ang_vel = control_dict["root_rel_ang_vel"]
        # Calculate torques
        # All float inputs are cast to float32 before hitting the jit-compiled helper
        u = _compute_osc_torques(
            q=q,
            qd=qd,
            mm=mm,
            j_eef=j_eef,
            ee_pos=ee_pos.astype(np.float32),
            ee_mat=T.quat2mat(ee_quat).astype(np.float32),
            ee_vel=ee_vel.astype(np.float32),
            goal_pos=goal_dict["target_pos"],
            goal_ori_mat=goal_dict["target_ori_mat"],
            kp=kp,
            kd=kd,
            kp_null=self.kp_null,
            kd_null=self.kd_null,
            rest_qpos=self.reset_joint_pos,
            control_dim=self.control_dim,
            decouple_pos_ori=self.decouple_pos_ori,
            base_lin_vel=base_lin_vel.astype(np.float32),
            base_ang_vel=base_ang_vel.astype(np.float32),
        ).flatten()
        # Apply gravity compensation from the control dict
        u += control_dict["gravity_force"][self.dof_idx] + control_dict["cc_force"][self.dof_idx]
        # Return the control torques
        return u
def compute_no_op_goal(self, control_dict):
# No-op is maintaining current pose
target_pos = np.array(control_dict[f"{self.task_name}_pos_relative"])
target_quat = np.array(control_dict[f"{self.task_name}_quat_relative"])
# Convert quat into eef ori mat
return dict(
target_pos=target_pos.astype(np.float32),
target_ori_mat=T.quat2mat(target_quat).astype(np.float32),
)
def _get_goal_shapes(self):
return dict(
target_pos=(3,),
target_ori_mat=(3, 3),
)
    @property
    def control_type(self):
        # OSC outputs joint efforts (torques)
        return ControlType.EFFORT
    @property
    def command_dim(self):
        # Base dimension from the OSC mode, plus any variable-gain dims added in __init__
        return self._command_dim
# Use numba since faster
@jit(nopython=True)
def _compute_osc_torques(
    q,
    qd,
    mm,
    j_eef,
    ee_pos,
    ee_mat,
    ee_vel,
    goal_pos,
    goal_ori_mat,
    kp,
    kd,
    kp_null,
    kd_null,
    rest_qpos,
    control_dim,
    decouple_pos_ori,
    base_lin_vel,
    base_ang_vel,
):
    """
    Computes operational space control (OSC) joint torques that drive the end effector
    toward @goal_pos / @goal_ori_mat, with an optional nullspace term pulling the joints
    toward @rest_qpos.

    The desired task-space wrench is kp * pose_error + kd * velocity_error, mapped to
    joint torques through the end-effector Jacobian and the task-space inertia matrix.
    If @decouple_pos_ori is True, the position and orientation wrenches are computed with
    separate 3x3 task-space inertia matrices instead of one 6x6 matrix.

    NOTE(review): all float inputs are assumed to be float32 arrays (callers cast with
    .astype(np.float32)); numba nopython mode will specialize on the given dtypes.
    """
    # Compute the inverse
    mm_inv = np.linalg.inv(mm)

    # Calculate error
    pos_err = goal_pos - ee_pos
    ori_err = orientation_error(goal_ori_mat, ee_mat).astype(np.float32)
    err = np.concatenate((pos_err, ori_err))

    # Vel target is the base velocity as experienced by the end effector
    # For angular velocity, this is just the base angular velocity
    # For linear velocity, this is the base linear velocity PLUS the net linear velocity experienced
    # due to the base linear velocity
    lin_vel_err = base_lin_vel + np.cross(base_ang_vel, ee_pos)
    vel_err = np.concatenate((lin_vel_err, base_ang_vel)) - ee_vel

    # Determine desired wrench: PD law in task space, expanded to a column vector
    err = np.expand_dims(kp * err + kd * vel_err, axis=-1)
    # Task-space (end-effector) inertia matrix: inv(J * M^-1 * J^T)
    m_eef_inv = j_eef @ mm_inv @ j_eef.T
    m_eef = np.linalg.inv(m_eef_inv)

    if decouple_pos_ori:
        # # More efficient, but numba doesn't support 3D tensor operations yet
        # j_eef_batch = j_eef.reshape(2, 3, -1)
        # m_eef_pose_inv = np.matmul(np.matmul(j_eef_batch, np.expand_dims(mm_inv, axis=0)), np.transpose(j_eef_batch, (0, 2, 1)))
        # m_eef_pose = np.linalg.inv(m_eef_pose_inv)  # Shape (2, 3, 3)
        # wrench = np.matmul(m_eef_pose, err.reshape(2, 3, 1)).flatten()
        # Decoupled variant: separate 3x3 task-space inertias for position (rows 0-2 of J)
        # and orientation (rows 3-5 of J)
        m_eef_pos_inv = j_eef[:3, :] @ mm_inv @ j_eef[:3, :].T
        m_eef_ori_inv = j_eef[3:, :] @ mm_inv @ j_eef[3:, :].T
        m_eef_pos = np.linalg.inv(m_eef_pos_inv)
        m_eef_ori = np.linalg.inv(m_eef_ori_inv)
        wrench_pos = m_eef_pos @ err[:3, :]
        wrench_ori = m_eef_ori @ err[3:, :]
        wrench = np.concatenate((wrench_pos, wrench_ori))
    else:
        wrench = m_eef @ err

    # Compute OSC torques: map the task-space wrench back into joint space
    u = j_eef.T @ wrench

    # Nullspace control torques `u_null` prevents large changes in joint configuration
    # They are added into the nullspace of OSC so that the end effector orientation remains constant
    # roboticsproceedings.org/rss07/p31.pdf
    if rest_qpos is not None:
        # Dynamically-consistent Jacobian pseudo-inverse
        j_eef_inv = m_eef @ j_eef @ mm_inv
        # Angle-wrapped PD pull toward the rest configuration
        u_null = kd_null * -qd + kp_null * ((rest_qpos - q + np.pi) % (2 * np.pi) - np.pi)
        u_null = mm @ np.expand_dims(u_null, axis=-1).astype(np.float32)
        # Project the rest-pose torques into the task nullspace so they don't disturb the EEF
        u += (np.eye(control_dim, dtype=np.float32) - j_eef.T @ j_eef_inv) @ u_null

    return u
| 25,020 | Python | 49.649797 | 172 | 0.611791 |
StanfordVL/OmniGibson/omnigibson/controllers/null_joint_controller.py | import numpy as np
from omnigibson.controllers import JointController
class NullJointController(JointController):
    """
    Dummy Controller class for a null-type of joint control (i.e.: no control or constant pass-through control).

    This class has a zero-size command space, and returns either an empty array for control if dof_idx is None
    else constant values as specified by @default_command (if not specified, uses zeros)
    """

    def __init__(
        self,
        control_freq,
        motor_type,
        control_limits,
        dof_idx,
        command_input_limits="default",
        command_output_limits="default",
        default_command=None,
        kp=None,
        damping_ratio=None,
        use_impedances=False,
    ):
        """
        Args:
            control_freq (int): controller loop frequency
            motor_type (str): type of motor being controlled, one of {position, velocity, effort}
            control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
                control signal. Should specify per-dof type limits, i.e.:

                "position": [[min], [max]]
                "velocity": [[min], [max]]
                "effort": [[min], [max]]
                "has_limit": [...bool...]

                Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
            dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
                controller-relevant values during control computations
            command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
                If None, no clipping will be used. If "default", range will be set to (-1, 1)
            command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
                then all inputted command values will be scaled from the input range to the output range.
                If either is None, no scaling will be used. If "default", then this range will automatically be set
                to the @control_limits entry corresponding to self.control_type
            default_command (None or n-array): if specified, should be the same length as @dof_idx, specifying
                the constant control this controller will always output. If None, zeros are used
            kp (None or float): If @motor_type is "position" or "velocity" and @use_impedances=True, this is the
                proportional gain applied to the joint controller. If None, a default value will be used.
            damping_ratio (None or float): If @motor_type is "position" and @use_impedances=True, this is the
                damping ratio applied to the joint controller. If None, a default value will be used.
            use_impedances (bool): If True, will use impedances via the mass matrix to modify the desired efforts
                applied
        """
        # Cache the constant command this controller will always emit (zeros by default)
        if default_command is None:
            self._default_command = np.zeros(len(dof_idx))
        else:
            self._default_command = np.array(default_command)

        # Run super init
        super().__init__(
            control_freq=control_freq,
            motor_type=motor_type,
            control_limits=control_limits,
            dof_idx=dof_idx,
            command_input_limits=command_input_limits,
            command_output_limits=command_output_limits,
            kp=kp,
            damping_ratio=damping_ratio,
            use_impedances=use_impedances,
            use_delta_commands=False,
        )

    def compute_no_op_goal(self, control_dict):
        # The stored default command doubles as the no-op goal
        return {"target": self._default_command}

    def _preprocess_command(self, command):
        # Ignore whatever was commanded and always emit a copy of the stored default
        return np.array(self._default_command)

    def update_default_goal(self, target):
        """
        Updates the internal default command value.

        Args:
            target (n-array): New default command values to set for this controller.
                Must be of length self.control_dim
        """
        assert len(target) == self.control_dim, \
            f"Default control must be length: {self.control_dim}, got length: {len(target)}"

        self._default_command = np.array(target)
| 4,495 | Python | 46.829787 | 116 | 0.625362 |
StanfordVL/OmniGibson/omnigibson/controllers/__init__.py | from omnigibson.controllers.controller_base import (
REGISTERED_CONTROLLERS,
REGISTERED_LOCOMOTION_CONTROLLERS,
REGISTERED_MANIPULATION_CONTROLLERS,
IsGraspingState,
ControlType,
LocomotionController,
ManipulationController,
GripperController,
)
from omnigibson.controllers.dd_controller import DifferentialDriveController
from omnigibson.controllers.osc_controller import OperationalSpaceController
from omnigibson.controllers.ik_controller import InverseKinematicsController
from omnigibson.controllers.joint_controller import JointController
from omnigibson.controllers.multi_finger_gripper_controller import MultiFingerGripperController
from omnigibson.controllers.null_joint_controller import NullJointController
from omnigibson.utils.python_utils import assert_valid_key
def create_controller(name, **kwargs):
    """
    Creates a controller of type @name with corresponding necessary keyword arguments @kwargs

    Args:
        name (str): type of controller to use (e.g. JointController, InverseKinematicsController, etc.)
        **kwargs: Any relevant keyword arguments to pass to the controller

    Returns:
        Controller: created controller
    """
    # Validate the requested name first so the user gets a helpful error on a typo
    assert_valid_key(key=name, valid_keys=REGISTERED_CONTROLLERS, name="controller")

    # Instantiate the registered class with the user-supplied kwargs
    return REGISTERED_CONTROLLERS[name](**kwargs)
| 1,375 | Python | 38.314285 | 103 | 0.794182 |
StanfordVL/OmniGibson/omnigibson/controllers/dd_controller.py | import numpy as np
from omnigibson.controllers import ControlType, LocomotionController
class DifferentialDriveController(LocomotionController):
    """
    Differential drive (DD) controller for controlling two independently controlled wheeled joints.

    Each controller step consists of the following:
        1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
        2. Convert desired (lin_vel, ang_vel) command into (left, right) wheel joint velocity control signals
        3. Clips the resulting command by the joint velocity limits
    """

    def __init__(
        self,
        wheel_radius,
        wheel_axle_length,
        control_freq,
        control_limits,
        dof_idx,
        command_input_limits="default",
        command_output_limits="default",
    ):
        """
        Args:
            wheel_radius (float): radius of the wheels (both assumed to be same radius)
            wheel_axle_length (float): perpendicular distance between the two wheels
            control_freq (int): controller loop frequency
            control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
                control signal. Should specify per-dof type limits, i.e.:

                "position": [[min], [max]]
                "velocity": [[min], [max]]
                "effort": [[min], [max]]
                "has_limit": [...bool...]

                Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
            dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
                controller-relevant values during control computations
            command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
                If None, no clipping will be used. If "default", range will be set to (-1, 1)
            command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
                then all inputted command values will be scaled from the input range to the output range.
                If either is None, no scaling will be used. If "default", then this range will automatically be set
                to the maximum linear and angular velocities calculated from @wheel_radius, @wheel_axle_length, and
                @control_limits velocity limits entry
        """
        # Cache the wheel geometry; the kinematics equations use the half axle length
        self._wheel_radius = wheel_radius
        self._wheel_axle_halflength = wheel_axle_length / 2.0

        # "default" output limits map commands onto the platform's maximum linear / angular speeds,
        # which are derived from the wheel joint velocity limits and the wheel geometry
        if command_output_limits == "default":
            min_vels = control_limits["velocity"][0][dof_idx]
            assert (
                min_vels[0] == min_vels[1]
            ), "Differential drive requires both wheel joints to have same min velocities!"
            max_vels = control_limits["velocity"][1][dof_idx]
            assert (
                max_vels[0] == max_vels[1]
            ), "Differential drive requires both wheel joints to have same max velocities!"
            assert abs(min_vels[0]) == abs(
                max_vels[0]
            ), "Differential drive requires both wheel joints to have same min and max absolute velocities!"
            max_lin_vel = max_vels[0] * wheel_radius
            max_ang_vel = max_lin_vel * 2.0 / wheel_axle_length
            command_output_limits = ((-max_lin_vel, -max_ang_vel), (max_lin_vel, max_ang_vel))

        # Run super init
        super().__init__(
            control_freq=control_freq,
            control_limits=control_limits,
            dof_idx=dof_idx,
            command_input_limits=command_input_limits,
            command_output_limits=command_output_limits,
        )

    def _update_goal(self, command, control_dict):
        # The preprocessed (lin_vel, ang_vel) command is itself the velocity goal
        return {"vel": command}

    def compute_control(self, goal_dict, control_dict):
        """
        Converts the (already preprocessed) inputted @command into deployable (non-clipped!) joint control signal.
        This processes converts the desired (lin_vel, ang_vel) command into (left, right) wheel joint velocity control
        signals.

        Args:
            goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                goals necessary for controller computation. Must include the following keys:

                vel: desired (lin_vel, ang_vel) of the controlled body
            control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                states necessary for controller computation

        Returns:
            Array[float]: outputted (non-clipped!) velocity control signal to deploy
                to the [left, right] wheel joints
        """
        lin_vel, ang_vel = goal_dict["vel"]

        # Standard differential-drive inverse kinematics:
        # wheel speed = (v -/+ w * L/2) / r for the left / right wheel respectively
        left_wheel_vel = (lin_vel - ang_vel * self._wheel_axle_halflength) / self._wheel_radius
        right_wheel_vel = (lin_vel + ang_vel * self._wheel_axle_halflength) / self._wheel_radius

        return np.array([left_wheel_vel, right_wheel_vel])

    def compute_no_op_goal(self, control_dict):
        # Zero linear and zero angular velocity -> stay in place
        return {"vel": np.zeros(2)}

    def _get_goal_shapes(self):
        # Single (lin_vel, ang_vel) goal vector
        return {"vel": (2,)}

    @property
    def control_type(self):
        # Wheel joints are commanded with velocities
        return ControlType.VELOCITY

    @property
    def command_dim(self):
        # [lin_vel, ang_vel]
        return 2
| 6,042 | Python | 46.210937 | 118 | 0.625455 |
StanfordVL/OmniGibson/omnigibson/controllers/multi_finger_gripper_controller.py | import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.controllers import IsGraspingState, ControlType, GripperController
from omnigibson.utils.python_utils import assert_valid_key
# Valid control modes for MultiFingerGripperController (see the class docstring for semantics)
VALID_MODES = {
    "binary",
    "smooth",
    "independent",
}


# Create settings for this module
m = create_module_macros(module_path=__file__)

# is_grasping heuristics parameters
m.POS_TOLERANCE = 0.002  # arbitrary heuristic: joint-position tolerance (rad / m) for grasp detection
m.VEL_TOLERANCE = 0.01  # arbitrary heuristic: joint-velocity tolerance for grasp detection
class MultiFingerGripperController(GripperController):
    """
    Controller class for multi finger gripper control. This either interprets an input as a binary
    command (open / close), continuous command (open / close with scaled velocities), or per-joint continuous command

    Each controller step consists of the following:
        1. Clip + Scale inputted command according to @command_input_limits and @command_output_limits
        2a. Convert command into gripper joint control signals
        2b. Clips the resulting control by the motor limits
    """

    def __init__(
        self,
        control_freq,
        motor_type,
        control_limits,
        dof_idx,
        command_input_limits="default",
        command_output_limits="default",
        inverted=False,
        mode="binary",
        open_qpos=None,
        closed_qpos=None,
        limit_tolerance=0.001,
    ):
        """
        Args:
            control_freq (int): controller loop frequency
            motor_type (str): type of motor being controlled, one of {position, velocity, effort}
            control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
                control signal. Should specify per-dof type limits, i.e.:

                "position": [[min], [max]]
                "velocity": [[min], [max]]
                "effort": [[min], [max]]
                "has_limit": [...bool...]

                Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
            dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
                controller-relevant values during control computations
            command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
                If None, no clipping will be used. If "default", range will be set to (-1, 1)
            command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
                if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
                then all inputted command values will be scaled from the input range to the output range.
                If either is None, no scaling will be used. If "default", then this range will automatically be set
                to the @control_limits entry corresponding to self.control_type
            inverted (bool): whether or not the command direction (grasp is negative) and the control direction are
                inverted, e.g. to grasp you need to move the joint in the positive direction.
            mode (str): mode for this controller. Valid options are:

                "binary": 1D command, if preprocessed value > 0 is interpreted as an max open
                    (send max pos / vel / tor signal), otherwise send max close control signals
                "smooth": 1D command, sends symmetric signal to both finger joints equal to the preprocessed commands
                "independent": 2D command, sends independent signals to each finger joint equal to the preprocessed command
            open_qpos (None or Array[float]): If specified, the joint positions representing a fully-opened gripper.
                This is to allow representing the open state as a partially opened gripper, rather than the full
                opened gripper. If None, will simply use the native joint limits of the gripper joints. Only relevant
                if using @mode=binary and @motor_type=position
            closed_qpos (None or Array[float]): If specified, the joint positions representing a fully-closed gripper.
                This is to allow representing the closed state as a partially closed gripper, rather than the full
                closed gripper. If None, will simply use the native joint limits of the gripper joints. Only relevant
                if using @mode=binary and @motor_type=position
            limit_tolerance (float): sets the tolerance from the joint limit ends, below which controls will be zeroed
                out if the control is using velocity or effort control
        """
        # Store arguments. NOTE: the valid motor-type strings are "position" / "velocity" / "effort"
        # (per ControlType.VALID_TYPES_STR) -- NOT "torque"
        assert_valid_key(key=motor_type.lower(), valid_keys=ControlType.VALID_TYPES_STR, name="motor_type")
        self._motor_type = motor_type.lower()
        assert_valid_key(key=mode, valid_keys=VALID_MODES, name="mode for multi finger gripper")
        self._inverted = inverted
        self._mode = mode
        self._limit_tolerance = limit_tolerance
        self._open_qpos = open_qpos if open_qpos is None else np.array(open_qpos)
        self._closed_qpos = closed_qpos if closed_qpos is None else np.array(closed_qpos)

        # Create other args to be filled in at runtime
        self._is_grasping = IsGraspingState.FALSE

        # If we're using binary signal, we override the command output limits
        if mode == "binary":
            command_output_limits = (-1.0, 1.0)

        # Run super init
        super().__init__(
            control_freq=control_freq,
            control_limits=control_limits,
            dof_idx=dof_idx,
            command_input_limits=command_input_limits,
            command_output_limits=command_output_limits,
        )

    def reset(self):
        # Call super first
        super().reset()

        # reset grasping state
        self._is_grasping = IsGraspingState.FALSE

    def _preprocess_command(self, command):
        # We extend this method to make sure command is always 2D
        if self._mode != "independent":
            command = (
                np.array([command] * self.command_dim)
                if type(command) in {int, float}
                else np.array([command[0]] * self.command_dim)
            )

        # Flip the command if the direction is inverted.
        if self._inverted:
            command = self._command_input_limits[1] - (command - self._command_input_limits[0])

        # Return from super method
        return super()._preprocess_command(command=command)

    def _update_goal(self, command, control_dict):
        # Directly store command as the goal
        return dict(target=command)

    def compute_control(self, goal_dict, control_dict):
        """
        Converts the (already preprocessed) inputted @command into deployable (non-clipped!) gripper
        joint control signal

        Args:
            goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                goals necessary for controller computation. Must include the following keys:

                target: desired gripper target
            control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                states necessary for controller computation. Must include the following keys:

                joint_position: Array of current joint positions
                joint_velocity: Array of current joint velocities

        Returns:
            Array[float]: outputted (non-clipped!) control signal to deploy
        """
        target = goal_dict["target"]
        joint_pos = control_dict["joint_position"][self.dof_idx]
        # Choose what to do based on control mode
        if self._mode == "binary":
            # Use max control signal
            if target[0] >= 0.0:
                u = self._control_limits[ControlType.get_type(self._motor_type)][1][self.dof_idx] \
                    if self._open_qpos is None else self._open_qpos
            else:
                u = self._control_limits[ControlType.get_type(self._motor_type)][0][self.dof_idx] \
                    if self._closed_qpos is None else self._closed_qpos
        else:
            # Use continuous signal
            u = target

        # If we're near the joint limits and we're using velocity / effort control, we zero out the action
        # BUGFIX: this previously checked for "torque", which is never a valid motor_type
        # (ControlType.VALID_TYPES_STR uses "effort"), silently disabling this safeguard for effort control
        if self._motor_type in {"velocity", "effort"}:
            violate_upper_limit = (
                joint_pos > self._control_limits[ControlType.POSITION][1][self.dof_idx] - self._limit_tolerance
            )
            violate_lower_limit = (
                joint_pos < self._control_limits[ControlType.POSITION][0][self.dof_idx] + self._limit_tolerance
            )
            violation = np.logical_or(violate_upper_limit * (u > 0), violate_lower_limit * (u < 0))
            # BUGFIX: multiply out-of-place. In binary mode @u may alias the cached control-limit /
            # open_qpos / closed_qpos arrays, so an in-place `u *= ...` would permanently corrupt them
            u = u * ~violation

        # Update whether we're grasping or not
        self._update_grasping_state(control_dict=control_dict)

        # Return control
        return u

    def _update_grasping_state(self, control_dict):
        """
        Updates internal inferred grasping state of the gripper being controlled by this gripper controller

        Args:
            control_dict (dict): dictionary that should include any relevant keyword-mapped
                states necessary for controller computation. Must include the following keys:

                joint_position: Array of current joint positions
                joint_velocity: Array of current joint velocities
        """
        # Calculate grasping state based on mode of this controller

        # Independent mode of MultiFingerGripperController does not have any good heuristics to determine is_grasping
        if self._mode == "independent":
            is_grasping = IsGraspingState.UNKNOWN

        # No control has been issued before -- we assume not grasping
        elif self._control is None:
            is_grasping = IsGraspingState.FALSE

        else:
            assert np.all(
                self._control == self._control[0]
            ), f"MultiFingerGripperController has different values in the command for non-independent mode: {self._control}"

            assert m.POS_TOLERANCE > self._limit_tolerance, (
                "Joint position tolerance for is_grasping heuristics checking is smaller than or equal to the "
                "gripper controller's tolerance of zero-ing out velocities, which makes the heuristics invalid."
            )

            finger_pos = control_dict["joint_position"][self.dof_idx]

            # For joint position control, if the desired positions are the same as the current positions, is_grasping unknown
            if (
                self._motor_type == "position"
                and np.mean(np.abs(finger_pos - self._control)) < m.POS_TOLERANCE
            ):
                is_grasping = IsGraspingState.UNKNOWN

            # For joint velocity / effort control, if the desired velocities / efforts are zeros, is_grasping unknown
            # BUGFIX: this previously checked for "torque" instead of "effort" (see compute_control)
            elif (
                self._motor_type in {"velocity", "effort"}
                and np.mean(np.abs(self._control)) < m.VEL_TOLERANCE
            ):
                is_grasping = IsGraspingState.UNKNOWN

            # Otherwise, the last control signal intends to "move" the gripper
            else:
                finger_vel = control_dict["joint_velocity"][self.dof_idx]
                min_pos = self._control_limits[ControlType.POSITION][0][self.dof_idx]
                max_pos = self._control_limits[ControlType.POSITION][1][self.dof_idx]

                # Make sure we don't have any invalid values (i.e.: fingers should be within the limits)
                assert np.all(
                    (min_pos <= finger_pos) * (finger_pos <= max_pos)
                ), f"Got invalid finger joint positions when checking for grasp! " \
                   f"min: {min_pos}, max: {max_pos}, finger_pos: {finger_pos}"

                # Check distance from both ends of the joint limits
                dist_from_lower_limit = finger_pos - min_pos
                dist_from_upper_limit = max_pos - finger_pos

                # If the joint positions are not near the joint limits with some tolerance (m.POS_TOLERANCE)
                valid_grasp_pos = (
                    np.mean(dist_from_lower_limit) > m.POS_TOLERANCE
                    and np.mean(dist_from_upper_limit) > m.POS_TOLERANCE
                )

                # And the joint velocities are close to zero with some tolerance (m.VEL_TOLERANCE)
                valid_grasp_vel = np.all(np.abs(finger_vel) < m.VEL_TOLERANCE)

                # Then the gripper is grasping something, which stops the gripper from reaching its desired state
                is_grasping = (
                    IsGraspingState.TRUE if valid_grasp_pos and valid_grasp_vel else IsGraspingState.FALSE
                )

        # Store calculated state
        self._is_grasping = is_grasping

    def compute_no_op_goal(self, control_dict):
        # Just use a zero vector
        return dict(target=np.zeros(self.command_dim))

    def _get_goal_shapes(self):
        return dict(target=(self.command_dim,))

    def is_grasping(self):
        # Return cached value
        return self._is_grasping

    @property
    def control_type(self):
        return ControlType.get_type(type_str=self._motor_type)

    @property
    def command_dim(self):
        return len(self.dof_idx) if self._mode == "independent" else 1
| 13,703 | Python | 46.255172 | 125 | 0.619353 |
StanfordVL/OmniGibson/omnigibson/controllers/controller_base.py | from collections.abc import Iterable
from enum import IntEnum
import numpy as np
from omnigibson.utils.python_utils import classproperty, assert_valid_key, Serializable, Registerable, Recreatable
# Global dicts mapping registered controller class names -> classes; populated by the
# register_* helpers below and by controller subclass registration
REGISTERED_CONTROLLERS = dict()
REGISTERED_LOCOMOTION_CONTROLLERS = dict()
REGISTERED_MANIPULATION_CONTROLLERS = dict()
REGISTERED_GRIPPER_CONTROLLERS = dict()
def register_locomotion_controller(cls):
    # Record @cls in the locomotion registry, keyed by class name; first registration wins
    REGISTERED_LOCOMOTION_CONTROLLERS.setdefault(cls.__name__, cls)
def register_manipulation_controller(cls):
    # Record @cls in the manipulation registry, keyed by class name; first registration wins
    REGISTERED_MANIPULATION_CONTROLLERS.setdefault(cls.__name__, cls)
def register_gripper_controller(cls):
    # Record @cls in the gripper registry, keyed by class name; first registration wins
    REGISTERED_GRIPPER_CONTROLLERS.setdefault(cls.__name__, cls)
class IsGraspingState(IntEnum):
    """
    Tri-state value describing whether a gripper is currently grasping something (TRUE),
    definitely not grasping (FALSE), or the heuristics cannot tell (UNKNOWN)
    """
    TRUE = 1
    UNKNOWN = 0
    FALSE = -1
# Define macros
class ControlType:
    """
    Enumeration-style namespace mapping valid controller output types
    (none / position / velocity / effort) to integer constants
    """
    NONE = -1
    POSITION = 0
    VELOCITY = 1
    EFFORT = 2
    _MAPPING = {
        "none": NONE,
        "position": POSITION,
        "velocity": VELOCITY,
        "effort": EFFORT,
    }
    VALID_TYPES = set(_MAPPING.values())
    VALID_TYPES_STR = set(_MAPPING.keys())

    @classmethod
    def get_type(cls, type_str):
        """
        Args:
            type_str (str): One of "position", "velocity", or "effort" (any case), and maps it
                to the corresponding type

        Returns:
            ControlType: control type corresponding to the associated string
        """
        # Normalize case once, validate, then look up
        key = type_str.lower()
        assert_valid_key(key=key, valid_keys=cls._MAPPING, name="control type")
        return cls._MAPPING[key]
class BaseController(Serializable, Registerable, Recreatable):
"""
An abstract class with interface for mapping specific types of commands to deployable control signals.
"""
def __init__(
self,
control_freq,
control_limits,
dof_idx,
command_input_limits="default",
command_output_limits="default",
):
"""
Args:
control_freq (int): controller loop frequency
control_limits (Dict[str, Tuple[Array[float], Array[float]]]): The min/max limits to the outputted
control signal. Should specify per-dof type limits, i.e.:
"position": [[min], [max]]
"velocity": [[min], [max]]
"effort": [[min], [max]]
"has_limit": [...bool...]
Values outside of this range will be clipped, if the corresponding joint index in has_limit is True.
dof_idx (Array[int]): specific dof indices controlled by this robot. Used for inferring
controller-relevant values during control computations
command_input_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max acceptable inputted command. Values outside this range will be clipped.
If None, no clipping will be used. If "default", range will be set to (-1, 1)
command_output_limits (None or "default" or Tuple[float, float] or Tuple[Array[float], Array[float]]):
if set, is the min/max scaled command. If both this value and @command_input_limits is not None,
then all inputted command values will be scaled from the input range to the output range.
If either is None, no scaling will be used. If "default", then this range will automatically be set
to the @control_limits entry corresponding to self.control_type
"""
# Store arguments
self._control_freq = control_freq
self._control_limits = {}
for motor_type in {"position", "velocity", "effort"}:
if motor_type not in control_limits:
continue
self._control_limits[ControlType.get_type(motor_type)] = [
np.array(control_limits[motor_type][0]),
np.array(control_limits[motor_type][1]),
]
assert "has_limit" in control_limits, "Expected has_limit specified in control_limits, but does not exist."
self._dof_has_limits = control_limits["has_limit"]
self._dof_idx = np.array(dof_idx, dtype=int)
# Generate goal information
self._goal_shapes = self._get_goal_shapes()
self._goal_dim = int(np.sum([np.product(shape) for shape in self._goal_shapes.values()]))
# Initialize some other variables that will be filled in during runtime
self._control = None
self._goal = None
self._command_scale_factor = None
self._command_output_transform = None
self._command_input_transform = None
# Standardize command input / output limits to be (min_array, max_array)
command_input_limits = (-1.0, 1.0) if command_input_limits == "default" else command_input_limits
command_output_limits = (
(
np.array(self._control_limits[self.control_type][0])[self.dof_idx],
np.array(self._control_limits[self.control_type][1])[self.dof_idx],
)
if command_output_limits == "default"
else command_output_limits
)
self._command_input_limits = (
None
if command_input_limits is None
else (
self.nums2array(command_input_limits[0], self.command_dim),
self.nums2array(command_input_limits[1], self.command_dim),
)
)
self._command_output_limits = (
None
if command_output_limits is None
else (
self.nums2array(command_output_limits[0], self.command_dim),
self.nums2array(command_output_limits[1], self.command_dim),
)
)
def _preprocess_command(self, command):
"""
Clips + scales inputted @command according to self.command_input_limits and self.command_output_limits.
If self.command_input_limits is None, then no clipping will occur. If either self.command_input_limits
or self.command_output_limits is None, then no scaling will occur.
Args:
command (Array[float] or float): Inputted command vector
Returns:
Array[float]: Processed command vector
"""
# Make sure command is a np.array
command = np.array([command]) if type(command) in {int, float} else np.array(command)
# We only clip and / or scale if self.command_input_limits exists
if self._command_input_limits is not None:
# Clip
command = command.clip(*self._command_input_limits)
if self._command_output_limits is not None:
# If we haven't calculated how to scale the command, do that now (once)
if self._command_scale_factor is None:
self._command_scale_factor = abs(
self._command_output_limits[1] - self._command_output_limits[0]
) / abs(self._command_input_limits[1] - self._command_input_limits[0])
self._command_output_transform = (
self._command_output_limits[1] + self._command_output_limits[0]
) / 2.0
self._command_input_transform = (self._command_input_limits[1] + self._command_input_limits[0]) / 2.0
# Scale command
command = (
command - self._command_input_transform
) * self._command_scale_factor + self._command_output_transform
# Return processed command
return command
def update_goal(self, command, control_dict):
"""
Updates inputted @command internally, writing any necessary internal variables as needed.
Args:
command (Array[float]): inputted command to preprocess and extract relevant goal(s) to store
internally in this controller
control_dict (dict): Current state
"""
# Sanity check the command
assert len(command) == self.command_dim, \
f"Commands must be dimension {self.command_dim}, got dim {len(command)} instead."
# Preprocess and run internal command
self._goal = self._update_goal(command=self._preprocess_command(np.array(command)), control_dict=control_dict)
    def _update_goal(self, command, control_dict):
        """
        Updates inputted @command internally, writing any necessary internal variables as needed.
        Must be implemented by subclass.

        Args:
            command (Array[float]): inputted (preprocessed!) command and extract relevant goal(s) to store
                internally in this controller
            control_dict (dict): Current state

        Returns:
            dict: Keyword-mapped goals to store internally in this controller
        """
        raise NotImplementedError
    def compute_control(self, goal_dict, control_dict):
        """
        Converts the (already preprocessed) inputted @command into deployable (non-clipped!) control signal.
        Should be implemented by subclass.

        Args:
            goal_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                goals necessary for controller computation
            control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
                states necessary for controller computation

        Returns:
            Array[float]: outputted (non-clipped!) control signal to deploy
        """
        raise NotImplementedError
def clip_control(self, control):
"""
Clips the inputted @control signal based on @control_limits.
Args:
control (Array[float]): control signal to clip
Returns:
Array[float]: Clipped control signal
"""
clipped_control = control.clip(
self._control_limits[self.control_type][0][self.dof_idx],
self._control_limits[self.control_type][1][self.dof_idx],
)
idx = (
self._dof_has_limits[self.dof_idx]
if self.control_type == ControlType.POSITION
else [True] * self.control_dim
)
control[idx] = clipped_control[idx]
return control
def step(self, control_dict):
"""
Take a controller step.
Args:
control_dict (Dict[str, Any]): dictionary that should include any relevant keyword-mapped
states necessary for controller computation
Returns:
Array[float]: numpy array of outputted control signals
"""
# Generate no-op goal if not specified
if self._goal is None:
self._goal = self.compute_no_op_goal(control_dict=control_dict)
# Compute control, then clip and return
control = self.compute_control(goal_dict=self._goal, control_dict=control_dict)
self._control = self.clip_control(control=control)
return self._control
    def reset(self):
        """
        Resets this controller by clearing the internal goal; the next step() will
        regenerate a no-op goal. Can be extended by subclass.
        """
        self._goal = None
    def compute_no_op_goal(self, control_dict):
        """
        Compute no-op goal given the current state @control_dict.
        Should be implemented by subclass.
        Args:
            control_dict (dict): Current state
        Returns:
            dict: Maps relevant goal keys (from self._goal_shapes.keys()) to relevant goal data to be used
                in controller computations
        """
        raise NotImplementedError
def _dump_state(self):
# Default is just the command
return dict(
goal_is_valid=self._goal is not None,
goal=self._goal,
)
def _load_state(self, state):
# Make sure every entry in goal is a numpy array
# Load goal
self._goal = None if state["goal"] is None else {name: np.array(goal_state) for name, goal_state in state["goal"].items()}
def _serialize(self, state):
# Make sure size of the state is consistent, even if we have no goal
goal_state_flattened = np.concatenate([goal_state.flatten() for goal_state in self._goal.values()]) if (
state)["goal_is_valid"] else np.zeros(self.goal_dim)
return np.concatenate([
[state["goal_is_valid"]],
goal_state_flattened,
])
def _deserialize(self, state):
goal_is_valid = bool(state[0])
if goal_is_valid:
# Un-flatten all the keys
idx = 1
goal = dict()
for key, shape in self._goal_shapes.items():
length = np.product(shape)
goal[key] = state[idx:idx+length].reshape(shape)
idx += length
else:
goal = None
state_dict = dict(
goal_is_valid=goal_is_valid,
goal=goal,
)
return state_dict, self.goal_dim + 1
    def _get_goal_shapes(self):
        """
        Should be implemented by subclass.
        Returns:
            dict: Maps keyword in @self.goal to its corresponding numerical shape. This should be static
                and analytically computed prior to any controller steps being taken
        """
        raise NotImplementedError
@staticmethod
def nums2array(nums, dim):
"""
Convert input @nums into numpy array of length @dim. If @nums is a single number, broadcasts it to the
corresponding dimension size @dim before converting into a numpy array
Args:
nums (numeric or Iterable): Either single value or array of numbers
dim (int): Size of array to broadcast input to
Returns:
np.array: Array filled with values specified in @nums
"""
# First run sanity check to make sure no strings are being inputted
if isinstance(nums, str):
raise TypeError("Error: Only numeric inputs are supported for this function, nums2array!")
# Check if input is an Iterable, if so, we simply convert the input to np.array and return
# Else, input is a single value, so we map to a numpy array of correct size and return
return np.array(nums) if isinstance(nums, Iterable) else np.ones(dim) * nums
    @property
    def state_size(self) -> int:
        """
        Returns:
            int: Size of this controller's serialized state (goal dim + 1 for goal validity flag)
        """
        # Default is goal dim + 1 (for whether the goal is valid or not)
        return self.goal_dim + 1
    @property
    def goal(self):
        """
        Returns:
            dict: Current goal for this controller. Maps relevant goal keys to goal values to be
                used during controller step computations
        """
        return self._goal
    @property
    def goal_dim(self) -> int:
        """
        Returns:
            int: Expected size of flattened, internal goals
        """
        return self._goal_dim
    @property
    def control(self):
        """
        Returns:
            n-array: Array of most recent controls deployed by this controller
        """
        return self._control
    @property
    def control_freq(self) -> float:
        """
        Returns:
            float: Control frequency (Hz) of this controller
        """
        return self._control_freq
    @property
    def control_dim(self) -> int:
        """
        Returns:
            int: Expected size of outputted controls (one per controlled DOF)
        """
        return len(self.dof_idx)
    @property
    def control_type(self):
        """
        Returns:
            ControlType: Type of control returned by this controller. Must be implemented by subclass.
        """
        raise NotImplementedError
    @property
    def command_input_limits(self):
        """
        Returns:
            None or 2-tuple: If specified, returns (min, max) command input limits for this controller, where
                @min and @max are numpy float arrays of length self.command_dim. Otherwise, returns None
        """
        return self._command_input_limits
    @property
    def command_output_limits(self):
        """
        Returns:
            None or 2-tuple: If specified, returns (min, max) command output limits for this controller, where
                @min and @max are numpy float arrays of length self.command_dim. Otherwise, returns None
        """
        return self._command_output_limits
    @property
    def command_dim(self) -> int:
        """
        Returns:
            int: Expected size of inputted commands. Must be implemented by subclass.
        """
        raise NotImplementedError
    @property
    def dof_idx(self):
        """
        Returns:
            Array[int]: DOF indices corresponding to the specific DOFs being controlled by this robot
        """
        # Returned as a fresh numpy array copy so callers can't mutate the internal list
        return np.array(self._dof_idx)
    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template;
        # extend the parent's exclusion set with this class's name
        classes = super()._do_not_register_classes
        classes.add("BaseController")
        return classes
    @classproperty
    def _cls_registry(cls):
        # Global registry shared by all controller classes for subclass auto-registration
        global REGISTERED_CONTROLLERS
        return REGISTERED_CONTROLLERS
class LocomotionController(BaseController):
    """
    Controller to control locomotion. All implemented controllers that encompass locomotion capabilities should extend
    from this class.
    """
    def __init_subclass__(cls, **kwargs):
        """Automatically register every concrete subclass as a locomotion controller."""
        # Register as part of locomotion controllers
        super().__init_subclass__(**kwargs)
        register_locomotion_controller(cls)
    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("LocomotionController")
        return classes
class ManipulationController(BaseController):
    """
    Controller to control manipulation. All implemented controllers that encompass manipulation capabilities
    should extend from this class.
    """
    def __init_subclass__(cls, **kwargs):
        """Automatically register every concrete subclass as a manipulation controller."""
        # Register as part of manipulation controllers
        super().__init_subclass__(**kwargs)
        register_manipulation_controller(cls)
    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("ManipulationController")
        return classes
class GripperController(BaseController):
    """
    Controller to control a gripper. All implemented controllers that encompass gripper capabilities
    should extend from this class.
    """
    def __init_subclass__(cls, **kwargs):
        """Automatically register every concrete subclass as a gripper controller."""
        # Register as part of gripper controllers
        super().__init_subclass__(**kwargs)
        register_gripper_controller(cls)
    def is_grasping(self):
        """
        Checks whether the current state of this gripper being controlled is in a grasping state.
        Should be implemented by subclass.
        Returns:
            IsGraspingState: Grasping state of gripper
        """
        raise NotImplementedError()
    @classproperty
    def _do_not_register_classes(cls):
        # Don't register this class since it's an abstract template
        classes = super()._do_not_register_classes
        classes.add("GripperController")
        return classes
| 19,354 | Python | 35.313321 | 130 | 0.608195 |
StanfordVL/OmniGibson/omnigibson/systems/macro_particle_system.py | import os
import matplotlib.pyplot as plt
import trimesh
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm, create_module_macros
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.systems.system_base import BaseSystem, VisualParticleSystem, PhysicalParticleSystem, REGISTERED_SYSTEMS
from omnigibson.utils.constants import PrimType
from omnigibson.utils.python_utils import classproperty, subclass_factory, snake_case_to_camel_case
from omnigibson.utils.sampling_utils import sample_cuboid_on_object_symmetric_bimodal_distribution
import omnigibson.utils.transform_utils as T
from omnigibson.utils.usd_utils import FlatcacheAPI
from omnigibson.prims.geom_prim import VisualGeomPrim, CollisionVisualGeomPrim
import numpy as np
from scipy.spatial.transform import Rotation as R
from omnigibson.utils.ui_utils import create_module_logger, suppress_omni_log
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings (macros) for this module -- values may be overridden externally
m = create_module_macros(module_path=__file__)
m.MIN_PARTICLE_RADIUS = 0.01  # Minimum particle radius for physical macro particles -- this reduces the chance of omni physx crashing
class MacroParticleSystem(BaseSystem):
    """
    Global system for modeling "macro" level particles, e.g.: dirt, dust, etc.
    Each particle is an individual (duplicated) prim rather than an entry in a physics particle buffer.
    """
    # Template object to use -- class particle object is assumed to be the first and only visual mesh belonging to the
    # root link of this template object, which symbolizes a single particle, and will be duplicated to generate the
    # particle system. Note that this object is NOT part of the actual particle system itself!
    _particle_template = None
    # dict mapping particle prim names to their corresponding particle prim objects
    particles = None
    # Counter to increment monotonically as we add more particles (never decremented, so idns stay unique)
    _particle_counter = None
    # Color associated with this system (NOTE: external queries should call cls.color)
    _color = None
    @classmethod
    def initialize(cls):
        # Run super method first
        super().initialize()
        # Initialize mutable class variables so they don't automatically get overridden by children classes
        cls.particles = dict()
        cls._particle_counter = 0
        # Create the system prim -- this is merely a scope prim
        og.sim.stage.DefinePrim(f"/World/{cls.name}", "Scope")
        # Load the particle template, and make it kinematic only because it's not interacting with anything
        particle_template = cls._create_particle_template()
        og.sim.import_object(obj=particle_template, register=False)
        # Make sure template scaling is [1, 1, 1] -- any particle scaling should be done via cls.min/max_scale
        assert np.all(particle_template.scale == 1.0)
        # Make sure there is no ambiguity about which mesh to use as the particle from this template
        assert len(particle_template.links) == 1, "MacroParticleSystem particle template has more than one link"
        assert len(particle_template.root_link.visual_meshes) == 1, "MacroParticleSystem particle template has more than one visual mesh"
        cls._particle_template = particle_template
        # Class particle object is assumed to be the first and only visual mesh belonging to the root link
        cls.particle_object.material.shader_force_populate(render=True)
        cls.process_particle_object()
    @classproperty
    def particle_object(cls):
        # The single visual mesh of the template's root link serves as the canonical particle
        return list(cls._particle_template.root_link.visual_meshes.values())[0]
    @classproperty
    def particle_idns(cls):
        """
        Returns:
            set: idn of all the particles across all groups.
        """
        return {cls.particle_name2idn(particle_name) for particle_name in cls.particles}
    @classproperty
    def next_available_particle_idn(cls):
        """
        Returns:
            int: the next available particle idn across all groups.
        """
        return cls._particle_counter
    @classmethod
    def _create_particle_template(cls):
        """
        Creates the particle template to be used for this system.
        NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
        visual mesh attached to its root link, since this will be the actual visual mesh used
        Returns:
            EntityPrim: Particle template that will be duplicated when generating future particle groups
        """
        raise NotImplementedError()
    @classmethod
    def remove_all_particles(cls):
        """Removes every particle in this system."""
        # Use tuple explicitly to prevent mid-loop mutation of dict
        for particle_name in tuple(cls.particles.keys()):
            cls.remove_particle_by_name(name=particle_name)
    @classmethod
    def reset(cls):
        """Resets this system, clearing all particles and restarting the idn counter."""
        # Call super first
        super().reset()
        # Reset the particle counter
        cls._particle_counter = 0
    @classmethod
    def _clear(cls):
        # Clear all internal state, including removing the template object from the sim
        og.sim.remove_object(cls._particle_template)
        super()._clear()
        cls._particle_template = None
        cls.particles = None
        cls._color = None
    @classproperty
    def n_particles(cls):
        # Number of currently active particles
        return len(cls.particles)
    @classproperty
    def material(cls):
        # Material is shared across all particles via the template's particle object
        return cls.particle_object.material
    @classproperty
    def particle_name_prefix(cls):
        """
        Returns:
            str: Naming prefix used for all generated particles. This is coupled with the unique particle ID to generate
                the full particle name
        """
        return f"{cls.name}Particle"
    @classproperty
    def state_size(cls):
        # In addition to super, we have:
        # scale (3*n), and particle counter (1)
        return super().state_size + 3 * cls.n_particles + 1
    @classmethod
    def _dump_state(cls):
        state = super()._dump_state()
        # Per-particle scales plus the monotonic idn counter
        state["scales"] = np.array([particle.scale for particle in cls.particles.values()])
        state["particle_counter"] = cls._particle_counter
        return state
    @classmethod
    def _load_state(cls, state):
        # Run super first
        super()._load_state(state=state)
        # Set particle scales (assumes particle ordering matches the dumped state)
        for particle, scale in zip(cls.particles.values(), state["scales"]):
            particle.scale = scale
        # Set particle counter
        cls._particle_counter = state["particle_counter"]
    @classmethod
    def _serialize(cls, state):
        # Run super first
        states_flat = super()._serialize(state=state)
        # Add particle scales, then the counter
        return np.concatenate([
            states_flat,
            state["scales"].flatten(),
            [state["particle_counter"]],
        ], dtype=float)
    @classmethod
    def _deserialize(cls, state):
        # Run super first
        state_dict, idx = super()._deserialize(state=state)
        # Infer how many scales we have, then deserialize
        n_particles = state_dict["n_particles"]
        len_scales = n_particles * 3
        state_dict["scales"] = state[idx:idx+len_scales].reshape(-1, 3)
        state_dict["particle_counter"] = int(state[idx+len_scales])
        return state_dict, idx + len_scales + 1
    @classmethod
    def process_particle_object(cls):
        """
        Perform any necessary processing on the particle object to extract further information.
        """
        # Update color if the particle object has any material; glass materials use their glass color,
        # textured materials use the mean texture color, otherwise the constant diffuse color
        color = np.ones(3)
        if cls.particle_object.has_material():
            if cls.particle_object.material.is_glass:
                color = cls.particle_object.material.glass_color
            else:
                diffuse_texture = cls.particle_object.material.diffuse_texture
                color = plt.imread(diffuse_texture).mean(axis=(0, 1)) if diffuse_texture else cls.particle_object.material.diffuse_color_constant
        cls._color = color
    @classmethod
    def add_particle(cls, prim_path, scale, idn=None):
        """
        Adds a particle to this system.
        Args:
            prim_path (str): Absolute path to the newly created particle, minus the name for this particle
            scale (3-array): (x,y,z) scale to set for the added particle
            idn (None or int): If specified, should be unique identifier to assign to this particle. If not, will
                automatically generate a new unique one
        Returns:
            XFormPrim: Newly created particle instance, which is added internally as well
        """
        # Generate the new particle
        name = cls.particle_idn2name(idn=cls.next_available_particle_idn if idn is None else idn)
        # Make sure name doesn't already exist
        assert name not in cls.particles.keys(), f"Cannot create particle with name {name} because it already exists!"
        new_particle = cls._load_new_particle(prim_path=f"{prim_path}/{name}", name=name)
        # Set the scale and make sure the particle is visible
        new_particle.scale *= scale
        new_particle.visible = True
        # Track this particle as well
        cls.particles[new_particle.name] = new_particle
        # Increment counter (even when an explicit idn was passed -- keeps auto-idns monotonic)
        cls._particle_counter += 1
        return new_particle
    @classmethod
    def remove_particle_by_name(cls, name):
        """Removes the particle with name @name from this system and from the sim."""
        assert name in cls.particles, f"Got invalid name for particle to remove {name}"
        particle = cls.particles.pop(name)
        og.sim.remove_prim(particle)
    @classmethod
    def remove_particles(
        cls,
        idxs,
        **kwargs,
    ):
        """Removes the particles at the given positional indices @idxs."""
        # Snapshot names first since removal mutates cls.particles
        particle_names = tuple(cls.particles.keys())
        for idx in idxs:
            cls.remove_particle_by_name(particle_names[idx])
    @classmethod
    def generate_particles(
        cls,
        positions,
        orientations=None,
        scales=None,
        **kwargs,
    ):
        """Generates new particles at @positions, with optional @orientations and @scales."""
        # Grab pre-existing tfs
        current_positions, current_orientations = cls.get_particles_position_orientation()
        # Update the tensors; orientations default to uniformly random rotations
        n_particles = len(positions)
        orientations = R.random(num=n_particles).as_quat() if orientations is None else orientations
        scales = cls.sample_scales(n=n_particles) if scales is None else scales
        positions = np.concatenate([current_positions, positions], axis=0)
        orientations = np.concatenate([current_orientations, orientations], axis=0)
        # Add particles
        for scale in scales:
            cls.add_particle(prim_path=f"{cls.prim_path}/particles", scale=scale)
        # Set the tfs for ALL particles (pre-existing ones keep their current pose)
        cls.set_particles_position_orientation(positions=positions, orientations=orientations)
    @classmethod
    def _load_new_particle(cls, prim_path, name):
        """
        Loads a new particle into the current stage, leveraging @cls.particle_object as a template for the new particle
        to load. This function should be implemented by any subclasses.
        Args:
            prim_path (str): The absolute stage path at which to create the new particle
            name (str): The name to assign to this new particle at the path
        Returns:
            XFormPrim: Loaded particle
        """
        raise NotImplementedError()
    @classmethod
    def particle_name2idn(cls, name):
        """
        Args:
            name (str): Particle name to grab its corresponding unique id number for
        Returns:
            int: Unique ID assigned to the particle based on its name
        """
        assert cls.particle_name_prefix in name, \
            f"Particle name should have '{cls.particle_name_prefix}' in it when checking ID! Got: {name}"
        return int(name.split(cls.particle_name_prefix)[-1])
    @classmethod
    def particle_idn2name(cls, idn):
        """
        Args:
            idn (int): Unique ID number assigned to the particle to grab the name for
        Returns:
            str: Particle name corresponding to its unique id number
        """
        assert isinstance(idn, int), \
            f"Particle idn must be an integer when checking name! Got: {idn}. Type: {type(idn)}"
        return f"{cls.particle_name_prefix}{idn}"
    @classproperty
    def color(cls):
        # Return a copy so external callers can't mutate the cached color
        return np.array(cls._color)
class MacroVisualParticleSystem(MacroParticleSystem, VisualParticleSystem):
    """
    Particle system class that procedurally generates individual particles that are not subject to physics
    """
    # Maps particle name to dict of {obj, link, face_id}
    # NOTE: link will only exist for particles on rigid bodies
    # NOTE: face_id will only exist for particles on cloths
    _particles_info = None
    # Pre-cached information about visual particles so that we have efficient runtime computations
    # Maps particle name to local pose matrix for computing global poses for the particle
    _particles_local_mat = None
    # Maps group name to np.array of face IDs (int) that particles are attached to,
    # only populated if the group object is a cloth type
    _cloth_face_ids = None
    # Default behavior for this class -- whether to clip generated particles halfway into objects when sampling
    # their locations on the surface of the given object
    _CLIP_INTO_OBJECTS = False
    # Default parameters for sampling particle locations
    # See omnigibson/utils/sampling_utils.py for how they are used.
    _SAMPLING_AXIS_PROBABILITIES = (0.25, 0.25, 0.5)
    _SAMPLING_AABB_OFFSET = 0.01
    _SAMPLING_BIMODAL_MEAN_FRACTION = 0.9
    _SAMPLING_BIMODAL_STDEV_FRACTION = 0.2
    _SAMPLING_MAX_ATTEMPTS = 20
    _SAMPLING_HIT_PROPORTION = 0.4
@classmethod
def initialize(cls):
# Run super method first
super().initialize()
# Initialize mutable class variables so they don't automatically get overridden by children classes
cls._particles_info = dict()
cls._particles_local_mat = dict()
cls._cloth_face_ids = dict()
    @classmethod
    def update(cls):
        """Keeps visual particles attached to (possibly deforming) cloth objects in sync each update."""
        # Run super first
        super().update()
        # Half-extent of the (unscaled) particle template along z, used to offset particles off the cloth surface
        z_extent = cls.particle_object.aabb_extent[2]
        # Iterate over all objects, and update all particles belonging to any cloth objects
        for name, obj in cls._group_objects.items():
            group = cls.get_group_name(obj=obj)
            if obj.prim_type == PrimType.CLOTH and cls.num_group_particles(group=group) > 0:
                # Update the transforms
                cloth = obj.root_link
                face_ids = cls._cloth_face_ids[group]
                idxs = cloth.faces[face_ids].flatten()
                # (n_faces, 3 vertices, 3 coords) vertex positions for the tracked faces
                positions = cloth.compute_particle_positions(idxs=idxs).reshape(-1, 3, 3)
                normals = cloth.compute_face_normals_from_particle_positions(positions=positions)
                # The actual positions we want are the face centroids, or the mean of all the positions
                positions = positions.mean(axis=1)
                # Orientations are the normals: rotation taking world +z onto each face normal
                z_up = np.zeros_like(normals)
                z_up[:, 2] = 1.0
                orientations = T.axisangle2quat(T.vecs2axisangle(z_up, normals))
                if not cls._CLIP_INTO_OBJECTS and z_extent > 0:
                    # Per-particle z offset accounts for each particle's individual scale
                    z_offsets = np.array([z_extent * particle.scale[2] for particle in cls._group_particles[group].values()]) / 2.0
                    # Shift the particles halfway up
                    positions += normals * z_offsets.reshape(-1, 1)
                # Set the group particle poses
                cls.set_group_particles_position_orientation(group=group, positions=positions, orientations=orientations)
@classmethod
def _load_new_particle(cls, prim_path, name):
# We copy the template prim and generate the new object if the prim doesn't already exist, otherwise we
# reference the pre-existing one
if not lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path):
lazy.omni.kit.commands.execute(
"CopyPrim",
path_from=cls.particle_object.prim_path,
path_to=prim_path,
)
prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path)
lazy.omni.isaac.core.utils.semantics.add_update_semantics(
prim=prim,
semantic_label=cls.name,
type_label="class",
)
return VisualGeomPrim(prim_path=prim_path, name=name)
@classmethod
def _clear(cls):
# Run super method first
super()._clear()
# Clear all groups as well
cls._particles_info = dict()
cls._particles_local_mat = dict()
cls._cloth_face_ids = dict()
@classmethod
def remove_attachment_group(cls, group):
# Call super first
super().remove_attachment_group(group=group)
# If the group is a cloth, also remove the cloth face ids
if group in cls._cloth_face_ids:
cls._cloth_face_ids.pop(group)
return group
    @classmethod
    def remove_particle_by_name(cls, name):
        """Removes particle @name from the system and from all per-group / per-cloth bookkeeping."""
        # Run super first
        super().remove_particle_by_name(name=name)
        # Remove this particle from its respective group as well
        parent_obj = cls._particles_info[name]["obj"]
        group = cls.get_group_name(obj=parent_obj)
        cls._group_particles[group].pop(name)
        cls._particles_local_mat.pop(name)
        particle_info = cls._particles_info.pop(name)
        if cls._is_cloth_obj(obj=parent_obj):
            # Also remove from cloth face ids
            # NOTE(review): assumes each particle's face_id appears at most once in the group's array -- confirm
            face_ids = cls._cloth_face_ids[group]
            idx_mapping = {face_id: i for i, face_id in enumerate(face_ids)}
            cls._cloth_face_ids[group] = np.delete(face_ids, idx_mapping[particle_info["face_id"]])
    @classmethod
    def generate_group_particles(
        cls,
        group,
        positions,
        orientations=None,
        scales=None,
        link_prim_paths=None,
    ):
        """
        Generates particles for attachment group @group at the given world-frame poses.
        Args:
            group (str): Name of the attachment group to generate particles for
            positions ((n, 3)-array): World-frame particle positions
            orientations (None or (n, 4)-array): World-frame (x,y,z,w) quaternions; defaults to identity
            scales (None or (n, 3)-array): Per-particle scales; sampled from the group if not given
            link_prim_paths (None or list of str): For rigid (non-cloth) objects, the link prim path each
                particle attaches to; must be None for cloth objects
        """
        # Make sure the group exists
        cls._validate_group(group=group)
        # Standardize orientations and links
        obj = cls._group_objects[group]
        is_cloth = cls._is_cloth_obj(obj=obj)
        # If cloth, run the following sanity checks:
        # (1) make sure link prim paths are not specified -- we can ONLY apply particles under the object xform prim
        # (2) make sure object prim path exists at /World/<NAME> -- global pose inference assumes this is the case
        if is_cloth:
            assert link_prim_paths is None, "link_prim_paths should not be specified for cloth object group!"
            assert obj.prim.GetParent().GetPath().pathString == "/World", \
                "cloth object should exist as direct child of /World prim!"
        n_particles = positions.shape[0]
        if orientations is None:
            # Identity quaternions (x,y,z,w convention)
            orientations = np.zeros((n_particles, 4))
            orientations[:, -1] = 1.0
        link_prim_paths = [None] * n_particles if is_cloth else link_prim_paths
        scales = cls.sample_scales_by_group(group=group, n=n_particles) if scales is None else scales
        bbox_extents_local = [(cls.particle_object.aabb_extent * scale).tolist() for scale in scales]
        # If we're using flatcache, we need to update the object's pose on the USD manually
        if gm.ENABLE_FLATCACHE:
            FlatcacheAPI.sync_raw_object_transforms_in_usd(prim=obj)
        # Generate particles
        z_up = np.zeros((3, 1))
        z_up[-1] = 1.0
        for position, orientation, scale, bbox_extent_local, link_prim_path in \
                zip(positions, orientations, scales, bbox_extents_local, link_prim_paths):
            link = None if is_cloth else obj.links[link_prim_path.split("/")[-1]]
            # Possibly shift the particle slightly away from the object if we're not clipping into objects
            # Note: For particles tied to rigid objects, the given position is on the surface of the object,
            # so clipping would move the particle INTO the object surface, whereas for particles tied to cloth objects,
            # the given position is at the particle location (i.e.: already clipped), so NO clipping would move the
            # particle AWAY from the object surface
            if (is_cloth and not cls._CLIP_INTO_OBJECTS) or (not is_cloth and cls._CLIP_INTO_OBJECTS):
                # Shift the particle halfway down
                base_to_center = bbox_extent_local[2] / 2.0
                normal = (T.quat2mat(orientation) @ z_up).flatten()
                offset = normal * base_to_center if is_cloth else -normal * base_to_center
                position += offset
            # Create particle
            particle = cls.add_particle(
                prim_path=obj.prim_path if is_cloth else link_prim_path,
                scale=scale,
            )
            # Add to group
            cls._group_particles[group][particle.name] = particle
            cls._particles_info[particle.name] = dict(obj=cls._group_objects[group], link=link)
            # Set the pose (idx=-1 targets the particle we just appended)
            cls.set_particle_position_orientation(idx=-1, position=position, orientation=orientation)
@classmethod
def generate_group_particles_on_object(cls, group, max_samples=None, min_samples_for_success=1):
# This function does not support max_samples=None. Must be explicitly specified
assert max_samples is not None, f"max_samples must be specified for {cls.name}'s generate_group_particles_on_object!"
assert max_samples >= min_samples_for_success, "number of particles to sample should exceed the min for success"
# Make sure the group exists
cls._validate_group(group=group)
# Remove all stale particles
cls.remove_all_group_particles(group=group)
# Generate requested number of particles
obj = cls._group_objects[group]
# Sample scales and corresponding bbox extents
scales = cls.sample_scales_by_group(group=group, n=max_samples)
# For sampling particle positions, we need the global bbox extents, NOT the local extents
# which is what we would get naively if we directly use @scales
avg_scale = np.cbrt(np.product(obj.scale))
bbox_extents_global = scales * cls.particle_object.aabb_extent.reshape(1, 3) * avg_scale
if obj.prim_type == PrimType.CLOTH:
# Sample locations based on randomly sampled keyfaces
cloth = obj.root_link
n_faces = len(cloth.faces)
face_ids = np.random.choice(n_faces, min(max_samples, n_faces), replace=False)
# Positions are the midpoints of each requested face
normals = cloth.compute_face_normals(face_ids=face_ids)
positions = cloth.compute_particle_positions(idxs=cloth.faces[face_ids].flatten()).reshape(-1, 3, 3).mean(axis=1)
# Orientations are the normals
z_up = np.zeros_like(normals)
z_up[:, 2] = 1.0
orientations = T.axisangle2quat(T.vecs2axisangle(z_up, normals))
link_prim_paths = None
cls._cloth_face_ids[group] = face_ids
else:
# Sample locations for all particles
results = sample_cuboid_on_object_symmetric_bimodal_distribution(
obj=obj,
num_samples=max_samples,
cuboid_dimensions=bbox_extents_global,
bimodal_mean_fraction=cls._SAMPLING_BIMODAL_MEAN_FRACTION,
bimodal_stdev_fraction=cls._SAMPLING_BIMODAL_STDEV_FRACTION,
axis_probabilities=cls._SAMPLING_AXIS_PROBABILITIES,
undo_cuboid_bottom_padding=True,
verify_cuboid_empty=False,
aabb_offset=cls._SAMPLING_AABB_OFFSET,
max_sampling_attempts=cls._SAMPLING_MAX_ATTEMPTS,
refuse_downwards=True,
hit_proportion=cls._SAMPLING_HIT_PROPORTION,
)
# Use sampled points
positions, orientations, particle_scales, link_prim_paths = [], [], [], []
for result, scale in zip(results, scales):
position, normal, quaternion, hit_link, reasons = result
if position is not None:
positions.append(position)
orientations.append(quaternion)
particle_scales.append(scale)
link_prim_paths.append(hit_link)
scales = particle_scales
success = len(positions) >= min_samples_for_success
# If we generated a sufficient number of points, generate them in the simulator
if success:
cls.generate_group_particles(
group=group,
positions=np.array(positions),
orientations=np.array(orientations),
scales=np.array(scales),
link_prim_paths=link_prim_paths,
)
# If we're a cloth, store the face_id as well
if obj.prim_type == PrimType.CLOTH:
for particle_name, face_id in zip(cls._group_particles[group].keys(), cls._cloth_face_ids[group]):
cls._particles_info[particle_name]["face_id"] = int(face_id)
return success
    @classmethod
    def _compute_batch_particles_position_orientation(cls, particles, local=False):
        """
        Computes all @particles' positions and orientations
        Args:
            particles (Iterable of str): Names of particles to compute batched position orientation for
            local (bool): Whether to compute particles' poses in local frame or not
        Returns:
            2-tuple:
                - (n, 3)-array: per-particle (x,y,z) position
                - (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
        """
        n_particles = len(particles)
        # Early-out with correctly-shaped empty arrays
        if n_particles == 0:
            return (np.array([]).reshape(0, 3), np.array([]).reshape(0, 4))
        if local:
            poses = np.zeros((n_particles, 4, 4))
            for i, name in enumerate(particles):
                poses[i] = T.pose2mat(cls.particles[name].get_local_pose())
        else:
            # Iterate over all particles and compute link tfs programmatically, then batch the matrix transform
            # link_tfs caches each link/object transform so it is only computed once per batch
            link_tfs = dict()
            link_tfs_batch = np.zeros((n_particles, 4, 4))
            particle_local_poses_batch = np.zeros_like(link_tfs_batch)
            for i, name in enumerate(particles):
                obj = cls._particles_info[name]["obj"]
                is_cloth = cls._is_cloth_obj(obj=obj)
                if is_cloth:
                    if obj not in link_tfs:
                        # We want World --> obj transform, NOT the World --> root_link transform, since these particles
                        # do NOT exist under a link but rather the object prim itself. So we use XFormPrim to directly
                        # get the transform, and not obj.get_local_pose() which will give us the local pose of the
                        # root link!
                        link_tfs[obj] = T.pose2mat(XFormPrim.get_local_pose(obj))
                    link = obj
                else:
                    link = cls._particles_info[name]["link"]
                    if link not in link_tfs:
                        link_tfs[link] = T.pose2mat(link.get_position_orientation())
                link_tfs_batch[i] = link_tfs[link]
                particle_local_poses_batch[i] = cls._particles_local_mat[name]
            # Compute once: world pose = link tf @ cached local pose
            poses = np.matmul(link_tfs_batch, particle_local_poses_batch)
        # Decompose back into positions and orientations
        return poses[:, :3, 3], T.mat2quat(poses[:, :3, :3])
    @classmethod
    def get_particles_position_orientation(cls):
        # World-frame poses for every particle in the system
        return cls._compute_batch_particles_position_orientation(particles=cls.particles, local=False)
    @classmethod
    def get_particles_local_pose(cls):
        # Parent-frame (local) poses for every particle in the system
        return cls._compute_batch_particles_position_orientation(particles=cls.particles, local=True)
    @classmethod
    def get_group_particles_position_orientation(cls, group):
        # World-frame poses for all particles attached to @group's object
        return cls._compute_batch_particles_position_orientation(particles=cls._group_particles[group], local=False)
    @classmethod
    def get_group_particles_local_pose(cls, group):
        # Parent-frame (local) poses for all particles attached to @group's object
        return cls._compute_batch_particles_position_orientation(particles=cls._group_particles[group], local=True)
@classmethod
def get_particle_position_orientation(cls, idx):
name = list(cls.particles.keys())[idx]
# First, get local pose, scale it by the parent link's scale, and then convert into a matrix
# Note that particles_local_mat already takes the parent scale into account when computing the transform!
parent_obj = cls._particles_info[name]["obj"]
is_cloth = cls._is_cloth_obj(obj=parent_obj)
local_mat = cls._particles_local_mat[name]
link_tf = T.pose2mat(XFormPrim.get_local_pose(parent_obj)) if is_cloth else \
T.pose2mat(cls._particles_info[name]["link"].get_position_orientation())
# Multiply the local pose by the link's global transform, then return as pos, quat tuple
return T.mat2pose(link_tf @ local_mat)
@classmethod
def get_particle_local_pose(cls, idx):
name = list(cls.particles.keys())[idx]
return cls.particles[name].get_local_pose()
    @classmethod
    def _modify_batch_particles_position_orientation(cls, particles, positions=None, orientations=None, local=False):
        """
        Modifies all @particles' positions and orientations with @positions and @orientations.
        Any component left as None is back-filled from the particles' current poses.

        Args:
            particles (Iterable of str): Names of particles to modify
            positions (None or (n, 3)-array): New positions to set for the particles
            orientations (None or (n, 4)-array): New orientations to set for the particles
            local (bool): Whether to interpret @positions / @orientations in the particles'
                parent (link) frame rather than the world frame
        """
        n_particles = len(particles)
        if n_particles == 0:
            return
        if positions is None or orientations is None:
            pos, ori = cls._compute_batch_particles_position_orientation(particles=particles, local=local)
            positions = pos if positions is None else positions
            orientations = ori if orientations is None else orientations
        lens = np.array([len(particles), len(positions), len(orientations)])
        assert lens.min() == lens.max(), "Got mismatched particles, positions, and orientations!"
        # Assemble the desired poses as a batch of homogeneous 4x4 transforms
        particle_local_poses_batch = np.zeros((n_particles, 4, 4))
        particle_local_poses_batch[:, -1, -1] = 1.0
        particle_local_poses_batch[:, :3, 3] = positions
        particle_local_poses_batch[:, :3, :3] = T.quat2mat(orientations)
        if not local:
            # Iterate over all particles and compute link tfs programmatically, then batch the matrix transform
            link_tfs = dict()
            link_tfs_batch = np.zeros((n_particles, 4, 4))
            for i, name in enumerate(particles):
                obj = cls._particles_info[name]["obj"]
                is_cloth = cls._is_cloth_obj(obj=obj)
                if is_cloth:
                    if obj not in link_tfs:
                        # We want World --> obj transform, NOT the World --> root_link transform, since these particles
                        # do NOT exist under a link but rather the object prim itself. So we use XFormPrim to directly
                        # get the transform, and not obj.get_local_pose() which will give us the local pose of the
                        # root link!
                        link_tfs[obj] = T.pose2mat(XFormPrim.get_local_pose(obj))
                    link_tf = link_tfs[obj]
                else:
                    link = cls._particles_info[name]["link"]
                    if link not in link_tfs:
                        link_tfs[link] = T.pose2mat(link.get_position_orientation())
                    link_tf = link_tfs[link]
                link_tfs_batch[i] = link_tf
            # Convert world poses into link-local poses by solving link_tf @ local = world for local
            # (equivalent to inv(link_tfs_batch) @ particle_local_poses_batch, but numerically stabler)
            particle_local_poses_batch = np.linalg.solve(link_tfs_batch, particle_local_poses_batch)
        for i, name in enumerate(particles):
            cls._modify_particle_local_mat(name=name, mat=particle_local_poses_batch[i], ignore_scale=local)
@classmethod
def set_particles_position_orientation(cls, positions=None, orientations=None):
return cls._modify_batch_particles_position_orientation(particles=cls.particles, positions=positions, orientations=orientations, local=False)
@classmethod
def set_particles_local_pose(cls, positions=None, orientations=None):
return cls._modify_batch_particles_position_orientation(particles=cls.particles, positions=positions, orientations=orientations, local=True)
@classmethod
def set_group_particles_position_orientation(cls, group, positions=None, orientations=None):
return cls._modify_batch_particles_position_orientation(particles=cls._group_particles[group], positions=positions, orientations=orientations, local=False)
@classmethod
def set_group_particles_local_pose(cls, group, positions=None, orientations=None):
return cls._modify_batch_particles_position_orientation(particles=cls._group_particles[group], positions=positions, orientations=orientations, local=True)
@classmethod
def set_particle_position_orientation(cls, idx, position=None, orientation=None):
if position is None or orientation is None:
pos, ori = cls.get_particle_position_orientation(idx=idx)
position = pos if position is None else position
orientation = ori if orientation is None else orientation
name = list(cls.particles.keys())[idx]
global_mat = np.zeros((4, 4))
global_mat[-1, -1] = 1.0
global_mat[:3, 3] = position
global_mat[:3, :3] = T.quat2mat(orientation)
# First, get global pose, scale it by the parent link's scale, and then convert into a matrix
parent_obj = cls._particles_info[name]["obj"]
is_cloth = cls._is_cloth_obj(obj=parent_obj)
link_tf = T.pose2mat(XFormPrim.get_local_pose(parent_obj)) if is_cloth else \
T.pose2mat(cls._particles_info[name]["link"].get_position_orientation())
local_mat = np.linalg.inv(link_tf) @ global_mat
cls._modify_particle_local_mat(name=name, mat=local_mat, ignore_scale=False)
@classmethod
def set_particle_local_pose(cls, idx, position=None, orientation=None):
if position is None or orientation is None:
pos, ori = cls.get_particle_local_pose(idx=idx)
position = pos if position is None else position
orientation = ori if orientation is None else orientation
name = list(cls.particles.keys())[idx]
local_mat = np.zeros((4, 4))
local_mat[-1, -1] = 1.0
local_mat[:3, 3] = position
local_mat[:3, :3] = T.quat2mat(orientation)
cls._modify_particle_local_mat(name=name, mat=local_mat, ignore_scale=True)
@classmethod
def _is_cloth_obj(cls, obj):
"""
Checks whether object @obj is a cloth or not
Args:
obj (BaseObject): Object to check
Returns:
bool: True if the object is cloth type, otherwise False
"""
return obj.prim_type == PrimType.CLOTH
    @classmethod
    def _compute_particle_local_mat(cls, name, ignore_scale=False):
        """
        Computes particle @name's local transform as a homogeneous 4x4 matrix

        Args:
            name (str): Name of the particle to compute local transform matrix for
            ignore_scale (bool): Whether to ignore the parent_link scale when computing the local transform

        Returns:
            np.array: (4, 4) homogeneous transform matrix
        """
        particle = cls.particles[name]
        parent_obj = cls._particles_info[name]["obj"]
        is_cloth = cls._is_cloth_obj(obj=parent_obj)
        # Cloth particles are parented to the object prim itself, which carries no link scale
        scale = np.ones(3) if is_cloth else cls._particles_info[name]["link"].scale
        local_pos, local_quat = particle.get_local_pose()
        # Bake the parent link's scale into the translation so the matrix maps into unscaled link space
        local_pos = local_pos if ignore_scale else local_pos * scale
        return T.pose2mat((local_pos, local_quat))
    @classmethod
    def _modify_particle_local_mat(cls, name, mat, ignore_scale=False):
        """
        Sets particle @name's local transform as a homogeneous 4x4 matrix

        Args:
            name (str): Name of the particle to compute local transform matrix for
            mat (n-array): (4, 4) homogeneous transform matrix
            ignore_scale (bool): Whether to ignore the parent_link scale when setting the local transform
        """
        particle = cls.particles[name]
        parent_obj = cls._particles_info[name]["obj"]
        is_cloth = cls._is_cloth_obj(obj=parent_obj)
        # Cloth particles are parented to the object prim itself, which carries no link scale
        scale = np.ones(3) if is_cloth else cls._particles_info[name]["link"].scale
        local_pos, local_quat = T.mat2pose(mat)
        # Inverse of _compute_particle_local_mat: un-bake the parent link's scale from the translation
        local_pos = local_pos if ignore_scale else local_pos / scale
        particle.set_local_pose(local_pos, local_quat)
        # Store updated value so the cached matrix stays in sync with the prim
        cls._particles_local_mat[name] = mat
    @classmethod
    def _sync_particle_groups(
            cls,
            group_objects,
            particle_idns,
            particle_attached_references,
    ):
        """
        Synchronizes the particle groups with the desired set of @group_objects and per-group particle ids

        Args:
            group_objects (list of BaseObject): Desired unique group objects that should be active for
                this particle system.
            particle_idns (list of list of int): Per-group unique id numbers for the particles assigned to that group.
                List should be same length as @group_objects with sub-entries corresponding to the desired number of
                particles assigned to that group
            particle_attached_references (list of list of str or int): Per-group reference info relevant for each
                particle. List should be same length as @group_objects with sub-entries corresponding to the desired
                number of particles assigned to that group. If a given group is a cloth object, the entries should be
                integers corresponding to the individual face IDs that each particle is attached to for the group.
                Otherwise, the group is assumed to be a rigid object, in which case the entries should be link
                names corresponding to the specific links each particle is attached for each group.
        """
        # We have to be careful here -- some particle groups may have been deleted / are mismatched, so we need
        # to update accordingly, potentially deleting stale groups and creating new groups as needed
        name_to_info_mapping = {obj.name: {
            "n_particles": len(p_idns),
            "particle_idns": p_idns,
            "references": references,
        }
            for obj, p_idns, references in
            zip(group_objects, particle_idns, particle_attached_references)}
        current_group_names = cls.groups
        desired_group_names = set(obj.name for obj in group_objects)
        groups_to_delete = current_group_names - desired_group_names
        groups_to_create = desired_group_names - current_group_names
        common_groups = current_group_names.intersection(desired_group_names)
        # Sanity check the common groups, we will recreate any where there is a mismatch
        for name in common_groups:
            info = name_to_info_mapping[name]
            if cls.num_group_particles(group=name) != info["n_particles"]:
                log.debug(f"Got mismatch in particle group {name} when syncing, "
                          f"deleting and recreating group now.")
                # Add this group to both the delete and creation pile
                groups_to_delete.add(name)
                groups_to_create.add(name)
        # Delete any groups we no longer want
        for name in groups_to_delete:
            cls.remove_attachment_group(group=name)
        # Create any groups we don't already have
        for name in groups_to_create:
            obj = og.sim.scene.object_registry("name", name)
            info = name_to_info_mapping[name]
            cls.create_attachment_group(obj=obj)
            is_cloth = cls._is_cloth_obj(obj=obj)
            for particle_idn, reference in zip(info["particle_idns"], info["references"]):
                # Reference is either the face ID (int) if cloth group or link name (str) if rigid body group
                # Create the necessary particles
                # Use scale (1,1,1) since it will get overridden anyways when loading state
                particle = cls.add_particle(
                    prim_path=obj.prim_path if is_cloth else obj.links[reference].prim_path,
                    scale=np.ones(3),
                    idn=int(particle_idn),
                )
                cls._group_particles[name][particle.name] = particle
                cls._particles_info[particle.name] = dict(obj=obj)
                # Add face_id if is_cloth, otherwise, add link
                if is_cloth:
                    cls._particles_info[particle.name]["face_id"] = int(reference)
                else:
                    cls._particles_info[particle.name]["link"] = obj.links[reference]
            # Also store the cloth face IDs as a vector
            if is_cloth:
                cls._cloth_face_ids[cls.get_group_name(obj)] = \
                    np.array([cls._particles_info[particle_name]["face_id"] for particle_name in cls._group_particles[name]])
    @classmethod
    def create(cls, name, create_particle_template, min_scale=None, max_scale=None, scale_relative_to_parent=False, **kwargs):
        """
        Utility function to programmatically generate monolithic visual particle system classes.

        Note: If using super() calls in any functions, we have to use slightly esoteric syntax in order to
        accommodate this procedural method for using super calls
        cf. https://stackoverflow.com/questions/22403897/what-does-it-mean-by-the-super-object-returned-is-unbound-in-python
        Use: super(cls).__get__(cls).<METHOD_NAME>(<KWARGS>)

        Args:
            name (str): Name of the visual particles, in snake case.
            min_scale (None or 3-array): If specified, sets the minumum bound for the visual particles' relative scale.
                Else, defaults to 1
            max_scale (None or 3-array): If specified, sets the maximum bound for the visual particles' relative scale.
                Else, defaults to 1
            scale_relative_to_parent (bool): If True, will scale generated particles relative to the corresponding
                group's object
            create_particle_template (function): Method for generating the visual particle template that will be duplicated
                when generating groups of particles.
                Expected signature:
                create_particle_template(prim_path: str, name: str) --> EntityPrim
                where @prim_path and @name are the parameters to assign to the generated EntityPrim.
                NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
                visual mesh attached to its root link, since this will be the actual visual mesh used
            **kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
                the class attribute to modify and the values represent the functions / value to set
                (Note: These values should have either @classproperty or @classmethod decorators!)

        Returns:
            VisualParticleSystem: Generated visual particle system class
        """
        # Override the necessary parameters -- these closures capture the arguments above and are
        # installed as class attributes of the generated subclass via **kwargs below
        @classproperty
        def cp_register_system(cls):
            # We should register this system since it's an "actual" system (not an intermediate class)
            return True
        @classproperty
        def cp_scale_relative_to_parent(cls):
            return scale_relative_to_parent
        @classmethod
        def cm_create_particle_template(cls):
            return create_particle_template(prim_path=f"{cls.prim_path}/template", name=f"{cls.name}_template")
        # Add to any other params specified
        kwargs["_register_system"] = cp_register_system
        kwargs["scale_relative_to_parent"] = cp_scale_relative_to_parent
        kwargs["_create_particle_template"] = cm_create_particle_template
        # Run super
        return super().create(name=name, min_scale=min_scale, max_scale=max_scale, **kwargs)
    @classmethod
    def _dump_state(cls):
        """
        Dumps this system's state, extending super's state with per-group particle bookkeeping.

        Returns:
            dict: Keyword-mapped state, including "n_groups" and a "groups" dict keyed by group name
        """
        state = super()._dump_state()
        particle_names = list(cls.particles.keys())
        # Add in per-group information
        groups_dict = dict()
        name2idx = {name: idx for idx, name in enumerate(particle_names)}
        for group_name, group_particles in cls._group_particles.items():
            obj = cls._group_objects[group_name]
            is_cloth = cls._is_cloth_obj(obj=obj)
            groups_dict[group_name] = dict(
                particle_attached_obj_uuid=obj.uuid,
                n_particles=cls.num_group_particles(group=group_name),
                particle_idns=[cls.particle_name2idn(name=name) for name in group_particles.keys()],
                particle_indices=[name2idx[name] for name in group_particles.keys()],
                # If the attached object is a cloth, store the face_id, otherwise, store the link name
                particle_attached_references=[cls._particles_info[name]["face_id"] for name in group_particles.keys()]
                if is_cloth else [cls._particles_info[name]["link"].prim_path.split("/")[-1] for name in group_particles.keys()],
            )
        state["n_groups"] = len(cls._group_particles)
        state["groups"] = groups_dict
        return state
    @classmethod
    def _load_state(cls, state):
        """
        Load the internal state to this system as specified by @state.

        Args:
            state (dict): Keyword-mapped states of this system to set
        """
        # First, we sync our particle systems
        # Synchronize particle groups
        group_objects = []
        particle_idns = []
        particle_attached_references = []
        indices_to_remove = np.array([], dtype=int)
        for info in state["groups"].values():
            obj = og.sim.scene.object_registry("uuid", info["particle_attached_obj_uuid"])
            # obj will be None if an object with an attachment group is removed between dump_state() and load_state()
            if obj is not None:
                group_objects.append(obj)
                particle_idns.append(info["particle_idns"])
                particle_attached_references.append(info["particle_attached_references"])
            else:
                indices_to_remove = np.append(indices_to_remove, np.array(info["particle_indices"], dtype=int))
        cls._sync_particle_groups(
            group_objects=group_objects,
            particle_idns=particle_idns,
            particle_attached_references=particle_attached_references,
        )
        # Drop the state entries belonging to particles whose parent object no longer exists
        state["n_particles"] -= len(indices_to_remove)
        state["positions"] = np.delete(state["positions"], indices_to_remove, axis=0)
        state["orientations"] = np.delete(state["orientations"], indices_to_remove, axis=0)
        state["scales"] = np.delete(state["scales"], indices_to_remove, axis=0)
        # Run super
        super()._load_state(state=state)
    @classmethod
    def _serialize(cls, state):
        """
        Serializes @state into a flat float array: group metadata first, then super's flattened state.

        Args:
            state (dict): State dict as produced by _dump_state

        Returns:
            np.array: Flattened float state
        """
        # Run super first
        state_flat = super()._serialize(state=state)
        groups_dict = state["groups"]
        state_group_flat = [[state["n_groups"]]]
        for group_name, group_dict in groups_dict.items():
            obj = cls._group_objects[group_name]
            is_cloth = cls._is_cloth_obj(obj=obj)
            # Link names can't be stored as floats, so map them to integer link indices for serialization
            group_obj_link2id = {link_name: i for i, link_name in enumerate(obj.links.keys())}
            state_group_flat += [
                [group_dict["particle_attached_obj_uuid"]],
                [group_dict["n_particles"]],
                group_dict["particle_idns"],
                group_dict["particle_indices"],
                (group_dict["particle_attached_references"] if is_cloth else
                 [group_obj_link2id[reference] for reference in group_dict["particle_attached_references"]]),
            ]
        return np.concatenate([*state_group_flat, state_flat]).astype(float)
    @classmethod
    def _deserialize(cls, state):
        """
        Deserializes the flat @state array back into a state dict, syncing particle groups along the way.

        Args:
            state (np.array): Flattened float state, as produced by _serialize

        Returns:
            2-tuple: (state dict, number of array entries consumed)
        """
        # Synchronize the particle groups
        n_groups = int(state[0])
        groups_dict = dict()
        group_objs = []
        # Index starts at 1 because index 0 is n_groups
        idx = 1
        for i in range(n_groups):
            obj_uuid, n_particles = int(state[idx]), int(state[idx + 1])
            obj = og.sim.scene.object_registry("uuid", obj_uuid)
            assert obj is not None, f"Object with UUID {obj_uuid} not found in the scene"
            is_cloth = cls._is_cloth_obj(obj=obj)
            # Inverse of the link -> id mapping used in _serialize
            group_obj_id2link = {i: link_name for i, link_name in enumerate(obj.links.keys())}
            group_objs.append(obj)
            groups_dict[obj.name] = dict(
                particle_attached_obj_uuid=obj_uuid,
                n_particles=n_particles,
                particle_idns=[int(idn) for idn in state[idx + 2 : idx + 2 + n_particles]],  # Idx + 2 because the first two are obj_uuid and n_particles
                particle_indices=[int(idn) for idn in state[idx + 2 + n_particles: idx + 2 + n_particles * 2]],
                particle_attached_references=[int(idn) for idn in state[idx + 2 + n_particles * 2: idx + 2 + n_particles * 3]]
                if is_cloth else [group_obj_id2link[int(idn)] for idn in state[idx + 2 + n_particles * 2: idx + 2 + n_particles * 3]],
            )
            idx += 2 + n_particles * 3
        log.debug(f"Syncing {cls.name} particles with {n_groups} groups..")
        cls._sync_particle_groups(
            group_objects=group_objs,
            particle_idns=[group_info["particle_idns"] for group_info in groups_dict.values()],
            particle_attached_references=[group_info["particle_attached_references"] for group_info in groups_dict.values()],
        )
        # Get super method
        state_dict, idx_super = super()._deserialize(state=state[idx:])
        state_dict["n_groups"] = n_groups
        state_dict["groups"] = groups_dict
        return state_dict, idx + idx_super
class MacroPhysicalParticleSystem(MacroParticleSystem, PhysicalParticleSystem):
    """
    Particle system class that procedurally generates individual particles that are subject to physics
    """
    # Physics rigid body view for keeping track of all particles' state; rebuilt on every sim.play()
    # (see refresh_particles_view below)
    particles_view = None
    # Approximate radius of the macro particle, and distance from particle frame to approximate center
    # (both derived from the particle template's minimum bounding sphere in process_particle_object)
    _particle_radius = None
    _particle_offset = None
    @classmethod
    def initialize(cls):
        """Initializes the system: creates the particles scope prim and registers the view-refresh callback."""
        # Run super method first
        super().initialize()
        # Create the particles head prim -- this is merely a scope prim
        og.sim.stage.DefinePrim(f"{cls.prim_path}/particles", "Scope")
        # A new view needs to be created every time once sim is playing, so we add a callback now
        og.sim.add_callback_on_play(name=f"{cls.name}_particles_view", callback=cls.refresh_particles_view)
        # If sim is already playing, refresh particles immediately
        if og.sim.is_playing():
            cls.refresh_particles_view()
    @classmethod
    def _load_new_particle(cls, prim_path, name):
        """
        Loads a new particle at @prim_path: copies the template prim if nothing exists there yet,
        applying the RigidBodyAPI and a semantic class label to the fresh copy.

        Args:
            prim_path (str): Stage path at which the particle should live
            name (str): Name to assign to the particle prim wrapper

        Returns:
            CollisionVisualGeomPrim: Wrapper around the (new or pre-existing) particle prim
        """
        # We copy the template prim and generate the new object if the prim doesn't already exist, otherwise we
        # reference the pre-existing one
        if not lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path):
            lazy.omni.kit.commands.execute(
                "CopyPrim",
                path_from=cls.particle_object.prim_path,
                path_to=prim_path,
            )
            # Apply RigidBodyAPI to it so it is subject to physics
            prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path)
            lazy.pxr.UsdPhysics.RigidBodyAPI.Apply(prim)
            lazy.omni.isaac.core.utils.semantics.add_update_semantics(
                prim=prim,
                semantic_label=cls.name,
                type_label="class",
            )
        return CollisionVisualGeomPrim(prim_path=prim_path, name=name)
    @classmethod
    def process_particle_object(cls):
        """Post-processes the particle template: computes its minimum bounding sphere and caches
        the resulting radius / center offset, upscaling the template to meet the configured minimum radius."""
        # Run super method
        super().process_particle_object()
        # Compute particle radius from the template's scaled vertices
        vertices = np.array(cls.particle_object.get_attribute("points")) * cls.particle_object.scale * cls.max_scale.reshape(1, 3)
        particle_offset, particle_radius = trimesh.nsphere.minimum_nsphere(trimesh.Trimesh(vertices=vertices))
        # Enforce the configured floor on particle size by uniformly upscaling the template
        if particle_radius < m.MIN_PARTICLE_RADIUS:
            ratio = m.MIN_PARTICLE_RADIUS / particle_radius
            cls.particle_object.scale *= ratio
            particle_offset *= ratio
            particle_radius = m.MIN_PARTICLE_RADIUS
        cls._particle_offset = particle_offset
        cls._particle_radius = particle_radius
    @classmethod
    def refresh_particles_view(cls):
        """
        Internal helper method to refresh the particles' rigid body view to grab state

        Should be called every time sim.play() is called
        """
        # Update the simulation with zero elapsed time first -- presumably flushes pending stage changes
        # into the physics backend so the new view sees all current particles (TODO confirm)
        og.sim.pi.update_simulation(elapsedStep=0, currentTime=og.sim.current_time)
        # Suppress the log output from the tensors plugin while (re)creating the view
        with suppress_omni_log(channels=["omni.physx.tensors.plugin"]):
            cls.particles_view = og.sim.physics_sim_view.create_rigid_body_view(pattern=f"{cls.prim_path}/particles/*")
@classmethod
def _clear(cls):
# Run super method first
super()._clear()
# Clear internal variables
cls.particles_view = None
cls._particle_radius = None
cls._particle_offset = None
@classmethod
def remove_particle_by_name(cls, name):
# Run super first
super().remove_particle_by_name(name=name)
# Refresh particles view
cls.refresh_particles_view()
@classmethod
def add_particle(cls, prim_path, scale, idn=None):
# Run super first
particle = super().add_particle(prim_path=prim_path, scale=scale, idn=idn)
# Refresh particles view
cls.refresh_particles_view()
return particle
    @classmethod
    def get_particles_position_orientation(cls):
        """
        Grab all particles' poses from the physics view.

        Returns:
            2-tuple:
                - (n, 3)-array: per-particle (x,y,z) position of the sphere-approximation center
                - (n, 4)-array: per-particle quaternion orientation (assumed (x,y,z,w) to match the
                  setters in this class -- TODO confirm against the physics view API)
        """
        # Note: This gets the center of the sphere approximation of the particles, NOT the actual particle frames!
        if cls.n_particles > 0:
            tfs = cls.particles_view.get_transforms()
            pos, ori = tfs[:, :3], tfs[:, 3:]
            # Shift from the raw frame origin to the bounding-sphere center (offset rotated into world frame)
            pos = pos + T.quat2mat(ori) @ cls._particle_offset
        else:
            pos, ori = np.array([]).reshape(0, 3), np.array([]).reshape(0, 4)
        return pos, ori
@classmethod
def get_particles_local_pose(cls):
return cls.get_particles_position_orientation()
@classmethod
def get_particle_position_orientation(cls, idx):
assert idx <= cls.n_particles, \
f"Got invalid idx for getting particle pose! N particles: {cls.n_particles}, got idx: {idx}"
positions, orientations = cls.get_particles_position_orientation()
return (positions[idx], orientations[idx]) if cls.n_particles > 0 else (positions, orientations)
@classmethod
def get_particle_local_pose(cls, idx):
return cls.get_particle_position_orientation(idx=idx)
    @classmethod
    def set_particles_position_orientation(cls, positions=None, orientations=None):
        """
        Set all particles' poses in the physics view; a None component keeps the current values.

        Args:
            positions (None or (n, 3)-array): Desired sphere-approximation-center positions
            orientations (None or (n, 4)-array): Desired quaternion orientations
        """
        if cls.n_particles == 0:
            return
        # Note: This sets the center of the sphere approximation of the particles, NOT the actual particle frames!
        if positions is None or orientations is None:
            pos, ori = cls.get_particles_position_orientation()
            orientations = ori if orientations is None else orientations
            # Convert center positions back to raw frame origins by removing the rotated offset
            positions = pos if positions is None else (positions - T.quat2mat(orientations) @ cls._particle_offset)
        # NOTE(review): when BOTH positions and orientations are supplied, the offset compensation above is
        # skipped and the values are written as raw frame poses -- confirm whether this asymmetry is intended
        cls.particles_view.set_transforms(np.concatenate([positions, orientations], axis=1), indices=np.arange(len(positions)))
@classmethod
def set_particles_local_pose(cls, positions=None, orientations=None):
cls.set_particles_position_orientation(positions=positions, orientations=orientations)
@classmethod
def set_particle_position_orientation(cls, idx, position=None, orientation=None):
assert idx <= cls.n_particles, \
f"Got invalid idx for setting particle pose! N particles: {cls.n_particles}, got idx: {idx}"
if position is None or orientation is None:
pos, ori = cls.get_particle_position_orientation(idx=idx)
orientation = ori if orientation is None else orientation
position = pos if position is None else (position - T.quat2mat(orientation) @ cls._particle_offset)
cls.particles_view.set_transforms(np.concatenate([position, orientation]).reshape(1, -1), indices=np.array([idx]))
@classmethod
def set_particle_local_pose(cls, idx, position=None, orientation=None):
cls.set_particle_position_orientation(idx=idx, position=position, orientation=orientation)
    @classmethod
    def get_particles_velocities(cls):
        """
        Grab particles' global linear and angular velocities

        Returns:
            2-tuple:
                - (n, 3)-array: per-particle (x, y, z) linear velocities in the world frame
                - (n, 3)-array: per-particle (ax, ay, az) angular velocities in the world frame
        """
        if cls.n_particles > 0:
            # The view packs [linear | angular] into a single (n, 6) array
            vels = cls.particles_view.get_velocities()
            lin_vel, ang_vel = vels[:, :3], vels[:, 3:]
        else:
            lin_vel, ang_vel = np.array([]).reshape(0, 3), np.array([]).reshape(0, 3)
        return lin_vel, ang_vel
@classmethod
def get_particle_velocities(cls, idx):
"""
Grab particle @idx's global linear and angular velocities
Returns:
2-tuple:
- 3-array: particle (x, y, z) linear velocity in the world frame
- 3-array: particle (ax, ay, az) angular velocity in the world frame
"""
assert idx <= cls.n_particles, \
f"Got invalid idx for getting particle velocity! N particles: {cls.n_particles}, got idx: {idx}"
lin_vel, ang_vel = cls.get_particles_velocities()
return (lin_vel[idx], ang_vel[idx]) if cls.n_particles > 0 else lin_vel, ang_vel
@classmethod
def set_particles_velocities(cls, lin_vels=None, ang_vels=None):
if cls.n_particles == 0:
return
if lin_vels is None or ang_vels is None:
l_vels, a_vels = cls.get_particles_velocities()
lin_vels = l_vels if lin_vels is None else lin_vels
ang_vels = a_vels if ang_vels is None else ang_vels
cls.particles_view.set_velocities(np.concatenate([lin_vels, ang_vels], axis=1), indices=np.arange(len(lin_vels)))
@classmethod
def set_particle_velocities(cls, idx, lin_vel=None, ang_vel=None):
assert idx <= cls.n_particles, \
f"Got invalid idx for setting particle velocity! N particles: {cls.n_particles}, got idx: {idx}"
if lin_vel is None or ang_vel is None:
l_vel, a_vel = cls.get_particles_velocities()
lin_vel = l_vel if lin_vel is None else lin_vel
ang_vel = a_vel if ang_vel is None else ang_vel
cls.particles_view.set_velocities(np.concatenate([lin_vel, ang_vel]).reshape(1, -1), indices=np.array([idx]))
    @classproperty
    def particle_radius(cls):
        # Radius of the particle template's minimum bounding sphere (cached in process_particle_object)
        return cls._particle_radius
    @classproperty
    def particle_contact_radius(cls):
        # This is simply the normal radius
        return cls.particle_radius
@classmethod
def generate_particles(
cls,
positions,
orientations=None,
velocities=None,
angular_velocities=None,
scales=None,
**kwargs,
):
"""
Generates new particles
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
orientations. If not specified, all will be sampled randomly
velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
If not specified, all will be set to 0
angular_velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (ax,ay,az)
angular velocities. If not specified, all will be set to 0
scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
If not specified, will be uniformly randomly sampled from (cls.min_scale, cls.max_scale)
**kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
"""
# Call super first
super().generate_particles(
positions=positions,
orientations=orientations,
scales=scales,
**kwargs,
)
# Grab pre-existing vels -- note that this already includes the newly included particles, so we will only
# keep the first (N - n_new) values
current_lin_vels, current_ang_vels = cls.get_particles_velocities()
# Update the tensors
n_particles = len(positions)
velocities = np.zeros((n_particles, 3)) if velocities is None else velocities
angular_velocities = np.zeros_like(velocities) if angular_velocities is None else angular_velocities
velocities = np.concatenate([current_lin_vels[:-n_particles], velocities], axis=0)
angular_velocities = np.concatenate([current_ang_vels[:-n_particles], angular_velocities], axis=0)
# Set the vels
cls.set_particles_velocities(lin_vels=velocities, ang_vels=angular_velocities)
    @classmethod
    def create(cls, name, create_particle_template, particle_density, scale=None, **kwargs):
        """
        Utility function to programmatically generate monolithic physical particle system classes.

        Note: If using super() calls in any functions, we have to use slightly esoteric syntax in order to
        accommodate this procedural method for using super calls
        cf. https://stackoverflow.com/questions/22403897/what-does-it-mean-by-the-super-object-returned-is-unbound-in-python
        Use: super(cls).__get__(cls).<METHOD_NAME>(<KWARGS>)

        Args:
            name (str): Name of the macro physical particles, in snake case.
            particle_density (float): Particle density for the generated system
            create_particle_template (function): Method for generating the visual particle template that will be duplicated
                when generating groups of particles.
                Expected signature:
                create_particle_template(prim_path: str, name: str) --> EntityPrim
                where @prim_path and @name are the parameters to assign to the generated EntityPrim.
                NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
                visual mesh attached to its root link, since this will be the actual mesh used for duplication
            scale (None or 3-array): If specified, sets the scaling factor for the particles' relative scale.
                Else, defaults to 1
            **kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
                the class attribute to modify and the values represent the functions / value to set
                (Note: These values should have either @classproperty or @classmethod decorators!)

        Returns:
            MacroPhysicalParticleSystem: Generated macro physical particle system class
        """
        # Override the necessary parameters -- these closures capture the arguments above and are
        # installed as class attributes of the generated subclass via **kwargs below
        @classproperty
        def cp_register_system(cls):
            # We should register this system since it's an "actual" system (not an intermediate class)
            return True
        @classproperty
        def cp_particle_density(cls):
            return particle_density
        @classmethod
        def cm_create_particle_template(cls):
            return create_particle_template(prim_path=f"{cls.prim_path}/template", name=f"{cls.name}_template")
        # Add to any other params specified
        kwargs["_register_system"] = cp_register_system
        kwargs["particle_density"] = cp_particle_density
        kwargs["_create_particle_template"] = cm_create_particle_template
        # Run super -- min_scale == max_scale == scale, i.e. particles are generated at a fixed size
        return super().create(name=name, min_scale=scale, max_scale=scale, **kwargs)
    @classmethod
    def _sync_particles(cls, n_particles):
        """
        Synchronizes the number of particles seen in the scene with @n_particles

        Args:
            n_particles (int): Desired number of particles to force simulator to have
        """
        # Get the difference between current and desired particles
        n_particles_to_generate = n_particles - cls.n_particles
        # If positive, add particles
        if n_particles_to_generate > 0:
            for i in range(n_particles_to_generate):
                # Min scale == max scale, so no need for sampling
                cls.add_particle(prim_path=f"{cls.prim_path}/particles", scale=cls.max_scale)
        else:
            # Remove excess particles (the first |deficit| ones); a no-op when the counts already match,
            # since arange(0) is empty
            cls.remove_particles(idxs=np.arange(-n_particles_to_generate))
    @classproperty
    def state_size(cls):
        # In addition to super, we store 6 extra floats per particle:
        # linear velocity (3) + angular velocity (3)
        return super().state_size + 6 * cls.n_particles
@classmethod
def _dump_state(cls):
state = super()._dump_state()
# Store all particles' velocities as well
state["lin_velocities"], state["ang_velocities"] = cls.get_particles_velocities()
return state
    @classmethod
    def _load_state(cls, state):
        """
        Load the internal state to this system as specified by @state.

        Args:
            state (dict): Keyword-mapped states of this system to set
        """
        # Sync the number of particles first so super's per-particle state lines up
        cls._sync_particles(n_particles=state["n_particles"])
        super()._load_state(state=state)
        # Make sure view is refreshed
        cls.refresh_particles_view()
        # Make sure we update all the velocities
        cls.set_particles_velocities(state["lin_velocities"], state["ang_velocities"])
    @classmethod
    def _serialize(cls, state):
        """
        Serializes @state into a flat float array: super's state followed by flattened velocities.

        Args:
            state (dict): State dict as produced by _dump_state

        Returns:
            np.array: Flattened float state
        """
        # Run super first
        state_flat = super()._serialize(state=state)
        # Add velocities
        return np.concatenate([state_flat, state["lin_velocities"].flatten(), state["ang_velocities"].flatten()], dtype=float)
    @classmethod
    def _deserialize(cls, state):
        """
        Deserializes the flat @state array back into a state dict, including per-particle velocities.

        Args:
            state (np.array): Flattened float state, as produced by _serialize

        Returns:
            2-tuple: (state dict, number of array entries consumed)
        """
        # Sync the number of particles first (state[0] is n_particles in super's layout)
        cls._sync_particles(n_particles=int(state[0]))
        # Run super first
        state_dict, idx = super()._deserialize(state=state)
        # Deserialize velocities, which super appended after its own state
        len_velocities = 3 * state_dict["n_particles"]
        for vel in ("lin_velocities", "ang_velocities"):
            state_dict[vel] = state[idx:idx+len_velocities].reshape(-1, 3)
            idx += len_velocities
        return state_dict, idx
| 68,091 | Python | 43.650492 | 163 | 0.62885 |
StanfordVL/OmniGibson/omnigibson/systems/system_base.py | import os
import json
import numpy as np
import omnigibson as og
from omnigibson.macros import gm, create_module_macros
from omnigibson.utils.asset_utils import get_all_system_categories
from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function
from omnigibson.utils.python_utils import classproperty, assert_valid_key, get_uuid, camel_case_to_snake_case, \
snake_case_to_camel_case, subclass_factory, SerializableNonInstance, UniquelyNamedNonInstance
from omnigibson.utils.registry_utils import SerializableRegistry
from omnigibson.utils.sampling_utils import sample_cuboid_on_object_full_grid_topdown
from omnigibson.utils.ui_utils import create_module_logger
import omnigibson.lazy as lazy
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Parameters used if scaling particles relative to its parent object's scale
m.BBOX_LOWER_LIMIT_FRACTION_OF_AABB = 0.06
m.BBOX_LOWER_LIMIT_MIN = 0.002
m.BBOX_LOWER_LIMIT_MAX = 0.02
m.BBOX_UPPER_LIMIT_FRACTION_OF_AABB = 0.1
m.BBOX_UPPER_LIMIT_MIN = 0.01
m.BBOX_UPPER_LIMIT_MAX = 0.1
# Callbacks (name -> fn) executed whenever any system is initialized / cleared; fn receives the system class
_CALLBACKS_ON_SYSTEM_INIT = dict()
_CALLBACKS_ON_SYSTEM_CLEAR = dict()
# Modifiers denoting a semantic difference in the system
SYSTEM_PREFIXES = {"diced", "cooked", "melted"}
class BaseSystem(SerializableNonInstance, UniquelyNamedNonInstance):
    """
    Base class for all systems. These are non-instance objects that should be used globally for a given environment.
    This is useful for items in a scene that are non-discrete / cannot be distinguished into individual instances,
    e.g.: water, particles, etc. While we keep the python convention of the system class name being camel case
    (e.g. StrawberrySmoothie), we adopt the snake case for the system registry to unify with the category of BaseObject.
    For example, get_system("strawberry_smoothie") will return the StrawberrySmoothie class.
    """
    # Scaling factor to sample from when generating a new particle
    min_scale = None # (x,y,z) scaling
    max_scale = None # (x,y,z) scaling
    # Whether this system has been initialized or not
    initialized = False
    # Internal variables used for bookkeeping
    _uuid = None
    _snake_case_name = None
    def __init_subclass__(cls, **kwargs):
        """Set up every subclass: derive its snake-case name, default its scale bounds, and (if concrete) register it."""
        # While class names are camel case, we convert them to snake case to be consistent with object categories.
        name = camel_case_to_snake_case(cls.__name__)
        # Make sure prefixes preserve their double underscore
        # (e.g. "diced__apple" rather than "diced_apple", keeping the modifier machine-recoverable)
        for prefix in SYSTEM_PREFIXES:
            name = name.replace(f"{prefix}_", f"{prefix}__")
        cls._snake_case_name = name
        cls.min_scale = np.ones(3)
        cls.max_scale = np.ones(3)
        # Run super init
        super().__init_subclass__(**kwargs)
        # Register this system if requested (only "final" subclasses set _register_system to True)
        if cls._register_system:
            global REGISTERED_SYSTEMS, UUID_TO_SYSTEMS
            REGISTERED_SYSTEMS[cls._snake_case_name] = cls
            cls._uuid = get_uuid(cls._snake_case_name)
            UUID_TO_SYSTEMS[cls._uuid] = cls
    @classproperty
    def name(cls):
        # Class name is the unique name assigned
        return cls._snake_case_name
    @classproperty
    def uuid(cls):
        # int: unique ID derived deterministically from the snake-case name
        return cls._uuid
    @classproperty
    def prim_path(cls):
        """
        Returns:
            str: Path to this system's prim in the scene stage
        """
        return f"/World/{cls.name}"
    @classproperty
    def n_particles(cls):
        """
        Returns:
            int: Number of particles belonging to this system
        Raises:
            NotImplementedError: Must be implemented by subclasses
        """
        raise NotImplementedError()
    @classproperty
    def material(cls):
        """
        Returns:
            None or MaterialPrim: Material belonging to this system, if there is any
        """
        return None
    @classproperty
    def _register_system(cls):
        """
        Returns:
            bool: True if this system should be registered (i.e.: it is not an intermediate class but a "final" subclass
                representing a system we'd actually like to use, e.g.: water, dust, etc. Should be set by the subclass
        """
        # We assume we aren't registering by default
        return False
    @classproperty
    def _store_local_poses(cls):
        """
        Returns:
            bool: Whether to store local particle poses or not when state is saved. Default is False
        """
        return False
    @classmethod
    def initialize(cls):
        """
        Initializes this system: creates its stage scope prim, registers it as active, refreshes
        transition rules (system availability can change which rules apply), and runs init callbacks.
        """
        global _CALLBACKS_ON_SYSTEM_INIT
        assert not cls.initialized, f"Already initialized system {cls.name}!"
        # Scope prim parents all prims this system will create on the stage
        og.sim.stage.DefinePrim(cls.prim_path, "Scope")
        cls.initialized = True
        # Add to registry
        SYSTEM_REGISTRY.add(obj=cls)
        # Avoid circular import
        if og.sim.is_playing():
            from omnigibson.transition_rules import TransitionRuleAPI
            TransitionRuleAPI.refresh_all_rules()
        # Run any callbacks
        for callback in _CALLBACKS_ON_SYSTEM_INIT.values():
            callback(cls)
    @classmethod
    def update(cls):
        """
        Executes any necessary system updates, once per og.sim._non_physics_step
        """
        # Default is no-op
        pass
    @classmethod
    def remove_all_particles(cls):
        """
        Removes all particles and deletes them from the simulator
        Raises:
            NotImplementedError: Must be implemented by subclasses
        """
        raise NotImplementedError()
    @classmethod
    def remove_particles(
        cls,
        idxs,
        **kwargs,
    ):
        """
        Removes pre-existing particles
        Args:
            idxs (np.array): (n_particles,) shaped array specifying IDs of particles to delete
            **kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
        """
        raise NotImplementedError()
    @classmethod
    def generate_particles(
        cls,
        positions,
        orientations=None,
        scales=None,
        **kwargs,
    ):
        """
        Generates new particles
        Args:
            positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
            orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
                orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
            scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
                If not specified, will be uniformly randomly sampled from (cls.min_scale, cls.max_scale)
            **kwargs (dict): Any additional keyword-specific arguments required by subclass implementation
        """
        raise NotImplementedError()
    @classmethod
    def clear(cls):
        """
        Clears this system, so that it may possibly be re-initialized. Useful for, e.g., when loading from a new
        scene during the same sim instance
        """
        # No-op if the system was never initialized
        if cls.initialized:
            cls._clear()
    @classmethod
    def _clear(cls):
        """Internal teardown: run clear callbacks, remove particles/prims, and deregister the system."""
        global SYSTEM_REGISTRY, _CALLBACKS_ON_SYSTEM_CLEAR
        # Run any callbacks (before state is torn down so callbacks can still inspect the system)
        for callback in _CALLBACKS_ON_SYSTEM_CLEAR.values():
            callback(cls)
        cls.reset()
        lazy.omni.isaac.core.utils.prims.delete_prim(cls.prim_path)
        cls.initialized = False
        # Remove from active registry
        SYSTEM_REGISTRY.remove(obj=cls)
        # Avoid circular import
        if og.sim.is_playing():
            from omnigibson.transition_rules import TransitionRuleAPI
            TransitionRuleAPI.refresh_all_rules()
    @classmethod
    def reset(cls):
        """
        Reset this system by removing all of its particles
        """
        cls.remove_all_particles()
    @classmethod
    def create(cls, name, min_scale=None, max_scale=None, **kwargs):
        """
        Helper function to programmatically generate systems
        Args:
            name (str): Name of the visual particles, in snake case.
            min_scale (None or 3-array): If specified, sets the minumum bound for particles' relative scale.
                Else, defaults to 1
            max_scale (None or 3-array): If specified, sets the maximum bound for particles' relative scale.
                Else, defaults to 1
            **kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
                the class attribute to modify and the values represent the functions / value to set
                (Note: These values should have either @classproperty or @classmethod decorators!)
        Returns:
            BaseSystem: Generated system class given input arguments
        """
        @classmethod
        def cm_initialize(cls):
            # Potentially override the min / max scales
            if min_scale is not None:
                cls.min_scale = np.array(min_scale)
            if max_scale is not None:
                cls.max_scale = np.array(max_scale)
            # Run super (we have to use a bit esoteric syntax in order to accommodate this procedural method for
            # using super calls -- cf. https://stackoverflow.com/questions/22403897/what-does-it-mean-by-the-super-object-returned-is-unbound-in-python
            super(cls).__get__(cls).initialize()
        kwargs["initialize"] = cm_initialize
        # Create and return the class
        return subclass_factory(name=snake_case_to_camel_case(name), base_classes=cls, **kwargs)
    @classmethod
    def get_active_systems(cls):
        """
        Returns:
            dict: Mapping from system name to system for all systems that are subclasses of this system AND active (initialized)
        """
        return {system.name: system for system in SYSTEM_REGISTRY.objects if issubclass(system, cls)}
    @classmethod
    def sample_scales(cls, n):
        """
        Samples scales uniformly based on @cls.min_scale and @cls.max_scale
        Args:
            n (int): Number of scales to sample
        Returns:
            (n, 3) array: Array of sampled scales
        """
        return np.random.uniform(cls.min_scale, cls.max_scale, (n, 3))
    @classmethod
    def get_particles_position_orientation(cls):
        """
        Computes all particles' positions and orientations that belong to this system in the world frame
        Note: This is more optimized than doing a for loop with self.get_particle_position_orientation()
        Returns:
            2-tuple:
                - (n, 3)-array: per-particle (x,y,z) position
                - (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError()
    @classmethod
    def get_particle_position_orientation(cls, idx):
        """
        Compute particle's position and orientation. This automatically takes into account the relative
        pose w.r.t. its parent link and the global pose of that parent link.
        Args:
            idx (int): Index of the particle to compute position and orientation for. Note: this is
                equivalent to grabbing the corresponding idx'th entry from @get_particles_position_orientation()
        Returns:
            2-tuple:
                - 3-array: (x,y,z) position
                - 4-array: (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError()
    @classmethod
    def set_particles_position_orientation(cls, positions=None, orientations=None):
        """
        Sets all particles' positions and orientations that belong to this system in the world frame
        Note: This is more optimized than doing a for loop with self.set_particle_position_orientation()
        Args:
            positions (n-array): (n, 3) per-particle (x,y,z) position
            orientations (n-array): (n, 4) per-particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError()
    @classmethod
    def set_particle_position_orientation(cls, idx, position=None, orientation=None):
        """
        Sets particle's position and orientation. This automatically takes into account the relative
        pose w.r.t. its parent link and the global pose of that parent link.
        Args:
            idx (int): Index of the particle to set position and orientation for. Note: this is
                equivalent to setting the corresponding idx'th entry from @set_particles_position_orientation()
            position (3-array): particle (x,y,z) position
            orientation (4-array): particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError()
    @classmethod
    def get_particles_local_pose(cls):
        """
        Computes all particles' positions and orientations that belong to this system in the particles' parent frames
        Returns:
            2-tuple:
                - (n, 3)-array: per-particle (x,y,z) position
                - (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError()
    @classmethod
    def get_particle_local_pose(cls, idx):
        """
        Compute particle's position and orientation in the particle's parent frame
        Args:
            idx (int): Index of the particle to compute position and orientation for. Note: this is
                equivalent to grabbing the corresponding idx'th entry from @get_particles_local_pose()
        Returns:
            2-tuple:
                - 3-array: (x,y,z) position
                - 4-array: (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError()
    @classmethod
    def set_particles_local_pose(cls, positions=None, orientations=None):
        """
        Sets all particles' positions and orientations that belong to this system in the particles' parent frames
        Args:
            positions (n-array): (n, 3) per-particle (x,y,z) position
            orientations (n-array): (n, 4) per-particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError()
    @classmethod
    def set_particle_local_pose(cls, idx, position=None, orientation=None):
        """
        Sets particle's position and orientation in the particle's parent frame
        Args:
            idx (int): Index of the particle to set position and orientation for. Note: this is
                equivalent to setting the corresponding idx'th entry from @set_particles_local_pose()
            position (3-array): particle (x,y,z) position
            orientation (4-array): particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError()
    def __init__(self):
        # Systems are class-level singletons; instantiation is deliberately forbidden
        raise ValueError("System classes should not be created!")
    @classproperty
    def state_size(cls):
        """int: Size of this system's serialized state vector."""
        # We have n_particles (1), min / max scale (3*2), each particle pose (7*n)
        return 7 + 7 * cls.n_particles
    @classmethod
    def _dump_state(cls):
        """Dump this system's state dict: particle count, scale bounds, and per-particle poses."""
        # Local vs. world poses depending on the subclass's _store_local_poses setting
        positions, orientations = cls.get_particles_local_pose() if \
            cls._store_local_poses else cls.get_particles_position_orientation()
        return dict(
            n_particles=cls.n_particles,
            min_scale=cls.min_scale,
            max_scale=cls.max_scale,
            positions=positions,
            orientations=orientations,
        )
    @classmethod
    def _load_state(cls, state):
        """Load state dict @state; requires the current particle count to already match the stored one."""
        # Sanity check loading particles
        assert cls.n_particles == state["n_particles"], f"Inconsistent number of particles found when loading " \
                                                        f"particles state! Current number: {cls.n_particles}, " \
                                                        f"loaded number: {state['n_particles']}"
        # Load scale
        cls.min_scale = state["min_scale"]
        cls.max_scale = state["max_scale"]
        # Load the poses (local or world frame must match how they were dumped)
        setter = cls.set_particles_local_pose if cls._store_local_poses else cls.set_particles_position_orientation
        setter(positions=state["positions"], orientations=state["orientations"])
    @classmethod
    def _serialize(cls, state):
        """Flatten state dict @state into a 1D float array."""
        # Array is n_particles, then min_scale and max_scale, then poses for all particles
        return np.concatenate([
            [state["n_particles"]],
            state["min_scale"],
            state["max_scale"],
            state["positions"].flatten(),
            state["orientations"].flatten(),
        ], dtype=float)
@classmethod
def _deserialize(cls, state):
# First index is number of particles, then min_scale and max_scale, then the individual particle poses
state_dict = dict()
n_particles = int(state[0])
len_positions = n_particles * 3
len_orientations = n_particles * 4
state_dict["n_particles"] = n_particles
state_dict["min_scale"] = state[1:4]
state_dict["max_scale"] = state[4:7]
state_dict["positions"] = state[7:7+len_positions].reshape(-1, 3)
state_dict["orientations"] = state[7+len_positions:7+len_positions+len_orientations].reshape(-1, 4)
return state_dict, 7 + len_positions + len_orientations
# Global dict that contains mappings of all the systems
# REGISTERED_SYSTEMS: snake-case name -> system class; UUID_TO_SYSTEMS: uuid (int) -> system class
REGISTERED_SYSTEMS = dict()
UUID_TO_SYSTEMS = dict()
# Serializable registry of systems that are active on the stage (initialized)
SYSTEM_REGISTRY = SerializableRegistry(
    name="system_registry",
    class_types=BaseSystem,
    default_key="name",
    unique_keys=["name", "prim_path", "uuid"],
)
class VisualParticleSystem(BaseSystem):
    """
    Particle system class for generating particles not subject to physics, and are attached to individual objects
    """
    # Maps group name to the particles associated with it
    # This is an ordered dict of ordered dict (nested ordered dict maps particle names to particle instance)
    _group_particles = None
    # Maps group name to the parent object (the object with particles attached to it) of the group
    _group_objects = None
    # Maps group name to tuple (min_scale, max_scale) to apply to sampled particles for that group
    _group_scales = None
    @classmethod
    def initialize(cls):
        # Run super method first
        super().initialize()
        # Initialize mutable class variables so they don't automatically get overridden by children classes
        cls._group_particles = dict()
        cls._group_objects = dict()
        cls._group_scales = dict()
    @classproperty
    def particle_object(cls):
        """
        Returns:
            XFormPrim: Particle object to be used as a template for duplication
        """
        raise NotImplementedError()
    @classproperty
    def groups(cls):
        """
        Returns:
            set of str: Current attachment particle group names
        """
        return set(cls._group_particles.keys())
    @classproperty
    def _store_local_poses(cls):
        # Store local poses since particles are attached to moving bodies
        return True
    @classproperty
    def scale_relative_to_parent(cls):
        """
        Returns:
            bool: Whether or not particles should be scaled relative to the group's parent object. NOTE: If True,
                this will OVERRIDE cls.min_scale and cls.max_scale when sampling particles!
        """
        return False
    @classproperty
    def state_size(cls):
        # Get super size first
        state_size = super().state_size
        # Additionally, we have n_groups (1), with m_particles for each group (n), attached_obj_uuids (n), and
        # particle ids, particle indices, and corresponding link info for each particle (m * 3)
        return state_size + 1 + 2 * len(cls._group_particles) + \
               sum(3 * cls.num_group_particles(group) for group in cls.groups)
    @classmethod
    def _clear(cls):
        super()._clear()
        # Clear all groups as well
        cls._group_particles = dict()
        cls._group_objects = dict()
        cls._group_scales = dict()
    @classmethod
    def remove_all_group_particles(cls, group):
        """
        Removes all particles belonging to attachment group @group, both from the simulator and internally
        Args:
            group (str): Name of the attachment group to remove all particles from
        """
        # Make sure the group exists
        cls._validate_group(group=group)
        # Remove all particles from the group (snapshot keys first since removal mutates the dict)
        for particle_name in tuple(cls._group_particles[group].keys()):
            cls.remove_particle_by_name(name=particle_name)
    @classmethod
    def num_group_particles(cls, group):
        """
        Gets the number of particles for the given group in the simulator
        Args:
            group (str): Name of the attachment group to count particles for.
        Returns:
            int: Number of particles allocated to this group in the scene. Note that if @group does not
                exist, this will raise a ValueError (via _validate_group)
        """
        # Make sure the group exists
        cls._validate_group(group=group)
        return len(cls._group_particles[group])
    @classmethod
    def get_group_name(cls, obj):
        """
        Grabs the corresponding group name for object @obj
        Args:
            obj (BaseObject): Object for which its procedurally generated particle attachment name should be grabbed
        Returns:
            str: Name of the attachment group to use when executing commands from this class on
                that specific attachment group
        """
        return obj.name
    @classmethod
    def create_attachment_group(cls, obj):
        """
        Creates an attachment group internally for object @obj. Note that this does NOT automatically generate particles
        for this object (should call generate_group_particles(...) ).
        Args:
            obj (BaseObject): Object for which a new particle attachment group will be created for
        Returns:
            str: Name of the attachment group to use when executing commands from this class on
                that specific attachment group
        """
        group = cls.get_group_name(obj=obj)
        # This should only happen once for a single attachment group, so we explicitly check to make sure the object
        # doesn't already exist
        assert group not in cls.groups, \
            f"Cannot create new attachment group because group with name {group} already exists!"
        # Create the group
        cls._group_particles[group] = dict()
        cls._group_objects[group] = obj
        # Compute the group scale if we're scaling relative to parent
        if cls.scale_relative_to_parent:
            cls._group_scales[group] = cls._compute_relative_group_scales(group=group)
        return group
    @classmethod
    def remove_attachment_group(cls, group):
        """
        Removes attachment group @group internally. Note that this will automatically remove any particles
        currently assigned to that group
        Args:
            group (str): Name of the attachment group to remove
        Returns:
            str: Name of the removed attachment group
        """
        # Make sure the group exists
        cls._validate_group(group=group)
        # Remove all particles from the group
        cls.remove_all_group_particles(group=group)
        # Remove the actual groups
        cls._group_particles.pop(group)
        cls._group_objects.pop(group)
        if cls.scale_relative_to_parent:
            cls._group_scales.pop(group)
        return group
    @classmethod
    def _compute_relative_group_scales(cls, group):
        """
        Computes relative particle scaling for group @group required when @cls.scale_relative_to_parent is True
        Args:
            group (str): Specific group for which to compute the relative particle scaling
        Returns:
            2-tuple:
                - 3-array: min scaling factor
                - 3-array: max scaling factor
        """
        # First set the bbox ranges -- depends on the object's bounding box
        obj = cls._group_objects[group]
        median_aabb_dim = np.median(obj.aabb_extent)
        # Compute lower and upper limits to bbox, clipped to configured absolute bounds
        bbox_lower_limit_from_aabb = m.BBOX_LOWER_LIMIT_FRACTION_OF_AABB * median_aabb_dim
        bbox_lower_limit = np.clip(
            bbox_lower_limit_from_aabb,
            m.BBOX_LOWER_LIMIT_MIN,
            m.BBOX_LOWER_LIMIT_MAX,
        )
        bbox_upper_limit_from_aabb = m.BBOX_UPPER_LIMIT_FRACTION_OF_AABB * median_aabb_dim
        bbox_upper_limit = np.clip(
            bbox_upper_limit_from_aabb,
            m.BBOX_UPPER_LIMIT_MIN,
            m.BBOX_UPPER_LIMIT_MAX,
        )
        # Convert these into scaling factors for the x and y axes for our particle object
        # (z is left at 1.0 -- particles are flat decals attached to surfaces)
        particle_bbox = cls.particle_object.aabb_extent
        minimum = np.array([bbox_lower_limit / particle_bbox[0], bbox_lower_limit / particle_bbox[1], 1.0])
        maximum = np.array([bbox_upper_limit / particle_bbox[0], bbox_upper_limit / particle_bbox[1], 1.0])
        return minimum, maximum
@classmethod
def sample_scales_by_group(cls, group, n):
"""
Samples @n particle scales for group @group.
Args:
group (str): Specific group for which to sample scales
n (int): Number of scales to sample
Returns:
(n, 3) array: Array of sampled scales
"""
# Make sure the group exists
cls._validate_group(group=group)
# Sample based on whether we're scaling relative to parent or not
scales = np.random.uniform(*cls._group_scales[group], (n, 3)) if cls.scale_relative_to_parent else cls.sample_scales(n=n)
# Since the particles will be placed under the object, it will be affected/stretched by obj.scale. In order to
# preserve the absolute size of the particles, we need to scale the particle by obj.scale in some way. However,
# since the particles have a relative rotation w.r.t the object, the scale between the two don't align. As a
# heuristics, we divide it by the avg_scale, which is the cubic root of the product of the scales along 3 axes.
obj = cls._group_objects[group]
avg_scale = np.cbrt(np.product(obj.scale))
return scales / avg_scale
    @classmethod
    def generate_particles(
        cls,
        positions,
        orientations=None,
        scales=None,
        **kwargs,
    ):
        # Should not be called, since particles must be tied to a group!
        raise ValueError("Cannot call generate_particles for a VisualParticleSystem! "
                         "Call generate_group_particles() instead.")
    @classmethod
    def generate_group_particles(
        cls,
        group,
        positions,
        orientations=None,
        scales=None,
        link_prim_paths=None,
    ):
        """
        Generates new particle objects within group @group at the specified pose (@positions, @orientations) with
        corresponding scales @scales.
        NOTE: Assumes positions are the exact contact point on @group object's surface. If cls._CLIP_INTO_OBJECTS
        is not True, then the positions will be offset away from the object by half of its bbox
        Args:
            group (str): Object on which to sample particle locations
            positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
            orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
                orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
            scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scaling in its
                local frame. If not specified, all we randomly sampled based on @cls.min_scale and @cls.max_scale
            link_prim_paths (None or list of str): Determines which link each generated particle will
                be attached to. If not specified, all will be attached to the group object's prim, NOT a link
        """
        raise NotImplementedError
    @classmethod
    def generate_group_particles_on_object(cls, group, max_samples=None, min_samples_for_success=1):
        """
        Generates @max_samples new particle objects and samples their locations on the surface of object @obj. Note
        that if any particles are in the group already, they will be removed
        Args:
            group (str): Object on which to sample particle locations
            max_samples (None or int): If specified, maximum number of particles to sample
            min_samples_for_success (int): Minimum number of particles required to be sampled successfully in order
                for this generation process to be considered successful
        Returns:
            bool: True if enough particles were generated successfully (number of successfully sampled points >=
                min_samples_for_success), otherwise False
        """
        raise NotImplementedError
    @classmethod
    def get_group_particles_position_orientation(cls, group):
        """
        Computes all particles' positions and orientations that belong to @group
        Note: This is more optimized than doing a for loop with self.get_particle_position_orientation()
        Args:
            group (str): Group name whose particle positions and orientations should be computed
        Returns:
            2-tuple:
                - (n, 3)-array: per-particle (x,y,z) position
                - (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError
    @classmethod
    def set_group_particles_position_orientation(cls, group, positions=None, orientations=None):
        """
        Sets all particles' positions and orientations that belong to @group
        Note: This is more optimized than doing a for loop with self.set_particle_position_orientation()
        Args:
            group (str): Group name whose particle positions and orientations should be set
            positions (n-array): (n, 3) per-particle (x,y,z) position
            orientations (n-array): (n, 4) per-particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError
    @classmethod
    def get_group_particles_local_pose(cls, group):
        """
        Computes all particles' positions and orientations that belong to @group in the particles' parent frame
        Args:
            group (str): Group name whose particle positions and orientations should be computed
        Returns:
            2-tuple:
                - (n, 3)-array: per-particle (x,y,z) position
                - (n, 4)-array: per-particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError
    @classmethod
    def set_group_particles_local_pose(cls, group, positions=None, orientations=None):
        """
        Sets all particles' positions and orientations that belong to @group in the particles' parent frame
        Args:
            group (str): Group name whose particle positions and orientations should be set
            positions (n-array): (n, 3) per-particle (x,y,z) position
            orientations (n-array): (n, 4) per-particle (x,y,z,w) quaternion orientation
        """
        raise NotImplementedError
@classmethod
def _validate_group(cls, group):
"""
Checks if particle attachment group @group exists. (If not, can create the group via create_attachment_group).
This will raise a ValueError if it doesn't exist.
Args:
group (str): Name of the group to check for
"""
if group not in cls.groups:
raise ValueError(f"Particle attachment group {group} does not exist!")
class PhysicalParticleSystem(BaseSystem):
    """
    System whose generated particles are subject to physics
    """
    @classmethod
    def initialize(cls):
        # Run super first
        super().initialize()
        # Make sure min and max scale are identical (physical particles must be uniformly sized)
        assert np.all(cls.min_scale == cls.max_scale), \
            "Min and max scale should be identical for PhysicalParticleSystem!"
    @classproperty
    def particle_density(cls):
        """
        Returns:
            float: The per-particle density, in kg / m^3
        """
        raise NotImplementedError()
    @classproperty
    def particle_radius(cls):
        """
        Returns:
            float: Radius for the particles to be generated, for the purpose of sampling
        """
        raise NotImplementedError()
    @classproperty
    def particle_contact_radius(cls):
        """
        Returns:
            float: Contact radius for the particles to be generated, for the purpose of estimating contacts
        """
        raise NotImplementedError()
    @classproperty
    def particle_particle_rest_distance(cls):
        """
        Returns:
            The minimum distance between individual particles at rest (i.e. one diameter)
        """
        return cls.particle_radius * 2.0
@classmethod
def check_in_contact(cls, positions):
"""
Checks whether each particle specified by @particle_positions are in contact with any rigid body.
NOTE: This is a rough proxy for contact, given @positions. Should not be taken as ground truth.
This is because for efficiency and underlying physics reasons, it's easier to treat particles as spheres
for fast checking. For particles directly spawned from Omniverse's underlying ParticleSystem API, it is a
rough proxy semantically, though it is accurate in sim-physics since all spawned particles interact as spheres.
For particles spawned manually as rigid bodies, it is a rough proxy both semantically and physically, as the
object physically interacts with its non-uniform geometry.
Args:
positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
Returns:
n-array: (n_particles,) boolean array, True if in contact, otherwise False
"""
in_contact = np.zeros(len(positions), dtype=bool)
for idx, pos in enumerate(positions):
# TODO: Maybe multiply particle contact radius * 2?
in_contact[idx] = og.sim.psqi.overlap_sphere_any(cls.particle_contact_radius, pos)
return in_contact
@classmethod
def generate_particles_from_link(
cls,
obj,
link,
use_visual_meshes=True,
mesh_name_prefixes=None,
check_contact=True,
sampling_distance=None,
max_samples=None,
**kwargs,
):
"""
Generates a new particle instancer with unique identification number @idn, with particles sampled from the mesh
located at @mesh_prim_path, and registers it internally. This will also check for collision with other rigid
objects before spawning in individual particles
Args:
obj (EntityPrim): Object whose @link's visual meshes will be converted into sampled particles
link (RigidPrim): @obj's link whose visual meshes will be converted into sampled particles
use_visual_meshes (bool): Whether to use visual meshes of the link to generate particles
mesh_name_prefixes (None or str): If specified, specifies the substring that must exist in @link's
mesh names in order for that mesh to be included in the particle generator function.
If None, no filtering will be used.
check_contact (bool): If True, will only spawn in particles that do not collide with other rigid bodies
sampling_distance (None or float): If specified, sets the distance between sampled particles. If None,
a simulator autocomputed value will be used
max_samples (None or int): If specified, maximum number of particles to sample
**kwargs (dict): Any additional keyword-mapped arguments required by subclass implementation
"""
# Run sanity checks
assert cls.initialized, "Must initialize system before generating particle instancers!"
# Generate a checker function to see if particles are within the link's volumes
check_in_volume, _ = generate_points_in_volume_checker_function(
obj=obj,
volume_link=link,
use_visual_meshes=use_visual_meshes,
mesh_name_prefixes=mesh_name_prefixes,
)
# Grab the link's AABB (or fallback to obj AABB if link does not have a valid AABB),
# and generate a grid of points based on the sampling distance
try:
low, high = link.visual_aabb
extent = link.visual_aabb_extent
except ValueError:
low, high = obj.aabb
extent = obj.aabb_extent
# We sample the range of each extent minus
sampling_distance = 2 * cls.particle_radius if sampling_distance is None else sampling_distance
n_particles_per_axis = (extent / sampling_distance).astype(int)
assert np.all(n_particles_per_axis), f"link {link.name} is too small to sample any particle of radius {cls.particle_radius}."
# 1e-10 is added because the extent might be an exact multiple of particle radius
arrs = [np.arange(l + cls.particle_radius, h - cls.particle_radius + 1e-10, cls.particle_particle_rest_distance)
for l, h, n in zip(low, high, n_particles_per_axis)]
# Generate 3D-rectangular grid of points
particle_positions = np.stack([arr.flatten() for arr in np.meshgrid(*arrs)]).T
# Check which points are inside the volume and only keep those
particle_positions = particle_positions[np.where(check_in_volume(particle_positions))[0]]
# Also prune any that in contact with anything if requested
if check_contact:
particle_positions = particle_positions[np.where(cls.check_in_contact(particle_positions) == 0)[0]]
# Also potentially sub-sample if we're past our limit
if max_samples is not None and len(particle_positions) > max_samples:
particle_positions = particle_positions[
np.random.choice(len(particle_positions), size=(int(max_samples),), replace=False)]
return cls.generate_particles(
positions=particle_positions,
**kwargs,
)
@classmethod
def generate_particles_on_object(
cls,
obj,
sampling_distance=None,
max_samples=None,
min_samples_for_success=1,
**kwargs,
):
"""
Generates @n_particles new particle objects and samples their locations on the top surface of object @obj
Args:
obj (BaseObject): Object on which to generate a particle instancer with sampled particles on the object's
top surface
sampling_distance (None or float): If specified, sets the distance between sampled particles. If None,
a simulator autocomputed value will be used
max_samples (None or int): If specified, maximum number of particles to sample
min_samples_for_success (int): Minimum number of particles required to be sampled successfully in order
for this generation process to be considered successful
**kwargs (dict): Any additional keyword-mapped arguments required by subclass implementation
Returns:
bool: True if enough particles were generated successfully (number of successfully sampled points >=
min_samples_for_success), otherwise False
"""
assert max_samples >= min_samples_for_success, "number of particles to sample should exceed the min for success"
# We densely sample a grid of points by ray-casting from top to bottom to find the valid positions
radius = cls.particle_radius
results = sample_cuboid_on_object_full_grid_topdown(
obj,
# the grid is fully dense - particles are sitting next to each other
ray_spacing=radius * 2 if sampling_distance is None else sampling_distance,
# assume the particles are extremely small - sample cuboids of size 0 for better performance
cuboid_dimensions=np.zeros(3),
# raycast start inside the aabb in x-y plane and outside the aabb in the z-axis
aabb_offset=np.array([-radius, -radius, radius]),
# bottom padding should be the same as the particle radius
cuboid_bottom_padding=radius,
# undo_cuboid_bottom_padding should be False - the sampled positions are above the surface by its radius
undo_cuboid_bottom_padding=False,
)
particle_positions = np.array([result[0] for result in results if result[0] is not None])
# Also potentially sub-sample if we're past our limit
if max_samples is not None and len(particle_positions) > max_samples:
particle_positions = particle_positions[
np.random.choice(len(particle_positions), size=(max_samples,), replace=False)]
n_particles = len(particle_positions)
success = n_particles >= min_samples_for_success
# If we generated a sufficient number of points, generate them in the simulator
if success:
cls.generate_particles(
positions=particle_positions,
**kwargs,
)
return success
def _create_system_from_metadata(system_name):
    """
    Internal helper function to programmatically create a system from dataset metadata

    NOTE: This only creates the system, and does NOT initialize the system

    Args:
        system_name (str): Name of the system to create, e.g.: "water", "stain", etc.

    Returns:
        BaseSystem: Created system class
    """
    # Avoid circular imports
    from omnigibson import systems

    # Search for the appropriate system, if not found, fallback
    # TODO: Once dataset is fully constructed, DON'T fallback, and assert False instead
    all_systems = set(get_all_system_categories())
    if system_name not in all_systems:
        # Use default config -- assume @system_name is a fluid that uses the same params as water
        return systems.FluidSystem.create(
            name=system_name.replace("-", "_"),
            particle_contact_offset=0.012,
            particle_density=500.0,
            is_viscous=False,
            material_mtl_name="DeepWater",
        )
    else:
        """
        This is not defined yet, but one proposal:

        Metadata = .json dict, with format:
            {
                "type": one of {"visual", "fluid", "granular"},
            }
        if visual, include:
            "relative_particle_scaling" : ...,

        if visual or granular, also includes:

            --> note: create_particle_template should be deterministic, configured via:
                lambda prim_path, name: og.objects.DatasetObject(
                    prim_path=prim_path,
                    name=name,
                    usd_path=os.path.join(gm.DATASET_PATH, "systems", system_name, f"{system_name}.usd"),
                    category=system_name,
                    visible=False,
                    fixed_base=False,
                    visual_only=True,
                    include_default_states=False,
                    abilities={},
                )

        if fluid / granular, also include:
            "particle_contact_offset": ...,
            "particle_density": ...,

        if fluid, also include:
            "is_viscous": bool
            "material_mtl_name": ...,       # Base material config to use
            "customize_particle_kwargs": {  # Maps property/ies from @MaterialPrim to value to set
                "opacity_constant": ...,
                "albedo_add": ...,
                "diffuse_color_constant": ...,
                ...,
            }

        --> This will be programmatically constructed into a function:
            def _customize_particle_material(mat: MaterialPrim): --> None
                for attr, val in metadata["customize_particle_kwargs"].items():
                    mat.__setattr__(attr, val)

        Then, compile the necessary kwargs and generate the requested system
        """
        # Parse the metadata .json sitting alongside the system's (optional) particle assets
        system_dir = os.path.join(gm.DATASET_PATH, "systems", system_name)
        with open(os.path.join(system_dir, "metadata.json"), "r") as f:
            metadata = json.load(f)
        system_type = metadata["type"]
        system_kwargs = dict(name=system_name)

        # Anything in the system dir other than metadata.json is treated as a particle asset model
        particle_assets = set(os.listdir(system_dir))
        particle_assets.remove("metadata.json")
        has_asset = len(particle_assets) > 0
        if has_asset:
            # Deterministically pick the first model alphabetically
            model = sorted(particle_assets)[0]
            asset_path = os.path.join(system_dir, model, "usd", f"{model}.usd")
        else:
            asset_path = None
        if not has_asset:
            if system_type == "macro_visual_particle":
                # Fallback to stain asset
                asset_path = os.path.join(gm.DATASET_PATH, "systems", "stain", "ahkjul", "usd", "ahkjul.usd")
                has_asset = True
        # Build a factory that produces the (deterministic) particle-template constructor; note that
        # asset-backed templates are USD objects, otherwise a small sphere primitive is used
        if has_asset:
            def generate_particle_template_fcn():
                return lambda prim_path, name: \
                    og.objects.USDObject(
                        prim_path=prim_path,
                        name=name,
                        usd_path=asset_path,
                        encrypted=True,
                        category=system_name,
                        visible=False,
                        fixed_base=True,
                        visual_only=True,
                        kinematic_only=True,
                        include_default_states=False,
                        abilities={},
                    )
        else:
            def generate_particle_template_fcn():
                return lambda prim_path, name: \
                    og.objects.PrimitiveObject(
                        prim_path=prim_path,
                        name=name,
                        primitive_type="Sphere",
                        category=system_name,
                        radius=0.015,
                        visible=False,
                        fixed_base=True,
                        visual_only=True,
                        kinematic_only=True,
                        include_default_states=False,
                        abilities={},
                    )

        # Closure factory: returns a function that applies the metadata-specified attribute overrides
        # (lists become np arrays) onto a material
        def generate_customize_particle_material_fcn(mat_kwargs):
            def customize_mat(mat):
                for attr, val in mat_kwargs.items():
                    setattr(mat, attr, np.array(val) if isinstance(val, list) else val)
            return customize_mat

        # Compile the per-type kwargs expected by the corresponding system class's create()
        if system_type == "macro_visual_particle":
            system_kwargs["create_particle_template"] = generate_particle_template_fcn()
            system_kwargs["scale_relative_to_parent"] = metadata["relative_particle_scaling"]
        elif system_type == "granular" or system_type == "macro_physical_particle":
            system_kwargs["create_particle_template"] = generate_particle_template_fcn()
            system_kwargs["particle_density"] = metadata["particle_density"]
        elif system_type == "fluid":
            system_kwargs["particle_contact_offset"] = metadata["particle_contact_offset"]
            system_kwargs["particle_density"] = metadata["particle_density"]
            system_kwargs["is_viscous"] = metadata["is_viscous"]
            system_kwargs["material_mtl_name"] = metadata["material_mtl_name"]
            system_kwargs["customize_particle_material"] = \
                generate_customize_particle_material_fcn(mat_kwargs=metadata["customize_material_kwargs"])
        else:
            raise ValueError(f"{system_name} system's type {system_type} is invalid! Must be one of "
                             f"{{ 'macro_visual_particle', 'macro_physical_particle', 'granular', or 'fluid' }}")

        # Generate the requested system, e.g.: "macro_visual_particle" --> MacroVisualParticleSystem
        system_cls = "".join([st.capitalize() for st in system_type.split("_")])
        return getattr(systems, f"{system_cls}System").create(**system_kwargs)
def import_og_systems():
    """
    Discovers all systems shipped under the dataset's "systems" directory and creates (but does not
    initialize) any that are not yet registered.
    """
    system_dir = os.path.join(gm.DATASET_PATH, "systems")
    # Nothing to do if the dataset ships no systems directory
    if not os.path.exists(system_dir):
        return
    for system_name in os.listdir(system_dir):
        if system_name not in REGISTERED_SYSTEMS:
            _create_system_from_metadata(system_name=system_name)
def is_system_active(system_name):
    """
    Checks whether the system named @system_name is registered AND has been initialized.

    Args:
        system_name (str): Name of the system to query

    Returns:
        bool: True if the system is registered and initialized, False otherwise
    """
    system = REGISTERED_SYSTEMS.get(system_name)
    return system is not None and system.initialized
def is_visual_particle_system(system_name):
    """
    Checks whether the registered system @system_name is a visual particle system.

    Args:
        system_name (str): Name of the system to query; must already be registered

    Returns:
        bool: True if the system subclasses VisualParticleSystem
    """
    assert system_name in REGISTERED_SYSTEMS, f"System {system_name} not in REGISTERED_SYSTEMS."
    return issubclass(REGISTERED_SYSTEMS[system_name], VisualParticleSystem)
def is_physical_particle_system(system_name):
    """
    Checks whether the registered system @system_name is a physical particle system.

    Args:
        system_name (str): Name of the system to query; must already be registered

    Returns:
        bool: True if the system subclasses PhysicalParticleSystem
    """
    assert system_name in REGISTERED_SYSTEMS, f"System {system_name} not in REGISTERED_SYSTEMS."
    return issubclass(REGISTERED_SYSTEMS[system_name], PhysicalParticleSystem)
def is_fluid_system(system_name):
    """
    Checks whether the registered system @system_name is a fluid system.

    Args:
        system_name (str): Name of the system to query; must already be registered

    Returns:
        bool: True if the system subclasses FluidSystem
    """
    assert system_name in REGISTERED_SYSTEMS, f"System {system_name} not in REGISTERED_SYSTEMS."
    # Imported lazily to avoid a circular import at module load time
    from omnigibson.systems.micro_particle_system import FluidSystem
    return issubclass(REGISTERED_SYSTEMS[system_name], FluidSystem)
def get_system(system_name, force_active=True):
    """
    Grabs the system named @system_name, creating it from dataset metadata if it is not yet registered.

    Args:
        system_name (str): Name of the system to fetch
        force_active (bool): If True, initializes the system if it has not been initialized yet

    Returns:
        BaseSystem: Requested system
    """
    # Make sure scene exists
    assert og.sim.scene is not None, "Cannot get systems until scene is imported!"
    if system_name in REGISTERED_SYSTEMS:
        system = REGISTERED_SYSTEMS[system_name]
    else:
        # Unregistered -- build it on the fly from its dataset metadata
        system = _create_system_from_metadata(system_name=system_name)
    if force_active and not system.initialized:
        system.initialize()
    return system
def clear_all_systems():
    """
    Clears all active systems and wipes all registered system-init / system-clear callbacks.
    """
    global _CALLBACKS_ON_SYSTEM_INIT, _CALLBACKS_ON_SYSTEM_CLEAR
    _CALLBACKS_ON_SYSTEM_INIT = dict()
    _CALLBACKS_ON_SYSTEM_CLEAR = dict()
    # Iterate over a snapshot: clearing a system may remove it from the registry, and mutating a
    # collection while iterating it is unsafe
    for system in tuple(SYSTEM_REGISTRY.objects):
        system.clear()
def add_callback_on_system_init(name, callback):
    """
    Registers callback @callback under key @name, to be run whenever a system is initialized.

    Args:
        name (str): Unique key under which to store the callback
        callback (function): Callback to run on system initialization
    """
    # Item assignment mutates the module-level dict in place, so no `global` declaration is needed
    _CALLBACKS_ON_SYSTEM_INIT[name] = callback
def add_callback_on_system_clear(name, callback):
    """
    Registers callback @callback under key @name, to be run whenever a system is cleared.

    Args:
        name (str): Unique key under which to store the callback
        callback (function): Callback to run on system clear
    """
    # Item assignment mutates the module-level dict in place, so no `global` declaration is needed
    _CALLBACKS_ON_SYSTEM_CLEAR[name] = callback
def remove_callback_on_system_init(name):
    """
    Deregisters the system-init callback previously stored under key @name.

    Args:
        name (str): Key of the callback to remove; raises KeyError if not registered
    """
    # del mutates the module-level dict in place, so no `global` declaration is needed
    del _CALLBACKS_ON_SYSTEM_INIT[name]
def remove_callback_on_system_clear(name):
    """
    Deregisters the system-clear callback previously stored under key @name.

    Args:
        name (str): Key of the callback to remove; raises KeyError if not registered
    """
    # del mutates the module-level dict in place, so no `global` declaration is needed
    del _CALLBACKS_ON_SYSTEM_CLEAR[name]
| 51,397 | Python | 39.092044 | 151 | 0.623694 |
StanfordVL/OmniGibson/omnigibson/systems/__init__.py | from omnigibson.systems.system_base import get_system, is_system_active, is_visual_particle_system, \
is_physical_particle_system, SYSTEM_REGISTRY, add_callback_on_system_init, add_callback_on_system_clear, \
remove_callback_on_system_init, remove_callback_on_system_clear, import_og_systems
from omnigibson.systems.micro_particle_system import *
from omnigibson.systems.macro_particle_system import *
# Import all OG systems from dataset
import_og_systems()
| 468 | Python | 51.111105 | 110 | 0.792735 |
StanfordVL/OmniGibson/omnigibson/systems/micro_particle_system.py | import uuid
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm, create_module_macros
from omnigibson.prims.prim_base import BasePrim
from omnigibson.prims.geom_prim import VisualGeomPrim
from omnigibson.prims.material_prim import MaterialPrim
from omnigibson.systems.system_base import BaseSystem, PhysicalParticleSystem, REGISTERED_SYSTEMS
from omnigibson.utils.geometry_utils import generate_points_in_volume_checker_function
from omnigibson.utils.python_utils import classproperty, assert_valid_key, subclass_factory, snake_case_to_camel_case
from omnigibson.utils.sampling_utils import sample_cuboid_on_object_full_grid_topdown
from omnigibson.utils.usd_utils import mesh_prim_to_trimesh_mesh, PoseAPI
from omnigibson.utils.physx_utils import create_physx_particle_system, create_physx_particleset_pointinstancer
from omnigibson.utils.ui_utils import disclaimer, create_module_logger
from pathlib import Path
import os
import tempfile
import datetime
import trimesh
import pymeshlab
import numpy as np
from collections import defaultdict
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
# TODO: Tune these default values!
# TODO (eric): figure out whether one offset can fit all
m.MAX_CLOTH_PARTICLES = 20000 # Comes from a limitation in physx - do not increase
m.CLOTH_PARTICLE_CONTACT_OFFSET = 0.0075
m.CLOTH_REMESHING_ERROR_THRESHOLD = 0.05
m.CLOTH_STRETCH_STIFFNESS = 10000.0
m.CLOTH_BEND_STIFFNESS = 200.0
m.CLOTH_SHEAR_STIFFNESS = 100.0
m.CLOTH_DAMPING = 0.2
m.CLOTH_FRICTION = 0.4
m.CLOTH_DRAG = 0.001
m.CLOTH_LIFT = 0.003
m.MIN_PARTICLE_CONTACT_OFFSET = 0.005 # Minimum particle contact offset for physical micro particles
m.FLUID_PARTICLE_PARTICLE_DISTANCE_SCALE = 0.8 # How much overlap expected between fluid particles at rest
m.MICRO_PARTICLE_SYSTEM_MAX_VELOCITY = None # If set, the maximum particle velocity for micro particle systems
def set_carb_settings_for_fluid_isosurface():
    """
    Sets relevant rendering settings in the carb settings in order to use isosurface effectively
    """
    # Settings for Isosurface
    isregistry = lazy.carb.settings.acquire_settings_interface()
    # disable grid and lights (bits 6 and 8 of the viewport display-options bitmask)
    dOptions = isregistry.get_as_int("persistent/app/viewport/displayOptions")
    dOptions &= ~(1 << 6 | 1 << 8)
    isregistry.set_int("persistent/app/viewport/displayOptions", dOptions)
    # Push physx particle / velocity state back to USD each step so the renderer sees it
    isregistry.set_bool(lazy.omni.physx.bindings._physx.SETTING_UPDATE_TO_USD, True)
    isregistry.set_int(lazy.omni.physx.bindings._physx.SETTING_NUM_THREADS, 8)
    isregistry.set_bool(lazy.omni.physx.bindings._physx.SETTING_UPDATE_VELOCITIES_TO_USD, True)
    isregistry.set_bool(lazy.omni.physx.bindings._physx.SETTING_UPDATE_PARTICLES_TO_USD, True)  # TODO: Why does setting this value --> True result in no isosurface being rendered?
    isregistry.set_int("persistent/simulation/minFrameRate", 60)
    # Path-tracing / denoiser tweaks for fluid rendering quality -- values taken from omni presets
    isregistry.set_bool("rtx-defaults/pathtracing/lightcache/cached/enabled", False)
    isregistry.set_bool("rtx-defaults/pathtracing/cached/enabled", False)
    isregistry.set_int("rtx-defaults/pathtracing/fireflyFilter/maxIntensityPerSample", 10000)
    isregistry.set_int("rtx-defaults/pathtracing/fireflyFilter/maxIntensityPerSampleDiffuse", 50000)
    isregistry.set_float("rtx-defaults/pathtracing/optixDenoiser/blendFactor", 0.09)
    isregistry.set_int("rtx-defaults/pathtracing/aa/op", 2)
    isregistry.set_int("rtx-defaults/pathtracing/maxBounces", 32)
    isregistry.set_int("rtx-defaults/pathtracing/maxSpecularAndTransmissionBounces", 16)
    isregistry.set_int("rtx-defaults/post/dlss/execMode", 1)
    isregistry.set_int("rtx-defaults/translucency/maxRefractionBounces", 12)
class PhysxParticleInstancer(BasePrim):
    """
    Simple class that wraps the raw omniverse point instancer prim and provides convenience functions for
    particle access
    """
    def __init__(self, prim_path, name, idn):
        """
        Args:
            prim_path (str): prim path of the Prim to encapsulate or create.
            name (str): Name for the object. Names need to be unique per scene.
            idn (int): Unique identification number to assign to this particle instancer. This is used to
                deterministically reproduce individual particle instancer states dynamically, even if we
                delete / add additional ones at runtime during simulation.
        """
        # Store inputs
        self._idn = idn

        # Run super method directly
        super().__init__(prim_path=prim_path, name=name)

        # Also wrap the parent prim so it can be removed together with this instancer
        self._parent_prim = BasePrim(prim_path=self.prim.GetParent().GetPath().pathString, name=f"{name}_parent")

    def _load(self):
        # We raise an error, this should NOT be created from scratch
        raise NotImplementedError("PhysxPointInstancer should NOT be loaded via this class! Should be created before.")

    def remove(self):
        # Remove this prim, then its wrapped parent prim
        super().remove()
        self._parent_prim.remove()

    def add_particles(
        self,
        positions,
        velocities=None,
        orientations=None,
        scales=None,
        prototype_indices=None,
    ):
        """
        Adds particles to this particle instancer.

        Args:
            positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
            velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
                If not specified, all will be set to 0
            orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
                orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
            scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
                If not specified, will be scale [1, 1, 1] by default
            prototype_indices (None or list of int): If specified, should specify which prototype should be used for
                each particle. If None, will use all 0s (i.e.: the first prototype created)
        """
        n_new_particles = len(positions)

        # Fill in defaults for any unspecified per-particle quantity
        velocities = np.zeros((n_new_particles, 3)) if velocities is None else velocities
        if orientations is None:
            orientations = np.zeros((n_new_particles, 4))
            orientations[:, -1] = 1.0
        # NOTE: original multiplied by an extra np.ones((1, 3)) here, which is a no-op -- dropped
        scales = np.ones((n_new_particles, 3)) if scales is None else scales
        prototype_indices = np.zeros(n_new_particles, dtype=int) if prototype_indices is None else prototype_indices

        # Append onto the existing per-particle arrays
        self.particle_positions = np.vstack([self.particle_positions, positions])
        self.particle_velocities = np.vstack([self.particle_velocities, velocities])
        self.particle_orientations = np.vstack([self.particle_orientations, orientations])
        self.particle_scales = np.vstack([self.particle_scales, scales])
        self.particle_prototype_ids = np.hstack([self.particle_prototype_ids, prototype_indices])

    def remove_particles(self, idxs):
        """
        Remove particles from this instancer, specified by their indices @idxs in the data array

        Args:
            idxs (list or np.array of int): IDs corresponding to the indices of specific particles to remove from this
                instancer
        """
        if len(idxs) > 0:
            # Remove all requested indices and write to all the internal data arrays
            self.particle_positions = np.delete(self.particle_positions, idxs, axis=0)
            self.particle_velocities = np.delete(self.particle_velocities, idxs, axis=0)
            self.particle_orientations = np.delete(self.particle_orientations, idxs, axis=0)
            self.particle_scales = np.delete(self.particle_scales, idxs, axis=0)
            self.particle_prototype_ids = np.delete(self.particle_prototype_ids, idxs, axis=0)

    def remove_all_particles(self):
        """
        Removes every particle currently owned by this instancer
        """
        self.remove_particles(idxs=np.arange(self.n_particles))

    @property
    def n_particles(self):
        """
        Returns:
            int: Number of particles owned by this instancer
        """
        return len(self.particle_positions)

    @property
    def idn(self):
        """
        Returns:
            int: Identification number of this particle instancer
        """
        return self._idn

    @property
    def particle_group(self):
        """
        Returns:
            int: Particle group this instancer belongs to
        """
        return self.get_attribute(attr="physxParticle:particleGroup")

    @particle_group.setter
    def particle_group(self, group):
        """
        Args:
            group (int): Particle group this instancer belongs to
        """
        self.set_attribute(attr="physxParticle:particleGroup", val=group)

    @property
    def particle_positions(self):
        """
        Returns:
            np.array: (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
                cartesian coordinates relative to this instancer's parent prim
        """
        return np.array(self.get_attribute(attr="positions"))

    @particle_positions.setter
    def particle_positions(self, pos):
        """
        Set the particle positions for this instancer

        NOTE: Deliberately no size assert here -- this setter is the one that resizes the particle set
        (see add_particles / remove_particles).

        Args:
            np.array: (N, 3) numpy array, where each of the N particles' desired positions are expressed in (x,y,z)
                cartesian coordinates relative to this instancer's parent prim
        """
        self.set_attribute(attr="positions", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(pos.astype(float)))

    @property
    def particle_orientations(self):
        """
        Returns:
            np.array: (N, 4) numpy array, where each of the N particles' orientations are expressed in (x,y,z,w)
                quaternion coordinates relative to this instancer's parent prim
        """
        return np.array(self.get_attribute(attr="orientations"))

    @particle_orientations.setter
    def particle_orientations(self, quat):
        """
        Set the particle orientations for this instancer

        Args:
            np.array: (N, 4) numpy array, where each of the N particles' desired orientations are expressed in (x,y,z,w)
                quaternion coordinates relative to this instancer's parent prim
        """
        assert quat.shape[0] == self.n_particles, \
            f"Got mismatch in particle setting size: {quat.shape[0]}, vs. number of particles {self.n_particles}!"
        # If the number of particles is nonzero, swap w position, since Quath takes (w,x,y,z)
        quat = quat.astype(float)
        if self.n_particles > 0:
            quat = quat[:, [3, 0, 1, 2]]
        self.set_attribute(attr="orientations", val=lazy.pxr.Vt.QuathArray.FromNumpy(quat))

    @property
    def particle_velocities(self):
        """
        Returns:
            np.array: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
                cartesian coordinates relative to this instancer's parent prim
        """
        return np.array(self.get_attribute(attr="velocities"))

    @particle_velocities.setter
    def particle_velocities(self, vel):
        """
        Set the particle velocities for this instancer

        Args:
            np.array: (N, 3) numpy array, where each of the N particles' desired velocities are expressed in (x,y,z)
                cartesian coordinates relative to this instancer's parent prim
        """
        assert vel.shape[0] == self.n_particles, \
            f"Got mismatch in particle setting size: {vel.shape[0]}, vs. number of particles {self.n_particles}!"
        self.set_attribute(attr="velocities", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(vel.astype(float)))

    @property
    def particle_scales(self):
        """
        Returns:
            np.array: (N, 3) numpy array, where each of the N particles' scales are expressed in (x,y,z)
                cartesian coordinates relative to this instancer's parent prim
        """
        return np.array(self.get_attribute(attr="scales"))

    @particle_scales.setter
    def particle_scales(self, scales):
        """
        Set the particle scales for this instancer

        Args:
            np.array: (N, 3) numpy array, where each of the N particles' desired scales are expressed in (x,y,z)
                cartesian coordinates relative to this instancer's parent prim
        """
        assert scales.shape[0] == self.n_particles, \
            f"Got mismatch in particle setting size: {scales.shape[0]}, vs. number of particles {self.n_particles}!"
        self.set_attribute(attr="scales", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(scales.astype(float)))

    @property
    def particle_prototype_ids(self):
        """
        Returns:
            np.array: (N,) numpy array, where each of the N particles' prototype_id (i.e.: which prototype is being used
                for that particle)
        """
        return np.array(self.get_attribute(attr="protoIndices"))

    @particle_prototype_ids.setter
    def particle_prototype_ids(self, prototype_ids):
        """
        Set the particle prototype_ids for this instancer

        Args:
            np.array: (N,) numpy array, where each of the N particles' desired prototype_id
                (i.e.: which prototype is being used for that particle)
        """
        assert prototype_ids.shape[0] == self.n_particles, \
            f"Got mismatch in particle setting size: {prototype_ids.shape[0]}, vs. number of particles {self.n_particles}!"
        self.set_attribute(attr="protoIndices", val=prototype_ids.astype(np.int32))

    @property
    def state_size(self):
        # idn (1), particle_group (1), n_particles (1), and the corresponding states for each particle
        # N * (pos (3) + vel (3) + orn (4) + scale (3) + prototype_id (1))
        return 3 + self.n_particles * 14

    def _dump_state(self):
        return dict(
            idn=self._idn,
            particle_group=self.particle_group,
            n_particles=self.n_particles,
            particle_positions=self.particle_positions,
            particle_velocities=self.particle_velocities,
            particle_orientations=self.particle_orientations,
            particle_scales=self.particle_scales,
            particle_prototype_ids=self.particle_prototype_ids,
        )

    def _load_state(self, state):
        # Sanity check the identification number and particle group
        assert self._idn == state["idn"], f"Got mismatch in identification number for this particle instancer when " \
            f"loading state! Should be: {self._idn}, got: {state['idn']}."
        assert self.particle_group == state["particle_group"], f"Got mismatch in particle group for this particle " \
            f"instancer when loading state! Should be: {self.particle_group}, got: {state['particle_group']}."

        # Set values appropriately
        keys = ("particle_positions", "particle_velocities", "particle_orientations", "particle_scales", "particle_prototype_ids")
        for key in keys:
            # Make sure the loaded state is a numpy array, it could have been accidentally casted into a list during
            # JSON-serialization
            val = np.array(state[key]) if not isinstance(state[key], np.ndarray) else state[key]
            setattr(self, key, val)

    def _serialize(self, state):
        # Compress into a 1D array
        return np.concatenate([
            [state["idn"], state["particle_group"], state["n_particles"]],
            state["particle_positions"].reshape(-1),
            state["particle_velocities"].reshape(-1),
            state["particle_orientations"].reshape(-1),
            state["particle_scales"].reshape(-1),
            state["particle_prototype_ids"],
        ]).astype(float)

    def _deserialize(self, state):
        # Sanity check the identification number
        assert self._idn == state[0], f"Got mismatch in identification number for this particle instancer when " \
            f"deserializing state! Should be: {self._idn}, got: {state[0]}."
        assert self.particle_group == state[1], f"Got mismatch in particle group for this particle " \
            f"instancer when deserializing state! Should be: {self.particle_group}, got: {state[1]}."

        # De-compress from 1D array
        n_particles = int(state[2])
        state_dict = dict(
            idn=int(state[0]),
            particle_group=int(state[1]),
            n_particles=n_particles,
        )

        # Process remaining keys and reshape automatically
        keys = ("particle_positions", "particle_velocities", "particle_orientations", "particle_scales", "particle_prototype_ids")
        sizes = ((n_particles, 3), (n_particles, 3), (n_particles, 4), (n_particles, 3), (n_particles,))

        idx = 3
        for key, size in zip(keys, sizes):
            # np.prod, not the deprecated np.product alias (removed in NumPy 2.0)
            length = np.prod(size)
            state_dict[key] = state[idx: idx + length].reshape(size)
            idx += length

        return state_dict, idx
class MicroParticleSystem(BaseSystem):
    """
    Global system for modeling "micro" level particles, e.g.: water, seeds, cloth. This system leverages
    Omniverse's native physx particle systems
    """
    # Particle system prim in the scene, should be generated at runtime
    system_prim = None

    # Material -- MaterialPrim associated with this particle system
    _material = None

    # Color of the generated material. Default is white [1.0, 1.0, 1.0]
    # (NOTE: external queries should call cls.color)
    _color = np.array([1.0, 1.0, 1.0])

    @classmethod
    def initialize(cls):
        """
        Initializes this system: creates the physx particle system prim, then loads, binds, and customizes
        its material. Requires gm.USE_GPU_DYNAMICS.
        """
        # Run super first
        super().initialize()

        # Run sanity checks
        if not gm.USE_GPU_DYNAMICS:
            raise ValueError(f"Failed to initialize {cls.name} system. Please set gm.USE_GPU_DYNAMICS to be True.")

        # Make sure flatcache is not being used OR isosurface is enabled -- otherwise, raise an error, since
        # non-isosurface particles don't get rendered properly when flatcache is enabled
        assert cls.use_isosurface or not gm.ENABLE_FLATCACHE, \
            f"Cannot use flatcache with MicroParticleSystem {cls.name} when no isosurface is used!"

        cls.system_prim = cls._create_particle_system()

        # Get material
        material = cls._get_particle_material_template()
        # Load the material if it's newly created and has never been loaded before
        if not material.loaded:
            material.load()
        material.add_user(cls)
        cls._material = material
        # Bind the material to the particle system (for isosurface) and the prototypes (for non-isosurface)
        cls._material.bind(cls.system_prim_path)
        # Also apply physics to this material
        lazy.omni.physx.scripts.particleUtils.add_pbd_particle_material(og.sim.stage, cls.mat_path, **cls._pbd_material_kwargs)
        # Force populate inputs and outputs of the shader
        cls._material.shader_force_populate()
        # Potentially modify the material
        cls._customize_particle_material()

    @classmethod
    def _clear(cls):
        # Detach from the material before the generic teardown, then reset all class-level state
        cls._material.remove_user(cls)

        super()._clear()

        cls.system_prim = None
        cls._material = None
        cls._color = np.array([1.0, 1.0, 1.0])

    @classproperty
    def particle_radius(cls):
        # Magic number from omni tutorials
        # See https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html#offset-autocomputation
        # Also https://nvidia-omniverse.github.io/PhysX/physx/5.1.3/docs/ParticleSystem.html#particle-system-configuration
        return 0.99 * cls.particle_contact_offset

    @classproperty
    def color(cls):
        """
        Returns:
            3-array: (R,G,B) color of this system's particles; defaults to white [1.0, 1.0, 1.0] unless a
                subclass or material customization has overridden cls._color
        """
        return cls._color

    @classproperty
    def material(cls):
        """
        Returns:
            None or MaterialPrim: Material bound to this system's particles, if initialized
        """
        return cls._material

    @classproperty
    def mat_path(cls):
        """
        Returns:
            str: Path to this system's material in the scene stage
        """
        return f"{cls.prim_path}/material"

    @classproperty
    def mat_name(cls):
        """
        Returns:
            str: Name of this system's material
        """
        return f"{cls.name}:material"

    @classproperty
    def _pbd_material_kwargs(cls):
        """
        Returns:
            dict: Any PBD material kwargs to pass to the PBD material method particleUtils.add_pbd_particle_material
                used to define physical properties associated with this particle system
        """
        # Default is empty dictionary
        return dict()

    @classmethod
    def _get_particle_material_template(cls):
        """
        Creates the particle material template to be used for this particle system. Prim path does not matter,
        as it will be overridden internally such that it is a child prim of this particle system's prim.

        NOTE: This material is a template because it is loading an Omni material preset. It can then be customized (in
        addition to modifying its physical material properties) via @_customize_particle_material

        Returns:
            MaterialPrim: The material to apply to all particles
        """
        # Default is PBR material
        return MaterialPrim.get_material(
            prim_path=cls.mat_path,
            name=cls.mat_name,
            load_config={
                "mdl_name": f"OmniPBR.mdl",
                "mtl_name": f"OmniPBR",
            }
        )

    @classmethod
    def _customize_particle_material(cls):
        """
        Modifies this particle system's particle material once it is loaded. Default is a no-op
        """
        pass

    @classproperty
    def system_prim_path(cls):
        # Child prim of this system's prim that holds the actual physx particle system
        return f"{cls.prim_path}/system"

    @classproperty
    def visual_only(cls):
        """
        Returns:
            bool: Whether this particle system should be visual-only, i.e.: not subject to collisions and physics. If True,
                the generated particles will not move or collide
        """
        return False

    @classproperty
    def particle_contact_offset(cls):
        """
        Returns:
            float: Contact offset value to use for this particle system.
                See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics.html?highlight=isosurface#particle-system-configuration
                for more information
        """
        # Subclasses must define their own contact offset
        raise NotImplementedError()

    @classproperty
    def use_smoothing(cls):
        """
        Returns:
            bool: Whether to use smoothing or not for this particle system.
                See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics.html?highlight=isosurface#smoothing
                for more information
        """
        return False

    @classproperty
    def use_anisotropy(cls):
        """
        Returns:
            bool: Whether to use anisotropy or not for this particle system.
                See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics.html?highlight=isosurface#anisotropy
                for more information
        """
        return False

    @classproperty
    def use_isosurface(cls):
        """
        Returns:
            bool: Whether to use isosurface or not for this particle system.
                See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics.html?highlight=isosurface#isosurface
                for more information
        """
        return False

    @classmethod
    def _create_particle_system(cls):
        """
        Creates the single, global particle system. This should only be ever called once, and during initialize()

        Returns:
            Usd.Prim: Particle system prim created
        """
        # Smoothing / anisotropy / isosurface are only enabled when HQ rendering is also on
        return create_physx_particle_system(
            prim_path=cls.system_prim_path,
            physics_scene_path=og.sim.get_physics_context().get_current_physics_scene_prim().GetPrimPath().pathString,
            particle_contact_offset=cls.particle_contact_offset,
            visual_only=cls.visual_only,
            smoothing=cls.use_smoothing and gm.ENABLE_HQ_RENDERING,
            anisotropy=cls.use_anisotropy and gm.ENABLE_HQ_RENDERING,
            isosurface=cls.use_isosurface and gm.ENABLE_HQ_RENDERING,
        ).GetPrim()
class MicroPhysicalParticleSystem(MicroParticleSystem, PhysicalParticleSystem):
"""
Global system for modeling physical "micro" level particles, e.g.: water, seeds, rice, etc. This system leverages
Omniverse's native physx particle systems
"""
# Particle prototypes -- will be list of mesh prims to use as particle prototypes for this system
particle_prototypes = None
# Particle instancers -- maps name to particle instancer prims (dict)
particle_instancers = None
@classproperty
def n_particles(cls):
return sum([instancer.n_particles for instancer in cls.particle_instancers.values()])
    @classproperty
    def n_instancers(cls):
        """
        Returns:
            int: Number of active particle instancers in this system
        """
        return len(cls.particle_instancers)
    @classproperty
    def instancer_idns(cls):
        """
        Returns:
            list of int: Identification numbers of all active particle instancers in this system
        """
        return [inst.idn for inst in cls.particle_instancers.values()]
@classproperty
def self_collision(cls):
"""
Returns:
bool: Whether this system's particle should have self collisions enabled or not
"""
# Default is True
return True
@classmethod
def _sync_particle_prototype_ids(cls):
    """
    Synchronizes the particle prototype IDs across all particle instancers when sim is stopped.
    Omniverse has a bug where all particle positions, orientations, velocities, and scales are correctly reset
    when sim is stopped, but not the prototype IDs. This function is a workaround for that.
    """
    if cls.initialized:
        # Force every instancer's prototype IDs back to all-zeros (the assumed default prototype)
        for instancer in cls.particle_instancers.values():
            instancer.particle_prototype_ids = np.zeros(instancer.n_particles, dtype=np.int32)
@classmethod
def initialize(cls):
    """
    Initializes this system: creates particle prototypes, runs the base initialization, and registers
    the stop-time prototype-ID sync workaround.
    """
    # Create prototype before running super!
    cls.particle_prototypes = cls._create_particle_prototypes()
    # Run super
    super().initialize()
    # Potentially set system prim's max velocity value
    if m.MICRO_PARTICLE_SYSTEM_MAX_VELOCITY is not None:
        cls.system_prim.GetProperty("maxVelocity").Set(m.MICRO_PARTICLE_SYSTEM_MAX_VELOCITY)
    # Initialize class variables that are mutable so they don't get overridden by children classes
    cls.particle_instancers = dict()
    # TODO: remove this hack once omniverse fixes the issue (now we assume prototype IDs are all 0 always)
    og.sim.add_callback_on_stop(name=f"{cls.name}_sync_particle_prototype_ids", callback=cls._sync_particle_prototype_ids)

@classmethod
def _clear(cls):
    # Delete the prototype prims before tearing down the rest of the system state
    for prototype in cls.particle_prototypes:
        og.sim.remove_prim(prototype)
    super()._clear()
    # Drop references so a subsequent initialize() starts from a clean slate
    cls.particle_prototypes = None
    cls.particle_instancers = None
@classproperty
def next_available_instancer_idn(cls):
    """
    Returns:
        int: The next available (currently unused) instancer identification number, based on the
            current set of active instancers
    """
    if cls.n_instancers == 0:
        return cls.default_instancer_idn
    else:
        # Scan [0, max(idns) + 1] inclusive -- at least one value in that range must be unused
        for idn in range(max(cls.instancer_idns) + 2):
            if idn not in cls.instancer_idns:
                return idn

@classproperty
def default_instancer_idn(cls):
    """
    Returns:
        int: Identification number assigned to the default particle instancer
    """
    return 0

@classproperty
def state_size(cls):
    # We have the number of particle instancers (1); per-instancer idn, particle group, and
    # particle count (3n); and the corresponding states in each instancer (X)
    return 1 + 3 * len(cls.particle_instancers) + sum(inst.state_size for inst in cls.particle_instancers.values())
@classproperty
def default_particle_instancer(cls):
    """
    Returns:
        PhysxParticleInstancer: Default particle instancer for this particle system
    """
    # Default instancer is the 0th ID instancer
    name = cls.particle_instancer_idn_to_name(idn=cls.default_instancer_idn)
    # NOTE: Cannot use dict.get() call for some reason; it messes up IDE introspection
    # Lazily create an empty default instancer if it does not exist yet
    return cls.particle_instancers[name] if name in cls.particle_instancers \
        else cls.generate_particle_instancer(n_particles=0, idn=cls.default_instancer_idn)

@classproperty
def particle_contact_radius(cls):
    # This is simply the contact offset
    return cls.particle_contact_offset

@classproperty
def is_fluid(cls):
    """
    Returns:
        bool: Whether this system is modeling fluid or not
    """
    # Must be implemented by subclasses (FluidSystem -> True, GranularSystem -> False)
    raise NotImplementedError()

@classmethod
def _create_particle_prototypes(cls):
    """
    Creates any relevant particle prototypes to be used by this particle system.
    Returns:
        list of VisualGeomPrim: Visual mesh prim(s) to use as this system's particle prototype(s)
    """
    raise NotImplementedError()
@classmethod
def remove_particles(
    cls,
    idxs,
    instancer_idn=None,
):
    """
    Removes pre-existing particles from instancer @instancer_idn
    Args:
        idxs (np.array): (n_particles,) shaped array specifying IDs of particles to delete
        instancer_idn (None or int): Unique identification number of the particle instancer to delete the particles
            from. If None, this system will delete particles from the default particle instancer
    """
    # Resolve the target instancer: the default one if no idn is given, else look it up by its
    # standardized name
    inst = cls.default_particle_instancer if instancer_idn is None else \
        cls.particle_instancers.get(cls.particle_instancer_idn_to_name(idn=instancer_idn), None)
    # Bug fix: the error message previously interpolated @inst (always None when this fires)
    # rather than the requested idn
    assert inst is not None, f"No instancer with ID {instancer_idn} exists!"
    inst.remove_particles(idxs=idxs)
@classmethod
def generate_particles(
    cls,
    positions,
    instancer_idn=None,
    particle_group=0,
    velocities=None,
    orientations=None,
    scales=None,
    prototype_indices=None,
):
    """
    Generates new particles, either as part of a pre-existing instancer corresponding to @instancer_idn or as part
    of a newly generated instancer.
    Args:
        positions (np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions
        instancer_idn (None or int): Unique identification number of the particle instancer to assign the generated
            particles to. This is used to deterministically reproduce individual particle instancer states
            dynamically, even if we delete / add additional ones at runtime during simulation. If there is no
            active instancer that matches the requested idn, a new one will be created.
            If None, this system will add particles to the default particle instancer
        particle_group (int): ID for this particle set. Particles from different groups will automatically collide
            with each other. Particles in the same group will have collision behavior dictated by
            @cls.self_collision
        velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
            If not specified, all will be set to 0
        orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
            orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
        scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
            If not specified, will be uniformly randomly sampled from (cls.min_scale, cls.max_scale)
        prototype_indices (None or list of int): If specified, should specify which prototype should be used for
            each particle. If None, will randomly sample from all available prototypes
    Returns:
        PhysxParticleInstancer: Particle instancer that includes the generated particles
    """
    # Create a new particle instancer if a new idn is requested, otherwise use the pre-existing one
    inst = cls.default_particle_instancer if instancer_idn is None else \
        cls.particle_instancers.get(cls.particle_instancer_idn_to_name(idn=instancer_idn), None)
    n_particles = len(positions)
    # Broadcast a scalar prototype index to all particles, or sample uniformly if unspecified
    if prototype_indices is not None:
        prototype_indices = np.ones(n_particles, dtype=int) * prototype_indices if \
            isinstance(prototype_indices, int) else np.array(prototype_indices, dtype=int)
    else:
        prototype_indices = np.random.choice(np.arange(len(cls.particle_prototypes)), size=(n_particles,))
    if inst is None:
        # Requested idn does not exist yet -- create a fresh instancer holding these particles
        # Consistency fix: reuse the precomputed @n_particles instead of recomputing len(positions)
        inst = cls.generate_particle_instancer(
            idn=instancer_idn,
            particle_group=particle_group,
            n_particles=n_particles,
            positions=positions,
            velocities=velocities,
            orientations=orientations,
            scales=scales,
            prototype_indices=prototype_indices,
        )
    else:
        inst.add_particles(
            positions=positions,
            velocities=velocities,
            orientations=orientations,
            scales=scales,
            prototype_indices=prototype_indices,
        )
    # Update semantics
    lazy.omni.isaac.core.utils.semantics.add_update_semantics(
        prim=lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path=cls.prim_path),
        semantic_label=cls.name,
        type_label="class",
    )
    return inst
@classmethod
def generate_particle_instancer(
    cls,
    n_particles,
    idn=None,
    particle_group=0,
    positions=None,
    velocities=None,
    orientations=None,
    scales=None,
    prototype_indices=None,
):
    """
    Generates a new particle instancer with unique identification number @idn, and registers it internally
    Args:
        n_particles (int): Number of particles to generate for this instancer
        idn (None or int): Unique identification number to assign to this particle instancer. This is used to
            deterministically reproduce individual particle instancer states dynamically, even if we
            delete / add additional ones at runtime during simulation. If None, this system will generate a unique
            identifier automatically.
        particle_group (int): ID for this particle set. Particles from different groups will automatically collide
            with each other. Particles in the same group will have collision behavior dictated by
            @cls.self_collision
        positions (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) positions.
            If not specified, will be set to the origin by default
        velocities (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) velocities.
            If not specified, all will be set to 0
        orientations (None or np.array): (n_particles, 4) shaped array specifying per-particle (x,y,z,w) quaternion
            orientations. If not specified, all will be set to canonical orientation (0, 0, 0, 1)
        scales (None or np.array): (n_particles, 3) shaped array specifying per-particle (x,y,z) scales.
            If not specified, will be uniformly randomly sampled from (cls.min_scale, cls.max_scale)
        prototype_indices (None or list of int): If specified, should specify which prototype should be used for
            each particle. If None, will use all 0s (i.e.: the first prototype created)
    Returns:
        PhysxParticleInstancer: Generated particle instancer
    """
    # Run sanity checks
    assert cls.initialized, "Must initialize system before generating particle instancers!"
    # Multiple particle instancers is NOT supported currently, since there is no clear use case for multiple
    assert cls.n_instancers == 0, f"Cannot create multiple instancers for the same system! " \
                                  f"There is already {cls.n_instancers} pre-existing instancers."
    # Automatically generate an identification number for this instancer if none is specified
    if idn is None:
        idn = cls.next_available_instancer_idn
    assert idn not in cls.instancer_idns, f"instancer idn {idn} already exists."
    # Generate standardized prim path for this instancer
    name = cls.particle_instancer_idn_to_name(idn=idn)
    # Create the instancer
    # Unspecified positions default to the origin; unspecified scales are sampled from the system range
    instance = create_physx_particleset_pointinstancer(
        name=name,
        particle_system_path=cls.prim_path,
        physx_particle_system_path=cls.system_prim_path,
        particle_group=particle_group,
        positions=np.zeros((n_particles, 3)) if positions is None else positions,
        self_collision=cls.self_collision,
        fluid=cls.is_fluid,
        particle_mass=None,
        particle_density=cls.particle_density,
        orientations=orientations,
        velocities=velocities,
        angular_velocities=None,
        scales=cls.sample_scales(n=n_particles) if scales is None else scales,
        prototype_prim_paths=[pp.prim_path for pp in cls.particle_prototypes],
        prototype_indices=prototype_indices,
        enabled=not cls.visual_only,
    )
    # Create the instancer object that wraps the raw prim
    instancer = PhysxParticleInstancer(
        prim_path=instance.GetPrimPath().pathString,
        name=name,
        idn=idn,
    )
    instancer.initialize()
    # Register the instancer so it participates in state dump / load and particle queries
    cls.particle_instancers[name] = instancer
    return instancer
@classmethod
def generate_particles_from_link(
    cls,
    obj,
    link,
    use_visual_meshes=True,
    mesh_name_prefixes=None,
    check_contact=True,
    instancer_idn=None,
    particle_group=0,
    sampling_distance=None,
    max_samples=None,
    prototype_indices=None,
):
    """
    Generates a new particle instancer with unique identification number @idn, with particles sampled from the mesh
    located at @mesh_prim_path, and registers it internally. This will also check for collision with other rigid
    objects before spawning in individual particles
    Args:
        obj (EntityPrim): Object whose @link's visual meshes will be converted into sampled particles
        link (RigidPrim): @obj's link whose visual meshes will be converted into sampled particles
        use_visual_meshes (bool): Whether to use visual meshes of the link to generate particles
        mesh_name_prefixes (None or str): If specified, specifies the substring that must exist in @link's
            mesh names in order for that mesh to be included in the particle generator function.
            If None, no filtering will be used.
        check_contact (bool): If True, will only spawn in particles that do not collide with other rigid bodies
        instancer_idn (None or int): Unique identification number of the particle instancer to assign the generated
            particles to. This is used to deterministically reproduce individual particle instancer states
            dynamically, even if we delete / add additional ones at runtime during simulation. If there is no
            active instancer that matches the requested idn, a new one will be created.
            If None, this system will add particles to the default particle instancer
        particle_group (int): ID for this particle set. Particles from different groups will automatically collide
            with each other. Particles in the same group will have collision behavior dictated by
            @cls.self_collision.
            Only used if a new particle instancer is created!
        sampling_distance (None or float): If specified, sets the distance between sampled particles. If None,
            a simulator autocomputed value will be used
        max_samples (None or int): If specified, maximum number of particles to sample
        prototype_indices (None or list of int): If specified, should specify which prototype should be used for
            each particle. If None, will randomly sample from all available prototypes
    """
    # Pure delegation to the base PhysicalParticleSystem implementation -- overridden here only to
    # re-document the instancer-specific keyword arguments
    return super().generate_particles_from_link(
        obj=obj,
        link=link,
        use_visual_meshes=use_visual_meshes,
        mesh_name_prefixes=mesh_name_prefixes,
        check_contact=check_contact,
        instancer_idn=instancer_idn,
        particle_group=particle_group,
        sampling_distance=sampling_distance,
        max_samples=max_samples,
        prototype_indices=prototype_indices,
    )

@classmethod
def generate_particles_on_object(
    cls,
    obj,
    instancer_idn=None,
    particle_group=0,
    sampling_distance=None,
    max_samples=None,
    min_samples_for_success=1,
    prototype_indices=None,
):
    """
    Generates @n_particles new particle objects and samples their locations on the top surface of object @obj
    Args:
        obj (BaseObject): Object on which to generate a particle instancer with sampled particles on the object's
            top surface
        instancer_idn (None or int): Unique identification number of the particle instancer to assign the generated
            particles to. This is used to deterministically reproduce individual particle instancer states
            dynamically, even if we delete / add additional ones at runtime during simulation. If there is no
            active instancer that matches the requested idn, a new one will be created.
            If None, this system will add particles to the default particle instancer
        particle_group (int): ID for this particle set. Particles from different groups will automatically collide.
            Only used if a new particle instancer is created!
        sampling_distance (None or float): If specified, sets the distance between sampled particles. If None,
            a simulator autocomputed value will be used
        max_samples (None or int): If specified, maximum number of particles to sample
        min_samples_for_success (int): Minimum number of particles required to be sampled successfully in order
            for this generation process to be considered successful
        prototype_indices (None or list of int): If specified, should specify which prototype should be used for
            each particle. If None, will randomly sample from all available prototypes
    Returns:
        bool: True if enough particles were generated successfully (number of successfully sampled points >=
            min_samples_for_success), otherwise False
    """
    # Pure delegation to the base PhysicalParticleSystem implementation
    return super().generate_particles_on_object(
        obj=obj,
        instancer_idn=instancer_idn,
        particle_group=particle_group,
        sampling_distance=sampling_distance,
        max_samples=max_samples,
        min_samples_for_success=min_samples_for_success,
        prototype_indices=prototype_indices,
    )
@classmethod
def remove_particle_instancer(cls, name):
    """
    Removes particle instancer with name @name from this system.
    Args:
        name (str): Particle instancer name to remove. If it does not exist, then an error will be raised
    """
    # Make sure the instancer actually exists
    assert_valid_key(key=name, valid_keys=cls.particle_instancers, name="particle instancer")
    # Remove instancer from our tracking and delete its prim
    instancer = cls.particle_instancers.pop(name)
    og.sim.remove_prim(instancer)

@classmethod
def particle_instancer_name_to_idn(cls, name):
    """
    Args:
        name (str): Particle instancer name
    Returns:
        int: Particle instancer identification number
    """
    # Names are generated as f"{cls.name}Instancer{idn}", so strip the prefix and parse the idn
    return int(name.split(f"{cls.name}Instancer")[-1])

@classmethod
def particle_instancer_idn_to_name(cls, idn):
    """
    Args:
        idn (int): Particle instancer identification number
    Returns:
        str: Name of the particle instancer auto-generated from its unique identification number
    """
    return f"{cls.name}Instancer{idn}"
@classmethod
def get_particles_position_orientation(cls):
    # All particle pose queries on this system operate on the default particle instancer
    return cls.default_particle_instancer.particle_positions, cls.default_particle_instancer.particle_orientations

@classmethod
def get_particles_local_pose(cls):
    # NOTE(review): delegates to the world-frame accessor -- presumably the particle system prim
    # sits at the global origin so the two frames coincide; confirm
    return cls.get_particles_position_orientation()

@classmethod
def get_particle_position_orientation(cls, idx):
    # Single-particle accessor: index into the full position / orientation arrays
    pos, ori = cls.get_particles_position_orientation()
    return pos[idx], ori[idx]

@classmethod
def get_particle_local_pose(cls, idx):
    return cls.get_particle_position_orientation(idx=idx)

@classmethod
def set_particles_position_orientation(cls, positions=None, orientations=None):
    # Only update the components that are explicitly specified
    if positions is not None:
        cls.default_particle_instancer.particle_positions = positions
    if orientations is not None:
        cls.default_particle_instancer.particle_orientations = orientations

@classmethod
def set_particles_local_pose(cls, positions=None, orientations=None):
    cls.set_particles_position_orientation(positions=positions, orientations=orientations)

@classmethod
def set_particle_position_orientation(cls, idx, position=None, orientation=None):
    # Read-modify-write the full arrays, since per-particle assignment is not exposed directly
    if position is not None:
        positions = cls.default_particle_instancer.particle_positions
        positions[idx] = position
        cls.default_particle_instancer.particle_positions = positions
    if orientation is not None:
        orientations = cls.default_particle_instancer.particle_orientations
        orientations[idx] = orientation
        cls.default_particle_instancer.particle_orientations = orientations

@classmethod
def set_particle_local_pose(cls, idx, position=None, orientation=None):
    cls.set_particle_position_orientation(idx=idx, position=position, orientation=orientation)
@classmethod
def _sync_particle_instancers(cls, idns, particle_groups, particle_counts):
    """
    Synchronizes the particle instancers based on desired identification numbers @idns
    Args:
        idns (list of int): Desired unique instancers that should be active for this particle system
        particle_groups (list of int): Desired particle groups that each instancer should be. Length of this
            list should be the same length as @idns
        particle_counts (list of int): Desired particle counts that should exist per instancer. Length of this
            list should be the same length as @idns
    """
    # We have to be careful here -- some particle instancers may have been deleted / are mismatched, so we need
    # to update accordingly, potentially deleting stale instancers and creating new instancers as needed
    idn_to_info_mapping = {idn: {"group": group, "count": count}
                           for idn, group, count in zip(idns, particle_groups, particle_counts)}
    current_instancer_names = set(cls.particle_instancers.keys())
    desired_instancer_names = set(cls.particle_instancer_idn_to_name(idn=idn) for idn in idns)
    instancers_to_delete = current_instancer_names - desired_instancer_names
    instancers_to_create = desired_instancer_names - current_instancer_names
    common_instancers = current_instancer_names.intersection(desired_instancer_names)
    # Sanity check the common instancers, we will recreate any where there is a mismatch
    for name in common_instancers:
        idn = cls.particle_instancer_name_to_idn(name=name)
        info = idn_to_info_mapping[idn]
        instancer = cls.particle_instancers[name]
        if instancer.particle_group != info["group"]:
            instancer.particle_group = info["group"]
        count_diff = info["count"] - instancer.n_particles
        if count_diff > 0:
            # We need to add more particles to this group; new particles spawn at the origin
            instancer.add_particles(positions=np.zeros((count_diff, 3)))
        elif count_diff < 0:
            # We need to remove particles from this group -- drop the first |count_diff| particles
            instancer.remove_particles(idxs=np.arange(-count_diff))
    # Delete any instancers we no longer want
    for name in instancers_to_delete:
        cls.remove_particle_instancer(name=name)
    # Create any instancers we don't already have
    for name in instancers_to_create:
        idn = cls.particle_instancer_name_to_idn(name=name)
        info = idn_to_info_mapping[idn]
        cls.generate_particle_instancer(idn=idn, particle_group=info["group"], n_particles=info["count"])
@classmethod
def _dump_state(cls):
    """
    Dumps this system's state as a dict: instancer count, per-instancer idns / particle groups /
    particle counts, and each instancer's own (non-serialized) state keyed by instancer name.
    """
    instancers = cls.particle_instancers
    groups = []
    counts = []
    for inst in instancers.values():
        groups.append(inst.particle_group)
        counts.append(inst.n_particles)
    return {
        "n_instancers": cls.n_instancers,
        "instancer_idns": cls.instancer_idns,
        "instancer_particle_groups": groups,
        "instancer_particle_counts": counts,
        "particle_states": {name: inst.dump_state(serialized=False) for name, inst in instancers.items()},
    }
@classmethod
def _load_state(cls, state):
    """
    Loads a previously dumped state dict (see _dump_state) into this system.
    """
    # Synchronize the particle instancers
    cls._sync_particle_instancers(
        idns=state["instancer_idns"],
        particle_groups=state["instancer_particle_groups"],
        particle_counts=state["instancer_particle_counts"],
    )
    # Iterate over all particle states and load their respective states
    for name, inst_state in state["particle_states"].items():
        cls.particle_instancers[name].load_state(inst_state, serialized=False)

@classmethod
def _serialize(cls, state):
    # Array is number of particle instancers, then the corresponding states for each particle instancer
    # Layout matches state_size: [n_instancers, idns (n), groups (n), counts (n), per-instancer states]
    return np.concatenate([
        [state["n_instancers"]],
        state["instancer_idns"],
        state["instancer_particle_groups"],
        state["instancer_particle_counts"],
        *[cls.particle_instancers[name].serialize(inst_state)
          for name, inst_state in state["particle_states"].items()],
    ]).astype(float)
@classmethod
def _deserialize(cls, state):
    """
    Inverse of _serialize: reconstructs the state dict from a flat float array.
    Returns:
        2-tuple: (state dict, number of array entries consumed)
    """
    # Synchronize the particle instancers
    n_instancers = int(state[0])
    instancer_info = dict()
    idx = 1
    # Read the three per-instancer metadata arrays (idns, groups, counts), n_instancers entries each
    for info_name in ("instancer_idns", "instancer_particle_groups", "instancer_particle_counts"):
        instancer_info[info_name] = state[idx: idx + n_instancers].astype(int).tolist()
        idx += n_instancers
    # Syncing is needed so that each particle instancer can further deserialize its own state
    log.debug(f"Syncing {cls.name} particles with {n_instancers} instancers..")
    cls._sync_particle_instancers(
        idns=instancer_info["instancer_idns"],
        particle_groups=instancer_info["instancer_particle_groups"],
        particle_counts=instancer_info["instancer_particle_counts"],
    )
    # Procedurally deserialize the particle states
    particle_states = dict()
    for idn in instancer_info["instancer_idns"]:
        name = cls.particle_instancer_idn_to_name(idn=idn)
        state_size = cls.particle_instancers[name].state_size
        particle_states[name] = cls.particle_instancers[name].deserialize(state[idx: idx + state_size])
        idx += state_size
    return dict(
        n_instancers=n_instancers,
        **instancer_info,
        particle_states=particle_states,
    ), idx
@classmethod
def remove_all_particles(cls):
    # Syncing against an empty desired set deletes every active instancer (and all of its particles)
    cls._sync_particle_instancers(idns=[], particle_groups=[], particle_counts=[])
@classmethod
def create(
    cls,
    name,
    particle_density,
    min_scale=None,
    max_scale=None,
    **kwargs,
):
    """
    Utility function to programmatically generate monolithic fluid system classes.
    Args:
        name (str): Name of the system, in snake case.
        particle_density (float): Particle density for the generated system
        min_scale (None or 3-array): If specified, sets the minimum bound for particles' relative scale.
            Else, defaults to 1
        max_scale (None or 3-array): If specified, sets the maximum bound for particles' relative scale.
            Else, defaults to 1
        **kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
            the class attribute to modify and the values represent the functions / value to set
            (Note: These values should have either @classproperty or @classmethod decorators!)
    Returns:
        MicroPhysicalParticleSystem: Generated system class
    """
    # Override the necessary parameters
    # These closures capture this call's arguments and become classproperties on the generated class
    @classproperty
    def cp_register_system(cls):
        # We should register this system since it's an "actual" system (not an intermediate class)
        return True
    @classproperty
    def cp_particle_density(cls):
        return particle_density
    # Add to any other params specified
    kwargs["_register_system"] = cp_register_system
    kwargs["particle_density"] = cp_particle_density
    # Run super
    return super().create(name=name, min_scale=min_scale, max_scale=max_scale, **kwargs)
class FluidSystem(MicroPhysicalParticleSystem):
"""
Particle system class simulating fluids, leveraging isosurface feature in omniverse to render nice PBR fluid
texture. Individual particles are composed of spheres.
"""
@classmethod
def initialize(cls):
    """
    Initializes the fluid system: binds the fluid material, applies the physical material preset,
    computes the system's representative color, and configures isosurface rendering settings.
    """
    # Run super first
    super().initialize()
    # Bind the material to the particle system (for isosurface) and the prototypes (for non-isosurface)
    cls._material.bind(cls.system_prim_path)
    for prototype in cls.particle_prototypes:
        cls._material.bind(prototype.prim_path)
    # Apply the physical material preset based on whether or not this fluid is viscous
    apply_mat_physics = lazy.omni.physx.scripts.particleUtils.AddPBDMaterialViscous if cls.is_viscous else lazy.omni.physx.scripts.particleUtils.AddPBDMaterialWater
    apply_mat_physics(p=cls._material.prim)
    # Compute the overall color of the fluid system
    # NOTE(review): assumes enable_specular_transmission acts as a 0/1 gate on the transmission
    # weight -- confirm against MaterialPrim
    base_color_weight = cls._material.diffuse_reflection_weight
    transmission_weight = cls._material.enable_specular_transmission * cls._material.specular_transmission_weight
    total_weight = base_color_weight + transmission_weight
    if total_weight == 0.0:
        # If the fluid doesn't have any color, we add a "blue" tint by default
        color = np.array([0.0, 0.0, 1.0])
    else:
        base_color_weight /= total_weight
        transmission_weight /= total_weight
        # Weighted sum of base color and transmission color
        color = base_color_weight * cls._material.diffuse_reflection_color + \
                transmission_weight * (0.5 * cls._material.specular_transmission_color + \
                                       0.5 * cls._material.specular_transmission_scattering_color)
    cls._color = color
    # Set custom isosurface rendering settings if we are using high-quality rendering
    if gm.ENABLE_HQ_RENDERING:
        set_carb_settings_for_fluid_isosurface()
        # We also modify the grid smoothing radius to avoid "blobby" appearances
        cls.system_prim.GetAttribute("physxParticleIsosurface:gridSmoothingRadius").Set(0.0001)
@classproperty
def is_fluid(cls):
    # Fluid systems are, by definition, fluids
    return True

@classproperty
def use_isosurface(cls):
    # Fluids render with an isosurface for a smooth liquid appearance
    return True

@classproperty
def is_viscous(cls):
    """
    Returns:
        bool: True if this material is viscous or not. Default is False
    """
    raise NotImplementedError()

@classproperty
def particle_radius(cls):
    # Magic number from omni tutorials
    # See https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html#offset-autocomputation
    return 0.99 * 0.6 * cls.particle_contact_offset

@classproperty
def particle_particle_rest_distance(cls):
    # Magic number, based on intuition from https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/physics-particles.html#particle-particle-interaction
    return cls.particle_radius * 2.0 * m.FLUID_PARTICLE_PARTICLE_DISTANCE_SCALE

@classproperty
def _material_mtl_name(cls):
    """
    Returns:
        None or str: Material mdl preset name to use for generating this fluid material. NOTE: Should be an
            entry from OmniSurfacePresets.mdl, minus the "OmniSurface_" string. If None is specified, will default
            to the generic OmniSurface material
    """
    return None
@classmethod
def _create_particle_prototypes(cls):
    # Simulate particles with simple spheres
    prototype = lazy.pxr.UsdGeom.Sphere.Define(og.sim.stage, f"{cls.prim_path}/prototype0")
    prototype.CreateRadiusAttr().Set(cls.particle_radius)
    # Wrap the raw USD sphere in a VisualGeomPrim and hide it (it only serves as an instancing template)
    prototype = VisualGeomPrim(prim_path=prototype.GetPath().pathString, name=prototype.GetPath().pathString)
    prototype.visible = False
    lazy.omni.isaac.core.utils.semantics.add_update_semantics(
        prim=prototype.prim,
        semantic_label=cls.name,
        type_label="class",
    )
    return [prototype]

@classmethod
def _get_particle_material_template(cls):
    # We use a template from OmniPresets if @_material_mtl_name is specified, else the default OmniSurface
    return MaterialPrim.get_material(
        prim_path=cls.mat_path,
        name=cls.mat_name,
        load_config={
            "mdl_name": f"OmniSurface{'' if cls._material_mtl_name is None else 'Presets'}.mdl",
            "mtl_name": f"OmniSurface{'' if cls._material_mtl_name is None else ('_' + cls._material_mtl_name)}"
        }
    )
@classmethod
def create(
    cls,
    name,
    particle_contact_offset,
    particle_density,
    is_viscous=False,
    material_mtl_name=None,
    customize_particle_material=None,
    **kwargs,
):
    """
    Utility function to programmatically generate monolithic fluid system classes.
    Args:
        name (str): Name of the system
        particle_contact_offset (float): Contact offset for the generated system
        particle_density (float): Particle density for the generated system
        is_viscous (bool): Whether or not the generated fluid system should be viscous
        material_mtl_name (None or str): Material mdl preset name to use for generating this fluid material.
            NOTE: Should be an entry from OmniSurfacePresets.mdl, minus the "OmniSurface_" string.
            If None is specified, will default to the generic OmniSurface material
        customize_particle_material (None or function): Method for customizing the particle material for the fluid
            after it has been loaded. Default is None, which will produce a no-op.
            If specified, expected signature:
            _customize_particle_material(mat: MaterialPrim) --> None
            where @MaterialPrim is the material to modify in-place
        **kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
            the class attribute to modify and the values represent the functions / value to set
            (Note: These values should have either @classproperty or @classmethod decorators!)
    Returns:
        FluidSystem: Generated system class
    """
    # These closures capture this call's arguments and become class members of the generated class
    @classproperty
    def cp_particle_contact_offset(cls):
        return particle_contact_offset
    @classproperty
    def cp_material_mtl_name(cls):
        return material_mtl_name
    @classproperty
    def cp_is_viscous(cls):
        return is_viscous
    @classmethod
    def cm_customize_particle_material(cls):
        # No-op when no customization callback was supplied
        if customize_particle_material is not None:
            customize_particle_material(mat=cls._material)
    # Add to any other params specified
    kwargs["particle_contact_offset"] = cp_particle_contact_offset
    kwargs["_material_mtl_name"] = cp_material_mtl_name
    kwargs["is_viscous"] = cp_is_viscous
    kwargs["_customize_particle_material"] = cm_customize_particle_material
    # Create and return the class
    return super().create(
        name=name,
        particle_density=particle_density,
        **kwargs,
    )
def customize_particle_material_factory(attr, value):
    """
    Builds a particle-material customization callback.
    Args:
        attr (str): Name of the material attribute to set
        value: Value to assign; converted to an np.array at call time
    Returns:
        function: Callback taking a material @mat and setting @mat.<attr> = np.array(@value)
    """
    def _customizer(mat):
        # Convert on every invocation so each material receives its own array instance
        setattr(mat, attr, np.array(value))
    return _customizer
class GranularSystem(MicroPhysicalParticleSystem):
"""
Particle system class simulating granular materials. Individual particles are composed of custom USD objects.
"""
# Cached particle contact offset determined from loaded prototype
_particle_contact_offset = None
_particle_template = None
@classproperty
def self_collision(cls):
    # Don't self-collide to improve physics stability
    # For whatever reason, granular (non-fluid) particles tend to explode when sampling Filled states, and it seems
    # the only way to avoid this unstable behavior is to disable self-collisions. This actually enables the granular
    # particles to converge to zero velocity.
    return False

@classmethod
def _clear(cls):
    # Remove the imported particle template object before clearing the rest of the system
    og.sim.remove_object(cls._particle_template)
    super()._clear()
    # Drop cached values so a subsequent initialize() recomputes them
    cls._particle_template = None
    cls._particle_contact_offset = None

@classproperty
def particle_contact_offset(cls):
    # Computed and cached by _create_particle_prototypes from the prototype's bounding sphere
    return cls._particle_contact_offset

@classproperty
def is_fluid(cls):
    # Granular materials are not fluids
    return False
@classmethod
def _create_particle_prototypes(cls):
    """
    Creates the single particle prototype for this granular system by copying the visual mesh of the
    system's particle template object, and caches the derived particle contact offset.
    Returns:
        list of VisualGeomPrim: Single-element list holding the prototype prim
    """
    # Load the particle template
    particle_template = cls._create_particle_template()
    og.sim.import_object(obj=particle_template, register=False)
    cls._particle_template = particle_template
    # Make sure there is no ambiguity about which mesh to use as the particle from this template
    assert len(particle_template.links) == 1, "GranularSystem particle template has more than one link"
    assert len(particle_template.root_link.visual_meshes) == 1, "GranularSystem particle template has more than one visual mesh"
    # Make sure template scaling is [1, 1, 1] -- any particle scaling should be done via cls.min/max_scale
    assert np.all(particle_template.scale == 1.0)
    # The prototype is assumed to be the first and only visual mesh belonging to the root link
    visual_geom = list(particle_template.root_link.visual_meshes.values())[0]
    # Copy it to the standardized prim path
    prototype_path = f"{cls.prim_path}/prototype0"
    lazy.omni.kit.commands.execute("CopyPrim", path_from=visual_geom.prim_path, path_to=prototype_path)
    # Wrap it with VisualGeomPrim with the correct scale
    prototype = VisualGeomPrim(prim_path=prototype_path, name=prototype_path)
    prototype.scale *= cls.max_scale
    prototype.visible = False
    lazy.omni.isaac.core.utils.semantics.add_update_semantics(
        prim=prototype.prim,
        semantic_label=cls.name,
        type_label="class",
    )
    # Store the contact offset based on a minimum sphere
    # Threshold the lower-bound to avoid super small particles
    # minimum_nsphere returns (center, radius); the radius of the minimum bounding sphere of the
    # scaled prototype mesh serves as the contact offset
    vertices = np.array(prototype.get_attribute("points")) * prototype.scale
    _, particle_contact_offset = trimesh.nsphere.minimum_nsphere(trimesh.Trimesh(vertices=vertices))
    if particle_contact_offset < m.MIN_PARTICLE_CONTACT_OFFSET:
        # Upscale the prototype so its bounding sphere reaches the minimum allowed contact offset
        prototype.scale *= m.MIN_PARTICLE_CONTACT_OFFSET / particle_contact_offset
        particle_contact_offset = m.MIN_PARTICLE_CONTACT_OFFSET
    cls._particle_contact_offset = particle_contact_offset
    return [prototype]
@classmethod
def _create_particle_template(cls):
"""
Creates the particle template to be used for this system.
NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
visual mesh attached to its root link, since this will be the actual visual mesh used
Returns:
EntityPrim: Particle template that will be duplicated when generating future particle groups
"""
raise NotImplementedError()
@classmethod
def create(
cls,
name,
particle_density,
create_particle_template,
scale=None,
**kwargs,
):
"""
Utility function to programmatically generate monolithic fluid system classes.
Args:
name (str): Name of the system
particle_density (float): Particle density for the generated system
material_mtl_name (None or str): Material mdl preset name to use for generating this fluid material.
NOTE: Should be an entry from OmniSurfacePresets.mdl, minus the "OmniSurface_" string.
If None if specified, will default to the generic OmniSurface material
create_particle_template (function): Method for generating the visual particle template that will be duplicated
when generating groups of particles.
Expected signature:
create_particle_template(prim_path: str, name: str) --> EntityPrim
where @prim_path and @name are the parameters to assign to the generated EntityPrim.
NOTE: The loaded particle template is expected to be a non-articulated, single-link object with a single
visual mesh attached to its root link, since this will be the actual visual mesh used
scale (None or 3-array): If specified, sets the scaling factor for the particles' relative scale.
Else, defaults to 1
**kwargs (any): keyword-mapped parameters to override / set in the child class, where the keys represent
the class attribute to modify and the values represent the functions / value to set
(Note: These values should have either @classproperty or @classmethod decorators!)
Returns:
GranularSystem: Generated granular system class
"""
@classmethod
def cm_create_particle_template(cls):
return create_particle_template(prim_path=f"{cls.prim_path}/template", name=f"{cls.name}_template")
# Add to any other params specified
kwargs["_create_particle_template"] = cm_create_particle_template
# Create and return the class
return super().create(
name=name,
particle_density=particle_density,
min_scale=scale,
max_scale=scale,
**kwargs,
)
class Cloth(MicroParticleSystem):
    """
    Particle system class to simulate cloth.
    """
    @classmethod
    def remove_all_particles(cls):
        # Override base method since there are no particles to be deleted
        pass
    @classmethod
    def clothify_mesh_prim(cls, mesh_prim, remesh=True, particle_distance=None):
        """
        Clothifies @mesh_prim by applying the appropriate Cloth API, optionally re-meshing the mesh so that the
        resulting generated particles are roughly @particle_distance apart from each other.
        Args:
            mesh_prim (Usd.Prim): Mesh prim to clothify
            remesh (bool): If True, will remesh the input mesh before converting it into a cloth
            particle_distance (None or float): If set and @remesh is True, specifies the absolute target distance
                between generated cloth particles. If None, a value is automatically chosen such that the generated
                cloth particles are roughly touching each other, given cls.particle_contact_offset and
                @mesh_prim's scale
        """
        # Record whether the mesh carries UV coordinates so they can be preserved through the conversion
        has_uv_mapping = mesh_prim.GetAttribute("primvars:st").Get() is not None
        if not remesh:
            # We always load into trimesh to remove redundant particles (since natively omni redundantly represents
            # the number of vertices as 6x the total unique number of vertices)
            tm = mesh_prim_to_trimesh_mesh(mesh_prim=mesh_prim, include_normals=True, include_texcoord=True, world_frame=False)
            texcoord = np.array(mesh_prim.GetAttribute("primvars:st").Get()) if has_uv_mapping else None
        else:
            # We will remesh in pymeshlab, but it doesn't allow programmatic construction of a mesh with texcoords so
            # we convert our mesh into a trimesh mesh, then export it to a temp file, then load it into pymeshlab
            scaled_world_transform = PoseAPI.get_world_pose_with_scale(mesh_prim.GetPath().pathString)
            # Convert to trimesh mesh (in world frame)
            tm = mesh_prim_to_trimesh_mesh(mesh_prim=mesh_prim, include_normals=True, include_texcoord=True, world_frame=True)
            # Tmp file written to: {tmp_dir}/{tmp_fname}/{tmp_fname}.obj
            tmp_name = str(uuid.uuid4())
            tmp_dir = os.path.join(tempfile.gettempdir(), tmp_name)
            tmp_fpath = os.path.join(tmp_dir, f"{tmp_name}.obj")
            Path(tmp_dir).mkdir(parents=True, exist_ok=True)
            tm.export(tmp_fpath)
            # Start with the default particle distance
            particle_distance = cls.particle_contact_offset * 2 / 1.5 if particle_distance is None else particle_distance
            # Repetitively re-mesh at lower resolution until we have a mesh that has less than MAX_CLOTH_PARTICLES vertices
            for _ in range(10):
                ms = pymeshlab.MeshSet()
                ms.load_new_mesh(tmp_fpath)
                # Re-mesh based on @particle_distance - distance chosen such that at rest particles should be just touching
                # each other. The 1.5 magic number comes from the particle cloth demo from omni
                # Note that this means that the particles will overlap with each other, since at dist = 2 * contact_offset
                # the particles are just touching each other at rest
                avg_edge_percentage_mismatch = 1.0
                # Loop re-meshing until average edge percentage is within error threshold or we reach the max number of tries
                for _ in range(5):
                    if avg_edge_percentage_mismatch <= m.CLOTH_REMESHING_ERROR_THRESHOLD:
                        break
                    ms.meshing_isotropic_explicit_remeshing(iterations=5, adaptive=True, targetlen=pymeshlab.AbsoluteValue(particle_distance))
                    avg_edge_percentage_mismatch = abs(1.0 - particle_distance / ms.get_geometric_measures()["avg_edge_length"])
                else:
                    # Terminate anyways, but don't fail
                    log.warn("The generated cloth may not have evenly distributed particles.")
                # Check if we have too many vertices
                cm = ms.current_mesh()
                if cm.vertex_number() > m.MAX_CLOTH_PARTICLES:
                    # We have too many vertices, so we will re-mesh again
                    # Growing the edge length by sqrt(2) roughly halves the surface vertex count
                    particle_distance *= np.sqrt(2)  # halve the number of vertices
                    log.warn(f"Too many vertices ({cm.vertex_number()})! Re-meshing with particle distance {particle_distance}...")
                else:
                    break
            else:
                # for-else: all 10 coarsening rounds still exceeded the vertex budget
                raise ValueError(f"Could not remesh with less than MAX_CLOTH_PARTICLES ({m.MAX_CLOTH_PARTICLES}) vertices!")
            # Re-write data to @mesh_prim
            new_faces = cm.face_matrix()
            new_vertices = cm.vertex_matrix()
            new_normals = cm.vertex_normal_matrix()
            texcoord = np.array(cm.wedge_tex_coord_matrix()) if has_uv_mapping else None
            tm = trimesh.Trimesh(
                vertices=new_vertices,
                faces=new_faces,
                vertex_normals=new_normals,
            )
            # Apply the inverse of the world transform to get the mesh back into its local frame
            tm.apply_transform(np.linalg.inv(scaled_world_transform))
        # Update the mesh prim
        face_vertex_counts = np.array([len(face) for face in tm.faces], dtype=int)
        mesh_prim.GetAttribute("faceVertexCounts").Set(face_vertex_counts)
        mesh_prim.GetAttribute("points").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm.vertices))
        mesh_prim.GetAttribute("faceVertexIndices").Set(tm.faces.flatten())
        mesh_prim.GetAttribute("normals").Set(lazy.pxr.Vt.Vec3fArray.FromNumpy(tm.vertex_normals))
        if has_uv_mapping:
            mesh_prim.GetAttribute("primvars:st").Set(lazy.pxr.Vt.Vec2fArray.FromNumpy(texcoord))
        # Convert into particle cloth
        lazy.omni.physx.scripts.particleUtils.add_physx_particle_cloth(
            stage=og.sim.stage,
            path=mesh_prim.GetPath(),
            dynamic_mesh_path=None,
            particle_system_path=cls.system_prim_path,
            spring_stretch_stiffness=m.CLOTH_STRETCH_STIFFNESS,
            spring_bend_stiffness=m.CLOTH_BEND_STIFFNESS,
            spring_shear_stiffness=m.CLOTH_SHEAR_STIFFNESS,
            spring_damping=m.CLOTH_DAMPING,
            self_collision=True,
            self_collision_filter=True,
        )
        # Disable welding because it can potentially make thin objects non-manifold
        auto_particle_cloth_api = lazy.pxr.PhysxSchema.PhysxAutoParticleClothAPI(mesh_prim)
        auto_particle_cloth_api.GetDisableMeshWeldingAttr().Set(True)
    @classproperty
    def _pbd_material_kwargs(cls):
        # Friction / aerodynamic parameters applied to the cloth's PBD material
        return dict(
            friction=m.CLOTH_FRICTION,
            drag=m.CLOTH_DRAG,
            lift=m.CLOTH_LIFT,
        )
    @classproperty
    def _register_system(cls):
        # We should register this system since it's an "actual" system (not an intermediate class)
        return True
    @classproperty
    def particle_contact_offset(cls):
        # Fixed, globally-configured contact offset for cloth particles
        return m.CLOTH_PARTICLE_CONTACT_OFFSET
    @classproperty
    def state_size(cls):
        # Default is no state
        return 0
    @classmethod
    def _dump_state(cls):
        # Empty by default
        return dict()
    @classmethod
    def _load_state(cls, state):
        # Nothing by default
        pass
    @classmethod
    def _serialize(cls, state):
        # Nothing by default
        return np.array([], dtype=float)
    @classmethod
    def _deserialize(cls, state):
        # Nothing by default -- no state consumed, so return an empty dict and 0 elements read
        return dict(), 0
| 76,701 | Python | 43.490719 | 183 | 0.645089 |
StanfordVL/OmniGibson/omnigibson/action_primitives/action_primitive_set_base.py | import inspect
from abc import ABCMeta, abstractmethod
from enum import IntEnum
from typing import List
from future.utils import with_metaclass
from omnigibson import Environment
from omnigibson.robots import BaseRobot
from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene
from omnigibson.tasks.task_base import BaseTask
REGISTERED_PRIMITIVE_SETS = {}
class ActionPrimitiveError(ValueError):
    """Error raised when an action primitive fails; carries a machine-readable failure reason."""
    class Reason(IntEnum):
        """Coarse category describing at which stage the primitive failed."""
        # A precondition was not satisfied, e.g. PLACE was called without an object currently in hand.
        PRE_CONDITION_ERROR = 0
        # Sampling failed: e.g. no placement position or navigable pose near the object could be found.
        SAMPLING_ERROR = 1
        # Planning failed, possibly because no path could be found.
        PLANNING_ERROR = 2
        # Planning succeeded, but an error occurred while executing the plan.
        EXECUTION_ERROR = 3
        # Execution succeeded, but a post-condition check found a problem.
        POST_CONDITION_ERROR = 4
    def __init__(self, reason: Reason, message, metadata=None):
        """
        Args:
            reason (Reason): Failure category for this error
            message (str): Human-readable description of the failure
            metadata (None or dict): Optional extra context; stored as {} when None
        """
        self.reason = reason
        self.metadata = {} if metadata is None else metadata
        # The formatted message deliberately reports the raw @metadata argument
        # (possibly None), not the normalized self.metadata dict.
        super().__init__(f"{reason.name}: {message}. Additional info: {metadata}")
class ActionPrimitiveErrorGroup(ValueError):
    """Aggregate of the per-attempt errors raised while retrying a primitive."""
    def __init__(self, exceptions: List[ActionPrimitiveError]) -> None:
        """
        Args:
            exceptions (list of ActionPrimitiveError): One error per failed attempt, in order
        """
        self._exceptions = tuple(exceptions)
        # One "Attempt i: ..." paragraph per failure, separated by blank lines
        body = "\n\n".join(f"Attempt {idx}: {exc}" for idx, exc in enumerate(exceptions))
        super().__init__("An error occurred during each attempt of this action.\n\n" + body)
    @property
    def exceptions(self):
        """tuple of ActionPrimitiveError: The individual per-attempt failures."""
        return self._exceptions
class BaseActionPrimitiveSet(with_metaclass(ABCMeta, object)):
    """Abstract base class for a set of high-level action primitives bound to one environment."""
    def __init_subclass__(cls, **kwargs):
        """
        Registers all subclasses as part of this registry. This is useful to decouple internal codebase from external
        user additions. This way, users can add their custom primitive set by simply extending this class,
        and it will automatically be registered internally. This allows users to then specify their primitive set
        directly in string-form in e.g., their config files, without having to manually set the str-to-class mapping
        in our code.
        """
        # Only register concrete subclasses; abstract intermediate classes are skipped
        if not inspect.isabstract(cls):
            REGISTERED_PRIMITIVE_SETS[cls.__name__] = cls
    def __init__(self, env):
        """
        Args:
            env (Environment): Environment that the primitives will read state from and act within
        """
        self.env : Environment = env
    @property
    def robot(self):
        # Currently returns the first robot in the environment, but can be scaled to multiple robots
        # by creating multiple action generators and passing in a robot index etc.
        return self.env.robots[0]
    @abstractmethod
    def get_action_space(self):
        """Get the higher-level action space as an OpenAI Gym Space object."""
        pass
    @abstractmethod
    def apply(self, action):
        """
        Apply a primitive action.
        Given a higher-level action in the same format as the action space (e.g. as a number),
        generates a sequence of lower level actions (or raise ActionPrimitiveError). The action
        will get resolved and passed into apply_ref.
        """
        pass
    @abstractmethod
    def apply_ref(self, action, *args):
        """
        Apply a primitive action by reference.
        Given a higher-level action from the corresponding action set enum and any necessary arguments,
        generates a sequence of lower level actions (or raise ActionPrimitiveError)
        """
        pass
| 3,867 | Python | 38.070707 | 118 | 0.681148 |
StanfordVL/OmniGibson/omnigibson/action_primitives/symbolic_semantic_action_primitives.py | """
WARNING!
A set of action primitives that work without executing low-level physics but instead teleporting
objects directly into their post-condition states. Useful for learning high-level methods.
"""
from aenum import IntEnum, auto
import numpy as np
from omnigibson.robots.robot_base import BaseRobot
from omnigibson.systems.system_base import REGISTERED_SYSTEMS
from omnigibson.transition_rules import REGISTERED_RULES, TransitionRuleAPI
from omnigibson import object_states
from omnigibson.action_primitives.action_primitive_set_base import ActionPrimitiveError, ActionPrimitiveErrorGroup
from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives
from omnigibson.objects import DatasetObject
class SymbolicSemanticActionPrimitiveSet(IntEnum):
    """Enumeration of the symbolic (teleport-based) action primitives."""
    # aenum feature: each member is declared as (value, __doc__); auto() supplies the value and
    # the trailing string becomes the member's documentation.
    _init_ = 'value __doc__'
    GRASP = auto(), "Grasp an object"
    PLACE_ON_TOP = auto(), "Place the currently grasped object on top of another object"
    PLACE_INSIDE = auto(), "Place the currently grasped object inside another object"
    OPEN = auto(), "Open an object"
    CLOSE = auto(), "Close an object"
    TOGGLE_ON = auto(), "Toggle an object on"
    TOGGLE_OFF = auto(), "Toggle an object off"
    SOAK_UNDER = auto(), "Soak the currently grasped object under a fluid source."
    SOAK_INSIDE = auto(), "Soak the currently grasped object inside the fluid within a container."
    WIPE = auto(), "Wipe the given object with the currently grasped object."
    CUT = auto(), "Cut (slice or dice) the given object with the currently grasped object."
    PLACE_NEAR_HEATING_ELEMENT = auto(), "Place the currently grasped object near the heating element of another object."
    NAVIGATE_TO = auto(), "Navigate to an object"
    RELEASE = auto(), "Release an object, letting it fall to the ground. You can then grasp it again, as a way of reorienting your grasp of the object."
class SymbolicSemanticActionPrimitives(StarterSemanticActionPrimitives):
    def __init__(self, env):
        """
        Initializes the symbolic primitive set and binds each primitive enum entry to the
        generator method that implements it.

        Args:
            env (Environment): Environment these primitives will operate in
        """
        super().__init__(env)
        # Dispatch table consulted by apply_ref: primitive enum -> bound implementation
        self.controller_functions = {
            SymbolicSemanticActionPrimitiveSet.GRASP: self._grasp,
            SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top,
            SymbolicSemanticActionPrimitiveSet.PLACE_INSIDE: self._place_inside,
            SymbolicSemanticActionPrimitiveSet.OPEN: self._open,
            SymbolicSemanticActionPrimitiveSet.CLOSE: self._close,
            SymbolicSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on,
            SymbolicSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off,
            SymbolicSemanticActionPrimitiveSet.SOAK_UNDER: self._soak_under,
            SymbolicSemanticActionPrimitiveSet.SOAK_INSIDE: self._soak_inside,
            SymbolicSemanticActionPrimitiveSet.WIPE: self._wipe,
            SymbolicSemanticActionPrimitiveSet.CUT: self._cut,
            SymbolicSemanticActionPrimitiveSet.PLACE_NEAR_HEATING_ELEMENT: self._place_near_heating_element,
            SymbolicSemanticActionPrimitiveSet.NAVIGATE_TO: self._navigate_to_obj,
            SymbolicSemanticActionPrimitiveSet.RELEASE: self._release,
        }
    def apply_ref(self, prim, *args, attempts=3):
        """
        Yields action for robot to execute the primitive with the given arguments.
        Args:
            prim (SymbolicSemanticActionPrimitiveSet): Primitive to execute
            args: Arguments for the primitive
            attempts (int): Number of attempts to make before raising an error
        Returns:
            np.array or None: Action array for one step for the robot to execute the primitive or None if primitive completed
        Raises:
            ActionPrimitiveErrorGroup: If all @attempts attempts of the primitive fail, or a robot
                was passed as a primitive argument
        """
        assert attempts > 0, "Must make at least one attempt"
        ctrl = self.controller_functions[prim]
        # Robots are never valid symbolic targets -- fail fast before attempting anything
        if any(isinstance(arg, BaseRobot) for arg in args):
            raise ActionPrimitiveErrorGroup([
                ActionPrimitiveError(
                    ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                    "Cannot call a symbolic semantic action primitive with a robot as an argument."
                )
            ])
        errors = []
        for _ in range(attempts):
            # Attempt
            success = False
            try:
                yield from ctrl(*args)
                success = True
            except ActionPrimitiveError as e:
                errors.append(e)
            try:
                # Settle before returning.
                yield from self._settle_robot()
            except ActionPrimitiveError:
                pass
            # Stop on success
            if success:
                return
        raise ActionPrimitiveErrorGroup(errors)
    def _open_or_close(self, obj, should_open):
        """
        Directly sets the Open state of @obj to @should_open (no physical manipulation).

        Args:
            obj (StatefulObject): Openable object whose Open state should be set
            should_open (bool): True to open the object, False to close it

        Raises:
            ActionPrimitiveError: If an object is currently grasped, @obj has no Open state,
                or the Open state does not end up at the requested value
        """
        if self._get_obj_in_hand():
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "Cannot open or close an object while holding an object",
                {"object in hand": self._get_obj_in_hand().name},
            )
        if object_states.Open not in obj.states:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is not openable.",
                {"target object": obj.name}
            )
        # Don't do anything if the object is already in the requested open/closed state.
        if should_open == obj.states[object_states.Open].get_value():
            return
        # Set the value
        obj.states[object_states.Open].set_value(should_open)
        # Settle
        yield from self._settle_robot()
        if obj.states[object_states.Open].get_value() != should_open:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
                "The object did not open or close as expected. Maybe try again",
                {"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()},
            )
    def _grasp(self, obj: DatasetObject):
        """
        Yields action for the robot to navigate to object if needed, then to grasp it
        Args:
            obj (DatasetObject): Object for robot to grasp
        Returns:
            np.array or None: Action array for one step for the robot to grasp or None if grasp completed
        Raises:
            ActionPrimitiveError: If the hand is already holding a different object, or the grasp
                did not register after settling
        """
        # Don't do anything if the object is already grasped.
        obj_in_hand = self._get_obj_in_hand()
        if obj_in_hand is not None:
            if obj_in_hand == obj:
                return
            else:
                raise ActionPrimitiveError(
                    ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                    "Cannot grasp when your hand is already full",
                    {"target object": obj.name, "object currently in hand": obj_in_hand.name},
                )
        # Get close
        # yield from self._navigate_if_needed(obj)
        # Perform forced assisted grasp: teleport the object to the end effector, then latch it
        obj.set_position(self.robot.get_eef_position(self.arm))
        self.robot._establish_grasp(self.arm, (obj, obj.root_link), obj.get_position())
        # Execute for a moment
        yield from self._settle_robot()
        # Verify
        if self._get_obj_in_hand() is None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
                "Grasp completed, but no object detected in hand after executing grasp",
                {"target object": obj.name},
            )
        if self._get_obj_in_hand() != obj:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
                "An unexpected object was detected in hand after executing grasp. Consider releasing it",
                {"expected object": obj.name, "actual object": self._get_obj_in_hand().name},
            )
    def _release(self):
        """
        Immediately releases the currently grasped object, letting it drop in place.

        Raises:
            ActionPrimitiveError: If no object is currently grasped
        """
        if not self._get_obj_in_hand():
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "Cannot release an object if you're not already holding an object",
            )
        self.robot.release_grasp_immediately()
        yield from self._settle_robot()
    def _toggle(self, obj, value):
        """
        Directly sets the ToggledOn state of @obj to @value.

        Args:
            obj (StatefulObject): Toggleable object
            value (bool): Desired ToggledOn value

        Raises:
            ActionPrimitiveError: If holding an object, @obj is not toggleable, or the toggle
                did not take effect after settling
        """
        if self._get_obj_in_hand():
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "Cannot toggle an object while holding an object",
                {"object in hand": self._get_obj_in_hand()},
            )
        if object_states.ToggledOn not in obj.states:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is not toggleable.",
                {"target object": obj.name}
            )
        # Nothing to do if already in the requested state
        if obj.states[object_states.ToggledOn].get_value() == value:
            return
        # Call the setter
        obj.states[object_states.ToggledOn].set_value(value)
        # Yield some actions
        yield from self._settle_robot()
        # Check that it actually happened
        if obj.states[object_states.ToggledOn].get_value() != value:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
                "The object did not toggle as expected - maybe try again",
                {"target object": obj.name, "is it currently toggled on": obj.states[object_states.ToggledOn].get_value()}
            )
    def _place_with_predicate(self, obj, predicate, near_poses=None, near_poses_threshold=None):
        """
        Yields action for the robot to navigate to the object if needed, then to place it
        Args:
            obj (StatefulObject): Object for robot to place the object in its hand on
            predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
            near_poses (None or list): If specified, candidate poses passed through to the pose
                sampler that the placement should be near.
                NOTE(review): _place_near_heating_element currently passes raw 3D positions here;
                confirm the expected element format against _sample_pose_with_object_and_predicate.
            near_poses_threshold (None or float): If specified, distance threshold (relative to
                @near_poses) passed through to the pose sampler
        Returns:
            np.array or None: Action array for one step for the robot to place or None if place completed
        Raises:
            ActionPrimitiveError: If nothing is grasped, or the object did not satisfy @predicate
                after being released and teleported to the sampled pose
        """
        obj_in_hand = self._get_obj_in_hand()
        if obj_in_hand is None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping an object first to place it somewhere."
            )
        # Find a spot to put it
        obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj, near_poses=near_poses, near_poses_threshold=near_poses_threshold)
        # Get close, release the object.
        # yield from self._navigate_if_needed(obj, pose_on_obj=obj_pose)
        yield from self._release()
        # Actually move the object to the spot and step a bit to settle it.
        obj_in_hand.set_position_orientation(*obj_pose)
        yield from self._settle_robot()
        if not obj_in_hand.states[predicate].get_value(obj):
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.EXECUTION_ERROR,
                "Failed to place object at the desired place (probably dropped). The object was still released, so you need to grasp it again to continue",
                {"dropped object": obj_in_hand.name, "target object": obj.name}
            )
    def _soak_under(self, obj):
        """
        Soaks the currently grasped object under the particle source @obj by directly setting
        its Saturated state for every currently-producible, removable particle system.

        Args:
            obj (StatefulObject): Particle-source object (e.g. a faucet) to soak the grasped object under

        Raises:
            ActionPrimitiveError: If nothing is grasped, @obj is not a producing particle source,
                or the grasped object cannot soak the produced particles
        """
        # NOTE(review): unlike the sibling generator primitives, this method contains no yield,
        # yet apply_ref invokes it via `yield from ctrl(*args)` -- confirm whether it should end
        # with `yield from self._settle_robot()` like _cut does.
        # Check that our current object is a particle remover
        obj_in_hand = self._get_obj_in_hand()
        if obj_in_hand is None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a soakable object first."
            )
        # Check that the target object is a particle source
        if object_states.ParticleSource not in obj.states:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is not a particle source, so you can not soak anything under it.",
                {"target object": obj.name}
            )
        # Check if the target object has any particles in it
        producing_systems = {ps for ps in REGISTERED_SYSTEMS.values() if obj.states[object_states.ParticleSource].check_conditions_for_system(ps)}
        if not producing_systems:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object currently is not producing any particles - try toggling it on.",
                {"target object": obj.name}
            )
        # Check that the current object can remove those particles
        if object_states.Saturated not in obj_in_hand.states:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The currently grasped object cannot soak particles.",
                {"object in hand": obj_in_hand.name}
            )
        # NOTE(review): only Saturated was checked above, but ParticleRemover is indexed below --
        # if those states do not always co-occur this raises KeyError instead of
        # ActionPrimitiveError. Confirm (compare with the explicit check in _wipe).
        supported_systems = {
            x for x in producing_systems if obj_in_hand.states[object_states.ParticleRemover].supports_system(x)
        }
        if not supported_systems:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object only contains particles that this object cannot soak.",
                {
                    "target object": obj.name,
                    "cleaning tool": obj_in_hand.name,
                    "particles the target object is producing": sorted(x.name for x in producing_systems),
                    "particles the grasped object can remove": sorted([x for x in obj_in_hand.states[object_states.ParticleRemover].conditions.keys()])
                }
            )
        currently_removable_systems = {
            x for x in supported_systems if obj_in_hand.states[object_states.ParticleRemover].check_conditions_for_system(x)
        }
        if not currently_removable_systems:
            # TODO: This needs to be far more descriptive.
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is covered by some particles that this object can normally soak, but needs to be in a different state to do so (e.g. toggled on, soaked by another fluid first, etc.).",
                {
                    "target object": obj.name,
                    "cleaning tool": obj_in_hand.name,
                    "particles the target object is producing": sorted(x.name for x in producing_systems),
                }
            )
        # If so, remove the particles.
        for system in currently_removable_systems:
            obj_in_hand.states[object_states.Saturated].set_value(system, True)
    def _soak_inside(self, obj):
        """
        Soaks the currently grasped object in the particles contained within @obj by directly
        setting its Saturated state for every contained, removable particle system.

        Args:
            obj (StatefulObject): Container object holding the particles to soak in

        Raises:
            ActionPrimitiveError: If nothing is grasped, @obj contains no particles, or the
                grasped object cannot soak the contained particles
        """
        # NOTE(review): unlike the sibling generator primitives, this method contains no yield,
        # yet apply_ref invokes it via `yield from ctrl(*args)` -- confirm whether it should end
        # with `yield from self._settle_robot()` like _cut does.
        # Check that our current object is a particle remover
        obj_in_hand = self._get_obj_in_hand()
        if obj_in_hand is None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a soakable object first."
            )
        # Check that the target object is fillable
        if object_states.Contains not in obj.states:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is not fillable by particles, so you can not soak anything in it.",
                {"target object": obj.name}
            )
        # Check if the target object has any particles in it
        # NOTE(review): get_value is passed ps.states rather than the system ps itself -- confirm
        # Contains.get_value's expected argument type.
        contained_systems = {ps for ps in REGISTERED_SYSTEMS.values() if obj.states[object_states.Contains].get_value(ps.states)}
        if not contained_systems:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object currently does not contain any particles.",
                {"target object": obj.name}
            )
        # Check that the current object can remove those particles
        if object_states.Saturated not in obj_in_hand.states:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The currently grasped object cannot soak particles.",
                {"object in hand": obj_in_hand.name}
            )
        # NOTE(review): only Saturated was checked above, but ParticleRemover is indexed below --
        # if those states do not always co-occur this raises KeyError instead of
        # ActionPrimitiveError. Confirm (compare with the explicit check in _wipe).
        supported_systems = {
            x for x in contained_systems if obj_in_hand.states[object_states.ParticleRemover].supports_system(x)
        }
        if not supported_systems:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object only contains particles that this object cannot soak.",
                {
                    "target object": obj.name,
                    "cleaning tool": obj_in_hand.name,
                    "particles the target object contains": sorted(x.name for x in contained_systems),
                    "particles the grasped object can remove": sorted([x for x in obj_in_hand.states[object_states.ParticleRemover].conditions.keys()])
                }
            )
        currently_removable_systems = {
            x for x in supported_systems if obj_in_hand.states[object_states.ParticleRemover].check_conditions_for_system(x)
        }
        if not currently_removable_systems:
            # TODO: This needs to be far more descriptive.
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is covered by some particles that this object can normally soak, but needs to be in a different state to do so (e.g. toggled on, soaked by another fluid first, etc.).",
                {
                    "target object": obj.name,
                    "cleaning tool": obj_in_hand.name,
                    "particles the target object contains": sorted(x.name for x in contained_systems),
                }
            )
        # If so, remove the particles.
        for system in currently_removable_systems:
            obj_in_hand.states[object_states.Saturated].set_value(system, True)
    def _wipe(self, obj):
        """
        Wipes @obj with the currently grasped particle remover by directly clearing its Covered
        state for every currently-removable particle system.

        Args:
            obj (StatefulObject): Covered object to wipe clean

        Raises:
            ActionPrimitiveError: If nothing is grasped, @obj is not covered by particles, or the
                grasped object cannot remove the covering particles
        """
        # NOTE(review): unlike the sibling generator primitives, this method contains no yield,
        # yet apply_ref invokes it via `yield from ctrl(*args)` -- confirm whether it should end
        # with `yield from self._settle_robot()` like _cut does.
        # Check that our current object is a particle remover
        obj_in_hand = self._get_obj_in_hand()
        if obj_in_hand is None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a wiping tool (particle remover) first to wipe an object."
            )
        # Check that the target object is coverable
        if object_states.Covered not in obj.states:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is not coverable by any particles, so there is no need to wipe it.",
                {"target object": obj.name}
            )
        # Check if the target object has any particles on it
        # NOTE(review): get_value is passed ps.states rather than the system ps itself -- confirm
        # Covered.get_value's expected argument type.
        covering_systems = {ps for ps in REGISTERED_SYSTEMS.values() if obj.states[object_states.Covered].get_value(ps.states)}
        if not covering_systems:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is not covered by any particles.",
                {"target object": obj.name}
            )
        # Check that the current object can remove those particles
        if object_states.ParticleRemover not in obj_in_hand.states:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The currently grasped object is not a particle remover.",
                {"object in hand": obj_in_hand.name}
            )
        supported_systems = {
            x for x in covering_systems if obj_in_hand.states[object_states.ParticleRemover].supports_system(x)
        }
        if not supported_systems:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is covered only by particles that this cleaning tool cannot remove.",
                {
                    "target object": obj.name,
                    "cleaning tool": obj_in_hand.name,
                    "particles the target object is covered by": sorted(x.name for x in covering_systems),
                    "particles the grasped object can remove": sorted([x for x in obj_in_hand.states[object_states.ParticleRemover].conditions.keys()])
                }
            )
        currently_removable_systems = {
            x for x in supported_systems if obj_in_hand.states[object_states.ParticleRemover].check_conditions_for_system(x)
        }
        if not currently_removable_systems:
            # TODO: This needs to be far more descriptive.
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is covered by some particles that this cleaning tool can normally remove, but needs to be in a different state to do so (e.g. toggled on, soaked by another fluid first, etc.).",
                {
                    "target object": obj.name,
                    "cleaning tool": obj_in_hand.name,
                    "particles the target object is covered by": sorted(x.name for x in covering_systems),
                }
            )
        # If so, remove the particles.
        for system in currently_removable_systems:
            obj_in_hand.states[object_states.Covered].set_value(system, False)
    def _cut(self, obj):
        """
        Cuts @obj using the currently grasped slicer by firing the SlicingRule transition directly.

        Args:
            obj (StatefulObject): Sliceable or diceable object to cut

        Raises:
            ActionPrimitiveError: If nothing is grasped, the grasped object is not a slicer,
                or @obj is neither sliceable nor diceable
        """
        # Check that our current object is a slicer
        obj_in_hand = self._get_obj_in_hand()
        if obj_in_hand is None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping a cutting tool first to slice an object."
            )
        if "slicer" not in obj_in_hand._abilities:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The current object is not a cutting tool.",
                {"object in hand": obj_in_hand.name}
            )
        # Check that the target object is sliceable
        if "sliceable" not in obj._abilities and "diceable" not in obj._abilities:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The target object is not sliceable or diceable.",
                {"target object": obj.name}
            )
        # Get close
        # yield from self._navigate_if_needed(obj)
        # TODO: Do some more validation
        # NOTE(review): SlicingRule is applied with the object under the "sliceable" key even for
        # diceable-only objects -- confirm the rule handles the diceable case.
        added_obj_attrs = []
        removed_objs = []
        output = REGISTERED_RULES["SlicingRule"].transition({"sliceable": [obj]})
        added_obj_attrs += output.add
        removed_objs += output.remove
        TransitionRuleAPI.execute_transition(added_obj_attrs=added_obj_attrs, removed_objs=removed_objs)
        yield from self._settle_robot()
    def _place_near_heating_element(self, heat_source_obj):
        """
        Yields actions to place the currently grasped object next to a heating element.

        Args:
            heat_source_obj (StatefulObject): Object carrying a HeatSourceOrSink state whose
                heating-element links the grasped object should be placed near.

        Yields:
            np.array: Action arrays executing the placement.

        Raises:
            ActionPrimitiveError: If nothing is grasped, the target lacks the
                HeatSourceOrSink state, or the heat source requires objects to be
                placed inside it (no explicit heating element to place near).
        """
        obj_in_hand = self._get_obj_in_hand()
        if obj_in_hand is None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping an object first to place it somewhere."
            )
        if object_states.HeatSourceOrSink not in heat_source_obj.states:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "The target object is not a heat source or sink.", {"target object": heat_source_obj.name}
            )
        if heat_source_obj.states[object_states.HeatSourceOrSink].requires_inside:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "The heat source object has no explicit heating element, it just requires the cookable object to be placed inside it.",
                {"target object": heat_source_obj.name}
            )
        # Get the position of the heat source on the thing we're placing near
        heating_element_positions = np.array([link.get_position() for link in heat_source_obj.states[object_states.HeatSourceOrSink].links.values()])
        heating_distance_threshold = heat_source_obj.states[object_states.HeatSourceOrSink].distance_threshold
        # Call place-with-predicate, constraining the sampled placement to lie within the
        # heat source's distance threshold of one of its heating-element link positions.
        yield from self._place_with_predicate(heat_source_obj, object_states.OnTop, near_poses=heating_element_positions, near_poses_threshold=heating_distance_threshold)
def _wait_for_cooked(self, obj):
# Check that the current object is cookable
if object_states.Cooked not in obj.states:
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "Target object is not cookable.",
{"target object": obj.name}
)
# Keep waiting as long as the thing is warming up.
prev_temp = obj.states[object_states.Temperature].get_value()
while not obj.states[object_states.Cooked].get_value():
# Pass some time
for _ in range(10):
yield from self._empty_action()
# Check that we are still heating up
new_temp = obj.states[object_states.Temperature].get_value()
if new_temp - prev_temp < 1e-2:
raise ActionPrimitiveError(
ActionPrimitiveError.PRE_CONDITION_ERROR,
"Target object is not currently heating up.",
{"target object": obj.name}
)
    def _navigate_to_pose(self, pose_2d):
        """
        Teleports the robot to the specified 2d pose, then yields actions to let it settle.

        NOTE: Despite the name, this variant does not plan or execute a navigation
        trajectory -- it directly sets the robot's pose and only yields the settling
        actions afterwards.

        Args:
            pose_2d (Iterable): (x, y, yaw) 2d pose

        Returns:
            np.array or None: Action array for one step for the robot to settle or None if it is done
        """
        # Convert the 2d pose to a full (position, orientation) pose and teleport there.
        robot_pose = self._get_robot_pose_from_2d_pose(pose_2d)
        self.robot.set_position_orientation(*robot_pose)
        yield from self._settle_robot()
"""
WARNING!
The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example.
It currently only works with Fetch and Tiago with their JointControllers set to delta mode.
See provided tiago_primitives.yaml config file for an example. See examples/action_primitives for
runnable examples.
"""
from functools import cached_property
import inspect
import logging
import random
from aenum import IntEnum, auto
from math import ceil
import cv2
from matplotlib import pyplot as plt
import gym
import numpy as np
from scipy.spatial.transform import Rotation, Slerp
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson import object_states
from omnigibson.action_primitives.action_primitive_set_base import ActionPrimitiveError, ActionPrimitiveErrorGroup, BaseActionPrimitiveSet
from omnigibson.controllers import JointController, DifferentialDriveController
from omnigibson.macros import create_module_macros
from omnigibson.utils.object_state_utils import sample_cuboid_for_predicate
from omnigibson.objects.object_base import BaseObject
from omnigibson.robots import BaseRobot, Fetch, Tiago
from omnigibson.tasks.behavior_task import BehaviorTask
from omnigibson.utils.motion_planning_utils import (
plan_base_motion,
plan_arm_motion,
plan_arm_motion_ik,
set_base_and_detect_collision,
detect_robot_collision_in_sim
)
import omnigibson.utils.transform_utils as T
from omnigibson.utils.control_utils import IKSolver
from omnigibson.utils.grasping_planning_utils import (
get_grasp_poses_for_object_sticky,
get_grasp_position_for_open
)
from omnigibson.controllers.controller_base import ControlType
from omnigibson.utils.control_utils import FKSolver
from omnigibson.utils.ui_utils import create_module_logger
from omnigibson.objects.usd_object import USDObject
# Module-level macros: tunable constants grouped under this module's namespace.
m = create_module_macros(module_path=__file__)
# Height offset above the floor used when placing the robot body.
m.DEFAULT_BODY_OFFSET_FROM_FLOOR = 0.05
# Proportional gains for base linear / angular velocity commands.
m.KP_LIN_VEL = 0.3
m.KP_ANGLE_VEL = 0.2
# Step budget for letting the robot settle after motions.
m.MAX_STEPS_FOR_SETTLING = 500
# Max per-step Cartesian displacement of the hand during linear motion.
m.MAX_CARTESIAN_HAND_STEP = 0.002
# Step budgets for joint-space and IK-based hand motions, grasp/release, and waypoint navigation.
m.MAX_STEPS_FOR_HAND_MOVE_JOINT = 500
m.MAX_STEPS_FOR_HAND_MOVE_IK = 1000
m.MAX_STEPS_FOR_GRASP_OR_RELEASE = 250
m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION = 500
# Retry budgets for the sampling-based subroutines.
m.MAX_ATTEMPTS_FOR_OPEN_CLOSE = 20
m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE = 20
m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT = 200
m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM = 60
# Vertical offset applied when sampling predicate-based placements (e.g. OnTop).
m.PREDICATE_SAMPLING_Z_OFFSET = 0.02
# Pre-grasp approach distances (grasping vs. opening/closing).
m.GRASP_APPROACH_DISTANCE = 0.2
m.OPEN_GRASP_APPROACH_DISTANCE = 0.4
# Navigation convergence thresholds (default vs. low-precision).
m.DEFAULT_DIST_THRESHOLD = 0.05
m.DEFAULT_ANGLE_THRESHOLD = 0.05
m.LOW_PRECISION_DIST_THRESHOLD = 0.1
m.LOW_PRECISION_ANGLE_THRESHOLD = 0.2
# Whether Tiago's torso is treated as fixed for planning purposes.
m.TIAGO_TORSO_FIXED = False
# Joint-space tolerances for motion execution.
m.JOINT_POS_DIFF_THRESHOLD = 0.005
m.JOINT_CONTROL_MIN_ACTION = 0.0
m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION = np.deg2rad(45)
# Module-scoped logger.
log = create_module_logger(module_name=__name__)
def indented_print(msg, *args, **kwargs):
    """Log *msg* at debug level, indented proportionally to the current call-stack depth."""
    indent = " " * len(inspect.stack())
    log.debug(indent + str(msg), *args, **kwargs)
class RobotCopy:
    """A data structure for storing information about a robot copy, used for collision checking in planning."""
    def __init__(self):
        # Maps copy type ("original" / "simplified") -> root prim of the copied robot.
        self.prims = {}
        # Maps copy type -> {link name -> {mesh name -> copied collision mesh prim}}.
        self.meshes = {}
        # Maps copy type -> {link name -> {mesh name -> pose of the mesh relative to its link}}.
        self.relative_poses = {}
        # Maps copy type -> {link name -> pose of the link relative to the robot root}.
        self.links_relative_poses = {}
        # Out-of-the-way parking poses used to stash each copy when it is not being used for planning.
        self.reset_pose = {
            "original": ([0, 0, -5.0], [0, 0, 0, 1]),
            "simplified": ([5, 0, -5.0], [0, 0, 0, 1]),
        }
class PlanningContext(object):
    """
    A context manager that sets up a robot copy for collision checking in planning.

    On entry, the robot copy is posed to mirror the real robot's current configuration
    (via FK) and per-mesh disabled-collision sets are built; on exit, the copy is parked
    back at its out-of-the-way reset pose.
    """
    def __init__(self, robot, robot_copy, robot_copy_type="original"):
        self.robot = robot
        self.robot_copy = robot_copy
        # Fall back to the "original" copy if the requested type was never loaded.
        self.robot_copy_type = robot_copy_type if robot_copy_type in robot_copy.prims.keys() else "original"
        # Maps mesh prim path -> list of prim paths it is allowed to collide with.
        self.disabled_collision_pairs_dict = {}
    def __enter__(self):
        self._assemble_robot_copy()
        self._construct_disabled_collision_pairs()
        return self
    def __exit__(self, *args):
        # Park the copy back at its reset pose so it does not interfere with the scene.
        self._set_prim_pose(self.robot_copy.prims[self.robot_copy_type], self.robot_copy.reset_pose[self.robot_copy_type])
    def _assemble_robot_copy(self):
        # Pose the copy's meshes to mirror the real robot's current configuration,
        # using FK for the arm links and stored relative poses for everything else.
        if m.TIAGO_TORSO_FIXED:
            fk_descriptor = "left_fixed"
        else:
            fk_descriptor = "combined" if "combined" in self.robot.robot_arm_descriptor_yamls else self.robot.default_arm
        self.fk_solver = FKSolver(
            robot_description_path=self.robot.robot_arm_descriptor_yamls[fk_descriptor],
            robot_urdf_path=self.robot.urdf_path,
        )
        # TODO: Remove the need for this after refactoring the FK / descriptors / etc.
        arm_links = self.robot.manipulation_link_names
        if m.TIAGO_TORSO_FIXED:
            # NOTE(review): PlanningContext never assigns self.arm, so this assert would
            # raise AttributeError in fixed-torso mode -- confirm whether this should
            # read self.robot.default_arm instead.
            assert self.arm == "left", "Fixed torso mode only supports left arm!"
            joint_control_idx = self.robot.arm_control_idx["left"]
            joint_pos = np.array(self.robot.get_joint_positions()[joint_control_idx])
        else:
            joint_combined_idx = np.concatenate([self.robot.trunk_control_idx, self.robot.arm_control_idx[fk_descriptor]])
            joint_pos = np.array(self.robot.get_joint_positions()[joint_combined_idx])
        link_poses = self.fk_solver.get_link_poses(joint_pos, arm_links)
        # Set position of robot copy root prim
        self._set_prim_pose(self.robot_copy.prims[self.robot_copy_type], self.robot.get_position_orientation())
        # Assemble robot meshes
        for link_name, meshes in self.robot_copy.meshes[self.robot_copy_type].items():
            for mesh_name, copy_mesh in meshes.items():
                # Skip grasping frame (this is necessary for Tiago, but should be cleaned up in the future)
                if "grasping_frame" in link_name:
                    continue
                # Set poses of meshes relative to the robot to construct the robot
                link_pose = link_poses[link_name] if link_name in arm_links else self.robot_copy.links_relative_poses[self.robot_copy_type][link_name]
                mesh_copy_pose = T.pose_transform(*link_pose, *self.robot_copy.relative_poses[self.robot_copy_type][link_name][mesh_name])
                self._set_prim_pose(copy_mesh, mesh_copy_pose)
    def _set_prim_pose(self, prim, pose):
        # Write the (position, quaternion) pose directly onto the prim's xform ops.
        translation = lazy.pxr.Gf.Vec3d(*np.array(pose[0], dtype=float))
        prim.GetAttribute("xformOp:translate").Set(translation)
        # Reorder xyzw -> wxyz, which is the layout Gf.Quatd expects.
        orientation = np.array(pose[1], dtype=float)[[3, 0, 1, 2]]
        prim.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
    def _construct_disabled_collision_pairs(self):
        # Build, for every copied mesh, the list of prim paths it is allowed to touch.
        robot_meshes_copy = self.robot_copy.meshes[self.robot_copy_type]
        # Filter out collision pairs of meshes part of the same link
        for meshes in robot_meshes_copy.values():
            for mesh in meshes.values():
                self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] = [m.GetPrimPath().pathString for m in meshes.values()]
        # Filter out all self-collisions
        if self.robot_copy_type == "simplified":
            all_meshes = [mesh.GetPrimPath().pathString for link in robot_meshes_copy.keys() for mesh in robot_meshes_copy[link].values()]
            for link in robot_meshes_copy.keys():
                for mesh in robot_meshes_copy[link].values():
                    self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += all_meshes
        # Filter out collision pairs of meshes part of disabled collision pairs
        else:
            for pair in self.robot.disabled_collision_pairs:
                link_1 = pair[0]
                link_2 = pair[1]
                if link_1 in robot_meshes_copy.keys() and link_2 in robot_meshes_copy.keys():
                    for mesh in robot_meshes_copy[link_1].values():
                        self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += [m.GetPrimPath().pathString for m in robot_meshes_copy[link_2].values()]
                    for mesh in robot_meshes_copy[link_2].values():
                        self.disabled_collision_pairs_dict[mesh.GetPrimPath().pathString] += [m.GetPrimPath().pathString for m in robot_meshes_copy[link_1].values()]
        # Filter out colliders all robot copy meshes should ignore
        disabled_colliders = []
        # Disable original robot colliders so copy can't collide with it
        disabled_colliders += [link.prim_path for link in self.robot.links.values()]
        filter_categories = ["floors"]
        for obj in og.sim.scene.objects:
            if obj.category in filter_categories:
                disabled_colliders += [link.prim_path for link in obj.links.values()]
        # Disable object in hand
        obj_in_hand = self.robot._ag_obj_in_hand[self.robot.default_arm]
        if obj_in_hand is not None:
            disabled_colliders += [link.prim_path for link in obj_in_hand.links.values()]
        for colliders in self.disabled_collision_pairs_dict.values():
            colliders += disabled_colliders
class StarterSemanticActionPrimitiveSet(IntEnum):
    """Enumeration of the available semantic action primitives."""
    # aenum feature: each member is declared as a (value, __doc__) pair.
    _init_ = 'value __doc__'
    GRASP = auto(), "Grasp an object"
    PLACE_ON_TOP = auto(), "Place the currently grasped object on top of another object"
    PLACE_INSIDE = auto(), "Place the currently grasped object inside another object"
    OPEN = auto(), "Open an object"
    CLOSE = auto(), "Close an object"
    NAVIGATE_TO = auto(), "Navigate to an object (mostly for debugging purposes - other primitives also navigate first)"
    RELEASE = auto(), "Release an object, letting it fall to the ground. You can then grasp it again, as a way of reorienting your grasp of the object."
    TOGGLE_ON = auto(), "Toggle an object on"
    TOGGLE_OFF = auto(), "Toggle an object off"
class StarterSemanticActionPrimitives(BaseActionPrimitiveSet):
    def __init__(self, env, add_context=False, enable_head_tracking=True, always_track_eef=False, task_relevant_objects_only=False):
        """
        Initializes a StarterSemanticActionPrimitives generator.
        Args:
            env (Environment): The environment that the primitives will run on.
            add_context (bool): Whether to add text context to the return value. Defaults to False.
            enable_head_tracking (bool): Whether to enable head tracking. Defaults to True.
            always_track_eef (bool, optional): Whether to always track the end effector, as opposed
                to switching between target object and end effector based on context. Defaults to False.
            task_relevant_objects_only (bool): Whether to only consider objects relevant to the task
                when computing the action space. Defaults to False.
        """
        log.warning(
            "The StarterSemanticActionPrimitive is a work-in-progress and is only provided as an example. "
            "It currently only works with Fetch and Tiago with their JointControllers set to delta mode."
        )
        super().__init__(env)
        # Dispatch table: primitive enum member -> generator implementing it.
        self.controller_functions = {
            StarterSemanticActionPrimitiveSet.GRASP: self._grasp,
            StarterSemanticActionPrimitiveSet.PLACE_ON_TOP: self._place_on_top,
            StarterSemanticActionPrimitiveSet.PLACE_INSIDE: self._place_inside,
            StarterSemanticActionPrimitiveSet.OPEN: self._open,
            StarterSemanticActionPrimitiveSet.CLOSE: self._close,
            StarterSemanticActionPrimitiveSet.NAVIGATE_TO: self._navigate_to_obj,
            StarterSemanticActionPrimitiveSet.RELEASE: self._execute_release,
            StarterSemanticActionPrimitiveSet.TOGGLE_ON: self._toggle_on,
            StarterSemanticActionPrimitiveSet.TOGGLE_OFF: self._toggle_off,
        }
        # Validate the robot
        assert isinstance(self.robot, (Fetch, Tiago)), "StarterSemanticActionPrimitives only works with Fetch and Tiago."
        assert isinstance(self.robot.controllers["base"], (JointController, DifferentialDriveController)), \
            "StarterSemanticActionPrimitives only works with a JointController or DifferentialDriveController at the robot base."
        self._base_controller_is_joint = isinstance(self.robot.controllers["base"], JointController)
        if self._base_controller_is_joint:
            # A joint-controlled base must accept absolute (x, y, theta) velocity commands.
            assert self.robot.controllers["base"].control_type == ControlType.VELOCITY, \
                "StarterSemanticActionPrimitives only works with a base JointController with velocity mode."
            assert not self.robot.controllers["base"].use_delta_commands, \
                "StarterSemanticActionPrimitives only works with a base JointController with absolute mode."
            assert self.robot.controllers["base"].command_dim == 3, \
                "StarterSemanticActionPrimitives only works with a base JointController with 3 dof (x, y, theta)."
        self.arm = self.robot.default_arm
        self.robot_model = self.robot.model_name
        self.robot_base_mass = self.robot._links["base_link"].mass
        self.add_context = add_context
        self._task_relevant_objects_only = task_relevant_objects_only
        # Head-tracking configuration; _tracking_object is updated by the primitives as they run.
        self._enable_head_tracking = enable_head_tracking
        self._always_track_eef = always_track_eef
        self._tracking_object = None
        # Build the poseable robot copy used for collision checking during planning.
        self.robot_copy = self._load_robot_copy()
def _postprocess_action(self, action):
"""Postprocesses action by applying head tracking and adding context if necessary."""
if self._enable_head_tracking:
action = self._overwrite_head_action(action)
if not self.add_context:
return action
stack = inspect.stack()
action_type = "manip:"
context_function = stack[1].function
for frame_info in stack[1:]:
function_name = frame_info.function
# TODO: Make this stop at apply_ref
if function_name in ["_grasp", "_place_on_top", "_place_or_top", "_open_or_close"]:
break
if "nav" in function_name:
action_type = "nav"
context = action_type + context_function
return action, context
    def _load_robot_copy(self):
        """Loads a copy of the robot that can be manipulated into arbitrary configurations for collision checking in planning.

        Returns:
            RobotCopy: Populated structure with the copied prims, meshes, and relative poses
            for the "original" copy and, when available, a "simplified" copy.
        """
        robot_copy = RobotCopy()
        robots_to_copy = {
            "original": {
                "robot": self.robot,
                "copy_path": "/World/robot_copy"
            }
        }
        # Some robots ship a simplified collision mesh; copy that too if present.
        if hasattr(self.robot, 'simplified_mesh_usd_path'):
            simplified_robot = {
                "robot": USDObject("simplified_copy", self.robot.simplified_mesh_usd_path),
                "copy_path": "/World/simplified_robot_copy"
            }
            robots_to_copy['simplified'] = simplified_robot
        for robot_type, rc in robots_to_copy.items():
            copy_robot = None
            copy_robot_meshes = {}
            copy_robot_meshes_relative_poses = {}
            copy_robot_links_relative_poses = {}
            # Create prim under which robot meshes are nested and set position
            lazy.omni.usd.commands.CreatePrimCommand("Xform", rc['copy_path']).do()
            copy_robot = lazy.omni.isaac.core.utils.prims.get_prim_at_path(rc['copy_path'])
            reset_pose = robot_copy.reset_pose[robot_type]
            translation = lazy.pxr.Gf.Vec3d(*np.array(reset_pose[0], dtype=float))
            copy_robot.GetAttribute("xformOp:translate").Set(translation)
            # Reorder xyzw -> wxyz for Gf.Quatd.
            orientation = np.array(reset_pose[1], dtype=float)[[3, 0, 1, 2]]
            copy_robot.GetAttribute("xformOp:orient").Set(lazy.pxr.Gf.Quatd(*orientation))
            robot_to_copy = None
            if robot_type == "simplified":
                # The simplified robot is a standalone USDObject that must be imported
                # into the sim before its meshes can be copied (and removed afterwards).
                robot_to_copy = rc['robot']
                og.sim.import_object(robot_to_copy)
            else:
                robot_to_copy = rc['robot']
            # Copy robot meshes
            for link in robot_to_copy.links.values():
                link_name = link.prim_path.split("/")[-1]
                for mesh_name, mesh in link.collision_meshes.items():
                    split_path = mesh.prim_path.split("/")
                    # Do not copy grasping frame (this is necessary for Tiago, but should be cleaned up in the future)
                    if "grasping_frame" in link_name:
                        continue
                    copy_mesh_path = rc['copy_path'] + "/" + link_name
                    copy_mesh_path += f"_{split_path[-1]}" if split_path[-1] != "collisions" else ""
                    lazy.omni.usd.commands.CopyPrimCommand(mesh.prim_path, path_to=copy_mesh_path).do()
                    copy_mesh = lazy.omni.isaac.core.utils.prims.get_prim_at_path(copy_mesh_path)
                    # Record the mesh's pose relative to its link (orientation zeroed out).
                    relative_pose = T.relative_pose_transform(*mesh.get_position_orientation(), *link.get_position_orientation())
                    relative_pose = (relative_pose[0], np.array([0, 0, 0, 1]))
                    if link_name not in copy_robot_meshes.keys():
                        copy_robot_meshes[link_name] = {mesh_name: copy_mesh}
                        copy_robot_meshes_relative_poses[link_name] = {mesh_name: relative_pose}
                    else:
                        copy_robot_meshes[link_name][mesh_name] = copy_mesh
                        copy_robot_meshes_relative_poses[link_name][mesh_name] = relative_pose
                    copy_robot_links_relative_poses[link_name] = T.relative_pose_transform(*link.get_position_orientation(), *self.robot.get_position_orientation())
            if robot_type == "simplified":
                og.sim.remove_object(robot_to_copy)
            robot_copy.prims[robot_type] = copy_robot
            robot_copy.meshes[robot_type] = copy_robot_meshes
            robot_copy.relative_poses[robot_type] = copy_robot_meshes_relative_poses
            robot_copy.links_relative_poses[robot_type] = copy_robot_links_relative_poses
        # Step once so the copied prims are registered by the simulator.
        og.sim.step()
        return robot_copy
    def get_action_space(self):
        """
        Returns the gym action space for selecting a (target object, primitive) pair.

        NOTE(review): this Tuple is ordered (object index, primitive index), while
        apply() unpacks its input as (primitive index, object index) -- confirm which
        ordering callers actually rely on.
        """
        # TODO: Figure out how to implement what happens when the set of objects in scene changes.
        if self._task_relevant_objects_only:
            assert isinstance(self.env.task, BehaviorTask), "Activity relevant objects can only be used for BEHAVIOR tasks"
            self.addressable_objects = sorted(set(self.env.task.object_scope.values()), key=lambda obj: obj.name)
        else:
            self.addressable_objects = sorted(set(self.env.scene.objects_by_name.values()), key=lambda obj: obj.name)
        # Filter out the robots.
        self.addressable_objects = [obj for obj in self.addressable_objects if not isinstance(obj, BaseRobot)]
        self.num_objects = len(self.addressable_objects)
        return gym.spaces.Tuple(
            [gym.spaces.Discrete(self.num_objects), gym.spaces.Discrete(len(StarterSemanticActionPrimitiveSet))]
        )
    def get_action_from_primitive_and_object(self, primitive: StarterSemanticActionPrimitiveSet, obj: BaseObject):
        """
        Encodes a primitive + target object as a (primitive index, object index) tuple,
        matching the unpack order expected by apply().

        Args:
            primitive (StarterSemanticActionPrimitiveSet): Primitive to execute.
            obj (BaseObject): Target object; must be in self.addressable_objects.

        Returns:
            tuple: (int primitive index, int object index)
        """
        assert obj in self.addressable_objects
        primitive_int = int(primitive)
        return primitive_int, self.addressable_objects.index(obj)
def _get_obj_in_hand(self):
"""
Get object in the robot's hand
Returns:
StatefulObject or None: Object if robot is holding something or None if it is not
"""
obj_in_hand = self.robot._ag_obj_in_hand[self.arm] # TODO(MP): Expose this interface.
return obj_in_hand
def apply(self, action):
# Decompose the tuple
action_idx, obj_idx = action
# Find the target object.
target_obj = self.addressable_objects[obj_idx]
# Find the appropriate action generator.
action = StarterSemanticActionPrimitiveSet(action_idx)
return self.apply_ref(action, target_obj)
    def apply_ref(self, prim, *args, attempts=3):
        """
        Yields action for robot to execute the primitive with the given arguments.
        Args:
            prim (StarterSemanticActionPrimitiveSet): Primitive to execute
            args: Arguments for the primitive
            attempts (int): Number of attempts to make before raising an error
        Yields:
            np.array or None: Action array for one step for the robot to execute the primitve or None if primitive completed
        Raises:
            ActionPrimitiveError: If primitive fails to execute
        """
        assert attempts > 0, "Must make at least one attempt"
        ctrl = self.controller_functions[prim]
        # Collect the error from every failed attempt so the final exception is informative.
        errors = []
        for _ in range(attempts):
            # Attempt
            success = False
            try:
                yield from ctrl(*args)
                success = True
            except ActionPrimitiveError as e:
                errors.append(e)
            # Best-effort cleanup between attempts: each step is allowed to fail
            # independently without aborting the remaining cleanup steps.
            try:
                # If we're not holding anything, release the hand so it doesn't stick to anything else.
                if not self._get_obj_in_hand():
                    yield from self._execute_release()
            except ActionPrimitiveError:
                pass
            try:
                # Make sure we retract the arm after every step
                yield from self._reset_hand()
            except ActionPrimitiveError:
                pass
            try:
                # Settle before returning.
                yield from self._settle_robot()
            except ActionPrimitiveError:
                pass
            # Stop on success
            if success:
                return
        # All attempts failed; surface every collected error at once.
        raise ActionPrimitiveErrorGroup(errors)
    def _open(self, obj):
        """Yields actions to open the given openable object."""
        yield from self._open_or_close(obj, True)
    def _close(self, obj):
        """Yields actions to close the given openable object."""
        yield from self._open_or_close(obj, False)
    def _open_or_close(self, obj, should_open):
        """
        Yields actions to open or close an articulated object by grasping and pulling/pushing
        its handle through planned waypoints.

        Args:
            obj (StatefulObject): Object with an Open state to manipulate.
            should_open (bool): True to open the object, False to close it.

        Yields:
            np.array: Action arrays executing the maneuver.

        Raises:
            ActionPrimitiveError: If the hand is full, grasp sampling fails, or the
                object does not end up in the requested open/closed state.
        """
        # Update the tracking to track the eef.
        self._tracking_object = self.robot
        if self._get_obj_in_hand():
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                "Cannot open or close an object while holding an object",
                {"object in hand": self._get_obj_in_hand().name},
            )
        # Open the hand first
        yield from self._execute_release()
        for _ in range(m.MAX_ATTEMPTS_FOR_OPEN_CLOSE):
            try:
                # TODO: This needs to be fixed. Many assumptions (None relevant joint, 3 waypoints, etc.)
                if should_open:
                    grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None)
                else:
                    grasp_data = get_grasp_position_for_open(self.robot, obj, should_open, None, num_waypoints=3)
                if grasp_data is None:
                    # We were trying to do something but didn't have the data.
                    raise ActionPrimitiveError(
                        ActionPrimitiveError.Reason.SAMPLING_ERROR,
                        "Could not sample grasp position for target object",
                        {"target object": obj.name},
                    )
                relevant_joint, grasp_pose, target_poses, object_direction, grasp_required, pos_change = grasp_data
                # If the required joint motion is negligible, the object is already close
                # enough to the requested state.
                if abs(pos_change) < 0.1:
                    indented_print("Yaw change is small and done,", pos_change)
                    return
                # Prepare data for the approach later.
                approach_pos = grasp_pose[0] + object_direction * m.OPEN_GRASP_APPROACH_DISTANCE
                approach_pose = (approach_pos, grasp_pose[1])
                # If the grasp pose is too far, navigate
                yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose)
                yield from self._move_hand(grasp_pose, stop_if_stuck=True)
                # We can pre-grasp in sticky grasping mode only for opening
                if should_open:
                    yield from self._execute_grasp()
                # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm.
                # It's okay if we can't go all the way because we run into the object.
                yield from self._navigate_if_needed(obj, pose_on_obj=approach_pose)
                if should_open:
                    yield from self._move_hand_linearly_cartesian(approach_pose, ignore_failure=False, stop_on_contact=True, stop_if_stuck=True)
                else:
                    yield from self._move_hand_linearly_cartesian(approach_pose, ignore_failure=False, stop_if_stuck=True)
                # Step once to update
                empty_action = self._empty_action()
                yield self._postprocess_action(empty_action)
                # Drag the handle through each planned waypoint in sequence.
                for i, target_pose in enumerate(target_poses):
                    yield from self._move_hand_linearly_cartesian(target_pose, ignore_failure=False, stop_if_stuck=True)
                # Moving to target pose often fails. This might leave the robot's motors with torques that
                # try to get to a far-away position thus applying large torques, but unable to move due to
                # the sticky grasp joint. Thus if we release the joint, the robot might suddenly launch in an
                # arbitrary direction. To avoid this, we command the hand to apply torques with its current
                # position as its target. This prevents the hand from jerking into some other position when we do a release.
                yield from self._move_hand_linearly_cartesian(
                    self.robot.eef_links[self.arm].get_position_orientation(),
                    ignore_failure=True,
                    stop_if_stuck=True
                )
                if should_open:
                    yield from self._execute_release()
                    yield from self._move_base_backward()
            except ActionPrimitiveError as e:
                indented_print(e)
                # Back off before retrying so the next attempt starts from free space.
                if should_open:
                    yield from self._execute_release()
                    yield from self._move_base_backward()
                else:
                    yield from self._move_hand_backward()
        if obj.states[object_states.Open].get_value() != should_open:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
                "Despite executing the planned trajectory, the object did not open or close as expected. Maybe try again",
                {"target object": obj.name, "is it currently open": obj.states[object_states.Open].get_value()},
            )
# TODO: Figure out how to generalize out of this "backing out" behavior.
def _move_base_backward(self, steps=5, speed=0.2):
"""
Yields action for the robot to move base so the eef is in the target pose using the planner
Args:
steps (int): steps to move base
speed (float): base speed
Returns:
np.array or None: Action array for one step for the robot to move base or None if its at the target pose
"""
for _ in range(steps):
action = self._empty_action()
action[self.robot.controller_action_idx["gripper_{}".format(self.arm)]] = 1.0
action[self.robot.base_control_idx[0]] = -speed
yield self._postprocess_action(action)
    def _move_hand_backward(self, steps=5, speed=0.2):
        """
        Yields action for the robot to move its hand backwards (along the first arm
        control dimension) while keeping the gripper open.
        Args:
            steps (int): steps to move eef
            speed (float): eef speed
        Returns:
            np.array or None: Action array for one step for the robot to move hand or None if its at the target pose
        """
        for _ in range(steps):
            action = self._empty_action()
            action[self.robot.controller_action_idx["gripper_{}".format(self.arm)]] = 1.0
            action[self.robot.controller_action_idx["arm_{}".format(self.arm)][0]] = -speed
            yield self._postprocess_action(action)
def _move_hand_upward(self, steps=5, speed=0.1):
"""
Yields action for the robot to move hand upward.
Args:
steps (int): steps to move eef
speed (float): eef speed
Returns:
np.array or None: Action array for one step for the robot to move hand or None if its at the target pose
"""
# TODO: Combine these movement functions.
for _ in range(steps):
action = self._empty_action()
action[self.robot.controller_action_idx["gripper_{}".format(self.arm)]] = 1.0
action[self.robot.controller_action_idx["arm_{}".format(self.arm)][2]] = speed
yield self._postprocess_action(action)
    def _grasp(self, obj):
        """
        Yields action for the robot to navigate to object if needed, then to grasp it
        Args:
            StatefulObject: Object for robot to grasp
        Returns:
            np.array or None: Action array for one step for the robot to grasp or None if grasp completed
        Raises:
            ActionPrimitiveError: If another object is already grasped, or if no object
                (or the wrong object) is in hand after the grasp executes.
        """
        # Update the tracking to track the object.
        self._tracking_object = obj
        # Don't do anything if the object is already grasped.
        obj_in_hand = self._get_obj_in_hand()
        if obj_in_hand is not None:
            if obj_in_hand == obj:
                return
            else:
                raise ActionPrimitiveError(
                    ActionPrimitiveError.Reason.PRE_CONDITION_ERROR,
                    "Cannot grasp when your hand is already full",
                    {"target object": obj.name, "object currently in hand": obj_in_hand.name},
                )
        # Open the hand first
        yield from self._execute_release()
        # Allow grasping from suboptimal extents if we've tried enough times.
        grasp_poses = get_grasp_poses_for_object_sticky(obj)
        grasp_pose, object_direction = random.choice(grasp_poses)
        # Prepare data for the approach later.
        # The approach point sits GRASP_APPROACH_DISTANCE past the grasp pose along the object direction.
        approach_pos = grasp_pose[0] + object_direction * m.GRASP_APPROACH_DISTANCE
        approach_pose = (approach_pos, grasp_pose[1])
        # If the grasp pose is too far, navigate.
        yield from self._navigate_if_needed(obj, pose_on_obj=grasp_pose)
        yield from self._move_hand(grasp_pose)
        # We can pre-grasp in sticky grasping mode.
        yield from self._execute_grasp()
        # Since the grasp pose is slightly off the object, we want to move towards the object, around 5cm.
        # It's okay if we can't go all the way because we run into the object.
        indented_print("Performing grasp approach")
        yield from self._move_hand_linearly_cartesian(approach_pose, stop_on_contact=True)
        # Step once to update
        empty_action = self._empty_action()
        yield self._postprocess_action(empty_action)
        # Post-condition checks: something must be in hand, and it must be the target.
        if self._get_obj_in_hand() is None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
                "Grasp completed, but no object detected in hand after executing grasp",
                {"target object": obj.name},
            )
        yield from self._reset_hand()
        if self._get_obj_in_hand() != obj:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
                "An unexpected object was detected in hand after executing grasp. Consider releasing it",
                {"expected object": obj.name, "actual object": self._get_obj_in_hand().name},
            )
    def _place_on_top(self, obj):
        """
        Yields action for the robot to navigate to the object if needed, then to place an object on it
        Args:
            obj (StatefulObject): Object for robot to place the object in its hand on
        Returns:
            np.array or None: Action array for one step for the robot to place or None if place completed
        """
        # Delegate to the generic predicate-based placement with the OnTop predicate.
        yield from self._place_with_predicate(obj, object_states.OnTop)
    def _place_inside(self, obj):
        """
        Yields action for the robot to navigate to the object if needed, then to place an object in it
        Args:
            obj (StatefulObject): Object for robot to place the object in its hand on
        Returns:
            np.array or None: Action array for one step for the robot to place or None if place completed
        """
        # Delegate to the generic predicate-based placement with the Inside predicate.
        yield from self._place_with_predicate(obj, object_states.Inside)
    def _toggle_on(self, obj):
        """Yields actions to toggle the given object on."""
        yield from self._toggle(obj, True)
    def _toggle_off(self, obj):
        """Yields actions to toggle the given object off."""
        yield from self._toggle(obj, False)
    def _toggle(self, obj, value):
        """
        Yields actions to set the object's ToggledOn state by moving the hand into its
        toggle marker.

        Args:
            obj (StatefulObject): Object with a ToggledOn state.
            value (bool): Desired toggled state.

        Yields:
            np.array: Action arrays executing the toggle.

        Raises:
            ActionPrimitiveError: If the object does not end up in the requested state.
        """
        # No-op if the object is already in the requested state.
        if obj.states[object_states.ToggledOn].get_value() == value:
            return
        # Put the hand in the toggle marker.
        toggle_state = obj.states[object_states.ToggledOn]
        toggle_position = toggle_state.get_link_position()
        yield from self._navigate_if_needed(obj, toggle_position)
        hand_orientation = self.robot.eef_links[self.arm].get_orientation()  # Just keep the current hand orientation.
        desired_hand_pose = (toggle_position, hand_orientation)
        yield from self._move_hand(desired_hand_pose)
        if obj.states[object_states.ToggledOn].get_value() != value:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.POST_CONDITION_ERROR,
                "The object did not toggle as expected - maybe try again",
                {"target object": obj.name, "is it currently toggled on": obj.states[object_states.ToggledOn].get_value()}
            )
    def _place_with_predicate(self, obj, predicate):
        """
        Yields action for the robot to navigate to the object if needed, then to place it
        Args:
            obj (StatefulObject): Object for robot to place the object in its hand on
            predicate (object_states.OnTop or object_states.Inside): Determines whether to place on top or inside
        Returns:
            np.array or None: Action array for one step for the robot to place or None if place completed
        Raises:
            ActionPrimitiveError: If nothing is grasped, the release fails, or the placed
                object does not satisfy the predicate afterwards.
        """
        # Update the tracking to track the object.
        self._tracking_object = obj
        obj_in_hand = self._get_obj_in_hand()
        if obj_in_hand is None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.PRE_CONDITION_ERROR, "You need to be grasping an object first to place it somewhere."
            )
        # Sample location to place object
        obj_pose = self._sample_pose_with_object_and_predicate(predicate, obj_in_hand, obj)
        # Convert the desired object pose into the hand pose that realizes it.
        hand_pose = self._get_hand_pose_for_object_pose(obj_pose)
        yield from self._navigate_if_needed(obj, pose_on_obj=hand_pose)
        yield from self._move_hand(hand_pose)
        yield from self._execute_release()
        # Post-condition checks: the hand must be empty and the predicate must hold.
        if self._get_obj_in_hand() is not None:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.EXECUTION_ERROR,
                "Could not release object - the object is still in your hand",
                {"object": self._get_obj_in_hand().name}
            )
        if not obj_in_hand.states[predicate].get_value(obj):
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.EXECUTION_ERROR,
                "Failed to place object at the desired place (probably dropped). The object was still released, so you need to grasp it again to continue",
                {"dropped object": obj_in_hand.name, "target object": obj.name}
            )
        # Lift the hand clear of the placed object.
        yield from self._move_hand_upward()
def _convert_cartesian_to_joint_space(self, target_pose):
    """
    Compute arm joint positions that put the eef at the given world-frame pose.

    Args:
        target_pose (Iterable of array): (position, quaternion) world-frame pose for the eef

    Returns:
        np.array: Joint positions that reach the target pose

    Raises:
        ActionPrimitiveError: If IK finds no solution for the pose.
    """
    pose_in_base = self._get_pose_in_robot_frame(target_pose)
    solution = self._ik_solver_cartesian_to_joint_space(pose_in_base)
    if solution is not None:
        return solution
    raise ActionPrimitiveError(
        ActionPrimitiveError.Reason.PLANNING_ERROR,
        "Could not find joint positions for target pose. You cannot reach it. Try again for a new pose"
    )
def _target_in_reach_of_robot(self, target_pose):
    """
    Check whether the robot's eef can reach a world-frame pose.

    Args:
        target_pose (Iterable of array): (position, quaternion) world-frame pose for the eef

    Returns:
        bool: True if the eef can reach the target pose
    """
    pose_in_base = self._get_pose_in_robot_frame(target_pose)
    return self._target_in_reach_of_robot_relative(pose_in_base)
def _target_in_reach_of_robot_relative(self, relative_target_pose):
    """
    Check whether the robot's eef can reach a pose expressed in the robot frame.

    Args:
        relative_target_pose (Iterable of array): (position, quaternion) robot-frame pose for the eef

    Returns:
        bool: True if IK finds a joint configuration reaching the pose
    """
    joint_solution = self._ik_solver_cartesian_to_joint_space(relative_target_pose)
    return joint_solution is not None
@cached_property
def _manipulation_control_idx(self):
    """Indices of the robot DOFs used for manipulation with the current settings.

    For Tiago in fixed-torso mode, only the left-arm joints are used; in every
    other case (including Tiago with a free torso) the trunk joints are
    prepended to the commanded arm's joints. The original code carried a
    redundant `else` branch identical to the default return; it has been folded
    into a single fallthrough, mirroring `_manipulation_descriptor_path`.
    """
    if isinstance(self.robot, Tiago) and m.TIAGO_TORSO_FIXED:
        assert self.arm == "left", "Fixed torso mode only supports left arm!"
        return self.robot.arm_control_idx["left"]
    # Default: trunk + arm joints for the commanded arm.
    return np.concatenate([self.robot.trunk_control_idx, self.robot.arm_control_idx[self.arm]])
@cached_property
def _manipulation_descriptor_path(self):
    """Path to the arm descriptor YAML matching the current robot and settings."""
    tiago_fixed_torso = isinstance(self.robot, Tiago) and m.TIAGO_TORSO_FIXED
    if tiago_fixed_torso:
        assert self.arm == "left", "Fixed torso mode only supports left arm!"
        return self.robot.robot_arm_descriptor_yamls["left_fixed"]
    # Default descriptor for the commanded arm.
    return self.robot.robot_arm_descriptor_yamls[self.arm]
def _ik_solver_cartesian_to_joint_space(self, relative_target_pose):
    """
    Get joint positions for the arm so the eef is at the target pose, where the target pose
    is expressed in the robot frame.

    Args:
        relative_target_pose (Iterable of array): Position and orientation arrays in an iterable for pose in the robot frame

    Returns:
        np.array or None: Joint positions that reach the target pose, or None if the pose is unreachable
    """
    # A fresh solver is constructed per call, seeded from the robot's reset configuration.
    ik_solver = IKSolver(
        robot_description_path=self._manipulation_descriptor_path,
        robot_urdf_path=self.robot.urdf_path,
        reset_joint_pos=self.robot.reset_joint_pos[self._manipulation_control_idx],
        eef_name=self.robot.eef_link_names[self.arm],
    )
    # Grab the joint positions in order to reach the desired pose target
    joint_pos = ik_solver.solve(
        target_pos=relative_target_pose[0],
        target_quat=relative_target_pose[1],
        max_iterations=100,
    )
    return joint_pos
def _move_hand(self, target_pose, stop_if_stuck=False):
    """
    Yields actions that move the eef to the target world-frame pose using the planner,
    dispatching to IK-space or joint-space motion depending on the arm controller.

    Args:
        target_pose (Iterable of array): (position, quaternion) world-frame pose for the eef
        stop_if_stuck (bool): Whether to abort if the hand stops making progress

    Yields:
        np.array: Action array for one step of the motion
    """
    # Let the robot settle before planning an arm motion.
    yield from self._settle_robot()
    arm_controller = self.robot._controller_config["arm_" + self.arm]
    if arm_controller["name"] == "InverseKinematicsController":
        relative_pose = self._get_pose_in_robot_frame(target_pose)
        yield from self._move_hand_ik(relative_pose, stop_if_stuck=stop_if_stuck)
    else:
        yield from self._move_hand_joint(self._convert_cartesian_to_joint_space(target_pose))
def _move_hand_joint(self, joint_pos):
    """
    Yields actions that move the arm to the given joint configuration via the motion planner.

    Args:
        joint_pos (np.array): Target joint positions for the arm

    Yields:
        np.array: Action array for one step of the planned motion

    Raises:
        ActionPrimitiveError: If no collision-free arm path can be found.
    """
    with PlanningContext(self.robot, self.robot_copy, "original") as context:
        plan = plan_arm_motion(
            robot=self.robot,
            end_conf=joint_pos,
            context=context,
            torso_fixed=m.TIAGO_TORSO_FIXED,
        )
    if plan is None:
        raise ActionPrimitiveError(
            ActionPrimitiveError.Reason.PLANNING_ERROR,
            "There is no accessible path from where you are to the desired joint position. Try again"
        )
    # Follow the plan to the target configuration. Messages are pre-formatted with
    # f-strings: indented_print is a print-style helper, not a %-formatting logger.
    indented_print(f"Plan has {len(plan)} steps")
    # Renamed loop variable so it no longer shadows the joint_pos parameter.
    for i, waypoint_joint_pos in enumerate(plan):
        indented_print(f"Executing grasp plan step {i + 1}/{len(plan)}")
        yield from self._move_hand_direct_joint(waypoint_joint_pos, ignore_failure=True)
def _move_hand_ik(self, eef_pose, stop_if_stuck=False):
    """
    Yields actions that move the arm along a planned eef-space (IK) path to the target pose.

    Args:
        eef_pose (Iterable of array): (position, quaternion) robot-frame pose for the eef
        stop_if_stuck (bool): Whether to abort if the hand stops making progress

    Yields:
        np.array: Action array for one step of the planned motion

    Raises:
        ActionPrimitiveError: If no collision-free path can be found.
    """
    eef_pos = eef_pose[0]
    eef_ori = T.quat2axisangle(eef_pose[1])
    end_conf = np.append(eef_pos, eef_ori)
    with PlanningContext(self.robot, self.robot_copy, "original") as context:
        plan = plan_arm_motion_ik(
            robot=self.robot,
            end_conf=end_conf,
            context=context,
            torso_fixed=m.TIAGO_TORSO_FIXED,
        )
    if plan is None:
        raise ActionPrimitiveError(
            ActionPrimitiveError.Reason.PLANNING_ERROR,
            "There is no accessible path from where you are to the desired joint position. Try again"
        )
    # Follow the plan. Messages are pre-formatted with f-strings: indented_print is a
    # print-style helper, not a %-formatting logger.
    indented_print(f"Plan has {len(plan)} steps")
    for i, target_pose in enumerate(plan):
        # Each plan waypoint is a 6-vector: (x, y, z, axis-angle orientation).
        target_pos = target_pose[:3]
        target_quat = T.axisangle2quat(target_pose[3:])
        indented_print(f"Executing grasp plan step {i + 1}/{len(plan)}")
        yield from self._move_hand_direct_ik((target_pos, target_quat), ignore_failure=True, in_world_frame=False, stop_if_stuck=stop_if_stuck)
def _add_linearly_interpolated_waypoints(self, plan, max_inter_dist):
"""
Adds waypoints to the plan so the distance between values in the plan never exceeds the max_inter_dist.
Args:
plan (Array of arrays): Planned path
max_inter_dist (float): Maximum distance between values in the plan
Returns:
Array of arrays: Planned path with additional waypoints
"""
plan = np.array(plan)
interpolated_plan = []
for i in range(len(plan) - 1):
max_diff = max(plan[i+1] - plan[i])
num_intervals = ceil(max_diff / max_inter_dist)
interpolated_plan += np.linspace(plan[i], plan[i+1], num_intervals, endpoint=False).tolist()
interpolated_plan.append(plan[-1].tolist())
return interpolated_plan
def _move_hand_direct_joint(self, joint_pos, stop_on_contact=False, ignore_failure=False):
    """
    Yields actions that drive the arm directly toward the given joint positions (no planner).

    Args:
        joint_pos (np.array): Array of joint positions for the arm
        stop_on_contact (bool): Whether to stop the move once an object is hit
        ignore_failure (bool): Whether to suppress the error for not reaching the target

    Yields:
        np.array: Action array for one step of the motion

    Raises:
        ActionPrimitiveError: If the hand gets stuck, or (unless ignore_failure) if the
            target is not reached within the step budget.
    """
    # Fixed duplicate: controller_name was previously computed twice with two
    # different formatting styles; compute it once.
    controller_name = f"arm_{self.arm}"
    use_delta = self.robot._controllers[controller_name].use_delta_commands
    action = self._empty_action()
    action[self.robot.controller_action_idx[controller_name]] = joint_pos
    # Sentinel for the stuck check; assumes the eef never starts exactly at the origin.
    prev_eef_pos = np.zeros(3)
    for _ in range(m.MAX_STEPS_FOR_HAND_MOVE_JOINT):
        current_joint_pos = self.robot.get_joint_positions()[self._manipulation_control_idx]
        diff_joint_pos = np.array(current_joint_pos) - np.array(joint_pos)
        if np.max(np.abs(diff_joint_pos)) < m.JOINT_POS_DIFF_THRESHOLD:
            return
        if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
            return
        # If the eef has not moved since the last step, consider the hand stuck.
        if np.max(np.abs(self.robot.get_eef_position(self.arm) - prev_eef_pos)) < 0.0001:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.EXECUTION_ERROR,
                "Hand got stuck during execution."
            )
        if use_delta:
            # Delta controllers expect per-step offsets, not absolute joint targets.
            action[self.robot.controller_action_idx[controller_name]] = diff_joint_pos
        prev_eef_pos = self.robot.get_eef_position(self.arm)
        yield self._postprocess_action(action)
    if not ignore_failure:
        raise ActionPrimitiveError(
            ActionPrimitiveError.Reason.EXECUTION_ERROR,
            "Your hand was obstructed from moving to the desired joint position"
        )
def _move_hand_direct_ik(self, target_pose, stop_on_contact=False, ignore_failure=False, pos_thresh=0.04, ori_thresh=0.4, in_world_frame=True, stop_if_stuck=False):
    """
    Moves the hand to a target pose by commanding the IK controller directly (no planner).

    Args:
        target_pose (tuple): A tuple of two elements, representing the target pose of the hand as a position and a quaternion.
        stop_on_contact (bool, optional): Whether to stop the movement if the hand collides with an object. Defaults to False.
        ignore_failure (bool, optional): Whether to raise an exception if the movement fails. Defaults to False.
        pos_thresh (float, optional): The position threshold for considering the target pose reached. Defaults to 0.04.
        ori_thresh (float, optional): The orientation threshold for considering the target pose reached. Defaults to 0.4.
        in_world_frame (bool, optional): Whether the target pose is given in the world frame. Defaults to True.
        stop_if_stuck (bool, optional): Whether to stop the movement if the hand is stuck. Defaults to False.

    Yields:
        numpy.ndarray: The action to be executed by the robot controller.

    Raises:
        ActionPrimitiveError: If the movement fails and ignore_failure is False.
    """
    # make sure controller is InverseKinematicsController and in expected mode
    controller_config = self.robot._controller_config["arm_" + self.arm]
    assert controller_config["name"] == "InverseKinematicsController", "Controller must be InverseKinematicsController"
    assert controller_config["mode"] == "pose_absolute_ori", "Controller must be in pose_absolute_ori mode"
    if in_world_frame:
        target_pose = self._get_pose_in_robot_frame(target_pose)
    target_pos = target_pose[0]
    target_orn = target_pose[1]
    target_orn_axisangle = T.quat2axisangle(target_pose[1])
    action = self._empty_action()
    control_idx = self.robot.controller_action_idx["arm_" + self.arm]
    prev_pos = prev_orn = None
    for i in range(m.MAX_STEPS_FOR_HAND_MOVE_IK):
        current_pose = self._get_pose_in_robot_frame((self.robot.get_eef_position(), self.robot.get_eef_orientation()))
        current_pos = current_pose[0]
        current_orn = current_pose[1]
        delta_pos = target_pos - current_pos
        target_pos_diff = np.linalg.norm(delta_pos)
        target_orn_diff = (Rotation.from_quat(target_orn) * Rotation.from_quat(current_orn).inv()).magnitude()
        reached_goal = target_pos_diff < pos_thresh and target_orn_diff < ori_thresh
        if reached_goal:
            return
        if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
            return
        if i > 0 and stop_if_stuck:
            pos_diff = np.linalg.norm(prev_pos - current_pos)
            # Fixed: this orientation delta was previously computed on three
            # identical consecutive lines; once is sufficient.
            orn_diff = (Rotation.from_quat(prev_orn) * Rotation.from_quat(current_orn).inv()).magnitude()
            if pos_diff < 0.0003 and orn_diff < 0.01:
                raise ActionPrimitiveError(
                    ActionPrimitiveError.Reason.EXECUTION_ERROR,
                    "Hand is stuck"
                )
        prev_pos = current_pos
        prev_orn = current_orn
        # Position command is a delta; orientation command is absolute (axis-angle).
        action[control_idx] = np.concatenate([delta_pos, target_orn_axisangle])
        yield self._postprocess_action(action)
    if not ignore_failure:
        raise ActionPrimitiveError(
            ActionPrimitiveError.Reason.EXECUTION_ERROR,
            "Your hand was obstructed from moving to the desired joint position"
        )
def _move_hand_linearly_cartesian(self, target_pose, stop_on_contact=False, ignore_failure=False, stop_if_stuck=False):
    """
    Yields actions that move the eef to the target pose along a straight line in Cartesian
    space, interpolating both position and orientation from the current eef pose.

    Args:
        target_pose (Iterable of array): Position and orientation arrays in an iterable for pose
        stop_on_contact (boolean): Determines whether to stop move once an object is hit
        ignore_failure (boolean): Determines whether to throw error for not reaching final joint positions
        stop_if_stuck (boolean): Whether to abort if the hand stops making progress (IK branch only)

    Yields:
        np.array: Action array for one step of the motion

    Raises:
        ActionPrimitiveError: If the hand is obstructed, or if the joint-space branch would
            require a large non-linear joint jump between consecutive waypoints.
    """
    # To make sure that this happens in a roughly linear fashion, we will divide the trajectory
    # into 1cm-long pieces
    start_pos, start_orn = self.robot.eef_links[self.arm].get_position_orientation()
    travel_distance = np.linalg.norm(target_pose[0] - start_pos)
    num_poses = np.max([2, int(travel_distance / m.MAX_CARTESIAN_HAND_STEP) + 1])
    pos_waypoints = np.linspace(start_pos, target_pose[0], num_poses)
    # Also interpolate the rotations
    combined_rotation = Rotation.from_quat(np.array([start_orn, target_pose[1]]))
    slerp = Slerp([0, 1], combined_rotation)
    orn_waypoints = slerp(np.linspace(0, 1, num_poses))
    quat_waypoints = [x.as_quat() for x in orn_waypoints]
    controller_config = self.robot._controller_config["arm_" + self.arm]
    if controller_config["name"] == "InverseKinematicsController":
        waypoints = list(zip(pos_waypoints, quat_waypoints))
        for i, waypoint in enumerate(waypoints):
            if i < len(waypoints) - 1:
                # Intermediate waypoints use the default (loose) thresholds.
                yield from self._move_hand_direct_ik(waypoint, stop_on_contact=stop_on_contact, ignore_failure=ignore_failure, stop_if_stuck=stop_if_stuck)
            else:
                # Final waypoint is tracked with tighter position/orientation thresholds.
                yield from self._move_hand_direct_ik(
                    waypoints[-1],
                    pos_thresh=0.01, ori_thresh=0.1,
                    stop_on_contact=stop_on_contact,
                    ignore_failure=ignore_failure,
                    stop_if_stuck=stop_if_stuck
                )
            # Also decide if we can stop early.
            current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
            pos_diff = np.linalg.norm(np.array(current_pos) - np.array(target_pose[0]))
            orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
            if pos_diff < 0.005 and orn_diff < np.deg2rad(0.1):
                return
            if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
                return
        if not ignore_failure:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.EXECUTION_ERROR,
                "Your hand was obstructed from moving to the desired world position"
            )
    else:
        # Use joint positions
        joint_space_data = [self._convert_cartesian_to_joint_space(waypoint) for waypoint in zip(pos_waypoints, quat_waypoints)]
        joints = list(self.robot.joints.values())
        for joint_pos in joint_space_data:
            # Check if the movement can be done roughly linearly.
            current_joint_positions = self.robot.get_joint_positions()[self._manipulation_control_idx]
            failed_joints = []
            for joint_idx, target_joint_pos, current_joint_pos in zip(self._manipulation_control_idx, joint_pos, current_joint_positions):
                if np.abs(target_joint_pos - current_joint_pos) > m.MAX_ALLOWED_JOINT_ERROR_FOR_LINEAR_MOTION:
                    failed_joints.append(joints[joint_idx].joint_name)
            if failed_joints:
                raise ActionPrimitiveError(
                    ActionPrimitiveError.Reason.EXECUTION_ERROR,
                    "You cannot reach the target position in a straight line - it requires rotating your arm which might cause collisions. You might need to get closer and retry",
                    {"failed joints": failed_joints}
                )
            # Otherwise, move the joint
            yield from self._move_hand_direct_joint(joint_pos, stop_on_contact=stop_on_contact, ignore_failure=ignore_failure)
            # Also decide if we can stop early.
            current_pos, current_orn = self.robot.eef_links[self.arm].get_position_orientation()
            pos_diff = np.linalg.norm(np.array(current_pos) - np.array(target_pose[0]))
            orn_diff = (Rotation.from_quat(current_orn) * Rotation.from_quat(target_pose[1]).inv()).magnitude()
            # NOTE(review): joint branch uses a tighter early-stop position tolerance (1mm)
            # than the IK branch (5mm) — confirm intended.
            if pos_diff < 0.001 and orn_diff < np.deg2rad(0.1):
                return
            if stop_on_contact and detect_robot_collision_in_sim(self.robot, ignore_obj_in_hand=False):
                return
        if not ignore_failure:
            raise ActionPrimitiveError(
                ActionPrimitiveError.Reason.EXECUTION_ERROR,
                "Your hand was obstructed from moving to the desired world position"
            )
def _execute_grasp(self):
    """
    Yields actions that close the gripper to grasp.

    Yields:
        np.array: Action array for one grasp step
    """
    gripper_controller = "gripper_{}".format(self.arm)
    for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
        # Rebuild the no-op action every step so the other joints hold their current targets.
        action = self._empty_action()
        action[self.robot.controller_action_idx[gripper_controller]] = -1.0
        yield self._postprocess_action(action)
def _execute_release(self):
    """
    Yields actions that open the gripper, then verifies the held object was let go.

    Yields:
        np.array: Action array for one release step

    Raises:
        ActionPrimitiveError: If an object is still detected in the hand afterwards.
    """
    gripper_controller = "gripper_{}".format(self.arm)
    for _ in range(m.MAX_STEPS_FOR_GRASP_OR_RELEASE):
        # Rebuild the no-op action every step so the other joints hold their current targets.
        action = self._empty_action()
        action[self.robot.controller_action_idx[gripper_controller]] = 1.0
        yield self._postprocess_action(action)
    if self._get_obj_in_hand() is not None:
        raise ActionPrimitiveError(
            ActionPrimitiveError.Reason.EXECUTION_ERROR,
            "An object was still detected in your hand after executing release",
            {"object in hand": self._get_obj_in_hand().name},
        )
def _overwrite_head_action(self, action):
    """
    Overwrites camera control actions to track an object of interest.
    If self._always_track_eef is true, always tracks the end effector of the robot.
    Otherwise, tracks the object of interest or the end effector as specified by the primitive.

    Args:
        action (array): action array to overwrite

    Returns:
        array: the same action array with the camera entries replaced
    """
    # Decide what pose the camera should look at.
    if self._always_track_eef:
        target_obj_pose = (self.robot.get_eef_position(), self.robot.get_eef_orientation())
    elif self._tracking_object is None:
        # Nothing to track: leave the action untouched.
        return action
    elif self._tracking_object == self.robot:
        target_obj_pose = (self.robot.get_eef_position(), self.robot.get_eef_orientation())
    else:
        target_obj_pose = self._tracking_object.get_position_orientation()
    assert self.robot_model == "Tiago", "Tracking object with camera is currently only supported for Tiago"
    goal_q = self._get_head_goal_q(target_obj_pose)
    camera_idx = self.robot.controller_action_idx["camera"]
    cam_config = self.robot._controller_config["camera"]
    assert cam_config["name"] == "JointController", "Camera controller must be JointController"
    assert cam_config["motor_type"] == "position", "Camera controller must be in position control mode"
    if cam_config["use_delta_commands"]:
        # Delta mode: command the offset from the current head configuration.
        current_q = self.robot.get_joint_positions()[self.robot.camera_control_idx]
        action[camera_idx] = goal_q - current_q
    else:
        action[camera_idx] = goal_q
    return action
def _get_head_goal_q(self, target_obj_pose):
    """
    Compute goal positions for the head joints ('head_1_joint' pan, 'head_2_joint' tilt)
    so the camera looks at the target pose. If panning to the target is outside the pan
    joint's limits, both joints fall back to the default (reset) head configuration.

    Args:
        target_obj_pose (Iterable of array): (position, quaternion) world-frame pose to look at

    Returns:
        list: [pan_goal, tilt_goal] head joint positions
    """
    pan_joint = self.robot.joints["head_1_joint"]
    tilt_joint = self.robot.joints["head_2_joint"]
    pan_limits = [pan_joint.lower_limit, pan_joint.upper_limit]
    tilt_limits = [tilt_joint.lower_limit, tilt_joint.upper_limit]
    # Default to holding the current joint positions.
    pan_goal = pan_joint.get_state()[0][0]
    tilt_goal = tilt_joint.get_state()[0][0]
    robot_pose = self.robot.get_position_orientation()
    obj_in_base = T.relative_pose_transform(*target_obj_pose, *robot_pose)
    # Pan angle: bearing of the object in the base xy plane (parallel to the floor).
    theta = np.arctan2(obj_in_base[0][1], obj_in_base[0][0])
    if pan_limits[0] < theta < pan_limits[1]:
        pan_goal = theta
        # Tilt angle: elevation of the object relative to the head link, in the base xz plane.
        head2_pose = self.robot.links["head_2_link"].get_position_orientation()
        head2_in_base = T.relative_pose_transform(*head2_pose, *robot_pose)
        phi = np.arctan2(obj_in_base[0][2] - head2_in_base[0][2], obj_in_base[0][0])
        if tilt_limits[0] < phi < tilt_limits[1]:
            tilt_goal = phi
    else:
        # The object cannot be brought into view; fall back to the reset head pose.
        default_head_pos = self._get_reset_joint_pos()[self.robot.controller_action_idx["camera"]]
        pan_goal = default_head_pos[0]
        tilt_goal = default_head_pos[1]
    return [pan_goal, tilt_goal]
def _empty_action(self):
    """
    Build a no-op action that keeps the robot in its current configuration.

    Returns:
        np.array: Action array for one step in which the robot does nothing
    """
    action = np.zeros(self.robot.action_dim)
    for name, controller in self.robot._controllers.items():
        joint_idx = controller.dof_idx
        action_idx = self.robot.controller_action_idx[name]
        is_absolute_position_controller = (
            controller.control_type == ControlType.POSITION
            and len(joint_idx) == len(action_idx)
            and not controller.use_delta_commands
        )
        if is_absolute_position_controller:
            # Holding position = commanding the current joint positions.
            action[action_idx] = self.robot.get_joint_positions()[joint_idx]
        elif self.robot._controller_config[name]["name"] == "InverseKinematicsController":
            # overwrite the goal orientation, since it is in absolute frame.
            assert self.robot._controller_config["arm_" + self.arm]["mode"] == "pose_absolute_ori", "Controller must be in pose_absolute_ori mode"
            current_ori = T.quat2axisangle(self.robot.get_relative_eef_orientation())
            arm_idx = self.robot.controller_action_idx["arm_" + self.arm]
            action[arm_idx[3:]] = current_ori
    return action
def _reset_hand(self):
    """
    Yields actions that move the hand back to the pose best suited for subsequent primitives,
    falling back to direct (unplanned) motion if planning fails.

    Yields:
        np.array: Action array for one step of the reset motion
    """
    arm_controller = self.robot._controller_config["arm_" + self.arm]
    indented_print("Resetting hand")
    if arm_controller["name"] == "InverseKinematicsController":
        reset_eef_pose = self._get_reset_eef_pose()
        try:
            yield from self._move_hand_ik(reset_eef_pose)
        except ActionPrimitiveError:
            indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body")
            yield from self._move_hand_direct_ik(reset_eef_pose, ignore_failure=True, in_world_frame=False)
    else:
        reset_joint_pos = self._get_reset_joint_pos()[self._manipulation_control_idx]
        try:
            yield from self._move_hand_joint(reset_joint_pos)
        except ActionPrimitiveError:
            indented_print("Could not do a planned reset of the hand - probably obj_in_hand collides with body")
            yield from self._move_hand_direct_joint(reset_joint_pos, ignore_failure=True)
def _get_reset_eef_pose(self):
# TODO: Add support for Fetch
if self.robot_model == "Tiago":
return np.array([0.28493954, 0.37450749, 1.1512334]), np.array([-0.21533823, 0.05361032, -0.08631776, 0.97123871])
else:
return np.array([ 0.48688125, -0.12507881, 0.97888719]), np.array([ 0.61324748, 0.61305553, -0.35266518, 0.35173529])
def _get_reset_joint_pos(self):
    """
    Hard-coded full-body reset joint configuration for the current robot model.

    Returns:
        np.array: Reset joint positions (Tiago layout if self.robot_model == "Tiago",
        otherwise the Fetch layout)
    """
    reset_pose_fetch = np.array(
        [
            0.0,
            0.0,  # wheels
            0.0,  # trunk
            0.0,
            -1.0,
            0.0,  # head
            -1.0,
            1.53448,
            2.2,
            0.0,
            1.36904,
            1.90996,  # arm
            0.05,
            0.05,  # gripper
        ]
    )
    # NOTE(review): grouping of the Tiago values (base, trunk, head, arms, grippers)
    # is inferred from the robot's joint ordering — confirm before relying on labels.
    reset_pose_tiago = np.array([
        -1.78029833e-04,
        3.20231302e-05,
        -1.85759447e-07,
        0.0,
        -0.2,
        0.0,
        0.1,
        -6.10000000e-01,
        -1.10000000e+00,
        0.00000000e+00,
        -1.10000000e+00,
        1.47000000e+00,
        0.00000000e+00,
        8.70000000e-01,
        2.71000000e+00,
        1.50000000e+00,
        1.71000000e+00,
        -1.50000000e+00,
        -1.57000000e+00,
        4.50000000e-01,
        1.39000000e+00,
        0.00000000e+00,
        0.00000000e+00,
        4.50000000e-02,
        4.50000000e-02,
        4.50000000e-02,
        4.50000000e-02
    ])
    return reset_pose_tiago if self.robot_model == "Tiago" else reset_pose_fetch
def _navigate_to_pose(self, pose_2d):
    """
    Yields actions that navigate the robot to the specified 2d pose using the base planner.

    Args:
        pose_2d (Iterable): (x, y, yaw) 2d pose

    Yields:
        np.array: Action array for one step of the navigation

    Raises:
        ActionPrimitiveError: If no base path to the target can be planned.
    """
    with PlanningContext(self.robot, self.robot_copy, "simplified") as context:
        plan = plan_base_motion(
            robot=self.robot,
            end_conf=pose_2d,
            context=context,
        )
    if plan is None:
        # TODO: Would be great to produce a more informative error.
        raise ActionPrimitiveError(
            ActionPrimitiveError.Reason.PLANNING_ERROR,
            "Could not make a navigation plan to get to the target position"
        )
    # self._draw_plan(plan)
    # Follow the plan to navigate. Messages are pre-formatted with f-strings:
    # indented_print is a print-style helper, not a %-formatting logger.
    indented_print(f"Plan has {len(plan)} steps")
    # Renamed loop variable so it no longer shadows the pose_2d parameter.
    for i, waypoint in enumerate(plan):
        indented_print(f"Executing navigation plan step {i + 1}/{len(plan)}")
        # Intermediate waypoints use low precision; only the final pose is tracked precisely.
        low_precision = i < len(plan) - 1
        yield from self._navigate_to_pose_direct(waypoint, low_precision=low_precision)
def _draw_plan(self, plan):
    """
    Debug helper: render a base navigation plan on top of the scene traversability map
    in an OpenCV window.

    Args:
        plan (Array of arrays): Planned base path as (x, y, ...) waypoints
    """
    SEARCHED = []
    trav_map = self.env.scene._trav_map
    for q in plan:
        # Convert world-frame (x, y) to map pixel coordinates (flipped for image axes).
        SEARCHED.append(np.flip(trav_map.world_to_map((q[0], q[1]))))
    fig = plt.figure()
    plt.imshow(trav_map.floor_map[0])
    plt.scatter(*zip(*SEARCHED), 5)
    fig.canvas.draw()
    # Convert the canvas to an image. np.fromstring is deprecated (and removed in newer
    # NumPy) for binary data; np.frombuffer is the supported equivalent.
    img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
    plt.close(fig)
    # Convert to BGR for cv2-based viewing.
    img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
    cv2.imshow("SceneGraph", img)
    cv2.waitKey(1)
def _navigate_if_needed(self, obj, pose_on_obj=None, **kwargs):
    """
    Yields actions that navigate the robot into range of the object, doing nothing if the
    target (pose_on_obj if given, otherwise the object itself) is already reachable.

    Args:
        obj (StatefulObject): Object for the robot to be in range of
        pose_on_obj (Iterable): (pos, quat) pose on the object to reach, if any

    Yields:
        np.array: Action array for one step of the navigation
    """
    reach_target = pose_on_obj if pose_on_obj is not None else obj.get_position_orientation()
    if self._target_in_reach_of_robot(reach_target):
        # Already within reach; no navigation required.
        return
    yield from self._navigate_to_obj(obj, pose_on_obj=pose_on_obj, **kwargs)
def _navigate_to_obj(self, obj, pose_on_obj=None, **kwargs):
    """
    Yields actions that sample a collision-free 2d base pose near the object and navigate to it.

    Args:
        obj (StatefulObject): Object to be in range of
        pose_on_obj (Iterable): (pos, quat) pose on the object to be near, if any

    Yields:
        np.array: Action array for one step of the navigation
    """
    sampled_pose_2d = self._sample_pose_near_object(obj, pose_on_obj=pose_on_obj, **kwargs)
    yield from self._navigate_to_pose(sampled_pose_2d)
def _navigate_to_pose_direct(self, pose_2d, low_precision=False):
    """
    Yields actions that drive the robot to the 2d pose without planning, using greedy
    point-and-shoot control: turn toward the goal, drive forward, repeat, then rotate
    to the final orientation.

    Args:
        pose_2d (Iterable): (x, y, yaw) 2d pose
        low_precision (bool): Determines whether to navigate to the pose within a large range (low precision) or small range (high precison)

    Yields:
        np.array: Action array for one step of the navigation

    Raises:
        ActionPrimitiveError: If the position is not reached within the step budget.
    """
    dist_threshold = m.LOW_PRECISION_DIST_THRESHOLD if low_precision else m.DEFAULT_DIST_THRESHOLD
    angle_threshold = m.LOW_PRECISION_ANGLE_THRESHOLD if low_precision else m.DEFAULT_ANGLE_THRESHOLD
    end_pose = self._get_robot_pose_from_2d_pose(pose_2d)
    body_target_pose = self._get_pose_in_robot_frame(end_pose)
    for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION):
        # Close enough in position: leave the drive loop (skips the for-else error below).
        if np.linalg.norm(body_target_pose[0][:2]) < dist_threshold:
            break
        # Heading that points from the robot's current position toward the goal.
        diff_pos = end_pose[0] - self.robot.get_position()
        intermediate_pose = (end_pose[0], T.euler2quat([0, 0, np.arctan2(diff_pos[1], diff_pos[0])]))
        body_intermediate_pose = self._get_pose_in_robot_frame(intermediate_pose)
        diff_yaw = T.quat2euler(body_intermediate_pose[1])[2]
        if abs(diff_yaw) > m.DEFAULT_ANGLE_THRESHOLD:
            # Not facing the goal: turn in place first.
            # NOTE(review): this uses the default angle threshold rather than the
            # (possibly low-precision) angle_threshold above — confirm intended.
            yield from self._rotate_in_place(intermediate_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD)
        else:
            action = self._empty_action()
            if self._base_controller_is_joint:
                # Holonomic (joint) base: command (vx, vy, w) toward the goal.
                direction_vec = body_target_pose[0][:2] / np.linalg.norm(body_target_pose[0][:2]) * m.KP_LIN_VEL
                base_action = [direction_vec[0], direction_vec[1], 0.0]
                action[self.robot.controller_action_idx["base"]] = base_action
            else:
                # Differential drive: command (v, w); already facing the goal, so w = 0.
                base_action = [m.KP_LIN_VEL, 0.0]
                action[self.robot.controller_action_idx["base"]] = base_action
            yield self._postprocess_action(action)
        body_target_pose = self._get_pose_in_robot_frame(end_pose)
    else:
        # Loop exhausted without breaking: navigation failed.
        raise ActionPrimitiveError(
            ActionPrimitiveError.Reason.EXECUTION_ERROR,
            "Could not navigate to the target position",
            {"target pose": end_pose},
        )
    # Rotate in place to final orientation once at location
    yield from self._rotate_in_place(end_pose, angle_threshold=angle_threshold)
def _rotate_in_place(self, end_pose, angle_threshold=m.DEFAULT_ANGLE_THRESHOLD):
    """
    Yields actions that rotate the robot in place to match the yaw of the given pose.

    Args:
        end_pose (Iterable): (pos, quat) pose whose yaw the robot should face
        angle_threshold (float): Yaw error below which the rotation is considered done

    Yields:
        np.array: Action array for one rotation step

    Raises:
        ActionPrimitiveError: If the orientation is not reached within the step budget.
    """
    body_target_pose = self._get_pose_in_robot_frame(end_pose)
    diff_yaw = T.quat2euler(body_target_pose[1])[2]
    for _ in range(m.MAX_STEPS_FOR_WAYPOINT_NAVIGATION):
        if abs(diff_yaw) < angle_threshold:
            break
        # Spin toward the target yaw at a fixed angular rate.
        ang_vel = m.KP_ANGLE_VEL if diff_yaw >= 0.0 else -m.KP_ANGLE_VEL
        action = self._empty_action()
        if self._base_controller_is_joint:
            action[self.robot.controller_action_idx["base"]] = [0.0, 0.0, ang_vel]
        else:
            action[self.robot.controller_action_idx["base"]] = [0.0, ang_vel]
        yield self._postprocess_action(action)
        # Re-measure the remaining yaw error after the step.
        body_target_pose = self._get_pose_in_robot_frame(end_pose)
        diff_yaw = T.quat2euler(body_target_pose[1])[2]
    else:
        raise ActionPrimitiveError(
            ActionPrimitiveError.Reason.EXECUTION_ERROR,
            "Could not rotate in place to the desired orientation",
            {"target pose": end_pose},
        )
    # Emit one no-op action to settle once aligned.
    yield self._postprocess_action(self._empty_action())
def _sample_pose_near_object(self, obj, pose_on_obj=None, **kwargs):
    """
    Sample a collision-free 2d base pose within range of the object.

    Args:
        obj (StatefulObject): Object to sample a 2d pose near
        pose_on_obj (Iterable of arrays or None): The pose to sample near

    Returns:
        np.array: (x, y, yaw) 2d base pose

    Raises:
        ActionPrimitiveError: If no valid pose is found within the attempt budget.
    """
    with PlanningContext(self.robot, self.robot_copy, "simplified") as context:
        for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_NEAR_OBJECT):
            if pose_on_obj is None:
                pos_on_obj = self._sample_position_on_aabb_side(obj)
                # NOTE(review): pose_on_obj is rebound here, so only the FIRST attempt
                # samples a fresh point on the object; later attempts reuse it — confirm
                # this is intended rather than resampling per attempt.
                pose_on_obj = [pos_on_obj, np.array([0, 0, 0, 1])]
            # Sample a base position on a random ray from the target, facing it with the
            # arm workspace centered on the object.
            distance = np.random.uniform(0.0, 5.0)
            yaw = np.random.uniform(-np.pi, np.pi)
            avg_arm_workspace_range = np.mean(self.robot.arm_workspace_range[self.arm])
            pose_2d = np.array(
                [pose_on_obj[0][0] + distance * np.cos(yaw), pose_on_obj[0][1] + distance * np.sin(yaw), yaw + np.pi - avg_arm_workspace_range]
            )
            # Check room
            obj_rooms = obj.in_rooms if obj.in_rooms else [self.env.scene._seg_map.get_room_instance_by_point(pose_on_obj[0][:2])]
            if self.env.scene._seg_map.get_room_instance_by_point(pose_2d[:2]) not in obj_rooms:
                indented_print("Candidate position is in the wrong room.")
                continue
            if not self._test_pose(pose_2d, context, pose_on_obj=pose_on_obj, **kwargs):
                continue
            return pose_2d
        raise ActionPrimitiveError(
            ActionPrimitiveError.Reason.SAMPLING_ERROR, "Could not find valid position near object.",
            {"target object": obj.name, "target pos": obj.get_position(), "pose on target": pose_on_obj}
        )
@staticmethod
def _sample_position_on_aabb_side(target_obj):
"""
Returns a position on one of the axis-aligned bounding box (AABB) side faces of the target object.
Args:
target_obj (StatefulObject): Object to sample a position on
Returns:
3-array: (x,y,z) Position in the world frame
"""
aabb_center, aabb_extent = target_obj.aabb_center, target_obj.aabb_extent
# We want to sample only from the side-facing faces.
face_normal_axis = np.random.choice([0, 1])
face_normal_direction = np.random.choice([-1, 1])
face_center = aabb_center + np.eye(3)[face_normal_axis] * aabb_extent * face_normal_direction
face_lateral_axis = 0 if face_normal_axis == 1 else 1
face_lateral_half_extent = np.eye(3)[face_lateral_axis] * aabb_extent / 2
face_vertical_half_extent = np.eye(3)[2] * aabb_extent / 2
face_min = face_center - face_vertical_half_extent - face_lateral_half_extent
face_max = face_center + face_vertical_half_extent + face_lateral_half_extent
return np.random.uniform(face_min, face_max)
# def _sample_pose_in_room(self, room: str):
# """
# Returns a pose for the robot within in the room where the robot is not in collision with anything
# Args:
# room (str): Name of room
# Returns:
# 2-tuple:
# - 3-array: (x,y,z) Position in the world frame
# - 4-array: (x,y,z,w) Quaternion orientation in the world frame
# """
# # TODO(MP): Bias the sampling near the agent.
# for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_IN_ROOM):
# _, pos = self.env.scene.get_random_point_by_room_instance(room)
# yaw = np.random.uniform(-np.pi, np.pi)
# pose = (pos[0], pos[1], yaw)
# if self._test_pose(pose):
# return pose
# raise ActionPrimitiveError(
# ActionPrimitiveError.Reason.SAMPLING_ERROR,
# "Could not find valid position in the given room to travel to",
# {"room": room}
# )
def _sample_pose_with_object_and_predicate(self, predicate, held_obj, target_obj, near_poses=None, near_poses_threshold=None):
"""
Returns a pose for the held object relative to the target object that satisfies the predicate
Args:
predicate (object_states.OnTop or object_states.Inside): Relation between held object and the target object
held_obj (StatefulObject): Object held by the robot
target_obj (StatefulObject): Object to sample a pose relative to
near_poses (Iterable of arrays): Poses in the world frame to sample near
near_poses_threshold (float): The distance threshold to check if the sampled pose is near the poses in near_poses
Returns:
2-tuple:
- 3-array: (x,y,z) Position in the world frame
- 4-array: (x,y,z,w) Quaternion orientation in the world frame
"""
pred_map = {object_states.OnTop: "onTop", object_states.Inside: "inside"}
for _ in range(m.MAX_ATTEMPTS_FOR_SAMPLING_POSE_WITH_OBJECT_AND_PREDICATE):
_, _, bb_extents, bb_center_in_base = held_obj.get_base_aligned_bbox()
sampling_results = sample_cuboid_for_predicate(pred_map[predicate], target_obj, bb_extents)
if sampling_results[0][0] is None:
continue
sampled_bb_center = sampling_results[0][0] + np.array([0, 0, m.PREDICATE_SAMPLING_Z_OFFSET])
sampled_bb_orn = sampling_results[0][2]
# Get the object pose by subtracting the offset
sampled_obj_pose = T.pose2mat((sampled_bb_center, sampled_bb_orn)) @ T.pose_inv(T.pose2mat((bb_center_in_base, [0, 0, 0, 1])))
# Check that the pose is near one of the poses in the near_poses list if provided.
if near_poses:
sampled_pos = np.array([sampled_obj_pose[0]])
if not np.any(np.linalg.norm(near_poses - sampled_pos, axis=1) < near_poses_threshold):
continue
# Return the pose
return T.mat2pose(sampled_obj_pose)
# If we get here, sampling failed.
raise ActionPrimitiveError(
ActionPrimitiveError.Reason.SAMPLING_ERROR,
"Could not find a position to put this object in the desired relation to the target object",
{"target object": target_obj.name, "object in hand": held_obj.name, "relation": pred_map[predicate]},
)
# TODO: Why do we need to pass in the context here?
def _test_pose(self, pose_2d, context, pose_on_obj=None):
"""
Determines whether the robot can reach the pose on the object and is not in collision at the specified 2d pose
Args:
pose_2d (Iterable): (x, y, yaw) 2d pose
context (Context): Planning context reference
pose_on_obj (Iterable of arrays): Pose on the object in the world frame
Returns:
bool: True if the robot is in a valid pose, False otherwise
"""
pose = self._get_robot_pose_from_2d_pose(pose_2d)
if pose_on_obj is not None:
relative_pose = T.relative_pose_transform(*pose_on_obj, *pose)
if not self._target_in_reach_of_robot_relative(relative_pose):
return False
if set_base_and_detect_collision(context, pose):
indented_print("Candidate position failed collision test.")
return False
return True
@staticmethod
def _get_robot_pose_from_2d_pose(pose_2d):
"""
Gets 3d pose from 2d pose
Args:
pose_2d (Iterable): (x, y, yaw) 2d pose
Returns:
2-tuple:
- 3-array: (x,y,z) Position in the world frame
- 4-array: (x,y,z,w) Quaternion orientation in the world frame
"""
pos = np.array([pose_2d[0], pose_2d[1], m.DEFAULT_BODY_OFFSET_FROM_FLOOR])
orn = T.euler2quat([0, 0, pose_2d[2]])
return pos, orn
    def _get_pose_in_robot_frame(self, pose):
        """
        Converts the given pose from the world frame into the robot's base frame

        Args:
            pose (Iterable of arrays): (pos, quat) pose in the world frame

        Returns:
            2-tuple:
                - 3-array: (x,y,z) Position in the robot base frame
                - 4-array: (x,y,z,w) Quaternion orientation in the robot base frame
        """
        body_pose = self.robot.get_position_orientation()
        return T.relative_pose_transform(*pose, *body_pose)
def _get_hand_pose_for_object_pose(self, desired_pose):
"""
Gets the pose of the hand for the desired object pose
Args:
desired_pose (Iterable of arrays): Pose of the object in the world frame
Returns:
2-tuple:
- 3-array: (x,y,z) Position of the hand in the world frame
- 4-array: (x,y,z,w) Quaternion orientation of the hand in the world frame
"""
obj_in_hand = self._get_obj_in_hand()
assert obj_in_hand is not None
# Get the object pose & the robot hand pose
obj_in_world = obj_in_hand.get_position_orientation()
hand_in_world = self.robot.eef_links[self.arm].get_position_orientation()
# Get the hand pose relative to the obj pose
hand_in_obj = T.relative_pose_transform(*hand_in_world, *obj_in_world)
# Now apply desired obj pose.
desired_hand_pose = T.pose_transform(*desired_pose, *hand_in_obj)
return desired_hand_pose
    # Function that is particularly useful for Fetch, where it gives time for the base of robot to settle due to its uneven base.
    def _settle_robot(self):
        """
        Yields a no op action for a few steps to allow the robot and physics to settle

        Returns:
            np.array or None: Action array for one step for the robot to do nothing
        """
        # Phase 1: always step a fixed number of no-op actions so transient contact forces dissipate
        for _ in range(30):
            empty_action = self._empty_action()
            yield self._postprocess_action(empty_action)
        # Phase 2: keep stepping no-ops until the base linear velocity is near zero,
        # bounded by a maximum number of extra settling steps
        for _ in range(m.MAX_STEPS_FOR_SETTLING):
            if np.linalg.norm(self.robot.get_linear_velocity()) < 0.01:
                break
            empty_action = self._empty_action()
            yield self._postprocess_action(empty_action)
StanfordVL/OmniGibson/omnigibson/prims/entity_prim.py | import numpy as np
import networkx as nx
from functools import cached_property
import omnigibson as og
import omnigibson.lazy as lazy
import omnigibson.utils.transform_utils as T
from omnigibson.prims.cloth_prim import ClothPrim
from omnigibson.prims.joint_prim import JointPrim
from omnigibson.prims.rigid_prim import RigidPrim
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.utils.constants import PrimType, JointType, JointAxis
from omnigibson.utils.ui_utils import suppress_omni_log
from omnigibson.utils.usd_utils import PoseAPI
from omnigibson.macros import gm, create_module_macros
# Create settings for this module -- module-level tunables are namespaced under `m`
m = create_module_macros(module_path=__file__)
# Default sleep threshold for all objects -- see https://docs.omniverse.nvidia.com/extensions/latest/ext_physics/simulation-control/physics-settings.html?highlight=sleep#sleeping
# Applied to every entity in EntityPrim._initialize()
m.DEFAULT_SLEEP_THRESHOLD = 0.001
class EntityPrim(XFormPrim):
"""
Provides high level functions to deal with an articulation prim and its attributes/ properties. Note that this
type of prim cannot be created from scratch, and assumes there is already a pre-existing prim tree that should
be converted into an articulation!
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that by default, this assumes an articulation already exists (i.e.:
load() will raise NotImplementedError)! Subclasses must implement _load() for this prim to be able to be
dynamically loaded after this class is created.
visual_only (None or bool): If specified, whether this prim should include collisions or not.
Default is True.
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Other values that will be filled in at runtime
self._root_link_name = None # Name of the root link
self._n_dof = None
self._links = None
self._joints = None
self._materials = None
self._visual_only = None
self._articulation_tree = None
self._articulation_view_direct = None
# This needs to be initialized to be used for _load() of PrimitiveObject
self._prim_type = load_config["prim_type"] if load_config is not None and "prim_type" in load_config else PrimType.RIGID
assert self._prim_type in iter(PrimType), f"Unknown prim type {self._prim_type}!"
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
    def _initialize(self):
        """
        Finishes setting up this entity after load: applies the default sleep threshold,
        force-populates material shaders, initializes every owned link, then refreshes joint info.
        Order matters here -- links must exist before joints can be refreshed.
        """
        # Run super method
        super()._initialize()
        # Set the default sleep threshold
        self.sleep_threshold = m.DEFAULT_SLEEP_THRESHOLD
        # Force populate inputs and outputs of the shaders of all materials
        # We suppress errors from omni.usd if we're using encrypted assets, because we're loading from tmp location,
        # not the original location
        with suppress_omni_log(channels=["omni.usd"]):
            for material in self.materials:
                material.shader_force_populate(render=False)
        # Initialize all the links
        for link in self._links.values():
            link.initialize()
        # Update joint information
        self.update_joints()
    def _load(self):
        # By default, this prim cannot be instantiated from scratch!
        # Subclasses must override _load() if they support building the prim tree dynamically.
        raise NotImplementedError("By default, an entity prim cannot be created from scratch.")
    def _post_load(self):
        """
        Post-load setup: converts cloth prims into a single mesh link, builds the link objects,
        applies scale / articulation view / visual-only / collision settings, and caches materials.
        The sequence of these steps is deliberate and should not be reordered.
        """
        # If this is a cloth, delete the root link and replace it with the single nested mesh
        if self._prim_type == PrimType.CLOTH:
            # Verify only a single link and a single mesh exists
            old_link_prim = None
            cloth_mesh_prim = None
            for prim in self._prim.GetChildren():
                if prim.GetPrimTypeInfo().GetTypeName() == "Xform":
                    assert old_link_prim is None, "Found multiple XForm links for a Cloth entity prim! Expected: 1"
                    old_link_prim = prim
                    for child in prim.GetChildren():
                        if child.GetPrimTypeInfo().GetTypeName() == "Mesh" and not child.HasAPI(lazy.pxr.UsdPhysics.CollisionAPI):
                            assert cloth_mesh_prim is None, "Found multiple meshes for a Cloth entity prim! Expected: 1"
                            cloth_mesh_prim = child
            # Move mesh prim one level up via copy, then delete the original link
            # NOTE: We copy because we cannot directly move the prim because it is ancestral
            # NOTE: We use this specific delete method because alternative methods (eg: "delete_prim") fail because
            # the prim is ancestral. Note that because it is non-destructive, the original link prim path is still
            # tracked by omni, so we have to utilize a new unique prim path for the copied cloth mesh
            # See omni.kit.context_menu module for reference
            new_path = f"{self._prim_path}/{old_link_prim.GetName()}_cloth"
            lazy.omni.kit.commands.execute("CopyPrim", path_from=cloth_mesh_prim.GetPath(), path_to=new_path)
            lazy.omni.kit.commands.execute("DeletePrims", paths=[old_link_prim.GetPath()], destructive=False)
        # Setup links info FIRST before running any other post loading behavior
        # We pass in scale explicitly so that the generated links can leverage the desired entity scale
        self.update_links()
        # Optionally set the scale
        if "scale" in self._load_config and self._load_config["scale"] is not None:
            self.scale = self._load_config["scale"]
        # Prepare the articulation view.
        if self.n_joints > 0:
            # Import now to avoid too-eager load of Omni classes due to inheritance
            from omnigibson.utils.deprecated_utils import ArticulationView
            self._articulation_view_direct = ArticulationView(f"{self._prim_path}/{self.root_link_name}")
        # Set visual only flag
        # This automatically handles setting collisions / gravity appropriately per-link
        self.visual_only = self._load_config["visual_only"] if \
            "visual_only" in self._load_config and self._load_config["visual_only"] is not None else False
        if self._prim_type == PrimType.CLOTH:
            assert not self._visual_only, "Cloth cannot be visual-only."
            assert len(self._links) == 1, f"Cloth entity prim can only have one link; got: {len(self._links)}"
            if gm.AG_CLOTH:
                self.create_attachment_point_link()
        # Globally disable any requested collision links
        for link_name in self.disabled_collision_link_names:
            self._links[link_name].disable_collisions()
        # Disable any requested collision pairs
        for a_name, b_name in self.disabled_collision_pairs:
            link_a, link_b = self._links[a_name], self._links[b_name]
            link_a.add_filtered_collision_pair(prim=link_b)
        # Run super
        super()._post_load()
        # Cache material information, de-duplicated by material prim path
        materials = set()
        material_paths = set()
        for link in self._links.values():
            # Rigid links also contribute their visual meshes' materials; cloth links only their own
            xforms = [link] + list(link.visual_meshes.values()) if self.prim_type == PrimType.RIGID else [link]
            for xform in xforms:
                if xform.has_material():
                    mat_path = xform.material.prim_path
                    if mat_path not in material_paths:
                        materials.add(xform.material)
                        material_paths.add(mat_path)
        self._materials = materials
def remove(self):
# First remove all joints
if self._joints is not None:
for joint in self._joints.values():
joint.remove()
# Then links
if self._links is not None:
for link in self._links.values():
link.remove()
# Finally, remove this prim
super().remove()
def update_links(self):
"""
Helper function to refresh owned joints. Useful for synchronizing internal data if
additional bodies are added manually
"""
# Make sure to clean up all pre-existing names for all links
if self._links is not None:
for link in self._links.values():
link.remove_names()
# We iterate over all children of this object's prim,
# and grab any that are presumed to be rigid bodies (i.e.: other Xforms)
joint_children = set()
links_to_create = {}
for prim in self._prim.GetChildren():
link_cls = None
link_name = prim.GetName()
if self._prim_type == PrimType.RIGID and prim.GetPrimTypeInfo().GetTypeName() == "Xform":
# For rigid body object, process prims that are Xforms (e.g. rigid links)
link_cls = RigidPrim
# Also iterate through all children to infer joints and determine the children of those joints
# We will use this info to infer which link is the base link!
for child_prim in prim.GetChildren():
if "joint" in child_prim.GetPrimTypeInfo().GetTypeName().lower():
# Store the child target of this joint
relationships = {r.GetName(): r for r in child_prim.GetRelationships()}
# Only record if this is NOT a fixed link tying us to the world (i.e.: no target for body0)
if len(relationships["physics:body0"].GetTargets()) > 0:
joint_children.add(relationships["physics:body1"].GetTargets()[0].pathString.split("/")[-1])
elif self._prim_type == PrimType.CLOTH and prim.GetPrimTypeInfo().GetTypeName() == "Mesh":
# For cloth object, process prims that are Meshes
link_cls = ClothPrim
# Keep track of all the links we will create. We can't create that just yet because we need to find
# the base link first.
if link_cls is not None:
links_to_create[link_name] = (link_cls, prim)
# Infer the correct root link name -- this corresponds to whatever link does not have any joint existing
# in the children joints
valid_root_links = list(set(links_to_create.keys()) - joint_children)
assert len(valid_root_links) == 1, f"Only a single root link should have been found for {self.name}, " \
f"but found multiple instead: {valid_root_links}"
self._root_link_name = valid_root_links[0] if len(valid_root_links) == 1 else "base_link"
# Now actually create the links
self._links = dict()
for link_name, (link_cls, prim) in links_to_create.items():
# Fixed child links of kinematic-only objects are not kinematic-only, to avoid the USD error:
# PhysicsUSD: CreateJoint - cannot create a joint between static bodies, joint prim: ...
link_load_config = {
"kinematic_only": self._load_config.get("kinematic_only", False)
if link_name == self._root_link_name else False,
"remesh": self._load_config.get("remesh", True),
}
self._links[link_name] = link_cls(
prim_path=prim.GetPrimPath().__str__(),
name=f"{self._name}:{link_name}",
load_config=link_load_config,
)
    def update_joints(self):
        """
        Helper function to refresh owned joints. Useful for synchronizing internal data if
        additional bodies are added manually

        Rebuilds the joints dict from the articulation view's metadata (skipping fixed joints,
        which contribute zero DOFs), then refreshes joint limits and the articulation tree.
        """
        # Make sure to clean up all pre-existing names for all joints
        if self._joints is not None:
            for joint in self._joints.values():
                joint.remove_names()
        # Initialize joints dictionary
        self._joints = dict()
        self.update_handles()
        # Handle case separately based on whether we are actually articulated or not
        if self._articulation_view and not self.kinematic_only:
            self._n_dof = self._articulation_view.num_dof
            # Additionally grab DOF info if we have non-fixed joints
            if self._n_dof > 0:
                for i in range(self._articulation_view._metadata.joint_count):
                    # Only add the joint if it's not fixed (i.e.: it has DOFs > 0)
                    if self._articulation_view._metadata.joint_dof_counts[i] > 0:
                        joint_name = self._articulation_view._metadata.joint_names[i]
                        # The DOF offset maps this joint to its (first) DOF's prim path
                        joint_dof_offset = self._articulation_view._metadata.joint_dof_offsets[i]
                        joint_path = self._articulation_view._dof_paths[0][joint_dof_offset]
                        joint = JointPrim(
                            prim_path=joint_path,
                            name=f"{self._name}:joint_{joint_name}",
                            articulation_view=self._articulation_view_direct,
                        )
                        joint.initialize()
                        self._joints[joint_name] = joint
        else:
            # TODO: May need to extend to clusters of rigid bodies, that aren't exactly joined
            # We assume this object contains a single rigid body
            self._n_dof = 0
        # Sanity check: prim-tree joint count must agree with what the articulation view reported
        assert self.n_joints == len(self._joints), \
            f"Number of joints inferred from prim tree ({self.n_joints}) does not match number of joints " \
            f"found in the articulation view ({len(self._joints)})!"
        self._update_joint_limits()
        self._compute_articulation_tree()
    def _update_joint_limits(self):
        """
        Helper function to update internal joint limits for prismatic joints based on the object's scale

        Prismatic joint limits are distances, so they must be rescaled with the entity; revolute
        limits are angles and are unaffected by scale.
        """
        # If the scale is [1, 1, 1], we can skip this step
        if np.allclose(self.scale, np.ones(3)):
            return
        prismatic_joints = {j_name: j for j_name, j in self._joints.items() if j.joint_type == JointType.JOINT_PRISMATIC}
        # If there are no prismatic joints, we can skip this step
        if len(prismatic_joints) == 0:
            return
        uniform_scale = np.allclose(self.scale, self.scale[0])
        for joint_name, joint in prismatic_joints.items():
            if uniform_scale:
                # Uniform scale: any axis gives the same factor
                scale_along_axis = self.scale[0]
            else:
                assert not self.initialized, \
                    "Cannot update joint limits for a non-uniformly scaled object when already initialized."
                # NOTE(review): scale_along_axis is only assigned when some link matches joint.body0;
                # if no link matched, the limit update below would raise NameError (or reuse the
                # previous joint's value) -- confirm body0 always refers to an owned link.
                for link in self.links.values():
                    if joint.body0 == link.prim_path:
                        # Find the parent link frame orientation in the object frame
                        _, link_local_orn = link.get_local_pose()
                        # Find the joint frame orientation in the parent link frame
                        # (gf quat is (w, x, y, z); reindex to (x, y, z, w))
                        joint_local_orn = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(joint.get_attribute("physics:localRot0"))[[1, 2, 3, 0]]
                        # Compute the joint frame orientation in the object frame
                        joint_orn = T.quat_multiply(quaternion1=joint_local_orn, quaternion0=link_local_orn)
                        # assert T.check_quat_right_angle(joint_orn), \
                        #     f"Objects that are NOT uniformly scaled requires all joints to have orientations that " \
                        #     f"are factors of 90 degrees! Got orn: {joint_orn} for object {self.name}"
                        # Find the joint axis unit vector (e.g. [1, 0, 0] for "X", [0, 1, 0] for "Y", etc.)
                        axis_in_joint_frame = np.zeros(3)
                        axis_in_joint_frame[JointAxis.index(joint.axis)] = 1.0
                        # Compute the joint axis unit vector in the object frame
                        axis_in_obj_frame = T.quat2mat(joint_orn) @ axis_in_joint_frame
                        # Find the correct scale along the joint axis direction
                        scale_along_axis = self.scale[np.argmax(np.abs(axis_in_obj_frame))]
            joint.lower_limit = joint.lower_limit * scale_along_axis
            joint.upper_limit = joint.upper_limit * scale_along_axis
@property
def _articulation_view(self):
if self._articulation_view_direct is None:
return None
# Validate that the articulation view is initialized and that if physics is running, the
# view is valid.
if og.sim.is_playing() and self.initialized:
assert self._articulation_view_direct.is_physics_handle_valid() and \
self._articulation_view_direct._physics_view.check(), \
"Articulation view must be valid if physics is running!"
return self._articulation_view_direct
    @property
    def prim_type(self):
        """
        Returns:
            str: Type of this entity prim, one of omnigibson.utils.constants.PrimType
        """
        # Set once in __init__ from the load config (defaults to PrimType.RIGID)
        return self._prim_type
    @property
    def articulated(self):
        """
        Returns:
            bool: Whether this prim is articulated or not
        """
        # Note that this is not equivalent to self.n_joints > 0 because articulation root path is
        # overridden by the object classes
        return self.articulation_root_path is not None
@property
def articulation_root_path(self):
"""
Returns:
None or str: Absolute USD path to the expected prim that represents the articulation root, if it exists. By default,
this corresponds to self.prim_path
"""
return self._prim_path if self.n_joints > 0 else None
    @property
    def root_link_name(self):
        """
        Returns:
            str: Name of this entity's root link
        """
        # Inferred from the joint topology in update_links()
        return self._root_link_name
    @property
    def root_link(self):
        """
        Returns:
            RigidPrim or ClothPrim: Root link of this object prim
        """
        return self._links[self.root_link_name]
@property
def root_prim(self):
"""
Returns:
UsdPrim: Root prim object associated with the root link of this object prim
"""
# The root prim belongs to the link with name root_link_name
return self._links[self.root_link_name].prim
    @property
    def n_dof(self):
        """
        Returns:
            int: number of DoFs of the object
        """
        # Populated in update_joints() from the articulation view (0 for single rigid bodies)
        return self._n_dof
@property
def n_joints(self):
"""
Returns:
int: Number of joints owned by this articulation
"""
if self.initialized:
num = len(self._joints)
else:
# Manually iterate over all links and check for any joints that are not fixed joints!
num = 0
children = list(self.prim.GetChildren())
while children:
child_prim = children.pop()
children.extend(child_prim.GetChildren())
prim_type = child_prim.GetPrimTypeInfo().GetTypeName().lower()
if "joint" in prim_type and "fixed" not in prim_type:
num += 1
return num
@cached_property
def n_fixed_joints(self):
"""
Returns:
int: Number of fixed joints owned by this articulation
"""
# Manually iterate over all links and check for any joints that are not fixed joints!
num = 0
children = list(self.prim.GetChildren())
while children:
child_prim = children.pop()
children.extend(child_prim.GetChildren())
prim_type = child_prim.GetPrimTypeInfo().GetTypeName().lower()
if "joint" in prim_type and "fixed" in prim_type:
num += 1
return num
@property
def n_links(self):
"""
Returns:
int: Number of links owned by this articulation
"""
return len(list(self._links.keys()))
    @property
    def joints(self):
        """
        Returns:
            dict: Dictionary mapping joint names (str) to joint prims (JointPrim) owned by this articulation
        """
        # Populated in update_joints(); excludes fixed joints (they have no DOFs)
        return self._joints
    @property
    def links(self):
        """
        Returns:
            dict: Dictionary mapping link names (str) to link prims (RigidPrim) owned by this articulation
        """
        # Populated in update_links()
        return self._links
@cached_property
def has_attachment_points(self):
"""
Returns:
bool: Whether this object has any attachment points
"""
children = list(self.prim.GetChildren())
while children:
child_prim = children.pop()
children.extend(child_prim.GetChildren())
if "attachment" in child_prim.GetName():
return True
return False
    def _compute_articulation_tree(self):
        """
        Get a graph of the articulation tree, where nodes are link names and edges
        correspond to joint names, where the joint name is accessible on the `joint_name`
        data field of the edge, and the joint type on the `joint_type` field.
        """
        G = nx.DiGraph()
        # Nodes are first added keyed by prim path, then relabeled to link names at the end
        rename_later = {}
        # Add the links
        for link_name, link in self.links.items():
            prim_path = link.prim_path
            G.add_node(prim_path)
            rename_later[prim_path] = link_name
        # Add the joints (depth-first traversal of the prim subtree)
        children = list(self.prim.GetChildren())
        while children:
            child_prim = children.pop()
            children.extend(child_prim.GetChildren())
            prim_type = child_prim.GetPrimTypeInfo().GetTypeName()
            if "Joint" in prim_type:
                # Get body 0; joints with no body0 target are skipped
                body0_targets = child_prim.GetRelationship("physics:body0").GetTargets()
                if not body0_targets:
                    continue
                body0 = str(body0_targets[0])
                # Get body 1; joints with no body1 target are skipped
                body1_targets = child_prim.GetRelationship("physics:body1").GetTargets()
                if not body1_targets:
                    continue
                body1 = str(body1_targets[0])
                # Skip joints whose bodies are not both owned links
                if body0 not in G.nodes or body1 not in G.nodes:
                    continue
                # Add the joint as a parent->child edge carrying the joint name and type
                joint_type = JointType.get_type(prim_type.split("Physics")[-1])
                G.add_edge(body0, body1, joint_name=child_prim.GetName(), joint_type=joint_type)
        # Relabel nodes to use link name instead of prim path
        nx.relabel_nodes(G, rename_later, copy=False)
        # Sanity check the tree shape: all nodes have in-degree of 1 except root
        in_degrees = {node: G.in_degree(node) for node in G.nodes}
        assert in_degrees[self.root_link_name] == 0, "Root link should have in-degree of 0!"
        assert all([in_degrees[node] == 1 for node in G.nodes if node != self.root_link_name]), \
            "All non-root links should have in-degree of 1!"
        self._articulation_tree = G
    @property
    def articulation_tree(self):
        """
        Returns:
            networkx.DiGraph: Graph of this entity's articulation, with link names as nodes and
                joints as parent->child edges (computed in _compute_articulation_tree)
        """
        return self._articulation_tree
    @property
    def materials(self):
        """
        Loop through each link and their visual meshes to gather all the materials that belong to this object

        Returns:
            set of MaterialPrim: a set of MaterialPrim that belongs to this object
        """
        # Cached during _post_load(), de-duplicated by material prim path
        return self._materials
    @property
    def visual_only(self):
        """
        Returns:
            bool: Whether this entity is visual-only (i.e.: no gravity or collisions applied)
        """
        return self._visual_only
    @visual_only.setter
    def visual_only(self, val):
        """
        Sets the visual only state of this entity

        Args:
            val (bool): Whether this entity should be visual-only (i.e.: no gravity or collisions applied)
        """
        # Iterate over all owned links and set their respective visual-only properties accordingly
        for link in self._links.values():
            link.visual_only = val
        # Also set the internal value
        self._visual_only = val
def contact_list(self):
"""
Get list of all current contacts with this object prim
Returns:
list of CsRawData: raw contact info for this rigid body
"""
contacts = []
for link in self._links.values():
contacts += link.contact_list()
return contacts
    def enable_gravity(self) -> None:
        """
        Enables gravity for this entity
        """
        # Gravity is a per-rigid-body setting, so apply it to every owned link
        for link in self._links.values():
            link.enable_gravity()
    def disable_gravity(self) -> None:
        """
        Disables gravity for this entity
        """
        # Gravity is a per-rigid-body setting, so apply it to every owned link
        for link in self._links.values():
            link.disable_gravity()
    def reset(self):
        """
        Resets this entity to some default, pre-defined state

        Requires og.sim to be actively playing; cloth entities reset their particle state, while
        articulated entities zero out all joint positions and velocities.
        """
        # Make sure simulation is playing, otherwise, we cannot reset because physx requires active running
        # simulation in order to set joints
        assert og.sim.is_playing(), "Simulator must be playing in order to reset controllable object's joints!"
        # If this is a cloth, reset the particle positions
        if self.prim_type == PrimType.CLOTH:
            self.root_link.reset()
        # Otherwise, set all joints to have 0 position and 0 velocity if this object has joints
        elif self.n_joints > 0:
            self.set_joint_positions(positions=np.zeros(self.n_dof), drive=False)
            self.set_joint_velocities(velocities=np.zeros(self.n_dof), drive=False)
def set_joint_positions(self, positions, indices=None, normalized=False, drive=False):
"""
Set the joint positions (both actual value and target values) in simulation. Note: only works if the simulator
is actively running!
Args:
positions (np.ndarray): positions to set. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @positions must
be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF positions to set.
Default is None, which assumes that all joints are being set.
normalized (bool): Whether the inputted joint positions should be interpreted as normalized values. Default
is False
drive (bool): Whether the positions being set are values that should be driven naturally by this entity's
motors or manual values to immediately set. Default is False, corresponding to an instantaneous
setting of the positions
"""
# Run sanity checks -- make sure that we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
# Possibly de-normalize the inputs
if normalized:
positions = self._denormalize_positions(positions=positions, indices=indices)
# Set the DOF states
if drive:
self._articulation_view.set_joint_position_targets(positions, joint_indices=indices)
else:
self._articulation_view.set_joint_positions(positions, joint_indices=indices)
PoseAPI.invalidate()
def set_joint_velocities(self, velocities, indices=None, normalized=False, drive=False):
"""
Set the joint velocities (both actual value and target values) in simulation. Note: only works if the simulator
is actively running!
Args:
velocities (np.ndarray): velocities to set. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @velocities must
be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF velocities to set.
Default is None, which assumes that all joints are being set.
normalized (bool): Whether the inputted joint velocities should be interpreted as normalized values. Default
is False
drive (bool): Whether the velocities being set are values that should be driven naturally by this entity's
motors or manual values to immediately set. Default is False, corresponding to an instantaneous
setting of the velocities
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
# Possibly de-normalize the inputs
if normalized:
velocities = self._denormalize_velocities(velocities=velocities, indices=indices)
# Set the DOF states
if drive:
self._articulation_view.set_joint_velocity_targets(velocities, joint_indices=indices)
else:
self._articulation_view.set_joint_velocities(velocities, joint_indices=indices)
def set_joint_efforts(self, efforts, indices=None, normalized=False):
"""
Set the joint efforts (both actual value and target values) in simulation. Note: only works if the simulator
is actively running!
Args:
efforts (np.ndarray): efforts to set. This should be n-DOF length if all joints are being set,
or k-length (k < n) if specific indices are being set. In this case, the length of @efforts must
be the same length as @indices!
indices (None or k-array): If specified, should be k (k < n) length array of specific DOF efforts to set.
Default is None, which assumes that all joints are being set.
normalized (bool): Whether the inputted joint efforts should be interpreted as normalized values. Default
is False
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
# Possibly de-normalize the inputs
if normalized:
efforts = self._denormalize_efforts(efforts=efforts, indices=indices)
# Set the DOF states
self._articulation_view.set_joint_efforts(efforts, joint_indices=indices)
def _normalize_positions(self, positions, indices=None):
"""
Normalizes raw joint positions @positions
Args:
positions (n- or k-array): n-DOF raw positions to normalize, or k (k < n) specific positions to normalize.
In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
positions to normalize. Default is None, which assumes the positions correspond to all DOF being
normalized.
Returns:
n- or k-array: normalized positions in range [-1, 1] for the specified DOFs
"""
low, high = self.joint_lower_limits, self.joint_upper_limits
mean = (low + high) / 2.0
magnitude = (high - low) / 2.0
return (positions - mean) / magnitude if indices is None else (positions - mean[indices]) / magnitude[indices]
def _denormalize_positions(self, positions, indices=None):
"""
De-normalizes joint positions @positions
Args:
positions (n- or k-array): n-DOF normalized positions or k (k < n) specific positions in range [-1, 1]
to de-normalize. In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
positions to de-normalize. Default is None, which assumes the positions correspond to all DOF being
de-normalized.
Returns:
n- or k-array: de-normalized positions for the specified DOFs
"""
low, high = self.joint_lower_limits, self.joint_upper_limits
mean = (low + high) / 2.0
magnitude = (high - low) / 2.0
return positions * magnitude + mean if indices is None else positions * magnitude[indices] + mean[indices]
def _normalize_velocities(self, velocities, indices=None):
"""
Normalizes raw joint velocities @velocities
Args:
velocities (n- or k-array): n-DOF raw velocities to normalize, or k (k < n) specific velocities to normalize.
In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
velocities to normalize. Default is None, which assumes the velocities correspond to all DOF being
normalized.
Returns:
n- or k-array: normalized velocities in range [-1, 1] for the specified DOFs
"""
return velocities / self.max_joint_velocities if indices is None else \
velocities / self.max_joint_velocities[indices]
def _denormalize_velocities(self, velocities, indices=None):
"""
De-normalizes joint velocities @velocities
Args:
velocities (n- or k-array): n-DOF normalized velocities or k (k < n) specific velocities in range [-1, 1]
to de-normalize. In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
velocities to de-normalize. Default is None, which assumes the velocities correspond to all DOF being
de-normalized.
Returns:
n- or k-array: de-normalized velocities for the specified DOFs
"""
return velocities * self.max_joint_velocities if indices is None else \
velocities * self.max_joint_velocities[indices]
def _normalize_efforts(self, efforts, indices=None):
"""
Normalizes raw joint efforts @efforts
Args:
efforts (n- or k-array): n-DOF raw efforts to normalize, or k (k < n) specific efforts to normalize.
In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
efforts to normalize. Default is None, which assumes the efforts correspond to all DOF being
normalized.
Returns:
n- or k-array: normalized efforts in range [-1, 1] for the specified DOFs
"""
return efforts / self.max_joint_efforts if indices is None else efforts / self.max_joint_efforts[indices]
def _denormalize_efforts(self, efforts, indices=None):
"""
De-normalizes joint efforts @efforts
Args:
efforts (n- or k-array): n-DOF normalized efforts or k (k < n) specific efforts in range [-1, 1]
to de-normalize. In the latter case, @indices should be specified
indices (None or k-array): If specified, should be k (k < n) DOF indices corresponding to the specific
efforts to de-normalize. Default is None, which assumes the efforts correspond to all DOF being
de-normalized.
Returns:
n- or k-array: de-normalized efforts for the specified DOFs
"""
return efforts * self.max_joint_efforts if indices is None else efforts * self.max_joint_efforts[indices]
def update_handles(self):
"""
Updates all internal handles for this prim, in case they change since initialization
"""
assert og.sim.is_playing(), "Simulator must be playing if updating handles!"
# Reinitialize the articulation view
if self._articulation_view_direct is not None:
self._articulation_view_direct.initialize(og.sim.physics_sim_view)
# Update all links and joints as well
for link in self._links.values():
if not link.initialized:
link.initialize()
link.update_handles()
for joint in self._joints.values():
if not joint.initialized:
joint.initialize()
joint.update_handles()
def get_joint_positions(self, normalized=False):
"""
Grabs this entity's joint positions
Args:
normalized (bool): Whether returned values should be normalized to range [-1, 1] based on limits or not.
Returns:
n-array: n-DOF length array of positions
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
joint_positions = self._articulation_view.get_joint_positions().reshape(self.n_dof)
# Possibly normalize values when returning
return self._normalize_positions(positions=joint_positions) if normalized else joint_positions
def get_joint_velocities(self, normalized=False):
"""
Grabs this entity's joint velocities
Args:
normalized (bool): Whether returned values should be normalized to range [-1, 1] based on limits or not.
Returns:
n-array: n-DOF length array of velocities
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
joint_velocities = self._articulation_view.get_joint_velocities().reshape(self.n_dof)
# Possibly normalize values when returning
return self._normalize_velocities(velocities=joint_velocities) if normalized else joint_velocities
def get_joint_efforts(self, normalized=False):
"""
Grabs this entity's joint efforts
Args:
normalized (bool): Whether returned values should be normalized to range [-1, 1] based on limits or not.
Returns:
n-array: n-DOF length array of efforts
"""
# Run sanity checks -- make sure we are articulated
assert self.n_joints > 0, "Tried to call method not intended for entity prim with no joints!"
joint_efforts = self._articulation_view.get_applied_joint_efforts().reshape(self.n_dof)
# Possibly normalize values when returning
return self._normalize_efforts(efforts=joint_efforts) if normalized else joint_efforts
def set_linear_velocity(self, velocity: np.ndarray):
"""
Sets the linear velocity of the root prim in stage.
Args:
velocity (np.ndarray): linear velocity to set the rigid prim to, in the world frame. Shape (3,).
"""
self.root_link.set_linear_velocity(velocity)
def get_linear_velocity(self):
"""
Gets the linear velocity of the root prim in stage.
Returns:
velocity (np.ndarray): linear velocity to set the rigid prim to, in the world frame. Shape (3,).
"""
return self.root_link.get_linear_velocity()
def set_angular_velocity(self, velocity):
"""
Sets the angular velocity of the root prim in stage.
Args:
velocity (np.ndarray): angular velocity to set the rigid prim to, in the world frame. Shape (3,).
"""
self.root_link.set_angular_velocity(velocity)
def get_angular_velocity(self):
"""Gets the angular velocity of the root prim in stage.
Returns:
velocity (np.ndarray): angular velocity to set the rigid prim to, in the world frame. Shape (3,).
"""
return self.root_link.get_angular_velocity()
def get_relative_linear_velocity(self):
"""
Returns:
3-array: (x,y,z) Linear velocity of root link in its own frame
"""
return T.quat2mat(self.get_orientation()).T @ self.get_linear_velocity()
def get_relative_angular_velocity(self):
"""
Returns:
3-array: (ax,ay,az) angular velocity of root link in its own frame
"""
return T.quat2mat(self.get_orientation()).T @ self.get_angular_velocity()
    def set_position_orientation(self, position=None, orientation=None):
        """
        Sets the pose of this entity in the world frame.

        Args:
            position (None or 3-array): (x,y,z) world-frame position to set. Default is None,
                which keeps the current position
            orientation (None or 4-array): (x,y,z,w) world-frame quaternion orientation to set.
                Default is None, which keeps the current orientation
        """
        # If kinematic only, clear cache for the root link
        if self.kinematic_only:
            self.root_link.clear_kinematic_only_cache()
        # If the simulation isn't running, we should set this prim's XForm (object-level) properties directly
        if og.sim.is_stopped():
            XFormPrim.set_position_orientation(self, position=position, orientation=orientation)
        # Delegate to RigidPrim if we are not articulated
        elif self._articulation_view is None:
            self.root_link.set_position_orientation(position=position, orientation=orientation)
        # Sim is running and articulation view exists, so use that physx API backend
        else:
            if position is not None:
                # Add a batch dimension, since the articulation view expects batched poses
                position = np.asarray(position)[None, :]
            if orientation is not None:
                # Add a batch dimension and reorder from (x,y,z,w) to the view's (w,x,y,z) convention
                orientation = np.asarray(orientation)[None, [3, 0, 1, 2]]
            self._articulation_view.set_world_poses(position, orientation)
            PoseAPI.invalidate()
    def get_position_orientation(self):
        """
        Gets the pose of this entity in the world frame.

        Returns:
            2-tuple:
                - 3-array: (x,y,z) world-frame position
                - 4-array: (x,y,z,w) world-frame quaternion orientation
        """
        # If the simulation isn't running, we should read from this prim's XForm (object-level) properties directly
        if og.sim.is_stopped():
            return XFormPrim.get_position_orientation(self)
        # Delegate to RigidPrim if we are not articulated
        elif self._articulation_view is None:
            return self.root_link.get_position_orientation()
        # Sim is running and articulation view exists, so use that physx API backend
        else:
            positions, orientations = self._articulation_view.get_world_poses()
            # The view returns batched (w,x,y,z) quaternions -- take entry 0 and reorder to (x,y,z,w)
            return positions[0], orientations[0][[1, 2, 3, 0]]
    def set_local_pose(self, position=None, orientation=None):
        """
        Sets the pose of this entity relative to its parent prim's frame.

        Args:
            position (None or 3-array): (x,y,z) local position to set. Default is None, which
                keeps the current position
            orientation (None or 4-array): (x,y,z,w) local quaternion orientation to set.
                Default is None, which keeps the current orientation
        """
        # If kinematic only, clear cache for the root link
        if self.kinematic_only:
            self.root_link.clear_kinematic_only_cache()
        # If the simulation isn't running, we should set this prim's XForm (object-level) properties directly
        if og.sim.is_stopped():
            return XFormPrim.set_local_pose(self, position, orientation)
        # Delegate to RigidPrim if we are not articulated
        elif self._articulation_view is None:
            self.root_link.set_local_pose(position=position, orientation=orientation)
        # Sim is running and articulation view exists, so use that physx API backend
        else:
            if position is not None:
                # Add a batch dimension, since the articulation view expects batched poses
                position = np.asarray(position)[None, :]
            if orientation is not None:
                # Add a batch dimension and reorder from (x,y,z,w) to the view's (w,x,y,z) convention
                orientation = np.asarray(orientation)[None, [3, 0, 1, 2]]
            self._articulation_view.set_local_poses(position, orientation)
            PoseAPI.invalidate()
    def get_local_pose(self):
        """
        Gets the pose of this entity relative to its parent prim's frame.

        Returns:
            2-tuple:
                - 3-array: (x,y,z) local position
                - 4-array: (x,y,z,w) local quaternion orientation
        """
        # If the simulation isn't running, we should read from this prim's XForm (object-level) properties directly
        if og.sim.is_stopped():
            return XFormPrim.get_local_pose(self)
        # Delegate to RigidPrim if we are not articulated
        elif self._articulation_view is None:
            return self.root_link.get_local_pose()
        # Sim is running and articulation view exists, so use that physx API backend
        else:
            positions, orientations = self._articulation_view.get_local_poses()
            # The view returns batched (w,x,y,z) quaternions -- take entry 0 and reorder to (x,y,z,w)
            return positions[0], orientations[0][[1, 2, 3, 0]]
# TODO: Is the omni joint damping (used for driving motors) same as dissipative joint damping (what we had in pb)?
@property
def joint_damping(self):
"""
Returns:
n-array: joint damping values for this prim
"""
return np.concatenate([joint.damping for joint in self._joints.values()])
@property
def joint_lower_limits(self):
"""
Returns:
n-array: minimum values for this robot's joints. If joint does not have a range, returns -1000
for that joint
"""
return np.array([joint.lower_limit for joint in self._joints.values()])
@property
def joint_upper_limits(self):
"""
Returns:
n-array: maximum values for this robot's joints. If joint does not have a range, returns 1000
for that joint
"""
return np.array([joint.upper_limit for joint in self._joints.values()])
@property
def joint_range(self):
"""
Returns:
n-array: joint range values for this robot's joints
"""
return self.joint_upper_limits - self.joint_lower_limits
@property
def max_joint_velocities(self):
"""
Returns:
n-array: maximum velocities for this robot's joints
"""
return np.array([joint.max_velocity for joint in self._joints.values()])
@property
def max_joint_efforts(self):
"""
Returns:
n-array: maximum efforts for this robot's joints
"""
return np.array([joint.max_effort for joint in self._joints.values()])
@property
def joint_position_limits(self):
"""
Returns:
2-tuple:
- n-array: min joint position limits, where each is an n-DOF length array
- n-array: max joint position limits, where each is an n-DOF length array
"""
return self.joint_lower_limits, self.joint_upper_limits
@property
def joint_velocity_limits(self):
"""
Returns:
2-tuple:
- n-array: min joint velocity limits, where each is an n-DOF length array
- n-array: max joint velocity limits, where each is an n-DOF length array
"""
return -self.max_joint_velocities, self.max_joint_velocities
@property
def joint_effort_limits(self):
"""
Returns:
2-tuple:
- n-array: min joint effort limits, where each is an n-DOF length array
- n-array: max joint effort limits, where each is an n-DOF length array
"""
return -self.max_joint_efforts, self.max_joint_efforts
@property
def joint_at_limits(self):
"""
Returns:
n-array: n-DOF length array specifying whether joint is at its limit,
with 1.0 --> at limit, otherwise 0.0
"""
return 1.0 * (np.abs(self.get_joint_positions(normalized=True)) > 0.99)
@property
def joint_has_limits(self):
"""
Returns:
n-array: n-DOF length array specifying whether joint has a limit or not
"""
return np.array([j.has_limit for j in self._joints.values()])
@property
def disabled_collision_link_names(self):
"""
Returns:
list of str: List of link names for this entity whose collisions should be globally disabled
"""
return []
@property
def disabled_collision_pairs(self):
"""
Returns:
list of (str, str): List of rigid body collision pairs to disable within this object prim.
Default is an empty list (no pairs)
"""
return []
@property
def scale(self):
# For the EntityPrim (object) level, @self.scale represents the scale with respect to the original scale of
# the link (RigidPrim or ClothPrim), which might not be uniform ([1, 1, 1]) itself.
return self.root_link.scale / self.root_link.original_scale
@scale.setter
def scale(self, scale):
# For the EntityPrim (object) level, @self.scale represents the scale with respect to the original scale of
# the link (RigidPrim or ClothPrim), which might not be uniform ([1, 1, 1]) itself.
# We iterate over all rigid bodies owned by this object prim and set their individual scales
# We do this because omniverse cannot scale orientation of an articulated prim, so we get mesh mismatches as
# they rotate in the world.
for link in self._links.values():
link.scale = scale * link.original_scale
@property
def solver_position_iteration_count(self):
"""
Returns:
int: How many position iterations to take per physics step by the physx solver
"""
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:solverPositionIterationCount") if \
self.articulated else self.root_link.solver_position_iteration_count
@solver_position_iteration_count.setter
def solver_position_iteration_count(self, count):
"""
Sets how many position iterations to take per physics step by the physx solver
Args:
count (int): How many position iterations to take per physics step by the physx solver
"""
if self.articulated:
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:solverPositionIterationCount", count)
else:
for link in self._links.values():
link.solver_position_iteration_count = count
@property
def solver_velocity_iteration_count(self):
"""
Returns:
int: How many velocity iterations to take per physics step by the physx solver
"""
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:solverVelocityIterationCount") if \
self.articulated else self.root_link.solver_velocity_iteration_count
@solver_velocity_iteration_count.setter
def solver_velocity_iteration_count(self, count):
"""
Sets how many velocity iterations to take per physics step by the physx solver
Args:
count (int): How many velocity iterations to take per physics step by the physx solver
"""
if self.articulated:
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:solverVelocityIterationCount", count)
else:
for link in self._links.values():
link.solver_velocity_iteration_count = count
@property
def stabilization_threshold(self):
"""
Returns:
float: threshold for stabilizing this articulation
"""
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:stabilizationThreshold") if \
self.articulated else self.root_link.stabilization_threshold
@stabilization_threshold.setter
def stabilization_threshold(self, threshold):
"""
Sets threshold for stabilizing this articulation
Args:
threshold (float): Stabilization threshold
"""
if self.articulated:
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:stabilizationThreshold", threshold)
else:
for link in self._links.values():
link.stabilization_threshold = threshold
@property
def is_asleep(self):
"""
Returns:
bool: whether this entity is asleep or not
"""
# If we're kinematic only, immediately return False since it doesn't follow the sleep / wake paradigm
if self.kinematic_only:
return False
else:
return og.sim.psi.is_sleeping(og.sim.stage_id, lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.articulation_root_path)) \
if self.articulated else self.root_link.is_asleep
@property
def sleep_threshold(self):
"""
Returns:
float: threshold for sleeping this articulation
"""
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:sleepThreshold") if \
self.articulated else self.root_link.sleep_threshold
@sleep_threshold.setter
def sleep_threshold(self, threshold):
"""
Sets threshold for sleeping this articulation
Args:
threshold (float): Sleeping threshold
"""
if self.articulated:
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:sleepThreshold", threshold)
else:
for link in self._links.values():
link.sleep_threshold = threshold
@property
def self_collisions(self):
"""
Returns:
bool: Whether self-collisions are enabled for this prim or not
"""
assert self.articulated, "Cannot get self-collision for non-articulated EntityPrim!"
return lazy.omni.isaac.core.utils.prims.get_prim_property(self.articulation_root_path, "physxArticulation:enabledSelfCollisions")
@self_collisions.setter
def self_collisions(self, flag):
"""
Sets whether self-collisions are enabled for this prim or not
Args:
flag (bool): Whether self collisions are enabled for this prim or not
"""
assert self.articulated, "Cannot set self-collision for non-articulated EntityPrim!"
lazy.omni.isaac.core.utils.prims.set_prim_property(self.articulation_root_path, "physxArticulation:enabledSelfCollisions", flag)
@property
def kinematic_only(self):
"""
Returns:
bool: Whether this object is a kinematic-only object (otherwise, it is a rigid body). A kinematic-only
object is not subject to simulator dynamics, and remains fixed unless the user explicitly sets the
body's pose / velocities. See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics/rigid-bodies.html?highlight=rigid%20body%20enabled#kinematic-rigid-bodies
for more information
"""
return self.root_link.kinematic_only
@property
def aabb(self):
# If we're a cloth prim type, we compute the bounding box from the limits of the particles. Otherwise, use the
# normal method for computing bounding box
if self._prim_type == PrimType.CLOTH:
particle_contact_offset = self.root_link.cloth_system.particle_contact_offset
particle_positions = self.root_link.compute_particle_positions()
aabb_lo, aabb_hi = np.min(particle_positions, axis=0) - particle_contact_offset, \
np.max(particle_positions, axis=0) + particle_contact_offset
else:
points_world = [link.collision_boundary_points_world for link in self._links.values()]
all_points = np.concatenate([p for p in points_world if p is not None], axis=0)
aabb_lo = np.min(all_points, axis=0)
aabb_hi = np.max(all_points, axis=0)
return aabb_lo, aabb_hi
@property
def aabb_extent(self):
"""
Get this xform's actual bounding box extent
Returns:
3-array: (x,y,z) bounding box
"""
min_corner, max_corner = self.aabb
return max_corner - min_corner
@property
def aabb_center(self):
"""
Get this xform's actual bounding box center
Returns:
3-array: (x,y,z) bounding box center
"""
min_corner, max_corner = self.aabb
return (max_corner + min_corner) / 2.0
def get_coriolis_and_centrifugal_forces(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
n-array: (N,) shaped per-DOF coriolis and centrifugal forces experienced by the entity, if articulated
"""
assert self.articulated, "Cannot get coriolis and centrifugal forces for non-articulated entity!"
return self._articulation_view.get_coriolis_and_centrifugal_forces(clone=clone).reshape(self.n_dof)
def get_generalized_gravity_forces(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
n-array: (N, N) shaped per-DOF gravity forces, if articulated
"""
assert self.articulated, "Cannot get generalized gravity forces for non-articulated entity!"
return self._articulation_view.get_generalized_gravity_forces(clone=clone).reshape(self.n_dof)
def get_mass_matrix(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
n-array: (N, N) shaped per-DOF mass matrix, if articulated
"""
assert self.articulated, "Cannot get mass matrix for non-articulated entity!"
return self._articulation_view.get_mass_matrices(clone=clone).reshape(self.n_dof, self.n_dof)
def get_jacobian(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
n-array: (N_links - 1 [+ 1], 6, N_dof [+ 6]) shaped per-link jacobian, if articulated. Note that the first
dimension is +1 and the final dimension is +6 if the entity does not have a fixed base
(i.e.: there is an additional "floating" joint tying the robot to the world frame)
"""
assert self.articulated, "Cannot get jacobian for non-articulated entity!"
return self._articulation_view.get_jacobians(clone=clone).squeeze(axis=0)
def get_relative_jacobian(self, clone=True):
"""
Args:
clone (bool): Whether to clone the underlying tensor buffer or not
Returns:
n-array: (N_links - 1 [+ 1], 6, N_dof [+ 6]) shaped per-link relative jacobian, if articulated (expressed in
this entity's base frame). Note that the first dimension is +1 and the final dimension is +6 if the
entity does not have a fixed base (i.e.: there is an additional "floating" joint tying the robot to
the world frame)
"""
jac = self.get_jacobian(clone=clone)
ori_t = T.quat2mat(self.get_orientation()).T.astype(np.float32)
tf = np.zeros((1, 6, 6), dtype=np.float32)
tf[:, :3, :3] = ori_t
tf[:, 3:, 3:] = ori_t
return tf @ jac
def wake(self):
"""
Enable physics for this articulation
"""
if self.articulated:
prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path)
og.sim.psi.wake_up(og.sim.stage_id, prim_id)
else:
for link in self._links.values():
link.wake()
def sleep(self):
"""
Disable physics for this articulation
"""
if self.articulated:
prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path)
og.sim.psi.put_to_sleep(og.sim.stage_id, prim_id)
else:
for link in self._links.values():
link.sleep()
def keep_still(self):
"""
Zero out all velocities for this prim
"""
self.set_linear_velocity(velocity=np.zeros(3))
self.set_angular_velocity(velocity=np.zeros(3))
for joint in self._joints.values():
joint.keep_still()
# Make sure object is awake
self.wake()
    def create_attachment_point_link(self):
        """
        Create a collision-free, invisible attachment point link for the cloth object, and create an attachment between
        the ClothPrim and this attachment point link (RigidPrim).

        One use case for this is that we can create a fixed joint between this link and the world to enable AG for
        cloth. During simulation, this joint will move and match the robot gripper frame, which will then drive the
        cloth.
        """
        assert self._prim_type == PrimType.CLOTH, "create_attachment_point_link should only be called for Cloth"
        link_name = "attachment_point"
        stage = lazy.omni.isaac.core.utils.stage.get_current_stage()
        # Define the link's Xform plus separate visual / collision sphere geoms beneath it
        link_prim = stage.DefinePrim(f"{self._prim_path}/{link_name}", "Xform")
        vis_prim = lazy.pxr.UsdGeom.Sphere.Define(stage, f"{self._prim_path}/{link_name}/visuals").GetPrim()
        col_prim = lazy.pxr.UsdGeom.Sphere.Define(stage, f"{self._prim_path}/{link_name}/collisions").GetPrim()

        # Set the radius to be 0.03m. In theory, we want this radius to be as small as possible. Otherwise, the cloth
        # dynamics will be unrealistic. However, in practice, if the radius is too small, the attachment becomes very
        # unstable. Empirically 0.03m works reasonably well.
        vis_prim.GetAttribute("radius").Set(0.03)
        col_prim.GetAttribute("radius").Set(0.03)

        # Need to sync the extents with the chosen radius
        extent = vis_prim.GetAttribute("extent").Get()
        extent[0] = lazy.pxr.Gf.Vec3f(-0.03, -0.03, -0.03)
        extent[1] = lazy.pxr.Gf.Vec3f(0.03, 0.03, 0.03)
        vis_prim.GetAttribute("extent").Set(extent)
        col_prim.GetAttribute("extent").Set(extent)

        # Add collision API to collision geom
        lazy.pxr.UsdPhysics.CollisionAPI.Apply(col_prim)
        lazy.pxr.UsdPhysics.MeshCollisionAPI.Apply(col_prim)
        lazy.pxr.PhysxSchema.PhysxCollisionAPI.Apply(col_prim)

        # Create a attachment point link and register it as one of this entity's links
        link = RigidPrim(
            prim_path=link_prim.GetPrimPath().__str__(),
            name=f"{self._name}:{link_name}",
        )
        link.disable_collisions()
        # TODO (eric): Should we disable gravity for this link?
        # link.disable_gravity()
        link.visible = False
        # Set a very small mass so the link itself barely affects the cloth dynamics
        link.mass = 1e-6
        link.density = 0.0

        self._links[link_name] = link

        # Create an attachment between the root link (ClothPrim) and the newly created attachment point link (RigidPrim)
        attachment_path = self.root_link.prim.GetPath().AppendElementString("attachment")
        lazy.omni.kit.commands.execute("CreatePhysicsAttachment", target_attachment_path=attachment_path,
                                       actor0_path=self.root_link.prim.GetPath(), actor1_path=link.prim.GetPath())
def _dump_state(self):
# We don't call super, instead, this state is simply the root link state and all joint states
state = dict(root_link=self.root_link._dump_state())
joint_state = dict()
for prim_name, prim in self._joints.items():
joint_state[prim_name] = prim._dump_state()
state["joints"] = joint_state
return state
def _load_state(self, state):
# Load base link state and joint states
self.root_link._load_state(state=state["root_link"])
for joint_name, joint_state in state["joints"].items():
self._joints[joint_name]._load_state(state=joint_state)
# Make sure this object is awake
self.wake()
def _serialize(self, state):
# We serialize by first flattening the root link state and then iterating over all joints and
# adding them to the a flattened array
state_flat = [self.root_link.serialize(state=state["root_link"])]
if self.n_joints > 0:
state_flat.append(
np.concatenate(
[prim.serialize(state=state["joints"][prim_name]) for prim_name, prim in self._joints.items()]
)
)
return np.concatenate(state_flat).astype(float)
def _deserialize(self, state):
# We deserialize by first de-flattening the root link state and then iterating over all joints and
# sequentially grabbing from the flattened state array, incrementing along the way
idx = self.root_link.state_size
state_dict = dict(root_link=self.root_link.deserialize(state=state[:idx]))
joint_state_dict = dict()
for prim_name, prim in self._joints.items():
joint_state_dict[prim_name] = prim.deserialize(state=state[idx:idx+prim.state_size])
idx += prim.state_size
state_dict["joints"] = joint_state_dict
return state_dict, idx
def _create_prim_with_same_kwargs(self, prim_path, name, load_config):
# Subclass must implement this method for duplication functionality
raise NotImplementedError("Subclass must implement _create_prim_with_same_kwargs() to enable duplication "
"functionality for EntityPrim!")
| 65,273 | Python | 42.285146 | 192 | 0.61563 |
StanfordVL/OmniGibson/omnigibson/prims/material_prim.py | import numpy as np
import asyncio
import os
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.utils.physx_utils import bind_material
from omnigibson.prims.prim_base import BasePrim
class MaterialPrim(BasePrim):
"""
Provides high level functions to deal with a material prim and its attributes/ properties.
If there is a material prim present at the path, it will use it. Otherwise, a new material prim at
the specified prim path will be created.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
@prim_path -- it will be ignored if it already exists. Subclasses should define the exact keys expected
for their class. For this material prim, the below values can be specified:
mdl_name (None or str): If specified, should be the name of the mdl preset to load (including .mdl).
None results in default, "OmniPBR.mdl"
mtl_name (None or str): If specified, should be the name of the mtl preset to load.
None results in default, "OmniPBR"
"""
# Persistent dictionary of materials, mapped from prim_path to MaterialPrim
MATERIALS = dict()
@classmethod
def get_material(cls, name, prim_path, load_config=None):
"""
Get a material prim from the persistent dictionary of materials, or create a new one if it doesn't exist.
Args:
name (str): Name for the object.
prim_path (str): prim path of the MaterialPrim.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
@prim_path -- it will be ignored if it already exists.
Returns:
MaterialPrim: Material prim at the specified path
"""
# If the material already exists, return it
if prim_path in cls.MATERIALS:
return cls.MATERIALS[prim_path]
# Otherwise, create a new one and return it
new_material = cls(prim_path=prim_path, name=name, load_config=load_config)
cls.MATERIALS[prim_path] = new_material
return new_material
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Other values that will be filled in at runtime
self._shader = None
# Users of this material: should be a set of BaseObject and BaseSystem
self._users = set()
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
    def _load(self):
        """
        Creates a new MDL material via the kit command library and moves it to @self._prim_path.

        Returns:
            Usd.Prim: the newly created material prim at @self._prim_path
        """
        # We create a new material at the specified path
        # NOTE: the kit command populates @mtl_created in-place with the created material's path
        mtl_created = []
        lazy.omni.kit.commands.execute(
            "CreateAndBindMdlMaterialFromLibrary",
            mdl_name="OmniPBR.mdl" if self._load_config.get("mdl_name", None) is None else self._load_config["mdl_name"],
            mtl_name="OmniPBR" if self._load_config.get("mtl_name", None) is None else self._load_config["mtl_name"],
            mtl_created_list=mtl_created,
        )
        material_path = mtl_created[0]

        # Move prim to desired location
        lazy.omni.kit.commands.execute("MovePrim", path_from=material_path, path_to=self._prim_path)

        # Return generated material
        return lazy.omni.isaac.core.utils.prims.get_prim_at_path(self._prim_path)
@classmethod
def clear(cls):
cls.MATERIALS = dict()
@property
def users(self):
"""
Users of this material: should be a list of BaseObject and BaseSystem
"""
return self._users
def add_user(self, user):
"""
Adds a user to the material. This can be a BaseObject or BaseSystem.
Args:
user (BaseObject or BaseSystem): User to add to the material
"""
self._users.add(user)
def remove_user(self, user):
"""
Removes a user from the material. This can be a BaseObject or BaseSystem.
If there are no users left, the material will be removed.
Args:
user (BaseObject or BaseSystem): User to remove from the material
"""
self._users.remove(user)
if len(self._users) == 0:
self.remove()
def remove(self):
# Remove from global sensors dictionary
self.MATERIALS.pop(self._prim_path)
# Run super
super().remove()
def _post_load(self):
# run super first
super()._post_load()
# Add this material to the list of global materials
self.MATERIALS[self._prim_path] = self
# Generate shader reference
self._shader = lazy.omni.usd.get_shader_from_material(self._prim)
def bind(self, target_prim_path):
"""
Bind this material to an arbitrary prim (usually a visual mesh prim)
Args:
target_prim_path (str): prim path of the Prim to bind to
"""
bind_material(prim_path=target_prim_path, material_path=self.prim_path)
async def _load_mdl_parameters(self, render=True):
"""
Loads MDL parameters internally so they can be accessed by our class instance
Args:
render (bool): If True, takes a rendering step before loading the mdl parameters.
Note that a rendering step is necessary to load these parameters, though if a step has already
occurred externally, no additional rendering step is needed
"""
if render:
og.sim.render()
await lazy.omni.usd.get_context().load_mdl_parameters_for_prim_async(self._shader)
def shader_force_populate(self, render=True):
"""
Force populate inputs and outputs of the shader
Args:
render (bool): If True, takes a rendering step before force populating the inputs and outputs.
Note that a rendering step is necessary to load these I/Os, though if a step has already
occurred externally, no additional rendering step is needed
"""
assert self._shader is not None
asyncio.run(self._load_mdl_parameters(render=render))
def shader_update_asset_paths_with_root_path(self, root_path):
"""
Similar to @shader_update_asset_paths, except in this case, root_path is explicitly provided by the caller.
Args:
root_path (str): root to be pre-appended to the original asset paths
"""
for inp_name in self.shader_input_names_by_type("SdfAssetPath"):
inp = self.get_input(inp_name)
# If the input doesn't have any path, skip
if inp is None:
continue
original_path = inp.path if inp.resolvedPath == "" else inp.resolvedPath
# If the input has an empty path, skip
if original_path == "":
continue
new_path = os.path.join(root_path, original_path)
self.set_input(inp_name, new_path)
def get_input(self, inp):
"""
Grabs the input with corresponding name @inp associated with this material and shader
Args:
inp (str): Name of the shader input whose value will be grabbed
Returns:
any: value of the requested @inp
"""
return self._shader.GetInput(inp).Get()
def set_input(self, inp, val):
"""
Sets the input with corresponding name @inp associated with this material and shader
Args:
inp (str): Name of the shader input whose value will be set
val (any): Value to set for the input. This should be the valid type for that attribute.
"""
# Make sure the input exists first, so we avoid segfaults with "invalid null prim"
assert inp in self.shader_input_names, \
f"Got invalid shader input to set! Current inputs are: {self.shader_input_names}. Got: {inp}"
self._shader.GetInput(inp).Set(val)
@property
def is_glass(self):
"""
Returns:
bool: Whether this material is a glass material or not
"""
return "glass_color" in self.shader_input_names
@property
def shader(self):
"""
Returns:
Usd.Shade: Shader associated with this material
"""
return self._shader
@property
def shader_input_names(self):
"""
Returns:
set: All the shader input names associated with this material
"""
return {inp.GetBaseName() for inp in self._shader.GetInputs()}
def shader_input_names_by_type(self, input_type):
"""
Args:
input_type (str): input type
Returns:
set: All the shader input names associated with this material that match the given input type
"""
return {inp.GetBaseName() for inp in self._shader.GetInputs() if inp.GetTypeName().cppTypeName == input_type}
@property
def diffuse_color_constant(self):
"""
Returns:
3-array: this material's applied (R,G,B) color
"""
return np.array(self.get_input(inp="diffuse_color_constant"))
@diffuse_color_constant.setter
def diffuse_color_constant(self, color):
"""
Args:
color (3-array): this material's applied (R,G,B) color
"""
self.set_input(inp="diffuse_color_constant", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def diffuse_texture(self):
"""
Returns:
str: this material's applied diffuse_texture filepath
"""
return self.get_input(inp="diffuse_texture").resolvedPath
@diffuse_texture.setter
def diffuse_texture(self, fpath):
"""
Args:
str: this material's applied diffuse_texture filepath
"""
self.set_input(inp="diffuse_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def albedo_desaturation(self):
"""
Returns:
float: this material's applied albedo_desaturation
"""
return self.get_input(inp="albedo_desaturation")
@albedo_desaturation.setter
def albedo_desaturation(self, desaturation):
"""
Args:
desaturation (float): this material's applied albedo_desaturation
"""
self.set_input(inp="albedo_desaturation", val=desaturation)
@property
def albedo_add(self):
"""
Returns:
float: this material's applied albedo_add
"""
return self.get_input(inp="albedo_add")
@albedo_add.setter
def albedo_add(self, add):
"""
Args:
add (float): this material's applied albedo_add
"""
self.set_input(inp="albedo_add", val=add)
@property
def albedo_brightness(self):
"""
Returns:
float: this material's applied albedo_brightness
"""
return self.get_input(inp="albedo_brightness")
@albedo_brightness.setter
def albedo_brightness(self, brightness):
"""
Args:
brightness (float): this material's applied albedo_brightness
"""
self.set_input(inp="albedo_brightness", val=brightness)
@property
def diffuse_tint(self):
"""
Returns:
3-array: this material's applied (R,G,B) diffuse_tint
"""
return np.array(self.get_input(inp="diffuse_tint"))
@diffuse_tint.setter
def diffuse_tint(self, color):
"""
Args:
color (3-array): this material's applied (R,G,B) diffuse_tint
"""
self.set_input(inp="diffuse_tint", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def reflection_roughness_constant(self):
"""
Returns:
float: this material's applied reflection_roughness_constant
"""
return self.get_input(inp="reflection_roughness_constant")
@reflection_roughness_constant.setter
def reflection_roughness_constant(self, roughness):
"""
Args:
roughness (float): this material's applied reflection_roughness_constant
"""
self.set_input(inp="reflection_roughness_constant", val=roughness)
@property
def reflection_roughness_texture_influence(self):
"""
Returns:
float: this material's applied reflection_roughness_texture_influence
"""
return self.get_input(inp="reflection_roughness_texture_influence")
@reflection_roughness_texture_influence.setter
def reflection_roughness_texture_influence(self, prop):
"""
Args:
prop (float): this material's applied reflection_roughness_texture_influence proportion
"""
self.set_input(inp="reflection_roughness_texture_influence", val=prop)
@property
def reflectionroughness_texture(self):
"""
Returns:
None or str: this material's applied reflectionroughness_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="reflectionroughness_texture")
return None if inp is None else inp.resolvedPath
@reflectionroughness_texture.setter
def reflectionroughness_texture(self, fpath):
"""
Args:
fpath (str): this material's applied reflectionroughness_texture fpath
"""
self.set_input(inp="reflectionroughness_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def metallic_constant(self):
"""
Returns:
float: this material's applied metallic_constant
"""
return self.get_input(inp="metallic_constant")
@metallic_constant.setter
def metallic_constant(self, constant):
"""
Args:
constant (float): this material's applied metallic_constant
"""
self.set_input(inp="metallic_constant", val=constant)
@property
def metallic_texture_influence(self):
"""
Returns:
float: this material's applied metallic_texture_influence
"""
return self.get_input(inp="metallic_texture_influence")
@metallic_texture_influence.setter
def metallic_texture_influence(self, prop):
"""
Args:
prop (float): this material's applied metallic_texture_influence
"""
self.set_input(inp="metallic_texture_influence", val=prop)
@property
def metallic_texture(self):
"""
Returns:
None or str: this material's applied metallic_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="metallic_texture")
return None if inp is None else inp.resolvedPath
@metallic_texture.setter
def metallic_texture(self, fpath):
"""
Args:
fpath (str): this material's applied metallic_texture fpath
"""
self.set_input(inp="metallic_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def specular_level(self):
"""
Returns:
float: this material's applied specular_level
"""
return self.get_input(inp="specular_level")
@specular_level.setter
def specular_level(self, level):
"""
Args:
level (float): this material's applied specular_level
"""
self.set_input(inp="specular_level", val=level)
@property
def enable_ORM_texture(self):
"""
Returns:
bool: this material's applied enable_ORM_texture
"""
return self.get_input(inp="enable_ORM_texture")
@enable_ORM_texture.setter
def enable_ORM_texture(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_ORM_texture
"""
self.set_input(inp="enable_ORM_texture", val=enabled)
@property
def ORM_texture(self):
"""
Returns:
None or str: this material's applied ORM_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="ORM_texture")
return None if inp is None else inp.resolvedPath
@ORM_texture.setter
def ORM_texture(self, fpath):
"""
Args:
fpath (str): this material's applied ORM_texture fpath
"""
self.set_input(inp="ORM_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def ao_to_diffuse(self):
"""
Returns:
float: this material's applied ao_to_diffuse
"""
return self.get_input(inp="ao_to_diffuse")
@ao_to_diffuse.setter
def ao_to_diffuse(self, val):
"""
Args:
val (float): this material's applied ao_to_diffuse
"""
self.set_input(inp="ao_to_diffuse", val=val)
@property
def ao_texture(self):
"""
Returns:
None or str: this material's applied ao_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="ao_texture")
return None if inp is None else inp.resolvedPath
@ao_texture.setter
def ao_texture(self, fpath):
"""
Args:
fpath (str): this material's applied ao_texture fpath
"""
self.set_input(inp="ao_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def enable_emission(self):
"""
Returns:
bool: this material's applied enable_emission
"""
return self.get_input(inp="enable_emission")
@enable_emission.setter
def enable_emission(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_emission
"""
self.set_input(inp="enable_emission", val=enabled)
@property
def emissive_color(self):
"""
Returns:
3-array: this material's applied (R,G,B) emissive_color
"""
return np.array(self.get_input(inp="emissive_color"))
@emissive_color.setter
def emissive_color(self, color):
"""
Args:
color (3-array): this material's applied emissive_color
"""
self.set_input(inp="emissive_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def emissive_color_texture(self):
"""
Returns:
None or str: this material's applied emissive_color_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="emissive_color_texture")
return None if inp is None else inp.resolvedPath
@emissive_color_texture.setter
def emissive_color_texture(self, fpath):
"""
Args:
fpath (str): this material's applied emissive_color_texture fpath
"""
self.set_input(inp="emissive_color_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def emissive_mask_texture(self):
"""
Returns:
None or str: this material's applied emissive_mask_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="emissive_mask_texture")
return None if inp is None else inp.resolvedPath
@emissive_mask_texture.setter
def emissive_mask_texture(self, fpath):
"""
Args:
fpath (str): this material's applied emissive_mask_texture fpath
"""
self.set_input(inp="emissive_mask_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def emissive_intensity(self):
"""
Returns:
float: this material's applied emissive_intensity
"""
return self.get_input(inp="emissive_intensity")
@emissive_intensity.setter
def emissive_intensity(self, intensity):
"""
Args:
intensity (float): this material's applied emissive_intensity
"""
self.set_input(inp="emissive_intensity", val=intensity)
@property
def enable_opacity(self):
"""
Returns:
bool: this material's applied enable_opacity
"""
return self.get_input(inp="enable_opacity")
@enable_opacity.setter
def enable_opacity(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_opacity
"""
self.set_input(inp="enable_opacity", val=enabled)
@property
def enable_opacity_texture(self):
"""
Returns:
bool: this material's applied enable_opacity_texture
"""
return self.get_input(inp="enable_opacity_texture")
@enable_opacity_texture.setter
def enable_opacity_texture(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_opacity_texture
"""
self.set_input(inp="enable_opacity_texture", val=enabled)
@property
def opacity_constant(self):
"""
Returns:
float: this material's applied opacity_constant
"""
return self.get_input(inp="opacity_constant")
@opacity_constant.setter
def opacity_constant(self, constant):
"""
Args:
constant (float): this material's applied opacity_constant
"""
self.set_input(inp="opacity_constant", val=constant)
@property
def opacity_texture(self):
"""
Returns:
None or str: this material's applied opacity_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="opacity_texture")
return None if inp is None else inp.resolvedPath
@opacity_texture.setter
def opacity_texture(self, fpath):
"""
Args:
fpath (str): this material's applied opacity_texture fpath
"""
self.set_input(inp="opacity_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def opacity_mode(self):
"""
Returns:
int: this material's applied opacity_mode
"""
return self.get_input(inp="opacity_mode")
@opacity_mode.setter
def opacity_mode(self, mode):
"""
Args:
mode (int): this material's applied opacity_mode
"""
self.set_input(inp="opacity_mode", val=mode)
@property
def opacity_threshold(self):
"""
Returns:
float: this material's applied opacity_threshold
"""
return self.get_input(inp="opacity_threshold")
@opacity_threshold.setter
def opacity_threshold(self, threshold):
"""
Args:
threshold (float): this material's applied opacity_threshold
"""
self.set_input(inp="opacity_threshold", val=threshold)
@property
def bump_factor(self):
"""
Returns:
float: this material's applied bump_factor
"""
return self.get_input(inp="bump_factor")
@bump_factor.setter
def bump_factor(self, factor):
"""
Args:
factor (float): this material's applied bump_factor
"""
self.set_input(inp="bump_factor", val=factor)
@property
def normalmap_texture(self):
"""
Returns:
None or str: this material's applied normalmap_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="normalmap_texture")
return None if inp is None else inp.resolvedPath
@normalmap_texture.setter
def normalmap_texture(self, fpath):
"""
Args:
fpath (str): this material's applied normalmap_texture fpath
"""
self.set_input(inp="normalmap_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def detail_bump_factor(self):
"""
Returns:
float: this material's applied detail_bump_factor
"""
return self.get_input(inp="detail_bump_factor")
@detail_bump_factor.setter
def detail_bump_factor(self, factor):
"""
Args:
factor (float): this material's applied detail_bump_factor
"""
self.set_input(inp="detail_bump_factor", val=factor)
@property
def detail_normalmap_texture(self):
"""
Returns:
None or str: this material's applied detail_normalmap_texture fpath if there is a texture applied, else
None
"""
inp = self.get_input(inp="detail_normalmap_texture")
return None if inp is None else inp.resolvedPath
@detail_normalmap_texture.setter
def detail_normalmap_texture(self, fpath):
"""
Args:
fpath (str): this material's applied detail_normalmap_texture fpath
"""
self.set_input(inp="detail_normalmap_texture", val=lazy.pxr.Sdf.AssetPath(fpath))
@property
def flip_tangent_u(self):
"""
Returns:
bool: this material's applied flip_tangent_u
"""
return self.get_input(inp="flip_tangent_u")
@flip_tangent_u.setter
def flip_tangent_u(self, flipped):
"""
Args:
flipped (bool): this material's applied flip_tangent_u
"""
self.set_input(inp="flip_tangent_u", val=flipped)
@property
def flip_tangent_v(self):
"""
Returns:
bool: this material's applied flip_tangent_v
"""
return self.get_input(inp="flip_tangent_v")
@flip_tangent_v.setter
def flip_tangent_v(self, flipped):
"""
Args:
flipped (bool): this material's applied flip_tangent_v
"""
self.set_input(inp="flip_tangent_v", val=flipped)
@property
def project_uvw(self):
"""
Returns:
bool: this material's applied project_uvw
"""
return self.get_input(inp="project_uvw")
@project_uvw.setter
def project_uvw(self, projected):
"""
Args:
projected (bool): this material's applied project_uvw
"""
self.set_input(inp="project_uvw", val=projected)
@property
def world_or_object(self):
"""
Returns:
bool: this material's applied world_or_object
"""
return self.get_input(inp="world_or_object")
@world_or_object.setter
def world_or_object(self, val):
"""
Args:
val (bool): this material's applied world_or_object
"""
self.set_input(inp="world_or_object", val=val)
@property
def uv_space_index(self):
"""
Returns:
int: this material's applied uv_space_index
"""
return self.get_input(inp="uv_space_index")
@uv_space_index.setter
def uv_space_index(self, index):
"""
Args:
index (int): this material's applied uv_space_index
"""
self.set_input(inp="uv_space_index", val=index)
@property
def texture_translate(self):
"""
Returns:
2-array: this material's applied texture_translate
"""
return np.array(self.get_input(inp="texture_translate"))
@texture_translate.setter
def texture_translate(self, translate):
"""
Args:
translate (2-array): this material's applied (x,y) texture_translate
"""
self.set_input(inp="texture_translate", val=lazy.pxr.Gf.Vec2f(*np.array(translate, dtype=float)))
@property
def texture_rotate(self):
"""
Returns:
float: this material's applied texture_rotate
"""
return self.get_input(inp="texture_rotate")
@texture_rotate.setter
def texture_rotate(self, rotate):
"""
Args:
rotate (float): this material's applied texture_rotate
"""
self.set_input(inp="texture_rotate", val=rotate)
@property
def texture_scale(self):
"""
Returns:
2-array: this material's applied texture_scale
"""
return np.array(self.get_input(inp="texture_scale"))
@texture_scale.setter
def texture_scale(self, scale):
"""
Args:
scale (2-array): this material's applied (x,y) texture_scale
"""
self.set_input(inp="texture_scale", val=lazy.pxr.Gf.Vec2f(*np.array(scale, dtype=float)))
@property
def detail_texture_translate(self):
"""
Returns:
2-array: this material's applied detail_texture_translate
"""
return np.array(self.get_input(inp="detail_texture_translate"))
@detail_texture_translate.setter
def detail_texture_translate(self, translate):
"""
Args:
translate (2-array): this material's applied detail_texture_translate
"""
self.set_input(inp="detail_texture_translate", val=lazy.pxr.Gf.Vec2f(*np.array(translate, dtype=float)))
@property
def detail_texture_rotate(self):
"""
Returns:
float: this material's applied detail_texture_rotate
"""
return self.get_input(inp="detail_texture_rotate")
@detail_texture_rotate.setter
def detail_texture_rotate(self, rotate):
"""
Args:
rotate (float): this material's applied detail_texture_rotate
"""
self.set_input(inp="detail_texture_rotate", val=rotate)
@property
def detail_texture_scale(self):
"""
Returns:
2-array: this material's applied detail_texture_scale
"""
return np.array(self.get_input(inp="detail_texture_scale"))
@detail_texture_scale.setter
def detail_texture_scale(self, scale):
"""
Args:
scale (2-array): this material's applied detail_texture_scale
"""
self.set_input(inp="detail_texture_scale", val=lazy.pxr.Gf.Vec2f(*np.array(scale, dtype=float)))
@property
def exclude_from_white_mode(self):
"""
Returns:
bool: this material's applied excludeFromWhiteMode
"""
return self.get_input(inp="excludeFromWhiteMode")
@exclude_from_white_mode.setter
def exclude_from_white_mode(self, exclude):
"""
Args:
exclude (bool): this material's applied excludeFromWhiteMode
"""
self.set_input(inp="excludeFromWhiteMode", val=exclude)
@property
def diffuse_reflection_weight(self):
"""
Returns:
float: this material's applied diffuse_reflection_weight
"""
return self.get_input(inp="diffuse_reflection_weight")
@diffuse_reflection_weight.setter
def diffuse_reflection_weight(self, weight):
"""
Args:
weight (float): this material's applied diffuse_reflection_weight
"""
self.set_input(inp="diffuse_reflection_weight", val=weight)
@property
def enable_specular_transmission(self):
"""
Returns:
bool: this material's applied enable_specular_transmission
"""
return self.get_input(inp="enable_specular_transmission")
@enable_specular_transmission.setter
def enable_specular_transmission(self, enabled):
"""
Args:
enabled (bool): this material's applied enable_specular_transmission
"""
self.set_input(inp="enable_specular_transmission", val=enabled)
@property
def specular_transmission_weight(self):
"""
Returns:
float: this material's applied specular_transmission_weight
"""
return self.get_input(inp="specular_transmission_weight")
@specular_transmission_weight.setter
def specular_transmission_weight(self, weight):
"""
Args:
weight (float): this material's applied specular_transmission_weight
"""
self.set_input(inp="specular_transmission_weight", val=weight)
@property
def diffuse_reflection_color(self):
"""
Returns:
3-array: this material's diffuse_reflection_color in (R,G,B)
"""
return np.array(self.get_input(inp="diffuse_reflection_color"))
@diffuse_reflection_color.setter
def diffuse_reflection_color(self, color):
"""
Args:
color (3-array): this material's diffuse_reflection_color in (R,G,B)
"""
self.set_input(inp="diffuse_reflection_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def specular_reflection_color(self):
"""
Returns:
3-array: this material's specular_reflection_color in (R,G,B)
"""
return np.array(self.get_input(inp="specular_reflection_color"))
@specular_reflection_color.setter
def specular_reflection_color(self, color):
"""
Args:
color (3-array): this material's specular_reflection_color in (R,G,B)
"""
self.set_input(inp="specular_reflection_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def specular_transmission_color(self):
"""
Returns:
3-array: this material's specular_transmission_color in (R,G,B)
"""
return np.array(self.get_input(inp="specular_transmission_color"))
@specular_transmission_color.setter
def specular_transmission_color(self, color):
"""
Args:
color (3-array): this material's specular_transmission_color in (R,G,B)
"""
self.set_input(inp="specular_transmission_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def specular_transmission_scattering_color(self):
"""
Returns:
3-array: this material's specular_transmission_scattering_color in (R,G,B)
"""
return np.array(self.get_input(inp="specular_transmission_scattering_color"))
@specular_transmission_scattering_color.setter
def specular_transmission_scattering_color(self, color):
"""
Args:
color (3-array): this material's specular_transmission_scattering_color in (R,G,B)
"""
self.set_input(inp="specular_transmission_scattering_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
@property
def specular_reflection_ior_preset(self):
"""
Returns:
int: this material's specular_reflection_ior_preset (int corresponding to enum)
"""
return self.get_input(inp="specular_reflection_ior_preset")
@specular_reflection_ior_preset.setter
def specular_reflection_ior_preset(self, preset):
"""
Args:
preset (int): this material's specular_reflection_ior_preset (int corresponding to enum)
"""
self.set_input(inp="specular_reflection_ior_preset", val=preset)
@property
def enable_diffuse_transmission(self):
"""
Returns:
float: this material's applied enable_diffuse_transmission
"""
return self.get_input(inp="enable_diffuse_transmission")
@enable_diffuse_transmission.setter
def enable_diffuse_transmission(self, val):
"""
Args:
val (bool): this material's applied enable_diffuse_transmission
"""
self.set_input(inp="enable_diffuse_transmission", val=val)
@property
def glass_color(self):
"""
Returns:
3-array: this material's applied (R,G,B) glass color (only applicable to OmniGlass materials)
"""
assert self.is_glass, f"Tried to query glass_color shader input, " \
f"but material at {self.prim_path} is not an OmniGlass material!"
return np.array(self.get_input(inp="glass_color"))
@glass_color.setter
def glass_color(self, color):
"""
Args:
color (3-array): this material's applied (R,G,B) glass color (only applicable to OmniGlass materials)
"""
assert self.is_glass, f"Tried to set glass_color shader input, " \
f"but material at {self.prim_path} is not an OmniGlass material!"
self.set_input(inp="glass_color", val=lazy.pxr.Gf.Vec3f(*np.array(color, dtype=float)))
| 36,853 | Python | 31.271454 | 122 | 0.595908 |
StanfordVL/OmniGibson/omnigibson/prims/__init__.py | from omnigibson.prims.prim_base import BasePrim
from omnigibson.prims.cloth_prim import ClothPrim
from omnigibson.prims.entity_prim import EntityPrim
from omnigibson.prims.geom_prim import GeomPrim, VisualGeomPrim, CollisionGeomPrim, CollisionVisualGeomPrim
from omnigibson.prims.joint_prim import JointPrim
from omnigibson.prims.rigid_prim import RigidPrim
from omnigibson.prims.xform_prim import XFormPrim
| 408 | Python | 50.124994 | 107 | 0.865196 |
StanfordVL/OmniGibson/omnigibson/prims/prim_base.py | from abc import ABC, abstractmethod
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.utils.python_utils import Serializable, UniquelyNamed, Recreatable
from omnigibson.utils.sim_utils import check_deletable_prim
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
class BasePrim(Serializable, UniquelyNamed, Recreatable, ABC):
"""
Provides high level functions to deal with a basic prim and its attributes/ properties.
If there is an Xform prim present at the path, it will use it. Otherwise, a new XForm prim at
the specified prim path will be created.
Note: the prim will have "xformOp:orient", "xformOp:translate" and "xformOp:scale" only post init,
unless it is a non-root articulation link.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
@prim_path -- it will be ignored if it already exists. Subclasses should define the exact keys expected
for their class.
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
self._prim_path = prim_path
self._name = name
self._load_config = dict() if load_config is None else load_config
# Other values that will be filled in at runtime
self._applied_visual_material = None
self._loaded = False # Whether this prim exists in the stage or not
self._initialized = False # Whether this prim has its internal handles / info initialized or not (occurs AFTER and INDEPENDENTLY from loading!)
self._prim = None
self._state_size = None
self._n_duplicates = 0 # Simple counter for keeping track of duplicates for unique name indexing
# Run super init
super().__init__()
# Run some post-loading steps if this prim has already been loaded
if lazy.omni.isaac.core.utils.prims.is_prim_path_valid(prim_path=self._prim_path):
log.debug(f"prim {name} already exists, skipping load")
self._prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path=self._prim_path)
self._loaded = True
# Run post load.
self._post_load()
def _initialize(self):
"""
Initializes state of this object and sets up any references necessary post-loading. Should be implemented by
sub-class for extended utility
"""
pass
def initialize(self):
"""
Initializes state of this object and sets up any references necessary post-loading. Subclasses should
implement / extend the _initialize() method.
"""
assert not self._initialized, \
f"Prim {self.name} at prim_path {self._prim_path} can only be initialized once! (It is already initialized)"
self._initialize()
# Cache state size
self._state_size = len(self.dump_state(serialized=True))
self._initialized = True
def load(self):
"""
Load this prim into omniverse, and return loaded prim reference.
Returns:
Usd.Prim: Prim object loaded into the simulator
"""
if self._loaded:
raise ValueError(f"Cannot load prim {self.name} multiple times.")
# Load prim
self._prim = self._load()
self._loaded = True
# Run any post-loading logic
self._post_load()
return self._prim
def _post_load(self):
"""
Any actions that should be taken (e.g.: modifying the object's properties such as scale, visibility, additional
joints, etc.) that should be taken after loading the raw object into omniverse but BEFORE we initialize the
object and grab its handles and internal references. By default, this is a no-op.
"""
pass
def remove(self):
"""
Removes this prim from omniverse stage.
"""
if not self._loaded:
raise ValueError("Cannot remove a prim that was never loaded.")
# Remove prim if it can be deleted
if check_deletable_prim(self.prim_path):
lazy.omni.isaac.core.utils.prims.delete_prim(self.prim_path)
# Also clear the name so we can reuse this later
self.remove_names()
def _load(self):
"""
Loads the raw prim into the simulator. Any post-processing should be done in @self._post_load()
"""
raise NotImplementedError()
@property
def loaded(self):
return self._loaded
@property
def initialized(self):
return self._initialized
@property
def state_size(self):
# This is the cached value
return self._state_size
@property
def prim_path(self):
"""
Returns:
str: prim path in the stage.
"""
return self._prim_path
@property
def name(self):
"""
Returns:
str: unique name assigned to this prim
"""
return self._name
@property
def prim(self):
"""
Returns:
Usd.Prim: USD Prim object that this object holds.
"""
return self._prim
@property
def property_names(self):
"""
Returns:
set of str: Set of property names that this prim has (e.g.: visibility, proxyPrim, etc.)
"""
return set(self._prim.GetPropertyNames())
@property
def visible(self):
"""
Returns:
bool: true if the prim is visible in stage. false otherwise.
"""
return lazy.pxr.UsdGeom.Imageable(self.prim).ComputeVisibility(lazy.pxr.Usd.TimeCode.Default()) != lazy.pxr.UsdGeom.Tokens.invisible
@visible.setter
def visible(self, visible):
"""
Sets the visibility of the prim in stage.
Args:
visible (bool): flag to set the visibility of the usd prim in stage.
"""
imageable = lazy.pxr.UsdGeom.Imageable(self.prim)
if visible:
imageable.MakeVisible()
else:
imageable.MakeInvisible()
return
def is_valid(self):
"""
Returns:
bool: True is the current prim path corresponds to a valid prim in stage. False otherwise.
"""
return lazy.omni.isaac.core.utils.prims.is_prim_path_valid(self.prim_path)
def change_prim_path(self, new_prim_path):
"""
Moves prim from the old path to a new one.
Args:
new_prim_path (str): new path of the prim to be moved to.
"""
lazy.omni.isaac.core.utils.prims.move_prim(path_from=self.prim_path, path_to=new_prim_path)
self._prim_path = new_prim_path
self._prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(self._prim_path)
return
def get_attribute(self, attr):
"""
Get this prim's attribute. Should be a valid attribute under self._prim.GetAttributes()
Returns:
any: value of the requested @attribute
"""
return self._prim.GetAttribute(attr).Get()
def set_attribute(self, attr, val):
"""
Set this prim's attribute. Should be a valid attribute under self._prim.GetAttributes()
Args:
attr (str): Attribute to set
val (any): Value to set for the attribute. This should be the valid type for that attribute.
"""
self._prim.GetAttribute(attr).Set(val)
def get_property(self, prop):
"""
Sets property @prop with value @val
Args:
prop (str): Name of the property to get. See Raw USD Properties in the GUI for examples of property names
Returns:
any: Property value
"""
self._prim.GetProperty(prop).Get()
def set_property(self, prop, val):
"""
Sets property @prop with value @val
Args:
prop (str): Name of the property to set. See Raw USD Properties in the GUI for examples of property names
val (any): Value to set for the property. Should be valid for that property
"""
self._prim.GetProperty(prop).Set(val)
def get_custom_data(self):
"""
Get custom data associated with this prim
Returns:
dict: Dictionary of any custom information
"""
return self._prim.GetCustomData()
def _create_prim_with_same_kwargs(self, prim_path, name, load_config):
"""
Generates a new instance of this prim's class with specified @prim_path, @name, and @load_config, but otherwise
all other kwargs should be identical to this instance's values.
Args:
prim_path (str): Absolute path to the newly generated prim
name (str): Name for the newly created prim
load_config (dict): Keyword-mapped kwargs to use to set specific attributes for the created prim's instance
Returns:
BasePrim: Generated prim object (not loaded, and not initialized!)
"""
return self.__class__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
    def duplicate(self, prim_path):
        """
        Duplicates this object, and generates a new instance at @prim_path.
        Note that the created object is automatically loaded into the simulator, but is NOT initialized
        until a sim step occurs!

        Args:
            prim_path (str): Absolute path to the newly generated prim

        Returns:
            BasePrim: Generated prim object
        """
        # Copy name gets a monotonically-increasing suffix so repeated duplicates stay unique
        new_prim = self._create_prim_with_same_kwargs(
            prim_path=prim_path,
            name=f"{self.name}_copy{self._n_duplicates}",
            load_config=self._load_config,
        )
        # register=False: presumably keeps the copy out of the scene's object registry -- TODO confirm
        og.sim.import_object(new_prim, register=False)
        # Increment duplicate count
        self._n_duplicates += 1
        # Set visibility to match this (source) prim's current visibility
        new_prim.visible = self.visible
        return new_prim
| 10,456 | Python | 32.732258 | 177 | 0.605681 |
StanfordVL/OmniGibson/omnigibson/prims/geom_prim.py | from functools import cached_property
import numpy as np
import trimesh
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.utils.python_utils import assert_valid_key
from omnigibson.utils.usd_utils import PoseAPI, mesh_prim_shape_to_trimesh_mesh
import omnigibson.utils.transform_utils as T
class GeomPrim(XFormPrim):
    """
    Provides high level functions to deal with a geom prim and its attributes / properties.
    If there is a geom prim present at the path, it will use it. By default, a geom prim cannot be directly
    created from scratch.

    Args:
        prim_path (str): prim path of the Prim to encapsulate or create.
        name (str): Name for the object. Names need to be unique per scene.
        load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
            loading this prim at runtime. For this mesh prim, the below values can be specified:
    """

    def __init__(
        self,
        prim_path,
        name,
        load_config=None,
    ):
        # Run super method
        super().__init__(
            prim_path=prim_path,
            name=name,
            load_config=load_config,
        )

    def _load(self):
        # This should not be called, because this prim cannot be instantiated from scratch!
        raise NotImplementedError("By default, a geom prim cannot be created from scratch.")

    def _post_load(self):
        # run super first
        super()._post_load()
        # By default, GeomPrim shows up in the rendering.
        self.purpose = "default"

    def duplicate(self, prim_path):
        # Cannot directly duplicate a mesh prim
        raise NotImplementedError("Cannot directly duplicate a geom prim!")

    @property
    def purpose(self):
        """
        Returns:
            str: the purpose used for this geom, one of {"default", "render", "proxy", "guide"}
        """
        return self.get_attribute("purpose")

    @purpose.setter
    def purpose(self, purpose):
        """
        Sets the purpose of this geom

        Args:
            purpose (str): the purpose used for this geom, one of {"default", "render", "proxy", "guide"}
        """
        self.set_attribute("purpose", purpose)

    @property
    def color(self):
        """
        Returns:
            None or 3-array: If set, the default RGB color used for this visual geom
        """
        # Prefer the bound material's constant diffuse color; fall back to the raw displayColor primvar
        if self.has_material():
            return self.material.diffuse_color_constant
        else:
            color = self.get_attribute("primvars:displayColor")
            return None if color is None else np.array(color)[0]

    @color.setter
    def color(self, rgb):
        """
        Sets the RGB color of this visual mesh

        Args:
            rgb (3-array): The default RGB color used for this visual geom
        """
        if self.has_material():
            self.material.diffuse_color_constant = rgb
        else:
            self.set_attribute("primvars:displayColor", np.array(rgb))

    @property
    def opacity(self):
        """
        Returns:
            None or float: If set, the default opacity used for this visual geom
        """
        # Prefer the bound material's constant opacity; fall back to the raw displayOpacity primvar
        if self.has_material():
            return self.material.opacity_constant
        else:
            opacity = self.get_attribute("primvars:displayOpacity")
            return None if opacity is None else np.array(opacity)[0]

    @opacity.setter
    def opacity(self, opacity):
        """
        Sets the opacity of this visual mesh

        Args:
            opacity (float): The default opacity used for this visual geom
        """
        if self.has_material():
            self.material.opacity_constant = opacity
        else:
            self.set_attribute("primvars:displayOpacity", np.array([opacity]))

    @property
    def points(self):
        """
        Returns:
            np.ndarray: Local poses of all points
        """
        mesh = self.prim
        mesh_type = mesh.GetPrimTypeInfo().GetTypeName()
        if mesh_type == "Mesh":
            # If the geom is a mesh we can directly return its points.
            return np.array(self.prim.GetAttribute("points").Get())
        else:
            # Otherwise (primitive shapes), convert to a trimesh and return its vertices
            return np.array(mesh_prim_shape_to_trimesh_mesh(mesh).vertices)

    @property
    def points_in_parent_frame(self):
        """
        Returns:
            None or np.ndarray: This geom's points, transformed (scale, rotation, translation)
                into the parent prim's frame
        """
        points = self.points
        if points is None:
            return None
        position, orientation = self.get_local_pose()
        scale = self.scale
        # Apply scale, then rotation, then translation to map local points into the parent frame
        points_scaled = points * scale
        points_rotated = np.dot(T.quat2mat(orientation), points_scaled.T).T
        points_transformed = points_rotated + position
        return points_transformed

    @property
    def aabb(self):
        """
        Returns:
            2-tuple: (aabb_lo, aabb_hi) world-frame axis-aligned bounding box corners, each a 3-array
        """
        world_pose_w_scale = PoseAPI.get_world_pose_with_scale(self.prim_path)
        # transform self.points into world frame (homogeneous coordinates)
        points = self.points
        points_homogeneous = np.hstack((points, np.ones((points.shape[0], 1))))
        points_transformed = (points_homogeneous @ world_pose_w_scale.T)[:,:3]
        aabb_lo = np.min(points_transformed, axis=0)
        aabb_hi = np.max(points_transformed, axis=0)
        return aabb_lo, aabb_hi

    @property
    def aabb_extent(self):
        """
        Bounding box extent of this geom prim

        Returns:
            3-array: (x,y,z) bounding box
        """
        min_corner, max_corner = self.aabb
        return max_corner - min_corner

    @property
    def aabb_center(self):
        """
        Bounding box center of this geom prim

        Returns:
            3-array: (x,y,z) bounding box center
        """
        min_corner, max_corner = self.aabb
        return (max_corner + min_corner) / 2.0

    @cached_property
    def extent(self):
        """
        Returns:
            np.ndarray: The unscaled 3d extent of the mesh in its local frame.
        """
        # cached_property: assumes the underlying mesh points never change after first access
        points = self.points
        return np.max(points, axis=0) - np.min(points, axis=0)
class CollisionGeomPrim(GeomPrim):
    """
    A geom prim that participates in physics collisions. Applies the USD / PhysX collision APIs
    on load and exposes helpers for contact offsets, collision approximation, and physics materials.
    """

    def __init__(
        self,
        prim_path,
        name,
        load_config=None,
    ):
        # Store values created at runtime
        self._collision_api = None
        self._mesh_collision_api = None
        self._physx_collision_api = None
        self._applied_physics_material = None
        # Run super method
        super().__init__(
            prim_path=prim_path,
            name=name,
            load_config=load_config,
        )

    def _post_load(self):
        # run super first
        super()._post_load()
        # By default, CollisionGeomPrim does not show up in the rendering.
        self.purpose = "guide"
        # Create API references, applying each API first if the prim does not already have it
        self._collision_api = lazy.pxr.UsdPhysics.CollisionAPI(self._prim) if \
            self._prim.HasAPI(lazy.pxr.UsdPhysics.CollisionAPI) else lazy.pxr.UsdPhysics.CollisionAPI.Apply(self._prim)
        self._physx_collision_api = lazy.pxr.PhysxSchema.PhysxCollisionAPI(self._prim) if \
            self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxCollisionAPI) else lazy.pxr.PhysxSchema.PhysxCollisionAPI.Apply(self._prim)
        # Optionally add mesh collision API if this is a mesh
        if self._prim.GetPrimTypeInfo().GetTypeName() == "Mesh":
            self._mesh_collision_api = lazy.pxr.UsdPhysics.MeshCollisionAPI(self._prim) if \
                self._prim.HasAPI(lazy.pxr.UsdPhysics.MeshCollisionAPI) else lazy.pxr.UsdPhysics.MeshCollisionAPI.Apply(self._prim)
            # Set the approximation to be convex hull by default
            self.set_collision_approximation(approximation_type="convexHull")

    @property
    def collision_enabled(self):
        """
        Returns:
            bool: Whether collisions are enabled for this collision mesh
        """
        return self.get_attribute("physics:collisionEnabled")

    @collision_enabled.setter
    def collision_enabled(self, enabled):
        """
        Sets whether collisions are enabled for this mesh

        Args:
            enabled (bool): Whether collisions should be enabled for this mesh
        """
        # Currently, trying to toggle while simulator is playing while using GPU dynamics results in a crash, so we
        # assert that the sim is stopped here
        if self._initialized and gm.USE_GPU_DYNAMICS:
            assert og.sim.is_stopped(), "Cannot toggle collisions while using GPU dynamics unless simulator is stopped!"
        self.set_attribute("physics:collisionEnabled", enabled)

    # TODO: Maybe this should all be added to RigidPrim instead?
    def set_contact_offset(self, offset):
        """
        Args:
            offset (float): Contact offset of a collision shape. Allowed range [maximum(0, rest_offset), 0].
                Default value is -inf, means default is picked by simulation based on the shape extent.
        """
        self._physx_collision_api.GetContactOffsetAttr().Set(offset)
        return

    def get_contact_offset(self):
        """
        Returns:
            float: contact offset of the collision shape.
        """
        return self._physx_collision_api.GetContactOffsetAttr().Get()

    def set_rest_offset(self, offset):
        """
        Args:
            offset (float): Rest offset of a collision shape. Allowed range [-max_float, contact_offset].
                Default value is -inf, means default is picked by simulation. For rigid bodies its zero.
        """
        self._physx_collision_api.GetRestOffsetAttr().Set(offset)
        return

    def get_rest_offset(self):
        """
        Returns:
            float: rest offset of the collision shape.
        """
        return self._physx_collision_api.GetRestOffsetAttr().Get()

    def set_torsional_patch_radius(self, radius):
        """
        Args:
            radius (float): radius of the contact patch used to apply torsional friction. Allowed range [0, max_float].
        """
        self._physx_collision_api.GetTorsionalPatchRadiusAttr().Set(radius)
        return

    def get_torsional_patch_radius(self):
        """
        Returns:
            float: radius of the contact patch used to apply torsional friction. Allowed range [0, max_float].
        """
        return self._physx_collision_api.GetTorsionalPatchRadiusAttr().Get()

    def set_min_torsional_patch_radius(self, radius):
        """
        Args:
            radius (float): minimum radius of the contact patch used to apply torsional friction. Allowed range [0, max_float].
        """
        self._physx_collision_api.GetMinTorsionalPatchRadiusAttr().Set(radius)
        return

    def get_min_torsional_patch_radius(self):
        """
        Returns:
            float: minimum radius of the contact patch used to apply torsional friction. Allowed range [0, max_float].
        """
        return self._physx_collision_api.GetMinTorsionalPatchRadiusAttr().Get()

    def set_collision_approximation(self, approximation_type):
        """
        Args:
            approximation_type (str): approximation used for collision.
                Can be one of: {"none", "convexHull", "convexDecomposition", "meshSimplification", "sdf",
                    "boundingSphere", "boundingCube"}
                If None, the approximation will use the underlying triangle mesh.
        """
        assert self._mesh_collision_api is not None, "collision_approximation only applicable for meshes!"
        assert_valid_key(
            key=approximation_type,
            valid_keys={"none", "convexHull", "convexDecomposition", "meshSimplification", "sdf", "boundingSphere", "boundingCube"},
            name="collision approximation type",
        )
        # Make sure to add the appropriate API if we're setting certain values
        if approximation_type == "convexHull" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxConvexHullCollisionAPI):
            lazy.pxr.PhysxSchema.PhysxConvexHullCollisionAPI.Apply(self._prim)
        elif approximation_type == "convexDecomposition" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxConvexDecompositionCollisionAPI):
            lazy.pxr.PhysxSchema.PhysxConvexDecompositionCollisionAPI.Apply(self._prim)
        elif approximation_type == "meshSimplification" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI):
            lazy.pxr.PhysxSchema.PhysxTriangleMeshSimplificationCollisionAPI.Apply(self._prim)
        elif approximation_type == "sdf" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxSDFMeshCollisionAPI):
            lazy.pxr.PhysxSchema.PhysxSDFMeshCollisionAPI.Apply(self._prim)
        elif approximation_type == "none" and not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxTriangleMeshCollisionAPI):
            lazy.pxr.PhysxSchema.PhysxTriangleMeshCollisionAPI.Apply(self._prim)
        if approximation_type == "convexHull":
            pch_api = lazy.pxr.PhysxSchema.PhysxConvexHullCollisionAPI(self._prim)
            # Also make sure the maximum vertex count is 60 (max number compatible with GPU)
            # https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics/rigid-bodies.html#collision-settings
            if pch_api.GetHullVertexLimitAttr().Get() is None:
                pch_api.CreateHullVertexLimitAttr()
            pch_api.GetHullVertexLimitAttr().Set(60)
        self._mesh_collision_api.GetApproximationAttr().Set(approximation_type)

    def get_collision_approximation(self):
        """
        Returns:
            str: approximation used for collision, one of {"none", "convexHull", "convexDecomposition",
                "meshSimplification", "sdf", "boundingSphere", "boundingCube"}
        """
        assert self._mesh_collision_api is not None, "collision_approximation only applicable for meshes!"
        return self._mesh_collision_api.GetApproximationAttr().Get()

    def apply_physics_material(self, physics_material, weaker_than_descendants=False):
        """
        Used to apply physics material to the held prim and optionally its descendants.

        Args:
            physics_material (PhysicsMaterial): physics material to be applied to the held prim. This where you want to
                define friction, restitution..etc. Note: if a physics material is not
                defined, the defaults will be used from PhysX.
            weaker_than_descendants (bool, optional): True if the material shouldn't override the descendants
                materials, otherwise False. Defaults to False.
        """
        # NOTE(review): self._binding_api is presumably initialized to None by the parent class -- verify
        if self._binding_api is None:
            if self._prim.HasAPI(lazy.pxr.UsdShade.MaterialBindingAPI):
                self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI(self.prim)
            else:
                self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI.Apply(self.prim)
        if weaker_than_descendants:
            self._binding_api.Bind(
                physics_material.material,
                bindingStrength=lazy.pxr.UsdShade.Tokens.weakerThanDescendants,
                materialPurpose="physics",
            )
        else:
            self._binding_api.Bind(
                physics_material.material,
                bindingStrength=lazy.pxr.UsdShade.Tokens.strongerThanDescendants,
                materialPurpose="physics",
            )
        self._applied_physics_material = physics_material
        return

    def get_applied_physics_material(self):
        """
        Returns the current applied physics material in case it was applied using apply_physics_material or not.

        Returns:
            PhysicsMaterial: the current applied physics material.
        """
        if self._binding_api is None:
            if self._prim.HasAPI(lazy.pxr.UsdShade.MaterialBindingAPI):
                self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI(self.prim)
            else:
                self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI.Apply(self.prim)
        if self._applied_physics_material is not None:
            # Cached from a previous apply_physics_material call
            return self._applied_physics_material
        else:
            # Query the USD binding directly and cache the result
            physics_binding = self._binding_api.GetDirectBinding(materialPurpose="physics")
            path = physics_binding.GetMaterialPath()
            if path == "":
                return None
            else:
                self._applied_physics_material = lazy.omni.isaac.core.materials.PhysicsMaterial(prim_path=path)
                return self._applied_physics_material
class VisualGeomPrim(GeomPrim):
    """A purely visual geom prim -- identical to GeomPrim, with no collision behavior added."""
    pass
class CollisionVisualGeomPrim(CollisionGeomPrim, VisualGeomPrim):
    """A geom prim that is both collidable and rendered in the viewport."""

    def _post_load(self):
        # run super first
        super()._post_load()
        # The purpose should be default, not guide as set by CollisionGeomPrim
        # this is to make sure the geom is visualized, even though it's also collidable
        self.purpose = "default"
| 17,061 | Python | 38.133027 | 148 | 0.624465 |
StanfordVL/OmniGibson/omnigibson/prims/rigid_prim.py | from functools import cached_property
from scipy.spatial import ConvexHull, QhullError
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import gm, create_module_macros
from omnigibson.prims.xform_prim import XFormPrim
from omnigibson.prims.geom_prim import CollisionGeomPrim, VisualGeomPrim
from omnigibson.utils.constants import GEOM_TYPES
from omnigibson.utils.sim_utils import CsRawData
from omnigibson.utils.usd_utils import PoseAPI, get_mesh_volume_and_com, check_extent_radius_ratio
import omnigibson.utils.transform_utils as T
from omnigibson.utils.ui_utils import create_module_logger
# Create module logger
log = create_module_logger(module_name=__name__)
# Create settings for this module
m = create_module_macros(module_path=__file__)
m.DEFAULT_CONTACT_OFFSET = 0.001
m.DEFAULT_REST_OFFSET = 0.0
class RigidPrim(XFormPrim):
"""
Provides high level functions to deal with a rigid body prim and its attributes/ properties.
If there is an prim present at the path, it will use it. Otherwise, a new XForm prim at
the specified prim path will be created.
Notes: if the prim does not already have a rigid body api applied to it before it is loaded,
it will apply it.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
@prim_path -- it will be ignored if it already exists. For this joint prim, the below values can be
specified:
scale (None or float or 3-array): If specified, sets the scale for this object. A single number corresponds
to uniform scaling along the x,y,z axes, whereas a 3-array specifies per-axis scaling.
mass (None or float): If specified, mass of this body in kg
density (None or float): If specified, density of this body in kg / m^3
visual_only (None or bool): If specified, whether this prim should include collisions or not.
Default is True.
kinematic_only (None or bool): If specified, whether this prim should be kinematic-only or not.
"""
    def __init__(
        self,
        prim_path,
        name,
        load_config=None,
    ):
        # Other values that will be filled in at runtime
        self._rigid_prim_view_direct = None
        self._cs = None # Contact sensor interface
        self._body_name = None
        self._visual_only = None
        self._collision_meshes = None
        self._visual_meshes = None
        # Caches for kinematic-only objects
        # This exists because RigidPrimView uses USD pose read, which is very slow
        self._kinematic_world_pose_cache = None
        self._kinematic_local_pose_cache = None
        # Run super init (triggers _post_load, which populates the values above)
        super().__init__(
            prim_path=prim_path,
            name=name,
            load_config=load_config,
        )
    def _post_load(self):
        # Create the view
        # Import now to avoid too-eager load of Omni classes due to inheritance
        from omnigibson.utils.deprecated_utils import RigidPrimView
        self._rigid_prim_view_direct = RigidPrimView(self._prim_path)
        # Set it to be kinematic if necessary; this is done before super()._post_load() runs
        kinematic_only = "kinematic_only" in self._load_config and self._load_config["kinematic_only"]
        self.set_attribute("physics:kinematicEnabled", kinematic_only)
        self.set_attribute("physics:rigidBodyEnabled", not kinematic_only)
        # run super first
        super()._post_load()
        # Apply rigid body and mass APIs (idempotent: only applied if not already present)
        if not self._prim.HasAPI(lazy.pxr.UsdPhysics.RigidBodyAPI):
            lazy.pxr.UsdPhysics.RigidBodyAPI.Apply(self._prim)
        if not self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxRigidBodyAPI):
            lazy.pxr.PhysxSchema.PhysxRigidBodyAPI.Apply(self._prim)
        if not self._prim.HasAPI(lazy.pxr.UsdPhysics.MassAPI):
            lazy.pxr.UsdPhysics.MassAPI.Apply(self._prim)
        # Only create contact report api if we're not visual only
        if not self._visual_only:
            lazy.pxr.PhysxSchema.PhysxContactReportAPI(self._prim) if \
                self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxContactReportAPI) else \
                lazy.pxr.PhysxSchema.PhysxContactReportAPI.Apply(self._prim)
        # Store references to owned visual / collision meshes
        # We iterate over all children of this object's prim,
        # and grab any that are presumed to be meshes
        self.update_meshes()
        # Possibly set the mass / density
        if not self.has_collision_meshes:
            # A meta (virtual) link has no collision meshes; set a negligible mass and a zero density (ignored)
            self.mass = 1e-6
            self.density = 0.0
        elif "mass" in self._load_config and self._load_config["mass"] is not None:
            self.mass = self._load_config["mass"]
        if "density" in self._load_config and self._load_config["density"] is not None:
            self.density = self._load_config["density"]
        # Set the visual-only attribute
        # This automatically handles setting collisions / gravity appropriately
        self.visual_only = self._load_config["visual_only"] if \
            "visual_only" in self._load_config and self._load_config["visual_only"] is not None else False
        # Create contact sensor
        self._cs = lazy.omni.isaac.sensor._sensor.acquire_contact_sensor_interface()
        # self._create_contact_sensor()
    def _initialize(self):
        # Run super method first
        super()._initialize()
        # Initialize all owned meshes
        for mesh_group in (self._collision_meshes, self._visual_meshes):
            for mesh in mesh_group.values():
                mesh.initialize()
        # Get contact info first -- presumably primes the contact-report pipeline; return value
        # is intentionally discarded (TODO confirm)
        if self.contact_reporting_enabled:
            self._cs.get_rigid_body_raw_data(self._prim_path)
        # Grab handle to this rigid body and get name
        self.update_handles()
        # Body name is the last component of the prim path
        self._body_name = self.prim_path.split("/")[-1]
def remove(self):
# First remove the meshes
if self._collision_meshes is not None:
for collision_mesh in self._collision_meshes.values():
collision_mesh.remove()
# Make sure to clean up all pre-existing names for all visual_meshes
if self._visual_meshes is not None:
for visual_mesh in self._visual_meshes.values():
visual_mesh.remove()
# Then self
super().remove()
def update_meshes(self):
"""
Helper function to refresh owned visual and collision meshes. Useful for synchronizing internal data if
additional bodies are added manually
"""
# Make sure to clean up all pre-existing names for all collision_meshes
if self._collision_meshes is not None:
for collision_mesh in self._collision_meshes.values():
collision_mesh.remove_names()
# Make sure to clean up all pre-existing names for all visual_meshes
if self._visual_meshes is not None:
for visual_mesh in self._visual_meshes.values():
visual_mesh.remove_names()
self._collision_meshes, self._visual_meshes = dict(), dict()
prims_to_check = []
coms, vols = [], []
for prim in self._prim.GetChildren():
prims_to_check.append(prim)
for child in prim.GetChildren():
prims_to_check.append(child)
for prim in prims_to_check:
mesh_type = prim.GetPrimTypeInfo().GetTypeName()
if mesh_type in GEOM_TYPES:
mesh_name, mesh_path = prim.GetName(), prim.GetPrimPath().__str__()
mesh_prim = lazy.omni.isaac.core.utils.prims.get_prim_at_path(prim_path=mesh_path)
is_collision = mesh_prim.HasAPI(lazy.pxr.UsdPhysics.CollisionAPI)
mesh_kwargs = {"prim_path": mesh_path, "name": f"{self._name}:{'collision' if is_collision else 'visual'}_{mesh_name}"}
if is_collision:
mesh = CollisionGeomPrim(**mesh_kwargs)
# We also modify the collision mesh's contact and rest offsets, since omni's default values result
# in lightweight objects sometimes not triggering contacts correctly
mesh.set_contact_offset(m.DEFAULT_CONTACT_OFFSET)
mesh.set_rest_offset(m.DEFAULT_REST_OFFSET)
self._collision_meshes[mesh_name] = mesh
volume, com = get_mesh_volume_and_com(mesh_prim)
# We need to transform the volume and CoM from the mesh's local frame to the link's local frame
local_pos, local_orn = mesh.get_local_pose()
vols.append(volume * np.product(mesh.scale))
coms.append(T.quat2mat(local_orn) @ (com * mesh.scale) + local_pos)
# If the ratio between the max extent and min radius is too large (i.e. shape too oblong), use
# boundingCube approximation for the underlying collision approximation for GPU compatibility
if not check_extent_radius_ratio(mesh_prim):
log.warning(f"Got overly oblong collision mesh: {mesh.name}; use boundingCube approximation")
mesh.set_collision_approximation("boundingCube")
else:
self._visual_meshes[mesh_name] = VisualGeomPrim(**mesh_kwargs)
# If we have any collision meshes, we aggregate their center of mass and volume values to set the center of mass
# for this link
if len(coms) > 0:
com = (np.array(coms) * np.array(vols).reshape(-1, 1)).sum(axis=0) / np.sum(vols)
self.set_attribute("physics:centerOfMass", lazy.pxr.Gf.Vec3f(*com))
def enable_collisions(self):
"""
Enable collisions for this RigidPrim
"""
# Iterate through all owned collision meshes and toggle on their collisions
for col_mesh in self._collision_meshes.values():
col_mesh.collision_enabled = True
def disable_collisions(self):
"""
Disable collisions for this RigidPrim
"""
# Iterate through all owned collision meshes and toggle off their collisions
for col_mesh in self._collision_meshes.values():
col_mesh.collision_enabled = False
    def update_handles(self):
        """
        Updates all internal handles for this prim, in case they change since initialization
        """
        # We only do this for non-kinematic objects, because while the USD APIs for kinematic-only
        # and dynamic objects are the same, physx tensor APIs do NOT exist for kinematic-only
        # objects, meaning initializing the view actively breaks the view.
        if not self.kinematic_only:
            self._rigid_prim_view_direct.initialize(og.sim.physics_sim_view)
    def contact_list(self):
        """
        Get list of all current contacts with this rigid body

        Returns:
            list of CsRawData: raw contact info for this rigid body; empty if contact reporting is disabled
        """
        # Make sure we have the ability to grab contacts for this object
        contacts = []
        if self.contact_reporting_enabled:
            raw_data = self._cs.get_rigid_body_raw_data(self._prim_path)
            for c in raw_data:
                # convert handles to prim paths for comparison
                c = [*c] # CsRawData enforces body0 and body1 types to be ints, but we want strings
                c[2] = self._cs.decode_body_name(c[2])
                c[3] = self._cs.decode_body_name(c[3])
                contacts.append(CsRawData(*c))
        return contacts
    def set_linear_velocity(self, velocity):
        """
        Sets the linear velocity of the prim in stage.

        Args:
            velocity (np.ndarray): linear velocity to set the rigid prim to. Shape (3,).
        """
        # View API expects a batched (1, 3) array
        self._rigid_prim_view.set_linear_velocities(velocity[None, :])
    def get_linear_velocity(self):
        """
        Returns:
            np.ndarray: current linear velocity of the rigid prim. Shape (3,).
        """
        return self._rigid_prim_view.get_linear_velocities()[0]
    def set_angular_velocity(self, velocity):
        """
        Sets the angular velocity of the prim in stage.

        Args:
            velocity (np.ndarray): angular velocity to set the rigid prim to. Shape (3,).
        """
        # View API expects a batched (1, 3) array
        self._rigid_prim_view.set_angular_velocities(velocity[None, :])
    def get_angular_velocity(self):
        """
        Returns:
            np.ndarray: current angular velocity of the rigid prim. Shape (3,).
        """
        return self._rigid_prim_view.get_angular_velocities()[0]
    def set_position_orientation(self, position=None, orientation=None):
        """
        Sets the world-frame pose of this rigid prim.

        Args:
            position (None or 3-array): if specified, (x,y,z) world-frame position
            orientation (None or 4-array): if specified, (x,y,z,w) unit quaternion in world frame
        """
        # Invalidate kinematic-only object pose caches when new pose is set
        if self.kinematic_only:
            self.clear_kinematic_only_cache()
        if position is not None:
            position = np.asarray(position)[None, :]
        if orientation is not None:
            assert np.isclose(np.linalg.norm(orientation), 1, atol=1e-3), \
                f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
            # Reorder (x,y,z,w) -> (w,x,y,z), the convention the view API expects
            orientation = np.asarray(orientation)[None, [3, 0, 1, 2]]
        self._rigid_prim_view.set_world_poses(positions=position, orientations=orientation)
        # World poses changed, so any cached USD poses are stale
        PoseAPI.invalidate()
    def get_position_orientation(self):
        """
        Returns:
            2-tuple: (position, orientation) of this prim in the world frame, where orientation
                is an (x,y,z,w) quaternion
        """
        # Return cached pose if we're kinematic-only
        if self.kinematic_only and self._kinematic_world_pose_cache is not None:
            return self._kinematic_world_pose_cache
        pos, ori = self._rigid_prim_view.get_world_poses()
        assert np.isclose(np.linalg.norm(ori), 1, atol=1e-3), \
            f"{self.prim_path} orientation {ori} is not a unit quaternion."
        pos = pos[0]
        # Reorder (w,x,y,z) from the view API back to (x,y,z,w)
        ori = ori[0][[1, 2, 3, 0]]
        if self.kinematic_only:
            # Cache pose for future queries, since it cannot change until explicitly set again
            self._kinematic_world_pose_cache = (pos, ori)
        return pos, ori
    def set_local_pose(self, position=None, orientation=None):
        """
        Sets the pose of this rigid prim relative to its parent frame.

        Args:
            position (None or 3-array): if specified, (x,y,z) local position
            orientation (None or 4-array): if specified, (x,y,z,w) quaternion in the parent frame
        """
        # Invalidate kinematic-only object pose caches when new pose is set
        if self.kinematic_only:
            self.clear_kinematic_only_cache()
        if position is not None:
            position = np.asarray(position)[None, :]
        if orientation is not None:
            # Reorder (x,y,z,w) -> (w,x,y,z), the convention the view API expects
            orientation = np.asarray(orientation)[None, [3, 0, 1, 2]]
        self._rigid_prim_view.set_local_poses(position, orientation)
        # Local poses changed, so any cached USD poses are stale
        PoseAPI.invalidate()
    def get_local_pose(self):
        """
        Returns:
            2-tuple: (position, orientation) of this prim relative to its parent frame, where
                orientation is an (x,y,z,w) quaternion
        """
        # Return cached pose if we're kinematic-only
        if self.kinematic_only and self._kinematic_local_pose_cache is not None:
            return self._kinematic_local_pose_cache
        positions, orientations = self._rigid_prim_view.get_local_poses()
        positions = positions[0]
        # Reorder (w,x,y,z) from the view API back to (x,y,z,w)
        orientations = orientations[0][[1, 2, 3, 0]]
        if self.kinematic_only:
            # Cache pose for future queries, since it cannot change until explicitly set again
            self._kinematic_local_pose_cache = (positions, orientations)
        return positions, orientations
    @property
    def _rigid_prim_view(self):
        """
        Returns:
            None or RigidPrimView: validated view over this rigid prim, or None if not yet created
        """
        if self._rigid_prim_view_direct is None:
            return None
        # Validate that the if physics is running, the view is valid.
        if not self.kinematic_only and og.sim.is_playing() and self.initialized:
            assert self._rigid_prim_view_direct.is_physics_handle_valid() and \
                self._rigid_prim_view_direct._physics_view.check(), \
                "Rigid prim view must be valid if physics is running!"
            assert not (og.sim.is_playing() and not self._rigid_prim_view_direct.is_valid), \
                "Rigid prim view must be valid if physics is running!"
        return self._rigid_prim_view_direct
    @property
    def body_name(self):
        """
        Returns:
            str: Name of this body (last component of the prim path)
        """
        return self._body_name

    @property
    def collision_meshes(self):
        """
        Returns:
            dict: Dictionary mapping collision mesh names (str) to mesh prims (CollisionGeomPrim) owned by
                this rigid body
        """
        return self._collision_meshes

    @property
    def visual_meshes(self):
        """
        Returns:
            dict: Dictionary mapping visual mesh names (str) to mesh prims (VisualGeomPrim) owned by
                this rigid body
        """
        return self._visual_meshes

    @property
    def visual_only(self):
        """
        Returns:
            bool: Whether this link is a visual-only link (i.e.: no gravity or collisions applied)
        """
        return self._visual_only

    @property
    def has_collision_meshes(self):
        """
        Returns:
            bool: Whether this link has any collision mesh
        """
        return len(self._collision_meshes) > 0
    @visual_only.setter
    def visual_only(self, val):
        """
        Sets the visual only state of this link

        Args:
            val (bool): Whether this link should be a visual-only link (i.e.: no gravity or collisions applied)
        """
        # Set gravity and collisions based on value
        if val:
            self.disable_collisions()
            self.disable_gravity()
        else:
            self.enable_collisions()
            self.enable_gravity()
        # Also set the internal value
        self._visual_only = val
    @property
    def volume(self):
        """
        Note: Currently it doesn't support Capsule type yet

        Returns:
            float: total volume of all the collision meshes of the rigid body in m^3,
                computed in the world frame
        """
        # TODO (eric): revise this once omni exposes API to query volume of GeomPrims
        return sum(get_mesh_volume_and_com(collision_mesh.prim, world_frame=True)[0] for collision_mesh in self._collision_meshes.values())

    @volume.setter
    def volume(self, volume):
        # Volume is derived from the collision meshes and cannot be assigned directly
        raise NotImplementedError("Cannot set volume directly for an link!")
    @property
    def mass(self):
        """
        Returns:
            float: mass of the rigid body in kg. If no explicit mass is set (USD reports 0),
                falls back to volume * density.
        """
        mass = self._rigid_prim_view.get_masses()[0]
        # Fallback to analytical computation of volume * density
        if mass == 0:
            return self.volume * self.density
        return mass

    @mass.setter
    def mass(self, mass):
        """
        Args:
            mass (float): mass of the rigid body in kg.
        """
        self._rigid_prim_view.set_masses([mass])
    @property
    def density(self):
        """
        Returns:
            float: density of the rigid body in kg / m^3.
        """
        mass = self._rigid_prim_view.get_masses()[0]
        # We first check if the mass is specified, since mass overrides density. If so, density = mass / volume.
        # Otherwise, we try to directly grab the raw usd density value, and if that value does not exist,
        # we return 1000 since that is the canonical density assigned by omniverse
        if mass != 0.0:
            density = mass / self.volume
        else:
            density = self._rigid_prim_view.get_densities()[0]
            if density == 0.0:
                density = 1000.0
        return density

    @density.setter
    def density(self, density):
        """
        Args:
            density (float): density of the rigid body in kg / m^3.
        """
        self._rigid_prim_view.set_densities([density])
    @property
    def kinematic_only(self):
        """
        Returns:
            bool: Whether this object is a kinematic-only object (otherwise, it is a rigid body). A kinematic-only
                object is not subject to simulator dynamics, and remains fixed unless the user explicitly sets the
                body's pose / velocities. See https://docs.omniverse.nvidia.com/app_create/prod_extensions/ext_physics/rigid-bodies.html?highlight=rigid%20body%20enabled#kinematic-rigid-bodies
                for more information
        """
        # Reads the USD attribute written during _post_load
        return self.get_attribute("physics:kinematicEnabled")
    @property
    def solver_position_iteration_count(self) -> int:
        """
        Returns:
            int: How many position iterations to take per physics step by the physx solver
        """
        return self.get_attribute("physxRigidBody:solverPositionIterationCount")

    @solver_position_iteration_count.setter
    def solver_position_iteration_count(self, count: int):
        """
        Sets how many position iterations to take per physics step by the physx solver

        Args:
            count (int): How many position iterations to take per physics step by the physx solver
        """
        self.set_attribute("physxRigidBody:solverPositionIterationCount", count)
@property
def solver_velocity_iteration_count(self):
"""
Returns:
int: How many velocity iterations to take per physics step by the physx solver
"""
return self.get_attribute("physxRigidBody:solverVelocityIterationCount")
@solver_velocity_iteration_count.setter
def solver_velocity_iteration_count(self, count):
"""
Sets how many velocity iterations to take per physics step by the physx solver
Args:
count (int): How many velocity iterations to take per physics step by the physx solver
"""
self.set_attribute("physxRigidBody:solverVelocityIterationCount", count)
@property
def stabilization_threshold(self):
"""
Returns:
float: threshold for stabilizing this rigid body
"""
return self.get_attribute("physxRigidBody:stabilizationThreshold")
@stabilization_threshold.setter
def stabilization_threshold(self, threshold):
"""
Sets threshold for stabilizing this rigid body
Args:
threshold (float): stabilizing threshold
"""
self.set_attribute("physxRigidBody:stabilizationThreshold", threshold)
@property
def is_asleep(self):
"""
Returns:
bool: whether this rigid prim is asleep or not
"""
# If we're kinematic only, immediately return False since it doesn't follow the sleep / wake paradigm
return False if self.kinematic_only \
else og.sim.psi.is_sleeping(og.sim.stage_id, lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path))
    @property
    def sleep_threshold(self):
        """
        Returns:
            float: threshold for sleeping this rigid body
        """
        # Backed directly by the physxRigidBody USD attribute
        return self.get_attribute("physxRigidBody:sleepThreshold")
    @sleep_threshold.setter
    def sleep_threshold(self, threshold):
        """
        Sets threshold for sleeping this rigid body
        Args:
            threshold (float): Sleeping threshold
        """
        self.set_attribute("physxRigidBody:sleepThreshold", threshold)
    @property
    def ccd_enabled(self):
        """
        Returns:
            bool: whether CCD (continuous collision detection) is enabled or not for this link
        """
        return self.get_attribute("physxRigidBody:enableCCD")
    @ccd_enabled.setter
    def ccd_enabled(self, enabled):
        """
        Args:
            enabled (bool): whether CCD should be enabled or not for this link
        """
        self.set_attribute("physxRigidBody:enableCCD", enabled)
    @property
    def contact_reporting_enabled(self):
        """
        Returns:
            bool: Whether contact reporting is enabled for this rigid prim or not
        """
        # Contact reporting is considered enabled iff the PhysxContactReportAPI schema is applied to this prim
        return self._prim.HasAPI(lazy.pxr.PhysxSchema.PhysxContactReportAPI)
def _compute_points_on_convex_hull(self, visual):
"""
Returns:
np.ndarray or None: points on the convex hull of all points from child geom prims
"""
meshes = self._visual_meshes if visual else self._collision_meshes
points = []
for mesh in meshes.values():
mesh_points = mesh.points_in_parent_frame
if mesh_points is not None and len(mesh_points) > 0:
points.append(mesh_points)
if not points:
return None
points = np.concatenate(points, axis=0)
try:
hull = ConvexHull(points)
return points[hull.vertices, :]
except:
# Handle the case where a convex hull cannot be formed (e.g., collinear points)
# return all the points in this case
return points
@cached_property
def visual_boundary_points_local(self):
"""
Returns:
np.ndarray: local coords of points on the convex hull of all points from child geom prims
"""
return self._compute_points_on_convex_hull(visual=True)
@property
def visual_boundary_points_world(self):
"""
Returns:
np.ndarray: world coords of points on the convex hull of all points from child geom prims
"""
local_points = self.visual_boundary_points_local
if local_points is None:
return None
return self.transform_local_points_to_world(local_points)
@cached_property
def collision_boundary_points_local(self):
"""
Returns:
np.ndarray: local coords of points on the convex hull of all points from child geom prims
"""
return self._compute_points_on_convex_hull(visual=False)
@property
def collision_boundary_points_world(self):
"""
Returns:
np.ndarray: world coords of points on the convex hull of all points from child geom prims
"""
local_points = self.collision_boundary_points_local
if local_points is None:
return None
return self.transform_local_points_to_world(local_points)
@property
def aabb(self):
position, _ = self.get_position_orientation()
hull_points = self.collision_boundary_points_world
if hull_points is None:
# When there's no points on the collision meshes
return position, position
aabb_lo = np.min(hull_points, axis=0)
aabb_hi = np.max(hull_points, axis=0)
return aabb_lo, aabb_hi
@property
def aabb_extent(self):
"""
Get this xform's actual bounding box extent
Returns:
3-array: (x,y,z) bounding box
"""
min_corner, max_corner = self.aabb
return max_corner - min_corner
@property
def aabb_center(self):
"""
Get this xform's actual bounding box center
Returns:
3-array: (x,y,z) bounding box center
"""
min_corner, max_corner = self.aabb
return (max_corner + min_corner) / 2.0
@property
def visual_aabb(self):
hull_points = self.visual_boundary_points_world
assert hull_points is not None, "No visual boundary points found for this rigid prim"
# Calculate and return the AABB
aabb_lo = np.min(hull_points, axis=0)
aabb_hi = np.max(hull_points, axis=0)
return aabb_lo, aabb_hi
@property
def visual_aabb_extent(self):
"""
Get this xform's actual bounding box extent
Returns:
3-array: (x,y,z) bounding box
"""
min_corner, max_corner = self.visual_aabb
return max_corner - min_corner
@property
def visual_aabb_center(self):
"""
Get this xform's actual bounding box center
Returns:
3-array: (x,y,z) bounding box center
"""
min_corner, max_corner = self.visual_aabb
return (max_corner + min_corner) / 2.0
    def enable_gravity(self):
        """
        Enables gravity for this rigid body
        """
        # Delegates to the batched physx rigid prim view wrapping this single body
        self._rigid_prim_view.enable_gravities()
    def disable_gravity(self):
        """
        Disables gravity for this rigid body
        """
        self._rigid_prim_view.disable_gravities()
    def wake(self):
        """
        Enable physics for this rigid body
        """
        # Convert our prim path into the integer id the physx simulation interface expects
        prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path)
        og.sim.psi.wake_up(og.sim.stage_id, prim_id)
    def sleep(self):
        """
        Disable physics for this rigid body
        """
        prim_id = lazy.pxr.PhysicsSchemaTools.sdfPathToInt(self.prim_path)
        og.sim.psi.put_to_sleep(og.sim.stage_id, prim_id)
    def clear_kinematic_only_cache(self):
        """
        Clears the internal kinematic only cached pose. Useful if the parent prim's pose
        changes without explicitly calling this prim's pose setter
        """
        # Only meaningful for kinematic-only prims, whose poses are cached for repeated reads
        assert self.kinematic_only
        self._kinematic_local_pose_cache = None
        self._kinematic_world_pose_cache = None
def _dump_state(self):
# Grab pose from super class
state = super()._dump_state()
state["lin_vel"] = self.get_linear_velocity()
state["ang_vel"] = self.get_angular_velocity()
return state
def _load_state(self, state):
# Call super first
super()._load_state(state=state)
# Set velocities if not kinematic
self.set_linear_velocity(np.array(state["lin_vel"]))
self.set_angular_velocity(np.array(state["ang_vel"]))
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
return np.concatenate([
state_flat,
state["lin_vel"],
state["ang_vel"],
]).astype(float)
def _deserialize(self, state):
# Call supermethod first
state_dic, idx = super()._deserialize(state=state)
# We deserialize deterministically by knowing the order of values -- lin_vel, ang_vel
state_dic["lin_vel"] = state[idx: idx+3]
state_dic["ang_vel"] = state[idx + 3: idx + 6]
return state_dic, idx + 6
| 30,312 | Python | 36.562577 | 192 | 0.611342 |
StanfordVL/OmniGibson/omnigibson/prims/cloth_prim.py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros, gm
from omnigibson.prims.geom_prim import GeomPrim
from omnigibson.systems import get_system
import omnigibson.utils.transform_utils as T
from omnigibson.utils.sim_utils import CsRawData
from omnigibson.utils.usd_utils import array_to_vtarray, mesh_prim_to_trimesh_mesh, sample_mesh_keypoints
from omnigibson.utils.python_utils import classproperty
import omnigibson as og
import numpy as np
from collections.abc import Iterable
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Subsample cloth particle points to boost performance
m.N_CLOTH_KEYPOINTS = 1000  # number of particle keypoints to sample per cloth (see _post_load)
m.KEYPOINT_COVERAGE_THRESHOLD = 0.75  # min fraction of the true AABB volume the keypoints' AABB must cover
m.N_CLOTH_KEYFACES = 500  # number of mesh faces to sample as keyfaces
class ClothPrim(GeomPrim):
"""
Provides high level functions to deal with a cloth prim and its attributes/ properties.
If there is an prim present at the path, it will use it. Otherwise, a new XForm prim at
the specified prim path will be created.
Notes: if the prim does not already have a cloth api applied to it before it is loaded,
it will apply it.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. Note that this is only needed if the prim does not already exist at
@prim_path -- it will be ignored if it already exists. For this joint prim, the below values can be
specified:
scale (None or float or 3-array): If specified, sets the scale for this object. A single number corresponds
to uniform scaling along the x,y,z axes, whereas a 3-array specifies per-axis scaling.
mass (None or float): If specified, mass of this body in kg
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Internal vars stored
self._centroid_idx = None
self._keypoint_idx = None
self._keyface_idx = None
# Run super init
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
def _post_load(self):
# run super first
super()._post_load()
# Make sure flatcache is not being used -- if so, raise an error, since we lose most of our needed functionality
# (such as R/W to specific particle states) when flatcache is enabled
assert not gm.ENABLE_FLATCACHE, "Cannot use flatcache with ClothPrim!"
self._mass_api = lazy.pxr.UsdPhysics.MassAPI(self._prim) if self._prim.HasAPI(lazy.pxr.UsdPhysics.MassAPI) else \
lazy.pxr.UsdPhysics.MassAPI.Apply(self._prim)
# Possibly set the mass / density
if "mass" in self._load_config and self._load_config["mass"] is not None:
self.mass = self._load_config["mass"]
# Clothify this prim, which is assumed to be a mesh
ClothPrim.cloth_system.clothify_mesh_prim(mesh_prim=self._prim, remesh=self._load_config.get("remesh", True))
# Track generated particle count
positions = self.compute_particle_positions()
self._n_particles = len(positions)
# Sample mesh keypoints / keyvalues and sanity check the AABB of these subsampled points vs. the actual points
success = False
for i in range(10):
self._keypoint_idx, self._keyface_idx = sample_mesh_keypoints(
mesh_prim=self._prim,
n_keypoints=m.N_CLOTH_KEYPOINTS,
n_keyfaces=m.N_CLOTH_KEYFACES,
seed=i,
)
keypoint_positions = positions[self._keypoint_idx]
keypoint_aabb = keypoint_positions.min(axis=0), keypoint_positions.max(axis=0)
true_aabb = positions.min(axis=0), positions.max(axis=0)
overlap_vol = max(min(true_aabb[1][0], keypoint_aabb[1][0]) - max(true_aabb[0][0], keypoint_aabb[0][0]), 0) * \
max(min(true_aabb[1][1], keypoint_aabb[1][1]) - max(true_aabb[0][1], keypoint_aabb[0][1]), 0) * \
max(min(true_aabb[1][2], keypoint_aabb[1][2]) - max(true_aabb[0][2], keypoint_aabb[0][2]), 0)
true_vol = np.product(true_aabb[1] - true_aabb[0])
if true_vol == 0.0 or overlap_vol / true_vol > m.KEYPOINT_COVERAGE_THRESHOLD:
success = True
break
assert success, f"Did not adequately subsample keypoints for cloth {self.name}!"
# Compute centroid particle idx based on AABB
aabb_min, aabb_max = np.min(positions, axis=0), np.max(positions, axis=0)
aabb_center = (aabb_min + aabb_max) / 2.0
dists = np.linalg.norm(positions - aabb_center.reshape(1, 3), axis=-1)
self._centroid_idx = np.argmin(dists)
    def _initialize(self):
        super()._initialize()
        # TODO (eric): hacky way to get cloth rendering to work (otherwise, there exist some rendering artifacts).
        # Toggling primvars:isVolume True -> False appears to force the renderer to refresh this prim -- confirm
        self._prim.CreateAttribute("primvars:isVolume", lazy.pxr.Sdf.ValueTypeNames.Bool, False).Set(True)
        self._prim.GetAttribute("primvars:isVolume").Set(False)
        # Store the default position of the points in the local frame, so `reset()` can restore them later
        self._default_positions = np.array(self.get_attribute(attr="points"))
@property
def visual_aabb(self):
return self.aabb
@property
def visual_aabb_extent(self):
return self.aabb_extent
@property
def visual_aabb_center(self):
return self.aabb_center
    @classproperty
    def cloth_system(cls):
        # Shared handle to the globally-registered "cloth" particle system
        return get_system("cloth")
@property
def n_particles(self):
"""
Returns:
int: Number of particles owned by this cloth prim
"""
return self._n_particles
@property
def kinematic_only(self):
"""
Returns:
bool: Whether this object is a kinematic-only object. For ClothPrim, always return False.
"""
return False
def compute_particle_positions(self, idxs=None):
"""
Compute individual particle positions for this cloth prim
Args:
idxs (n-array or None): If set, will only calculate the requested indexed particle state
Returns:
np.array: (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
cartesian coordinates relative to the world frame
"""
t, r = self.get_position_orientation()
r = T.quat2mat(r)
s = self.scale
# Don't copy to save compute, since we won't be returning a reference to the underlying object anyways
p_local = np.array(self.get_attribute(attr="points"), copy=False)
p_local = p_local[idxs] if idxs is not None else p_local
p_world = (r @ (p_local * s).T).T + t
return p_world
def set_particle_positions(self, positions, idxs=None):
"""
Sets individual particle positions for this cloth prim
Args:
positions (n-array): (N, 3) numpy array, where each of the N particles' positions are expressed in (x,y,z)
cartesian coordinates relative to the world frame
idxs (n-array or None): If set, will only set the requested indexed particle state
"""
n_expected = self._n_particles if idxs is None else len(idxs)
assert len(positions) == n_expected, \
f"Got mismatch in particle setting size: {len(positions)}, vs. number of expected particles {n_expected}!"
r = T.quat2mat(self.get_orientation())
t = self.get_position()
s = self.scale
p_local = (r.T @ (positions - t).T).T / s
# Fill the idxs if requested
if idxs is not None:
p_local_old = np.array(self.get_attribute(attr="points"))
p_local_old[idxs] = p_local
p_local = p_local_old
self.set_attribute(attr="points", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(p_local))
@property
def keypoint_idx(self):
"""
Returns:
n-array: (N,) array specifying the keypoint particle IDs
"""
return self._keypoint_idx
@property
def keyface_idx(self):
"""
Returns:
n-array: (N,) array specifying the keyface IDs
"""
return self._keyface_idx
@property
def faces(self):
"""
Grabs particle indexes defining each of the faces for this cloth prim
Returns:
np.array: (N, 3) numpy array, where each of the N faces are defined by the 3 particle indices
corresponding to that face's vertices
"""
return np.array(self.get_attribute("faceVertexIndices")).reshape(-1, 3)
@property
def keyfaces(self):
"""
Grabs particle indexes defining each of the keyfaces for this cloth prim.
Total number of keyfaces is m.N_CLOTH_KEYFACES
Returns:
np.array: (N, 3) numpy array, where each of the N keyfaces are defined by the 3 particle indices
corresponding to that face's vertices
"""
return self.faces[self._keyface_idx]
@property
def keypoint_particle_positions(self):
"""
Grabs individual keypoint particle positions for this cloth prim.
Total number of keypoints is m.N_CLOTH_KEYPOINTS
Returns:
np.array: (N, 3) numpy array, where each of the N keypoint particles' positions are expressed in (x,y,z)
cartesian coordinates relative to the world frame
"""
return self.compute_particle_positions(idxs=self._keypoint_idx)
@property
def centroid_particle_position(self):
"""
Grabs the individual particle that was pre-computed to be the closest to the centroid of this cloth prim.
Returns:
np.array: centroid particle's (x,y,z) cartesian coordinates relative to the world frame
"""
return self.compute_particle_positions(idxs=[self._centroid_idx])[0]
@property
def particle_velocities(self):
"""
Grabs individual particle velocities for this cloth prim
Returns:
np.array: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
# the velocities attribute is w.r.t the world frame already
return np.array(self.get_attribute(attr="velocities"))
@particle_velocities.setter
def particle_velocities(self, vel):
"""
Set the particle velocities of this cloth
Args:
np.array: (N, 3) numpy array, where each of the N particles' velocities are expressed in (x,y,z)
cartesian coordinates with respect to the world frame
"""
assert vel.shape[0] == self._n_particles, \
f"Got mismatch in particle setting size: {vel.shape[0]}, vs. number of particles {self._n_particles}!"
# the velocities attribute is w.r.t the world frame already
self.set_attribute(attr="velocities", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(vel))
def compute_face_normals(self, face_ids=None):
"""
Grabs individual face normals for this cloth prim
Args:
face_ids (None or n-array): If specified, list of face IDs whose corresponding normals should be computed
If None, all faces will be used
Returns:
np.array: (N, 3) numpy array, where each of the N faces' normals are expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
faces = self.faces if face_ids is None else self.faces[face_ids]
points = self.compute_particle_positions(idxs=faces.flatten()).reshape(-1, 3, 3)
return self.compute_face_normals_from_particle_positions(positions=points)
def compute_face_normals_from_particle_positions(self, positions):
"""
Grabs individual face normals for this cloth prim
Args:
positions (n-array): (N, 3, 3) array specifying the per-face particle positions
Returns:
np.array: (N, 3) numpy array, where each of the N faces' normals are expressed in (x,y,z)
cartesian coordinates with respect to the world frame.
"""
# Shape [F, 3]
v1 = positions[:, 2, :] - positions[:, 0, :]
v2 = positions[:, 1, :] - positions[:, 0, :]
normals = np.cross(v1, v2)
return normals / np.linalg.norm(normals, axis=1).reshape(-1, 1)
    def contact_list(self, keypoints_only=True):
        """
        Get list of all current contacts with this cloth body
        Args:
            keypoints_only (bool): If True, will only check contact with this cloth's keypoints
        Returns:
            list of CsRawData: raw contact info for this cloth body
        """
        contacts = []
        def report_hit(hit):
            # NOTE: `pos` is the loop variable from the enclosing scope below (late-bound closure),
            # so each invocation records the particle position currently being queried
            contacts.append(CsRawData(
                time=0.0,  # dummy value
                dt=0.0,  # dummy value
                body0=self.prim_path,
                body1=hit.rigid_body,
                position=pos,
                normal=np.zeros(3),  # dummy value
                impulse=np.zeros(3),  # dummy value
            ))
            return True
        positions = self.keypoint_particle_positions if keypoints_only else self.compute_particle_positions()
        for pos in positions:
            # Sphere overlap query centered at each particle, radius = the system's particle contact offset
            og.sim.psqi.overlap_sphere(ClothPrim.cloth_system.particle_contact_offset, pos, report_hit, False)
        return contacts
    def update_handles(self):
        # Cloth prims expose no physx view handles to refresh, so this override is a deliberate no-op
        pass
    @property
    def volume(self):
        # Volume enclosed by the cloth mesh; falls back to the convex hull's volume
        # when the mesh is not watertight (trimesh's `is_volume` check)
        mesh = mesh_prim_to_trimesh_mesh(self.prim, include_normals=False, include_texcoord=False, world_frame=True)
        return mesh.volume if mesh.is_volume else mesh.convex_hull.volume
    @volume.setter
    def volume(self, volume):
        raise NotImplementedError("Cannot set volume directly for a link!")
    @property
    def mass(self):
        """
        Returns:
            float: mass of the rigid body in kg.
        """
        # We have to read the mass directly in the cloth prim (no rigid prim view exists for cloth)
        return self._mass_api.GetMassAttr().Get()
    @mass.setter
    def mass(self, mass):
        """
        Args:
            mass (float): mass of the rigid body in kg.
        """
        # We have to set the mass directly in the cloth prim
        self._mass_api.GetMassAttr().Set(mass)
    @property
    def density(self):
        # Mass is authored directly on cloth (see `mass`), so density is intentionally unsupported
        raise NotImplementedError("Cannot get density for ClothPrim")
    @density.setter
    def density(self, density):
        raise NotImplementedError("Cannot set density for ClothPrim")
@property
def body_name(self):
"""
Returns:
str: Name of this body
"""
return self.prim_path.split("/")[-1]
def get_linear_velocity(self):
"""
Returns:
np.ndarray: current average linear velocity of the particles of the cloth prim. Shape (3,).
"""
return np.array(self._prim.GetAttribute("velocities").Get()).mean(axis=0)
def get_angular_velocity(self):
"""
Returns:
np.ndarray: zero vector as a placeholder because a cloth prim doesn't have an angular velocity. Shape (3,).
"""
return np.zeros(3)
def set_linear_velocity(self, velocity):
"""
Sets the linear velocity of all the particles of the cloth prim.
Args:
velocity (np.ndarray): linear velocity to set all the particles of the cloth prim to. Shape (3,).
"""
vel = self.particle_velocities
vel[:] = velocity
self.particle_velocities = vel
def set_angular_velocity(self, velocity):
"""
Simply returns because a cloth prim doesn't have an angular velocity
Args:
velocity (np.ndarray): linear velocity to set all the particles of the cloth prim to. Shape (3,).
"""
return
def wake(self):
# TODO (eric): Just a pass through for now.
return
    # The spring parameters below are thin passthroughs to the physxAutoParticleCloth
    # USD attributes authored on this prim.
    @property
    def bend_stiffness(self):
        """
        Returns:
            float: spring bend stiffness of the particle system
        """
        return self.get_attribute("physxAutoParticleCloth:springBendStiffness")
    @bend_stiffness.setter
    def bend_stiffness(self, bend_stiffness):
        """
        Args:
            bend_stiffness (float): spring bend stiffness of the particle system
        """
        self.set_attribute("physxAutoParticleCloth:springBendStiffness", bend_stiffness)
    @property
    def damping(self):
        """
        Returns:
            float: spring damping of the particle system
        """
        return self.get_attribute("physxAutoParticleCloth:springDamping")
    @damping.setter
    def damping(self, damping):
        """
        Args:
            damping (float): spring damping of the particle system
        """
        self.set_attribute("physxAutoParticleCloth:springDamping", damping)
    @property
    def shear_stiffness(self):
        """
        Returns:
            float: spring shear stiffness of the particle system
        """
        return self.get_attribute("physxAutoParticleCloth:springShearStiffness")
    @shear_stiffness.setter
    def shear_stiffness(self, shear_stiffness):
        """
        Args:
            shear_stiffness (float): spring shear stiffness of the particle system
        """
        self.set_attribute("physxAutoParticleCloth:springShearStiffness", shear_stiffness)
    @property
    def stretch_stiffness(self):
        """
        Returns:
            float: spring stretch stiffness of the particle system
        """
        return self.get_attribute("physxAutoParticleCloth:springStretchStiffness")
    @stretch_stiffness.setter
    def stretch_stiffness(self, stretch_stiffness):
        """
        Args:
            stretch_stiffness (float): spring stretch stiffness of the particle system
        """
        self.set_attribute("physxAutoParticleCloth:springStretchStiffness", stretch_stiffness)
    @property
    def particle_group(self):
        """
        Returns:
            int: Particle group this instancer belongs to
        """
        return self.get_attribute(attr="physxParticle:particleGroup")
    @particle_group.setter
    def particle_group(self, group):
        """
        Args:
            group (int): Particle group this instancer belongs to
        """
        self.set_attribute(attr="physxParticle:particleGroup", val=group)
def _dump_state(self):
# Run super first
state = super()._dump_state()
state["particle_group"] = self.particle_group
state["n_particles"] = self.n_particles
state["particle_positions"] = self.compute_particle_positions()
state["particle_velocities"] = self.particle_velocities
return state
def _load_state(self, state):
# Run super first
super()._load_state(state=state)
# Sanity check the identification number and particle group
assert self.particle_group == state["particle_group"], f"Got mismatch in particle group for this cloth " \
f"when loading state! Should be: {self.particle_group}, got: {state['particle_group']}."
# Set values appropriately
self._n_particles = state["n_particles"]
# Make sure the loaded state is a numpy array, it could have been accidentally casted into a list during
# JSON-serialization
self.particle_velocities = np.array(state["particle_velocities"]) if not isinstance(state["particle_velocities"], np.ndarray) else state["particle_velocities"]
self.set_particle_positions(positions=np.array(state["particle_positions"]) if not isinstance(state["particle_positions"], np.ndarray) else state["particle_positions"])
def _serialize(self, state):
# Run super first
state_flat = super()._serialize(state=state)
return np.concatenate([
state_flat,
[state["particle_group"], state["n_particles"]],
state["particle_positions"].reshape(-1),
state["particle_velocities"].reshape(-1),
]).astype(float)
def _deserialize(self, state):
# Run super first
state_dict, idx = super()._deserialize(state=state)
particle_group = int(state[idx])
n_particles = int(state[idx + 1])
# Sanity check the identification number
assert self.particle_group == particle_group, f"Got mismatch in particle group for this particle " \
f"instancer when deserializing state! Should be: {self.particle_group}, got: {particle_group}."
# De-compress from 1D array
state_dict["particle_group"] = particle_group
state_dict["n_particles"] = n_particles
# Process remaining keys and reshape automatically
keys = ("particle_positions", "particle_velocities")
sizes = ((n_particles, 3), (n_particles, 3))
idx += 2
for key, size in zip(keys, sizes):
length = np.product(size)
state_dict[key] = state[idx: idx + length].reshape(size)
idx += length
return state_dict, idx
def reset(self):
"""
Reset the points to their default positions in the local frame, and also zeroes out velocities
"""
if self.initialized:
self.set_attribute(attr="points", val=lazy.pxr.Vt.Vec3fArray.FromNumpy(self._default_positions))
self.particle_velocities = np.zeros((self._n_particles, 3))
| 22,311 | Python | 36.311037 | 176 | 0.622653 |
StanfordVL/OmniGibson/omnigibson/prims/xform_prim.py | from collections.abc import Iterable
import numpy as np
import omnigibson as og
from omnigibson.macros import gm
import omnigibson.lazy as lazy
from omnigibson.prims.prim_base import BasePrim
from omnigibson.prims.material_prim import MaterialPrim
from omnigibson.utils.transform_utils import quat2euler
from omnigibson.utils.usd_utils import PoseAPI
import omnigibson.utils.transform_utils as T
from scipy.spatial.transform import Rotation as R
from omnigibson.macros import gm
import trimesh.transformations
class XFormPrim(BasePrim):
"""
Provides high level functions to deal with an Xform prim and its attributes/ properties.
If there is an Xform prim present at the path, it will use it. Otherwise, a new XForm prim at
the specified prim path will be created when self.load(...) is called.
Note: the prim will have "xformOp:orient", "xformOp:translate" and "xformOp:scale" only post init,
unless it is a non-root articulation link.
Args:
prim_path (str): prim path of the Prim to encapsulate or create.
name (str): Name for the object. Names need to be unique per scene.
load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
loading this prim at runtime. For this xform prim, the below values can be specified:
scale (None or float or 3-array): If specified, sets the scale for this object. A single number corresponds
to uniform scaling along the x,y,z axes, whereas a 3-array specifies per-axis scaling.
"""
def __init__(
self,
prim_path,
name,
load_config=None,
):
# Other values that will be filled in at runtime
self._binding_api = None
self._material = None
self._collision_filter_api = None
self.original_scale = None
# Run super method
super().__init__(
prim_path=prim_path,
name=name,
load_config=load_config,
)
    def _load(self):
        # Define a fresh (empty) Xform prim on the stage at this prim path
        return og.sim.stage.DefinePrim(self._prim_path, "Xform")
    def _post_load(self):
        # run super first
        super()._post_load()
        # Make sure all xforms have pose and scaling info
        self._set_xform_properties()
        # Cache the original scale from the USD so that when EntityPrim sets the scale for each link (Rigid/ClothPrim),
        # the new scale is with respect to the original scale. XFormPrim's scale always matches the scale in the USD.
        self.original_scale = np.array(self.get_attribute("xformOp:scale"))
        # Create collision filter API (re-use an existing applied API if present, otherwise apply it)
        self._collision_filter_api = lazy.pxr.UsdPhysics.FilteredPairsAPI(self._prim) if \
            self._prim.HasAPI(lazy.pxr.UsdPhysics.FilteredPairsAPI) else lazy.pxr.UsdPhysics.FilteredPairsAPI.Apply(self._prim)
        # Create binding API (same re-use-or-apply pattern)
        self._binding_api = lazy.pxr.UsdShade.MaterialBindingAPI(self.prim) if \
            self._prim.HasAPI(lazy.pxr.UsdShade.MaterialBindingAPI) else lazy.pxr.UsdShade.MaterialBindingAPI.Apply(self.prim)
        # Grab the attached material if it exists, registering ourselves as a user so it is
        # kept alive / cleaned up correctly (see `remove`)
        if self.has_material():
            material_prim_path = self._binding_api.GetDirectBinding().GetMaterialPath().pathString
            material_name = f"{self.name}:material"
            material = MaterialPrim.get_material(prim_path=material_prim_path, name=material_name)
            assert material.loaded, f"Material prim path {material_prim_path} doesn't exist on stage."
            material.add_user(self)
            self._material = material
        # Optionally set the scale and visibility
        if "scale" in self._load_config and self._load_config["scale"] is not None:
            self.scale = self._load_config["scale"]
def remove(self):
# Remove the material prim if one exists
if self._material is not None:
self._material.remove_user(self)
# Remove the prim
super().remove()
    def _set_xform_properties(self):
        """
        Normalizes this prim's xform op stack to exactly [translate, orient, scale], stripping any
        rotate*/transform ops, while preserving the prim's current world pose.
        """
        # Capture the pose BEFORE touching the op stack so we can restore it afterwards
        current_position, current_orientation = self.get_position_orientation()
        # Alternative xform op representations we do NOT want -- these get stripped below
        properties_to_remove = [
            "xformOp:rotateX",
            "xformOp:rotateXZY",
            "xformOp:rotateY",
            "xformOp:rotateYXZ",
            "xformOp:rotateYZX",
            "xformOp:rotateZ",
            "xformOp:rotateZYX",
            "xformOp:rotateZXY",
            "xformOp:rotateXYZ",
            "xformOp:transform",
        ]
        prop_names = self.prim.GetPropertyNames()
        xformable = lazy.pxr.UsdGeom.Xformable(self.prim)
        xformable.ClearXformOpOrder()
        # TODO: wont be able to delete props for non root links on articulated objects
        for prop_name in prop_names:
            if prop_name in properties_to_remove:
                self.prim.RemoveProperty(prop_name)
        # For each of scale / translate / orient: re-use the existing op if it is already
        # authored, otherwise add a fresh double-precision one
        if "xformOp:scale" not in prop_names:
            xform_op_scale = xformable.AddXformOp(lazy.pxr.UsdGeom.XformOp.TypeScale, lazy.pxr.UsdGeom.XformOp.PrecisionDouble, "")
            xform_op_scale.Set(lazy.pxr.Gf.Vec3d([1.0, 1.0, 1.0]))
        else:
            xform_op_scale = lazy.pxr.UsdGeom.XformOp(self._prim.GetAttribute("xformOp:scale"))
        if "xformOp:translate" not in prop_names:
            xform_op_translate = xformable.AddXformOp(
                lazy.pxr.UsdGeom.XformOp.TypeTranslate, lazy.pxr.UsdGeom.XformOp.PrecisionDouble, ""
            )
        else:
            xform_op_translate = lazy.pxr.UsdGeom.XformOp(self._prim.GetAttribute("xformOp:translate"))
        if "xformOp:orient" not in prop_names:
            xform_op_rot = xformable.AddXformOp(lazy.pxr.UsdGeom.XformOp.TypeOrient, lazy.pxr.UsdGeom.XformOp.PrecisionDouble, "")
        else:
            xform_op_rot = lazy.pxr.UsdGeom.XformOp(self._prim.GetAttribute("xformOp:orient"))
        # Canonical op order: translate, then orient, then scale
        xformable.SetXformOpOrder([xform_op_translate, xform_op_rot, xform_op_scale])
        # Restore the pose captured above, then verify the round-trip was lossless
        self.set_position_orientation(position=current_position, orientation=current_orientation)
        new_position, new_orientation = self.get_position_orientation()
        # Compare rotations as matrices so equivalent quaternions (q vs -q) are treated as equal
        r1 = R.from_quat(current_orientation).as_matrix()
        r2 = R.from_quat(new_orientation).as_matrix()
        # Make sure setting is done correctly
        assert np.allclose(new_position, current_position, atol=1e-4) and np.allclose(r1, r2, atol=1e-4), \
            f"{self.prim_path}: old_pos: {current_position}, new_pos: {new_position}, " \
            f"old_orn: {current_orientation}, new_orn: {new_orientation}"
def has_material(self):
"""
Returns:
bool: True if there is a visual material bound to this prim. False otherwise
"""
material_path = self._binding_api.GetDirectBinding().GetMaterialPath().pathString
return False if material_path == "" else True
def set_position_orientation(self, position=None, orientation=None):
"""
Sets prim's pose with respect to the world frame
Args:
position (None or 3-array): if specified, (x,y,z) position in the world frame
Default is None, which means left unchanged.
orientation (None or 4-array): if specified, (x,y,z,w) quaternion orientation in the world frame.
Default is None, which means left unchanged.
"""
current_position, current_orientation = self.get_position_orientation()
position = current_position if position is None else np.array(position, dtype=float)
orientation = current_orientation if orientation is None else np.array(orientation, dtype=float)
assert np.isclose(np.linalg.norm(orientation), 1, atol=1e-3), \
f"{self.prim_path} desired orientation {orientation} is not a unit quaternion."
my_world_transform = T.pose2mat((position, orientation))
parent_prim = lazy.omni.isaac.core.utils.prims.get_prim_parent(self._prim)
parent_path = str(parent_prim.GetPath())
parent_world_transform = PoseAPI.get_world_pose_with_scale(parent_path)
local_transform = np.linalg.inv(parent_world_transform) @ my_world_transform
self.set_local_pose(*T.mat2pose(local_transform))
def get_position_orientation(self):
"""
Gets prim's pose with respect to the world's frame.
Returns:
2-tuple:
- 3-array: (x,y,z) position in the world frame
- 4-array: (x,y,z,w) quaternion orientation in the world frame
"""
return PoseAPI.get_world_pose(self._prim_path)
def set_position(self, position):
"""
Set this prim's position with respect to the world frame
Args:
position (3-array): (x,y,z) global cartesian position to set
"""
self.set_position_orientation(position=position)
def get_position(self):
"""
Get this prim's position with respect to the world frame
Returns:
3-array: (x,y,z) global cartesian position of this prim
"""
return self.get_position_orientation()[0]
def set_orientation(self, orientation):
"""
Set this prim's orientation with respect to the world frame
Args:
orientation (4-array): (x,y,z,w) global quaternion orientation to set
"""
self.set_position_orientation(orientation=orientation)
def get_orientation(self):
"""
Get this prim's orientation with respect to the world frame
Returns:
4-array: (x,y,z,w) global quaternion orientation of this prim
"""
return self.get_position_orientation()[1]
def get_rpy(self):
"""
Get this prim's orientation with respect to the world frame
Returns:
3-array: (roll, pitch, yaw) global euler orientation of this prim
"""
return quat2euler(self.get_orientation())
def get_2d_orientation(self):
"""
Get this prim's orientation on the XY plane of the world frame. This is obtained by
projecting the forward vector onto the XY plane and then computing the angle.
"""
fwd = R.from_quat(self.get_orientation()).apply([1, 0, 0])
fwd[2] = 0.
# If the object is facing close to straight up, then we can't compute a 2D orientation
# in that case, we return zero.
if np.linalg.norm(fwd) < 1e-4:
return 0.
fwd /= np.linalg.norm(fwd)
return np.arctan2(fwd[1], fwd[0])
def get_local_pose(self):
"""
Gets prim's pose with respect to the prim's local frame (its parent frame)
Returns:
2-tuple:
- 3-array: (x,y,z) position in the local frame
- 4-array: (x,y,z,w) quaternion orientation in the local frame
"""
pos, ori = lazy.omni.isaac.core.utils.xforms.get_local_pose(self.prim_path)
return pos, ori[[1, 2, 3, 0]]
    def set_local_pose(self, position=None, orientation=None):
        """
        Sets prim's pose with respect to the local frame (the prim's parent frame).

        Args:
            position (None or 3-array): if specified, (x,y,z) position in the local frame of the prim
                (with respect to its parent prim). Default is None, which means left unchanged.
            orientation (None or 4-array): if specified, (x,y,z,w) quaternion orientation in the local frame of the prim
                (with respect to its parent prim). Default is None, which means left unchanged.
        """
        properties = self.prim.GetPropertyNames()
        if position is not None:
            position = lazy.pxr.Gf.Vec3d(*np.array(position, dtype=float))
            # Missing xformOp is logged (not raised); set_attribute below would then fail to apply
            if "xformOp:translate" not in properties:
                lazy.carb.log_error(
                    "Translate property needs to be set for {} before setting its position".format(self.name)
                )
            self.set_attribute("xformOp:translate", position)
        if orientation is not None:
            # Reorder (x,y,z,w) -> (w,x,y,z), the convention USD Gf quaternions use
            orientation = np.array(orientation, dtype=float)[[3, 0, 1, 2]]
            if "xformOp:orient" not in properties:
                lazy.carb.log_error(
                    "Orient property needs to be set for {} before setting its orientation".format(self.name)
                )
            xform_op = self._prim.GetAttribute("xformOp:orient")
            # Match the attribute's declared precision (float vs double quaternion)
            if xform_op.GetTypeName() == "quatf":
                rotq = lazy.pxr.Gf.Quatf(*orientation)
            else:
                rotq = lazy.pxr.Gf.Quatd(*orientation)
            xform_op.Set(rotq)
        # Any cached world poses are now stale
        PoseAPI.invalidate()
        if gm.ENABLE_FLATCACHE:
            # If flatcache is on, make sure the USD local pose is synced to the fabric local pose.
            # Ideally we should call usdrt's set local pose directly, but there is no such API.
            # The only available API is SetLocalXformFromUsd, so we update USD first, and then sync to fabric.
            xformable_prim = lazy.usdrt.Rt.Xformable(lazy.omni.isaac.core.utils.prims.get_prim_at_path(self.prim_path, fabric=True))
            assert not xformable_prim.HasWorldXform(), "Fabric's world pose is set for a non-rigid prim which is unexpected. Please report this."
            xformable_prim.SetLocalXformFromUsd()
        return
def get_world_scale(self):
"""
Gets prim's scale with respect to the world's frame.
Returns:
np.ndarray: scale applied to the prim's dimensions in the world frame. shape is (3, ).
"""
prim_tf = lazy.pxr.UsdGeom.Xformable(self._prim).ComputeLocalToWorldTransform(lazy.pxr.Usd.TimeCode.Default())
transform = lazy.pxr.Gf.Transform()
transform.SetMatrix(prim_tf)
return np.array(transform.GetScale())
@property
def scaled_transform(self):
"""
Returns the scaled transform of this prim.
"""
return PoseAPI.get_world_pose_with_scale(self._prim_path)
def transform_local_points_to_world(self, points):
return trimesh.transformations.transform_points(points, self.scaled_transform)
@property
def scale(self):
"""
Gets prim's scale with respect to the local frame (the parent's frame).
Returns:
np.ndarray: scale applied to the prim's dimensions in the local frame. shape is (3, ).
"""
return np.array(self.get_attribute("xformOp:scale"))
@scale.setter
def scale(self, scale):
"""
Sets prim's scale with respect to the local frame (the prim's parent frame).
Args:
scale (float or np.ndarray): scale to be applied to the prim's dimensions. shape is (3, ).
Defaults to None, which means left unchanged.
"""
scale = np.array(scale, dtype=float) if isinstance(scale, Iterable) else np.ones(3) * scale
scale = lazy.pxr.Gf.Vec3d(*scale)
properties = self.prim.GetPropertyNames()
if "xformOp:scale" not in properties:
lazy.carb.log_error("Scale property needs to be set for {} before setting its scale".format(self.name))
self.set_attribute("xformOp:scale", scale)
@property
def material(self):
"""
Returns:
None or MaterialPrim: The bound material to this prim, if there is one
"""
return self._material
@material.setter
def material(self, material):
"""
Set the material @material for this prim. This will also bind the material to this prim
Args:
material (MaterialPrim): Material to bind to this prim
"""
self._binding_api.Bind(lazy.pxr.UsdShade.Material(material.prim), bindingStrength=lazy.pxr.UsdShade.Tokens.weakerThanDescendants)
self._material = material
def add_filtered_collision_pair(self, prim):
"""
Adds a collision filter pair with another prim
Args:
prim (XFormPrim): Another prim to filter collisions with
"""
# Add to both this prim's and the other prim's filtered pair
self._collision_filter_api.GetFilteredPairsRel().AddTarget(prim.prim_path)
prim._collision_filter_api.GetFilteredPairsRel().AddTarget(self._prim_path)
def remove_filtered_collision_pair(self, prim):
"""
Removes a collision filter pair with another prim
Args:
prim (XFormPrim): Another prim to remove filter collisions with
"""
# Add to both this prim's and the other prim's filtered pair
self._collision_filter_api.GetFilteredPairsRel().RemoveTarget(prim.prim_path)
prim._collision_filter_api.GetFilteredPairsRel().RemoveTarget(self._prim_path)
def _dump_state(self):
pos, ori = self.get_position_orientation()
return dict(pos=pos, ori=ori)
def _load_state(self, state):
self.set_position_orientation(np.array(state["pos"]), np.array(state["ori"]))
def _serialize(self, state):
return np.concatenate([state["pos"], state["ori"]]).astype(float)
def _deserialize(self, state):
# We deserialize deterministically by knowing the order of values -- pos, ori
return dict(pos=state[0:3], ori=state[3:7]), 7
| 17,280 | Python | 41.669136 | 145 | 0.631771 |
StanfordVL/OmniGibson/omnigibson/prims/joint_prim.py | from collections.abc import Iterable
import numpy as np
import omnigibson as og
import omnigibson.lazy as lazy
from omnigibson.macros import create_module_macros
from omnigibson.prims.prim_base import BasePrim
from omnigibson.utils.usd_utils import PoseAPI, create_joint
from omnigibson.utils.constants import JointType, JointAxis
from omnigibson.utils.python_utils import assert_valid_key
import omnigibson.utils.transform_utils as T
from omnigibson.controllers.controller_base import ControlType
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Fallback joint property values, used when the USD-specified values are unset or effectively infinite
m.DEFAULT_MAX_POS = 1000.0
m.DEFAULT_MAX_PRISMATIC_VEL = 1.0
m.DEFAULT_MAX_REVOLUTE_VEL = 15.0
m.DEFAULT_MAX_EFFORT = 100.0
# Magnitudes beyond these thresholds are treated as unset / infinite
m.INF_POS_THRESHOLD = 1e5
m.INF_VEL_THRESHOLD = 1e5
m.INF_EFFORT_THRESHOLD = 1e10
# Per-DOF name suffixes for the 6 possible joint components (3 translational, 3 rotational)
m.COMPONENT_SUFFIXES = ["x", "y", "z", "rx", "ry", "rz"]
# TODO: Split into non-articulated / articulated Joint Prim classes?
# TODO: Add logic for non Prismatic / Revolute joints (D6, spherical)
class JointPrim(BasePrim):
    """
    Provides high level functions to deal with a joint prim and its attributes/ properties.
    If there is an joint prim present at the path, it will use it. Otherwise, a new joint prim at
    the specified prim path will be created when self.load(...) is called.

    Note: the prim will have "xformOp:orient", "xformOp:translate" and "xformOp:scale" only post init,
    unless it is a non-root articulation link.

    Args:
        prim_path (str): prim path of the Prim to encapsulate or create.
        name (str): Name for the object. Names need to be unique per scene.
        load_config (None or dict): If specified, should contain keyword-mapped values that are relevant for
            loading this prim at runtime. For this joint prim, the below values can be specified:

            joint_type (str): If specified, should be the joint type to create. Valid options are:
                {"Joint", "FixedJoint", "PrismaticJoint", "RevoluteJoint", "SphericalJoint"}
                (equivalently, one of JointType)
            body0 (None or str): If specified, should be the absolute prim path to the parent body that this joint
                is connected to. None can also be valid, which corresponds to cases where only a single body may be
                specified (e.g.: fixed joints)
            body1 (None or str): If specified, should be the absolute prim path to the child body that this joint
                is connected to. None can also be valid, which corresponds to cases where only a single body may be
                specified (e.g.: fixed joints)
        articulation_view (None or ArticulationView): if specified, should be the view of the pre-existing
            articulation that this joint belongs to. This will enable additional features for this joint prim,
            e.g.: polling / setting this joint's state. Note that in this case, the joint must already exist
            prior to this class instance. Default is None, which corresponds to a non-articulated joint.
    """
    def __init__(
        self,
        prim_path,
        name,
        load_config=None,
        articulation_view=None,
    ):
        """
        Args:
            prim_path (str): prim path of the Prim to encapsulate or create.
            name (str): Name for the object. Names need to be unique per scene.
            load_config (None or dict): If specified, keyword-mapped values relevant for loading this
                prim at runtime (see the class docstring for the supported keys).
            articulation_view (None or ArticulationView): if specified, view of the pre-existing
                articulation that owns this joint; enables polling / setting this joint's state.
                Default is None, which corresponds to a non-articulated joint.
        """
        # Grab dynamic control reference and set properties
        self._articulation_view_direct = articulation_view
        # Other values that will be filled in at runtime
        self._joint_type = None  # JointType; inferred from the prim's type name in _post_load
        self._control_type = None  # ControlType; inferred from the drive gains in _initialize
        self._driven = None  # bool; whether this prim has a physics DriveAPI applied
        # The following values will only be valid if this joint is part of an articulation
        self._n_dof = None  # The number of degrees of freedom this joint provides
        self._joint_idx = None  # The index of this joint in the parent articulation's joint array
        self._joint_dof_offset = None  # The starting index of the DOFs for this joint in the parent articulation's DOF array
        self._joint_name = None  # The name of this joint in the parent's articulation tree
        # Run super method
        super().__init__(
            prim_path=prim_path,
            name=name,
            load_config=load_config,
        )
def _load(self):
# Make sure this joint isn't articulated
assert not self.articulated, "Joint cannot be created, since this is an articulated joint! We are assuming" \
"the joint already exists in the stage."
# Define a joint prim at the current stage
prim = create_joint(
prim_path=self._prim_path,
joint_type=self._load_config.get("joint_type", JointType.JOINT),
)
return prim
def _post_load(self):
# run super first
super()._post_load()
# Check whether this joint is driven or not
self._driven = self._prim.HasAPI(lazy.pxr.UsdPhysics.DriveAPI)
# Add joint state API if this is a revolute or prismatic joint
self._joint_type = JointType.get_type(self._prim.GetTypeName().split("Physics")[-1])
if self.is_single_dof:
# We MUST already have the joint state API defined beforehand in the USD
# This is because physx complains if we try to add physx APIs AFTER a simulation step occurs, which
# happens because joint prims are usually created externally during an EntityPrim's initialization phase
assert self._prim.HasAPI(lazy.pxr.PhysxSchema.JointStateAPI), \
"Revolute or Prismatic joints must already have JointStateAPI added!"
# Possibly set the bodies
if "body0" in self._load_config and self._load_config["body0"] is not None:
self.body0 = self._load_config["body0"]
if "body1" in self._load_config and self._load_config["body1"] is not None:
self.body1 = self._load_config["body1"]
    def _initialize(self):
        # Always run super first
        super()._initialize()
        # Update the joint indices etc.
        self.update_handles()
        # Get control type
        if self.articulated:
            control_types = []
            stiffnesses, dampings = self._articulation_view.get_gains(joint_indices=self.dof_indices)
            for i, (kp, kd) in enumerate(zip(stiffnesses[0], dampings[0])):
                # Infer control type based on whether kp and kd are 0 or not, as well as whether this joint is driven or not
                # TODO: Maybe assert mutual exclusiveness here?
                if not self._driven:
                    control_type = ControlType.NONE
                elif kp == 0.0:
                    # kp == 0: pure effort control if kd is also 0, otherwise velocity control
                    control_type = ControlType.EFFORT if kd == 0.0 else ControlType.VELOCITY
                else:
                    # Nonzero kp implies position control
                    control_type = ControlType.POSITION
                control_types.append(control_type)
            # Make sure all the control types are the same -- if not, we had something go wrong!
            assert len(set(control_types)) == 1, f"Got multiple control types for this single joint: {control_types}"
            self._control_type = control_types[0]
    def update_handles(self):
        """
        Updates all internal handles for this prim, in case they change since initialization
        """
        # It's a bit tricky to get the joint index here. We need to find the first dof at this prim path
        # first, then get the corresponding joint index from that dof offset.
        self._joint_dof_offset = list(self._articulation_view._dof_paths[0]).index(self._prim_path)
        joint_dof_offsets = self._articulation_view._metadata.joint_dof_offsets
        # Note that we are finding the last occurrence of the dof offset, since that corresponds to the joint index
        # The first occurrence can be a fixed link that is 0-dof, meaning the offset will be repeated.
        self._joint_idx = next(i for i in reversed(range(len(joint_dof_offsets))) if joint_dof_offsets[i] == self._joint_dof_offset)
        # Cache this joint's name and DOF count from the articulation metadata
        self._joint_name = self._articulation_view._metadata.joint_names[self._joint_idx]
        self._n_dof = self._articulation_view._metadata.joint_dof_counts[self._joint_idx]
def set_control_type(self, control_type, kp=None, kd=None):
"""
Sets the control type for this joint.
Args:
control_type (ControlType): What type of control to use for this joint.
Valid options are: {ControlType.POSITION, ControlType.VELOCITY, ControlType.EFFORT}
kp (None or float): If specified, sets the kp gain value for this joint. Should only be set if
setting ControlType.POSITION
kd (None or float): If specified, sets the kd gain value for this joint. Should only be set if
setting ControlType.VELOCITY
"""
# Sanity check inputs
assert_valid_key(key=control_type, valid_keys=ControlType.VALID_TYPES, name="control type")
if control_type == ControlType.POSITION:
assert kp is not None, "kp gain must be specified for setting POSITION control!"
assert kd is None, "kd gain must not be specified for setting POSITION control!"
kd = 0.0
elif control_type == ControlType.VELOCITY:
assert kp is None, "kp gain must not be specified for setting VELOCITY control!"
assert kd is not None, "kd gain must be specified for setting VELOCITY control!"
kp = 0.0
else: # Efforts
assert kp is None, "kp gain must not be specified for setting EFFORT control!"
assert kd is None, "kd gain must not be specified for setting EFFORT control!"
kp, kd = 0.0, 0.0
# Set values
kps = np.full((1, self._n_dof), kp)
kds = np.full((1, self._n_dof), kd)
self._articulation_view.set_gains(kps=kps, kds=kds, joint_indices=self.dof_indices)
# Update control type
self._control_type = control_type
    @property
    def _articulation_view(self):
        """
        Returns:
            None or ArticulationView: validated view of the owning articulation, or None if this joint
                is not part of an articulation
        """
        if self._articulation_view_direct is None:
            return None
        # Validate that the articulation view is initialized and that if physics is running, the
        # view is valid.
        if og.sim.is_playing() and self.initialized:
            assert self._articulation_view_direct.is_physics_handle_valid() and \
                self._articulation_view_direct._physics_view.check(), \
                "Articulation view must be valid if physics is running!"
        return self._articulation_view_direct
@property
def body0(self):
"""
Gets this joint's body0 relationship.
Returns:
None or str: Absolute prim path to the body prim to set as this joint's parent link, or None if there is
no body0 specified.
"""
targets = self._prim.GetRelationship("physics:body0").GetTargets()
return targets[0].__str__() if len(targets) > 0 else None
@body0.setter
def body0(self, body0):
"""
Sets this joint's body0 relationship.
Args:
body0 (str): Absolute prim path to the body prim to set as this joint's parent link.
"""
# Make sure prim path is valid
assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body0), f"Invalid body0 path specified: {body0}"
self._prim.GetRelationship("physics:body0").SetTargets([lazy.pxr.Sdf.Path(body0)])
@property
def body1(self):
"""
Gets this joint's body1 relationship.
Returns:
None or str: Absolute prim path to the body prim to set as this joint's child link, or None if there is
no body1 specified.
"""
targets = self._prim.GetRelationship("physics:body1").GetTargets()
return targets[0].__str__()
@body1.setter
def body1(self, body1):
"""
Sets this joint's body1 relationship.
Args:
body1 (str): Absolute prim path to the body prim to set as this joint's child link.
"""
# Make sure prim path is valid
assert lazy.omni.isaac.core.utils.prims.is_prim_path_valid(body1), f"Invalid body1 path specified: {body1}"
self._prim.GetRelationship("physics:body1").SetTargets([lazy.pxr.Sdf.Path(body1)])
    @property
    def local_orientation(self):
        """
        Returns:
            4-array: (x,y,z,w) local quaternion orientation of this joint, relative to the parent link
        """
        # Grab local rotation to parent and child links
        # (gf quaternions are (w,x,y,z); reindexing [1,2,3,0] converts to the (x,y,z,w) convention used here)
        quat0 = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(self.get_attribute("physics:localRot0"))[[1, 2, 3, 0]]
        quat1 = lazy.omni.isaac.core.utils.rotations.gf_quat_to_np_array(self.get_attribute("physics:localRot1"))[[1, 2, 3, 0]]
        # Invert the child link relationship, and multiply the two rotations together to get the final rotation
        return T.quat_multiply(quaternion1=T.quat_inverse(quat1), quaternion0=quat0)
@property
def joint_name(self):
"""
Returns:
str: Name of this joint
"""
return self._joint_name
@property
def joint_type(self):
"""
Gets this joint's type (ignoring the "Physics" prefix)
Returns:
JointType: Joint's type. Should be one corresponding to:
{JOINT_PRISMATIC, JOINT_REVOLUTE, JOINT_FIXED, JOINT_SPHERICAL}
"""
return self._joint_type
@property
def driven(self):
"""
Returns:
bool: Whether this joint can be driven by a motor or not
"""
return self._driven
@property
def control_type(self):
"""
Gets the control types for this joint
Returns:
ControlType: control type for this joint
"""
return self._control_type
@property
def max_velocity(self):
"""
Gets this joint's maximum velocity
Returns:
float: maximum velocity for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
# We either return the raw value or a default value if there is no max specified
raw_vel = self._articulation_view.get_max_velocities(joint_indices=self.dof_indices)[0][0]
default_max_vel = m.DEFAULT_MAX_REVOLUTE_VEL if self.joint_type == JointType.JOINT_REVOLUTE else m.DEFAULT_MAX_PRISMATIC_VEL
return default_max_vel if raw_vel is None or np.abs(raw_vel) > m.INF_VEL_THRESHOLD else raw_vel
@max_velocity.setter
def max_velocity(self, vel):
"""
Sets this joint's maximum velocity
Args:
vel (float): Velocity to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_max_velocities(np.array([[vel]]), joint_indices=self.dof_indices)
@property
def max_effort(self):
"""
Gets this joint's maximum effort
Returns:
float: maximum effort for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
# We either return the raw value or a default value if there is no max specified
raw_effort = self._articulation_view.get_max_efforts(joint_indices=self.dof_indices)[0][0]
return m.DEFAULT_MAX_EFFORT if raw_effort is None or np.abs(raw_effort) > m.INF_EFFORT_THRESHOLD else raw_effort
@max_effort.setter
def max_effort(self, effort):
"""
Sets this joint's maximum effort
Args:
effort (float): effort to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_max_efforts(np.array([[effort]]), joint_indices=self.dof_indices)
@property
def stiffness(self):
"""
Gets this joint's stiffness
Returns:
float: stiffness for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
stiffnesses = self._articulation_view.get_gains(joint_indices=self.dof_indices)[0]
return stiffnesses[0][0]
@stiffness.setter
def stiffness(self, stiffness):
"""
Sets this joint's stiffness
Args:
stiffness (float): stiffness to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_gains(kps=np.array([[stiffness]]), joint_indices=self.dof_indices)
@property
def damping(self):
"""
Gets this joint's damping
Returns:
float: damping for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
dampings = self._articulation_view.get_gains(joint_indices=self.dof_indices)[1]
return dampings[0][0]
@damping.setter
def damping(self, damping):
"""
Sets this joint's damping
Args:
damping (float): damping to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_gains(kds=np.array([[damping]]), joint_indices=self.dof_indices)
@property
def friction(self):
"""
Gets this joint's friction
Returns:
float: friction for this joint
"""
return self._articulation_view.get_friction_coefficients(joint_indices=self.dof_indices)[0][0] \
if og.sim.is_playing() else self.get_attribute("physxJoint:jointFriction")
@friction.setter
def friction(self, friction):
"""
Sets this joint's friction
Args:
friction (float): friction to set
"""
self.set_attribute("physxJoint:jointFriction", friction)
if og.sim.is_playing():
self._articulation_view.set_friction_coefficients(np.array([[friction]]), joint_indices=self.dof_indices)
@property
def lower_limit(self):
"""
Gets this joint's lower_limit
Returns:
float: lower_limit for this joint
"""
# TODO: Add logic for non Prismatic / Revolute joints (D6, spherical)
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
# We either return the raw value or a default value if there is no max specified
raw_pos_lower, raw_pos_upper = self._articulation_view.get_joint_limits(joint_indices=self.dof_indices).flatten()
return -m.DEFAULT_MAX_POS \
if raw_pos_lower is None or raw_pos_lower == raw_pos_upper or np.abs(raw_pos_lower) > m.INF_POS_THRESHOLD \
else raw_pos_lower
@lower_limit.setter
def lower_limit(self, lower_limit):
"""
Sets this joint's lower_limit
Args:
lower_limit (float): lower_limit to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_joint_limits(np.array([[lower_limit, self.upper_limit]]), joint_indices=self.dof_indices)
@property
def upper_limit(self):
"""
Gets this joint's upper_limit
Returns:
float: upper_limit for this joint
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
# We either return the raw value or a default value if there is no max specified
raw_pos_lower, raw_pos_upper = self._articulation_view.get_joint_limits(joint_indices=self.dof_indices).flatten()
return m.DEFAULT_MAX_POS \
if raw_pos_upper is None or raw_pos_lower == raw_pos_upper or np.abs(raw_pos_upper) > m.INF_POS_THRESHOLD \
else raw_pos_upper
@upper_limit.setter
def upper_limit(self, upper_limit):
"""
Sets this joint's upper_limit
Args:
upper_limit (float): upper_limit to set
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
self._articulation_view.set_joint_limits(np.array([[self.lower_limit, upper_limit]]), joint_indices=self.dof_indices)
@property
def has_limit(self):
"""
Returns:
bool: True if this joint has a limit, else False
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
return np.all(np.abs(self._articulation_view.get_joint_limits(joint_indices=self.dof_indices)) < m.INF_POS_THRESHOLD)
@property
def axis(self):
"""
Gets this joint's axis
Returns:
str: axis for this joint, one of "X", "Y, "Z"
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
return self.get_attribute("physics:axis")
@axis.setter
def axis(self, axis):
"""
Sets this joint's axis
Args:
str: axis for this joint, one of "X", "Y, "Z"
"""
# Only support revolute and prismatic joints for now
assert self.is_single_dof, "Joint properties only supported for a single DOF currently!"
assert axis in JointAxis, f"Invalid joint axis specified: {axis}!"
self.set_attribute("physics:axis", axis)
@property
def n_dof(self):
"""
Returns:
int: Number of degrees of freedom this joint has
"""
return self._n_dof
@property
def dof_indices(self):
"""
Returns:
list of int: Indices of this joint's DOFs in the parent articulation's DOF array
"""
assert self.articulated, "Can only get DOF indices for articulated joints!"
return list(range(self._joint_dof_offset, self._joint_dof_offset + self._n_dof))
@property
def articulated(self):
"""
Returns:
bool: Whether this joint is articulated or not
"""
return self._articulation_view is not None
@property
def is_revolute(self):
"""
Returns:
bool: Whether this joint is revolute or not
"""
return self._joint_type == JointType.JOINT_REVOLUTE
@property
def is_single_dof(self):
"""
Returns:
bool: Whether this joint has a single DOF or not
"""
return self._joint_type in {JointType.JOINT_REVOLUTE, JointType.JOINT_PRISMATIC}
def get_state(self, normalized=False):
"""
(pos, vel, effort) state of this joint
Args:
normalized (bool): If True, will return normalized state of this joint, where pos, vel, and effort values
are in range [-1, 1].
Returns:
3-tuple:
- n-array: position of this joint, where n = number of DOF for this joint
- n-array: velocity of this joint, where n = number of DOF for this joint
- n-array: effort of this joint, where n = number of DOF for this joint
"""
# Make sure we only call this if we're an articulated joint
assert self.articulated, "Can only get state for articulated joints!"
# Grab raw states
pos = self._articulation_view.get_joint_positions(joint_indices=self.dof_indices)[0]
vel = self._articulation_view.get_joint_velocities(joint_indices=self.dof_indices)[0]
effort = self._articulation_view.get_applied_joint_efforts(joint_indices=self.dof_indices)[0]
# Potentially normalize if requested
if normalized:
pos, vel, effort = self._normalize_pos(pos), self._normalize_vel(vel), self._normalize_effort(effort)
return pos, vel, effort
def get_target(self, normalized=False):
"""
(pos, vel) target of this joint
Args:
normalized (bool): If True, will return normalized target of this joint
Returns:
2-tuple:
- n-array: target position of this joint, where n = number of DOF for this joint
- n-array: target velocity of this joint, where n = number of DOF for this joint
"""
# Make sure we only call this if we're an articulated joint
assert self.articulated, "Can only get targets for articulated joints!"
# Grab raw states
targets = self._articulation_view.get_applied_actions()
pos = targets.joint_positions[0][self.dof_indices]
vel = targets.joint_velocities[0][self.dof_indices]
# Potentially normalize if requested
if normalized:
pos, vel = self._normalize_pos(pos), self._normalize_vel(vel)
return pos, vel
def _normalize_pos(self, pos):
"""
Normalizes raw joint positions @pos
Args:
pos (n-array): n-DOF raw positions to normalize
Returns:
n-array: n-DOF normalized positions in range [-1, 1]
"""
low, high = self.lower_limit, self.upper_limit
mean = (low + high) / 2.0
magnitude = (high - low) / 2.0
pos = (pos - mean) / magnitude
return pos
def _denormalize_pos(self, pos):
"""
De-normalizes joint positions @pos
Args:
pos (n-array): n-DOF normalized positions in range [-1, 1]
Returns:
n-array: n-DOF de-normalized positions
"""
low, high = self.lower_limit, self.upper_limit
mean = (low + high) / 2.0
magnitude = (high - low) / 2.0
pos = pos * magnitude + mean
return pos
def _normalize_vel(self, vel):
"""
Normalizes raw joint velocities @vel
Args:
vel (n-array): n-DOF raw velocities to normalize
Returns:
n-array: n-DOF normalized velocities in range [-1, 1]
"""
return vel / self.max_velocity
def _denormalize_vel(self, vel):
"""
De-normalizes joint velocities @vel
Args:
vel (n-array): n-DOF normalized velocities in range [-1, 1]
Returns:
n-array: n-DOF de-normalized velocities
"""
return vel * self.max_velocity
def _normalize_effort(self, effort):
"""
Normalizes raw joint effort @effort
Args:
effort (n-array): n-DOF raw effort to normalize
Returns:
n-array: n-DOF normalized effort in range [-1, 1]
"""
return effort / self.max_effort
def _denormalize_effort(self, effort):
"""
De-normalizes joint effort @effort
Args:
effort (n-array): n-DOF normalized effort in range [-1, 1]
Returns:
n-array: n-DOF de-normalized effort
"""
return effort * self.max_effort
def set_pos(self, pos, normalized=False, drive=False):
"""
Set the position of this joint in metric space
Args:
pos (float or n-array of float): Set the position(s) for this joint. Can be a single float or 1-array of
float if the joint only has a single DOF, otherwise it should be an n-array of floats.
normalized (bool): Whether the input is normalized to [-1, 1] (in this case, the values will be
de-normalized first before being executed). Default is False
drive (bool): Whether the joint should be driven naturally via its motor to the position being set or
whether it should be instantaneously set. Default is False, corresponding to an
instantaneous setting of the position
"""
# Sanity checks -- make sure we're the correct control type if we're setting a target and that we're articulated
assert self.articulated, "Can only set position for articulated joints!"
if drive:
assert self._driven, "Can only use set_pos with drive=True if this joint is driven!"
assert self._control_type == ControlType.POSITION, \
"Trying to set joint position target, but control type is not position!"
# Standardize input
pos = np.array([pos]) if self._n_dof == 1 and not isinstance(pos, Iterable) else np.array(pos)
# Potentially de-normalize if the input is normalized
if normalized:
pos = self._denormalize_pos(pos)
# Set the DOF(s) in this joint
if not drive:
self._articulation_view.set_joint_positions(positions=pos, joint_indices=self.dof_indices)
PoseAPI.invalidate()
# Also set the target
self._articulation_view.set_joint_position_targets(positions=pos, joint_indices=self.dof_indices)
def set_vel(self, vel, normalized=False, drive=False):
"""
Set the velocity of this joint in metric space
Args:
vel (float or n-array of float): Set the velocity(s) for this joint. Can be a single float or 1-array of
float if the joint only has a single DOF, otherwise it should be an n-array of floats.
normalized (bool): Whether the input is normalized to [-1, 1] (in this case, the values will be
de-normalized first before being executed). Default is False
drive (bool): Whether the joint should be driven naturally via its motor to the velocity being set or
whether it should be instantaneously set. Default is False, corresponding to an
instantaneous setting of the velocity
"""
# Sanity checks -- make sure we're the correct control type if we're setting a target and that we're articulated
assert self.articulated, "Can only set velocity for articulated joints!"
if drive:
assert self._driven, "Can only use set_vel with drive=True if this joint is driven!"
assert self._control_type == ControlType.VELOCITY, \
f"Trying to set joint velocity target for joint {self.name}, but control type is not velocity!"
# Standardize input
vel = np.array([vel]) if self._n_dof == 1 and not isinstance(vel, Iterable) else np.array(vel)
# Potentially de-normalize if the input is normalized
if normalized:
vel = self._denormalize_vel(vel)
# Set the DOF(s) in this joint
if not drive:
self._articulation_view.set_joint_velocities(velocities=vel, joint_indices=self.dof_indices)
# Also set the target
self._articulation_view.set_joint_velocity_targets(velocities=vel, joint_indices=self.dof_indices)
def set_effort(self, effort, normalized=False):
"""
Set the effort of this joint in metric space
Args:
effort (float or n-array of float): Set the effort(s) for this joint. Can be a single float or 1-array of
float if the joint only has a single DOF, otherwise it should be an n-array of floats.
normalized (bool): Whether the input is normalized to [-1, 1] (in this case, the values will be
de-normalized first before being executed). Default is False
"""
# Sanity checks -- make sure that we're articulated (no control type check like position and velocity
# because we can't set effort targets) and that we're driven
assert self.articulated, "Can only set effort for articulated joints!"
# Standardize input
effort = np.array([effort]) if self._n_dof == 1 and not isinstance(effort, Iterable) else np.array(effort)
# Potentially de-normalize if the input is normalized
if normalized:
effort = self._denormalize_effort(effort)
# Set the DOF(s) in this joint
self._articulation_view.set_joint_efforts(efforts=effort, joint_indices=self.dof_indices)
def keep_still(self):
"""
Zero out all velocities for this prim
"""
self.set_vel(np.zeros(self.n_dof))
# If not driven, set torque equal to zero as well
if not self.driven:
self.set_effort(np.zeros(self.n_dof))
def _dump_state(self):
pos, vel, effort = self.get_state() if self.articulated else (np.array([]), np.array([]), np.array([]))
target_pos, target_vel = self.get_target() if self.articulated else (np.array([]), np.array([]))
return dict(
pos=pos,
vel=vel,
effort=effort,
target_pos=target_pos,
target_vel=target_vel,
)
def _load_state(self, state):
if self.articulated:
self.set_pos(state["pos"], drive=False)
self.set_vel(state["vel"], drive=False)
if self.driven:
self.set_effort(state["effort"])
if self._control_type == ControlType.POSITION:
self.set_pos(state["target_pos"], drive=True)
elif self._control_type == ControlType.VELOCITY:
self.set_vel(state["target_vel"], drive=True)
def _serialize(self, state):
return np.concatenate([
state["pos"],
state["vel"],
state["effort"],
state["target_pos"],
state["target_vel"],
]).astype(float)
def _deserialize(self, state):
# We deserialize deterministically by knowing the order of values -- pos, vel, effort
return dict(
pos=state[0:self.n_dof],
vel=state[self.n_dof:2*self.n_dof],
effort=state[2*self.n_dof:3*self.n_dof],
target_pos=state[3*self.n_dof:4*self.n_dof],
target_vel=state[4*self.n_dof:5*self.n_dof],
), 5*self.n_dof
def duplicate(self, prim_path):
# Cannot directly duplicate a joint prim
raise NotImplementedError("Cannot directly duplicate a joint prim!")
| 34,732 | Python | 39.623392 | 132 | 0.62179 |
StanfordVL/OmniGibson/omnigibson/configs/turtlebot_nav.yaml | env:
action_frequency: 60 # (int): environment executes action at the action_frequency rate
physics_frequency: 60 # (int): physics frequency (1 / physics_timestep for physx)
device: null # (None or str): specifies the device to be used if running on the gpu with torch backend
automatic_reset: false # (bool): whether to automatic reset after an episode finishes
  flatten_action_space: false             # (bool): whether to flatten the action space as a single 1D-array
flatten_obs_space: false # (bool): whether the observation space should be flattened when generated
use_external_obs: false # (bool): Whether to use external observations or not
initial_pos_z_offset: 0.1
external_sensors: null # (None or list): If specified, list of sensor configurations for external sensors to add. Should specify sensor "type" and any additional kwargs to instantiate the sensor. Each entry should be the kwargs passed to @create_sensor, in addition to position, orientation
render:
viewer_width: 1280
viewer_height: 720
scene:
type: InteractiveTraversableScene
scene_model: Rs_int
trav_map_resolution: 0.1
default_erosion_radius: 0.0
trav_map_with_objects: true
num_waypoints: 1
waypoint_resolution: 0.2
load_object_categories: null
not_load_object_categories: null
load_room_types: null
load_room_instances: null
seg_map_resolution: 0.1
scene_source: OG
include_robots: true
robots:
- type: Turtlebot
obs_modalities: [scan, rgb, depth]
scale: 1.0
self_collision: false
action_normalize: true
action_type: continuous
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 128
image_width: 128
ScanSensor:
sensor_kwargs:
min_range: 0.05
max_range: 10.0
controller_config:
base:
name: DifferentialDriveController
objects: []
task:
type: PointNavigationTask
robot_idn: 0
floor: 0
initial_pos: null
initial_quat: null
goal_pos: null
goal_tolerance: 0.36 # turtlebot bodywidth
goal_in_polar: false
path_range: [1.0, 10.0]
visualize_goal: true
visualize_path: false
n_vis_waypoints: 25
reward_type: geodesic
termination_config:
max_collisions: 500
max_steps: 500
fall_height: 0.03
reward_config:
r_potential: 1.0
r_collision: 0.1
r_pointgoal: 10.0
| 2,478 | YAML | 31.618421 | 307 | 0.668281 |
StanfordVL/OmniGibson/omnigibson/configs/fetch_behavior.yaml | env:
action_frequency: 60 # (int): environment executes action at the action_frequency rate
physics_frequency: 60 # (int): physics frequency (1 / physics_timestep for physx)
device: null # (None or str): specifies the device to be used if running on the gpu with torch backend
automatic_reset: false # (bool): whether to automatic reset after an episode finishes
flatten_action_space: false # (bool): whether to flatten the action space as a sinle 1D-array
flatten_obs_space: false # (bool): whether the observation space should be flattened when generated
use_external_obs: false # (bool): Whether to use external observations or not
initial_pos_z_offset: 0.1
external_sensors: # (None or list): If specified, list of sensor configurations for external sensors to add. Should specify sensor "type" and any additional kwargs to instantiate the sensor. Each entry should be the kwargs passed to @create_sensor, in addition to position, orientation
- sensor_type: VisionSensor
modalities: [rgb, depth]
sensor_kwargs:
image_height: 128
image_width: 128
local_position: [0, 0, 1.0]
local_orientation: [0.707, 0.0, 0.0, 0.707]
render:
viewer_width: 1280
viewer_height: 720
scene:
type: InteractiveTraversableScene
scene_model: Rs_int
trav_map_resolution: 0.1
default_erosion_radius: 0.0
trav_map_with_objects: true
num_waypoints: 1
waypoint_resolution: 0.2
not_load_object_categories: null
load_room_types: null
load_room_instances: null
seg_map_resolution: 0.1
scene_source: OG
include_robots: true
robots:
- type: Fetch
obs_modalities: [scan, rgb, depth]
scale: 1.0
self_collision: false
action_normalize: true
action_type: continuous
grasping_mode: physical
rigid_trunk: false
default_trunk_offset: 0.365
default_arm_pose: diagonal30
default_reset_mode: tuck
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 128
image_width: 128
ScanSensor:
sensor_kwargs:
min_range: 0.05
max_range: 10.0
controller_config:
base:
name: DifferentialDriveController
arm_0:
name: InverseKinematicsController
gripper_0:
name: MultiFingerGripperController
mode: binary
camera:
name: JointController
use_delta_commands: False
objects: []
task:
type: BehaviorTask
activity_name: prepare_sea_salt_soak
activity_definition_id: 0
activity_instance_id: 0
predefined_problem: null
online_object_sampling: false
debug_object_sampling: null
highlight_task_relevant_objects: false
termination_config:
max_steps: 500
reward_config:
r_potential: 1.0
| 2,875 | YAML | 32.057471 | 307 | 0.662261 |
StanfordVL/OmniGibson/omnigibson/configs/default_cfg.yaml | env:
action_frequency: 60 # (int): environment executes action at the action_frequency rate
physics_frequency: 60 # (int): physics frequency (1 / physics_timestep for physx)
device: null # (None or str): specifies the device to be used if running on the gpu with torch backend
render:
viewer_width: 1280
viewer_height: 720
scene:
type: Scene
robots: []
objects: []
task:
type: DummyTask
| 452 | YAML | 22.842104 | 125 | 0.643805 |
StanfordVL/OmniGibson/omnigibson/configs/fetch_primitives.yaml | env:
action_frequency: 30 # (int): environment executes action at the action_frequency rate
physics_frequency: 120 # (int): physics frequency (1 / physics_timestep for physx)
device: null # (None or str): specifies the device to be used if running on the gpu with torch backend
automatic_reset: false # (bool): whether to automatic reset after an episode finishes
flatten_action_space: false # (bool): whether to flatten the action space as a sinle 1D-array
flatten_obs_space: false # (bool): whether the observation space should be flattened when generated
use_external_obs: false # (bool): Whether to use external observations or not
initial_pos_z_offset: 0.1
external_sensors: null # (None or list): If specified, list of sensor configurations for external sensors to add. Should specify sensor "type" and any additional kwargs to instantiate the sensor. Each entry should be the kwargs passed to @create_sensor, in addition to position, orientation
render:
viewer_width: 1280
viewer_height: 720
scene:
type: InteractiveTraversableScene
scene_model: Rs_int
trav_map_resolution: 0.1
default_erosion_radius: 0.0
trav_map_with_objects: true
num_waypoints: 1
waypoint_resolution: 0.2
load_object_categories: null
not_load_object_categories: null
load_room_types: null
load_room_instances: null
load_task_relevant_only: false
seg_map_resolution: 0.1
scene_source: OG
include_robots: false
robots:
- type: Fetch
obs_modalities: [scan, rgb, depth]
scale: 1.0
self_collisions: true
action_normalize: false
action_type: continuous
grasping_mode: sticky
rigid_trunk: false
default_trunk_offset: 0.365
default_arm_pose: diagonal30
controller_config:
base:
name: DifferentialDriveController
arm_0:
name: InverseKinematicsController
command_input_limits: default
command_output_limits:
- [-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]
- [0.2, 0.2, 0.2, 0.5, 0.5, 0.5]
mode: pose_absolute_ori
kp: 300.0
gripper_0:
name: JointController
motor_type: position
command_input_limits: [-1, 1]
command_output_limits: null
use_delta_commands: true
camera:
name: JointController
use_delta_commands: False
objects: []
task:
type: DummyTask
scene_graph:
egocentric: true
full_obs: true
only_true: true
merge_parallel_edges: false | 2,579 | YAML | 33.864864 | 307 | 0.65917 |
StanfordVL/OmniGibson/omnigibson/configs/sensors/scan.yaml | # Example ScanSensor sensor config
# See omnigibson/sensors/__init__/create_sensor and omnigibson/sensors/scan_sensor for docstring of arguments
# Arguments below are the arguments that should be specified by external user (other kwargs
# used in constructor are generated automatically at runtime)
robot:
sensor_config:
ScanSensor:
modalities: [scan, occupancy_grid] # if specified, this will override the values in robots_config["obs_modalities"]
enabled: true
noise_type: null
noise_kwargs: null
sensor_kwargs:
# Basic LIDAR kwargs
min_range: 0.05
max_range: 10.0
horizontal_fov: 360.0
vertical_fov: 1.0
yaw_offset: 0.0
horizontal_resolution: 1.0
vertical_resolution: 1.0
rotation_rate: 0.0
draw_points: false
draw_lines: false
# Occupancy Grid kwargs
occupancy_grid_resolution: 128
occupancy_grid_range: 5.0
occupancy_grid_inner_radius: 0.5
occupancy_grid_local_link: null | 1,043 | YAML | 33.799999 | 122 | 0.66443 |
StanfordVL/OmniGibson/omnigibson/configs/sensors/vision.yaml | # Example VisionSensor sensor config
# See omnigibson/sensors/__init__/create_sensor and omnigibson/sensors/vision_sensor for docstring of arguments
# Arguments below are the arguments that should be specified by external user (other kwargs
# used in constructor are generated automatically at runtime)
robot:
sensor_config:
VisionSensor:
modalities: [rgb, depth] # if specified, this will override the values in robots_config["obs_modalities"]
enabled: true
noise_type: null
noise_kwargs: null
sensor_kwargs:
image_height: 128
image_width: 128 | 597 | YAML | 41.714283 | 112 | 0.730318 |
StanfordVL/OmniGibson/omnigibson/configs/controllers/joint.yaml | # Example Joint control config (shown for arm control)
# See omnigibson/controllers/joint_controller for docstring of arguments
# Arguments below are the arguments that should be specified by external user (other kwargs
# used in constructor are generated automatically at runtime)
robot:
controller_config:
arm:
name: JointController
motor_type: velocity
command_input_limits: default
command_output_limits: default
use_delta_commands: false
| 479 | YAML | 35.923074 | 91 | 0.757829 |
StanfordVL/OmniGibson/omnigibson/configs/controllers/dd.yaml | # Example Differential Drive control config (shown for base control)
# See omnigibson/controllers/dd_controller for docstring of arguments
# Arguments below are the arguments that should be specified by external user (other kwargs
# used in constructor are generated automatically at runtime)
robot:
controller_config:
base:
name: DifferentialDriveController
command_input_limits: default
command_output_limits: default | 443 | YAML | 43.399996 | 91 | 0.785553 |
StanfordVL/OmniGibson/omnigibson/configs/controllers/multi_finger_gripper.yaml | # Example Multi Finger Gripper control config (shown for arm control)
# See omnigibson/controllers/parallel_jaw_gripper_controller for docstring of arguments
# Arguments below are the arguments that should be specified by external user (other kwargs
# used in constructor are generated automatically at runtime)
robot:
controller_config:
arm:
name: MultiFingerGripperController
motor_type: position
command_input_limits: default
command_output_limits: default
mode: binary
open_qpos: null
closed_qpos: null
limit_tolerance: 0.01 | 582 | YAML | 37.866664 | 91 | 0.745704 |
StanfordVL/OmniGibson/omnigibson/configs/controllers/null_gripper.yaml | # Example Null Gripper control config (shown for gripper control)
# See omnigibson/controllers/null_gripper_controller for docstring of arguments
# Arguments below are the arguments that should be specified by external user (other kwargs
# used in constructor are generated automatically at runtime)
robot:
controller_config:
gripper:
name: NullJointController
# no other args to specify (this is a dummy controller) | 434 | YAML | 47.333328 | 91 | 0.78341 |
StanfordVL/OmniGibson/omnigibson/configs/controllers/ik.yaml | # Example IK config (shown for arm control)
# See omnigibson/controllers/ik_controller for docstring of arguments
# Arguments below are the arguments that should be specified by external user (other kwargs
# used in constructor are generated automatically at runtime)
robot:
controller_config:
arm:
name: InverseKinematicsController
command_input_limits: default
command_output_limits:
- [-0.2, -0.2, -0.2, -0.5, -0.5, -0.5]
- [0.2, 0.2, 0.2, 0.5, 0.5, 0.5]
kv: 2.0
mode: pose_delta_ori
smoothing_filter_size: 2
workspace_pose_limiter: null
joint_range_tolerance: 0.01 | 638 | YAML | 36.588233 | 91 | 0.669279 |
StanfordVL/OmniGibson/omnigibson/configs/robots/husky.yaml | # Example Husky config
robot:
name: Husky
action_type: continuous
action_normalize: true
proprio_obs:
- joint_qpos
- joint_qvel
reset_joint_pos: null
base_name: null
scale: 1.0
self_collision: false
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 128
image_width: 128
ScanSensor:
sensor_kwargs:
min_range: 0.05
max_range: 10.0
controller_config:
base:
name: JointController | 479 | YAML | 18.999999 | 27 | 0.624217 |
StanfordVL/OmniGibson/omnigibson/configs/robots/freight.yaml | # Example Freight config
robot:
name: Freight
action_type: continuous
action_normalize: true
proprio_obs:
- dd_base_lin_vel
- dd_base_ang_vel
reset_joint_pos: null
base_name: null
scale: 1.0
self_collision: false
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 128
image_width: 128
ScanSensor:
sensor_kwargs:
min_range: 0.05
max_range: 10.0
controller_config:
base:
name: DifferentialDriveController | 505 | YAML | 20.083332 | 39 | 0.635644 |
StanfordVL/OmniGibson/omnigibson/configs/robots/fetch.yaml | # Example Fetch config
robot:
name: Fetch
action_type: continuous
action_normalize: true
proprio_obs:
- eef_0_pos
- eef_0_quat
- trunk_qpos
- arm_0_qpos_sin
- arm_0_qpos_cos
- gripper_0_qpos
- grasp_main
reset_joint_pos: null
base_name: null
scale: 1.0
self_collision: true
grasping_mode: physical
rigid_trunk: false
default_trunk_offset: 0.365
default_arm_pose: vertical
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 128
image_width: 128
ScanSensor:
sensor_kwargs:
min_range: 0.05
max_range: 10.0
controller_config:
base:
name: DifferentialDriveController
arm_0:
name: InverseKinematicsController
gripper_0:
name: MultiFingerGripperController
camera:
name: JointController | 839 | YAML | 20.538461 | 40 | 0.64124 |
StanfordVL/OmniGibson/omnigibson/configs/robots/turtlebot.yaml | # Example Turtlebot config
robot:
name: Turtlebot
action_type: continuous
action_normalize: true
proprio_obs:
- dd_base_lin_vel
- dd_base_ang_vel
reset_joint_pos: null
base_name: null
scale: 1.0
self_collision: false
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 128
image_width: 128
ScanSensor:
sensor_kwargs:
min_range: 0.05
max_range: 10.0
controller_config:
base:
name: DifferentialDriveController | 509 | YAML | 20.249999 | 39 | 0.638507 |
StanfordVL/OmniGibson/omnigibson/configs/robots/locobot.yaml | # Example Locobot config
robot:
name: Locobot
action_type: continuous
action_normalize: true
proprio_obs:
- dd_base_lin_vel
- dd_base_ang_vel
reset_joint_pos: null
base_name: null
scale: 1.0
self_collision: false
sensor_config:
VisionSensor:
sensor_kwargs:
image_height: 128
image_width: 128
ScanSensor:
sensor_kwargs:
min_range: 0.05
max_range: 10.0
controller_config:
base:
name: DifferentialDriveController | 505 | YAML | 20.083332 | 39 | 0.635644 |
StanfordVL/OmniGibson/omnigibson/termination_conditions/timeout.py | from omnigibson.termination_conditions.termination_condition_base import FailureCondition
class Timeout(FailureCondition):
    """
    Timeout (failure condition)
    Episode terminates if max_step steps have passed

    Args:
        max_steps (int): Maximum number of episode steps before timeout occurs
    """

    def __init__(self, max_steps=500):
        # Record the step budget before deferring to the base-class constructor
        self._max_steps = max_steps
        super().__init__()

    def _step(self, task, env, action):
        # Failure triggers once the episode has consumed its step budget
        return env.episode_steps >= self._max_steps
| 627 | Python | 26.304347 | 89 | 0.658692 |
StanfordVL/OmniGibson/omnigibson/termination_conditions/falling.py | from omnigibson.termination_conditions.termination_condition_base import FailureCondition
class Falling(FailureCondition):
    """
    Falling (failure condition) used for any navigation-type tasks
    Episode terminates if the robot falls out of the world (i.e.: falls below the floor height by at least
    @fall_height)

    Args:
        robot_idn (int): robot identifier to evaluate condition with. Default is 0, corresponding to the first
            robot added to the scene
        fall_height (float): distance (m) > 0 below the scene's floor height under which the robot is considered
            to be falling out of the world
    """
    def __init__(self, robot_idn=0, fall_height=0.03):
        # Store internal vars
        self._robot_idn = robot_idn
        self._fall_height = fall_height
        # Run super init
        super().__init__()
    def _step(self, task, env, action):
        # Terminate if the specified robot is falling out of the scene:
        # compare the robot base's z-coordinate against the floor height minus the allowed margin
        robot_z = env.scene.robots[self._robot_idn].get_position()[2]
        return robot_z < (env.scene.get_floor_height() - self._fall_height)
| 1,124 | Python | 37.793102 | 116 | 0.662811 |
StanfordVL/OmniGibson/omnigibson/termination_conditions/__init__.py | from omnigibson.termination_conditions.termination_condition_base import REGISTERED_TERMINATION_CONDITIONS, \
REGISTERED_SUCCESS_CONDITIONS, REGISTERED_FAILURE_CONDITIONS, BaseTerminationCondition
from omnigibson.termination_conditions.falling import Falling
from omnigibson.termination_conditions.max_collision import MaxCollision
from omnigibson.termination_conditions.point_goal import PointGoal
from omnigibson.termination_conditions.predicate_goal import PredicateGoal
from omnigibson.termination_conditions.reaching_goal import ReachingGoal
from omnigibson.termination_conditions.timeout import Timeout
| 613 | Python | 67.222215 | 109 | 0.880914 |
StanfordVL/OmniGibson/omnigibson/termination_conditions/reaching_goal.py | from omnigibson.termination_conditions.termination_condition_base import SuccessCondition
import omnigibson.utils.transform_utils as T
class ReachingGoal(SuccessCondition):
    """
    ReachingGoal (success condition) used for reaching-type tasks
    Episode terminates if reaching goal is reached within @distance_tol by the @robot_idn robot's end effector

    Args:
        robot_idn (int): robot identifier to evaluate point goal with. Default is 0, corresponding to the first
            robot added to the scene
        distance_tol (float): Distance (m) tolerance between goal position and @robot_idn's robot eef position
            that is accepted as a success
    """
    def __init__(self, robot_idn=0, distance_tol=0.5):
        # Store internal vars
        self._robot_idn = robot_idn
        self._distance_tol = distance_tol
        # Run super init
        super().__init__()
    def _step(self, task, env, action):
        # Terminate if point goal is reached (distance below threshold); distance is measured
        # between the robot's end-effector position and the task's goal position
        return T.l2_distance(env.scene.robots[self._robot_idn].get_eef_position(), task.goal_pos) < \
            self._distance_tol
| 1,120 | Python | 36.366665 | 111 | 0.675893 |
StanfordVL/OmniGibson/omnigibson/termination_conditions/point_goal.py | from omnigibson.termination_conditions.termination_condition_base import SuccessCondition
import omnigibson.utils.transform_utils as T
class PointGoal(SuccessCondition):
    """
    PointGoal (success condition) used for PointNavFixed/RandomTask
    Episode terminates if point goal is reached within @distance_tol by the @robot_idn robot's base

    Args:
        robot_idn (int): robot identifier to evaluate point goal with. Default is 0, corresponding to the first
            robot added to the scene
        distance_tol (float): Distance (m) tolerance between goal position and @robot_idn's robot base position
            that is accepted as a success
        distance_axes (str): Which axes to calculate distances when calculating the goal. Any combination of "x",
            "y", and "z" is valid (e.g.: "xy" or "xyz" or "y")
    """

    def __init__(self, robot_idn=0, distance_tol=0.5, distance_axes="xyz"):
        self._robot_idn = robot_idn
        self._distance_tol = distance_tol
        # Convert e.g. "xz" into index form [0, 2] for fast slicing at step time
        self._distance_axes = [idx for idx, label in enumerate("xyz") if label in distance_axes]

        super().__init__()

    def _step(self, task, env, action):
        # Imported at runtime to avoid a circular dependency between tasks and termination conditions
        from omnigibson.tasks.point_navigation_task import PointNavigationTask
        assert isinstance(task, PointNavigationTask), \
            f"Cannot use {self.__class__.__name__} with a non-PointNavigationTask task instance!"

        # Success once the axis-restricted distance to the goal drops below tolerance
        current = task.get_current_pos(env)[self._distance_axes]
        goal = task.get_goal_pos()[self._distance_axes]
        return T.l2_distance(current, goal) < self._distance_tol
| 1,748 | Python | 48.971427 | 120 | 0.677918 |
StanfordVL/OmniGibson/omnigibson/termination_conditions/termination_condition_base.py | from abc import ABCMeta, abstractmethod
from omnigibson.utils.python_utils import classproperty, Registerable
# Global registries: every termination condition, plus the success / failure partitions
REGISTERED_TERMINATION_CONDITIONS = dict()
REGISTERED_SUCCESS_CONDITIONS = dict()
REGISTERED_FAILURE_CONDITIONS = dict()


def register_success_condition(cls):
    # Idempotent: only the first registration under a given class name is kept
    REGISTERED_SUCCESS_CONDITIONS.setdefault(cls.__name__, cls)


def register_failure_condition(cls):
    # Idempotent: only the first registration under a given class name is kept
    REGISTERED_FAILURE_CONDITIONS.setdefault(cls.__name__, cls)
class BaseTerminationCondition(Registerable, metaclass=ABCMeta):
    """
    Base TerminationCondition class
    Condition-specific _step() method is implemented in subclasses
    """

    def __init__(self):
        # Cached result of the most recent _step(); None until step() has run at least once
        self._done = None

    @abstractmethod
    def _step(self, task, env, action):
        """
        Step the termination condition and return whether the episode should terminate. Overwritten by subclasses.

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment

        Returns:
            bool: whether environment should terminate or not
        """
        raise NotImplementedError()

    def step(self, task, env, action):
        """
        Step the termination condition and return whether the episode should terminate.

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
            action (n-array): 1D flattened array of actions executed by all agents in the environment

        Returns:
            2-tuple:
                - bool: whether environment should terminate or not
                - bool: whether a success was reached under this termination condition
        """
        # Delegate to the subclass implementation and cache the outcome for done / success queries
        self._done = self._step(task=task, env=env, action=action)

        # A triggered success-type condition counts as a success; a failure-type condition never does
        return self._done, self._done and self._terminate_is_success

    def reset(self, task, env):
        """
        Termination condition-specific reset

        Args:
            task (BaseTask): Task instance
            env (Environment): Environment instance
        """
        # Clear the cached result so done / success cannot be read before the next step()
        self._done = None

    @property
    def done(self):
        """
        Returns:
            bool: Whether this termination condition has triggered or not
        """
        assert self._done is not None, "At least one step() must occur before done can be calculated!"
        return self._done

    @property
    def success(self):
        """
        Returns:
            bool: Whether this termination condition has been evaluated as a success or not
        """
        assert self._done is not None, "At least one step() must occur before success can be calculated!"
        return self._done and self._terminate_is_success

    @classproperty
    def _terminate_is_success(cls):
        """
        Returns:
            bool: Whether this termination condition corresponds to a success
        """
        raise NotImplementedError()

    @classproperty
    def _do_not_register_classes(cls):
        # Abstract template -- keep it out of the registry
        classes = super()._do_not_register_classes
        classes.add("BaseTerminationCondition")
        return classes

    @classproperty
    def _cls_registry(cls):
        # All termination conditions share one global registry
        global REGISTERED_TERMINATION_CONDITIONS
        return REGISTERED_TERMINATION_CONDITIONS
class SuccessCondition(BaseTerminationCondition):
    """
    Termination condition corresponding to a success
    """

    def __init_subclass__(cls, **kwargs):
        # Auto-register every concrete subclass as a success condition
        super().__init_subclass__(**kwargs)
        register_success_condition(cls)

    @classproperty
    def _terminate_is_success(cls):
        # Triggering this condition means the episode ended successfully
        return True

    @classproperty
    def _do_not_register_classes(cls):
        # Abstract template -- keep it out of the registry
        classes = super()._do_not_register_classes
        classes.add("SuccessCondition")
        return classes
class FailureCondition(BaseTerminationCondition):
    """
    Termination condition corresponding to a failure
    """

    def __init_subclass__(cls, **kwargs):
        # Auto-register every concrete subclass as a failure condition
        super().__init_subclass__(**kwargs)
        register_failure_condition(cls)

    @classproperty
    def _terminate_is_success(cls):
        # Triggering this condition means the episode ended in failure
        return False

    @classproperty
    def _do_not_register_classes(cls):
        # Abstract template -- keep it out of the registry
        classes = super()._do_not_register_classes
        classes.add("FailureCondition")
        return classes
| 5,049 | Python | 30.962025 | 114 | 0.643494 |
StanfordVL/OmniGibson/omnigibson/termination_conditions/max_collision.py | from omnigibson.termination_conditions.termination_condition_base import FailureCondition
from omnigibson.object_states.contact_bodies import ContactBodies
class MaxCollision(FailureCondition):
    """
    MaxCollision (failure condition) used for navigation tasks
    Episode terminates if the robot has collided more than max_collisions_allowed times

    Note that we ignore collisions with any floor objects.

    Args:
        robot_idn (int): robot identifier to evaluate collision checking with. Default is 0, corresponding to the first
            robot added to the scene
        ignore_self_collisions (bool): Whether to ignore robot self-collisions or not
        max_collisions (int): Maximum number of collisions allowed for any robots in the scene before a termination
            is triggered
    """
    def __init__(self, robot_idn=0, ignore_self_collisions=True, max_collisions=500):
        self._robot_idn = robot_idn
        self._ignore_self_collisions = ignore_self_collisions
        self._max_collisions = max_collisions
        # Running count of simulation steps during which the robot was in contact
        self._n_collisions = 0

        # Run super init
        super().__init__()

    def reset(self, task, env):
        # Call super first
        super().reset(task, env)

        # Also reset collision counter
        self._n_collisions = 0

    def _step(self, task, env, action):
        # Terminate if the robot has collided more than self._max_collisions times
        robot = env.robots[self._robot_idn]
        floors = list(env.scene.object_registry("category", "floors", []))
        # Floors never count as collisions; the robot itself is additionally excluded only when
        # self-collisions should be ignored. (BUGFIX: this previously tested `is None` on the
        # boolean flag, which always evaluated False and thus always ignored self-collisions
        # regardless of @ignore_self_collisions.)
        ignore_objs = floors + [robot] if self._ignore_self_collisions else floors
        in_contact = len(robot.states[ContactBodies].get_value(ignore_objs=tuple(ignore_objs))) > 0
        self._n_collisions += int(in_contact)
        return self._n_collisions > self._max_collisions
| 1,855 | Python | 42.16279 | 121 | 0.68248 |
StanfordVL/OmniGibson/omnigibson/termination_conditions/predicate_goal.py | from bddl.activity import evaluate_goal_conditions
from omnigibson.termination_conditions.termination_condition_base import SuccessCondition
class PredicateGoal(SuccessCondition):
    """
    PredicateGoal (success condition) used for BehaviorTask.
    The episode terminates successfully once every goal predicate evaluates as satisfied.

    Args:
        goal_fcn (method): function for calculating goal(s). Function signature should be:

            goals = goal_fcn()

            where @goals is a list of bddl.condition_evaluation.HEAD -- compiled BDDL goal conditions
    """
    def __init__(self, goal_fcn):
        # Cache the goal-generating function and start with no recorded status
        self._goal_fcn = goal_fcn
        self._goal_status = None
        super().__init__()
    def reset(self, task, env):
        super().reset(task, env)
        # Fresh episode: no predicates have been evaluated yet
        self._goal_status = {"satisfied": [], "unsatisfied": []}
    def _step(self, task, env, action):
        # Re-evaluate all compiled goal conditions; we succeed only when every one holds
        all_satisfied, self._goal_status = evaluate_goal_conditions(self._goal_fcn())
        return all_satisfied
    @property
    def goal_status(self):
        """
        Returns:
            dict: Current goal status for the active predicate(s), mapping "satisfied" and "unsatisfied" to a list
                of the predicates matching either of those conditions
        """
        return self._goal_status
| 1,417 | Python | 29.826086 | 114 | 0.631616 |
StanfordVL/OmniGibson/omnigibson/object_states/sliceable.py | import numpy as np
from omnigibson.object_states.object_state_base import BaseObjectRequirement
class SliceableRequirement(BaseObjectRequirement):
    """
    Class for sanity checking objects that request the "sliceable" ability
    """
    @classmethod
    def is_compatible(cls, obj, **kwargs):
        # Imported locally to avoid circular imports
        from omnigibson.objects.dataset_object import DatasetObject
        # Slicing is only supported for dataset objects with annotated part metadata
        if isinstance(obj, DatasetObject):
            if obj.metadata["object_parts"]:
                return True, None
            return False, f"Missing required metadata 'object_parts'."
        return False, f"Only compatible with DatasetObject, but {obj} is of type {type(obj)}"
    @classmethod
    def is_compatible_asset(cls, prim, **kwargs):
        # The raw USD asset must also carry the object-part annotations in its custom data
        metadata = prim.GetCustomData().get("metadata", dict())
        if not metadata.get("object_parts", None):
            return False, f"Missing required metadata 'object_parts'."
        return True, None
| 1,198 | Python | 37.677418 | 97 | 0.683639 |
StanfordVL/OmniGibson/omnigibson/object_states/adjacency.py | from collections import namedtuple
import numpy as np
import omnigibson as og
from omnigibson.macros import create_module_macros
from omnigibson.object_states.aabb import AABB
from omnigibson.object_states.object_state_base import AbsoluteObjectState
from omnigibson.utils.sampling_utils import raytest_batch, raytest
from omnigibson.utils.constants import PrimType
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Maximum ray-cast length when searching for vertical (Z-axis) neighbors.
# NOTE(review): presumably meters — confirm against the simulator's distance units.
m.MAX_DISTANCE_VERTICAL = 5.0
# Maximum ray-cast length when searching for neighbors within horizontal planes.
m.MAX_DISTANCE_HORIZONTAL = 5.0
# How many 2-D bases to try during horizontal adjacency check. When 1, only the standard axes will be considered.
# When 2, standard axes + 45 degree rotated will be considered. The tried axes will be equally spaced. The higher
# this number, the lower the possibility of false negatives in Inside and NextTo.
m.HORIZONTAL_AXIS_COUNT = 5
# Per-axis result container: the sets of objects found along the axis' positive and
# negative directions, respectively.
AxisAdjacencyList = namedtuple("AxisAdjacencyList", ("positive_neighbors", "negative_neighbors"))
def flatten_planes(planes):
    """
    Flatten a plane-grouped collection of axes into a single stream of axes.

    Useful when the caller does not care which coordinate plane each axis
    belongs to and simply wants to iterate over every axis in order.

    Args:
        planes (iterable): iterable of per-plane axis collections

    Returns:
        generator: yields each axis from each plane, preserving order
    """
    for axes_in_plane in planes:
        yield from axes_in_plane
def get_equidistant_coordinate_planes(n_planes):
    """Given a number, sample that many equally spaced coordinate planes.

    The samples will cover all 360 degrees (although rotational symmetry
    is assumed, e.g. if you take into account the axis index and the
    positive/negative directions, only 1/4 of the possible coordinate (1 quadrant, np.pi / 2.0)
    planes will be sampled: the ones where the first axis' positive direction
    is in the first quadrant).

    Args:
        n_planes (int): number of planes to sample

    Returns:
        3D-array: (n_planes, 2, 3) array where the first dimension
            is the sampled plane index, the second dimension is the axis index
            (0/1), and the third dimension is the 3-D world-coordinate vector
            corresponding to the axis.
    """
    # Compute the positive directions of the 1st axis of each plane.
    first_axis_angles = np.linspace(0, np.pi / 2, n_planes)
    first_axes = np.stack(
        [np.cos(first_axis_angles), np.sin(first_axis_angles), np.zeros_like(first_axis_angles)], axis=1
    )
    # Compute the positive directions of the 2nd axes. These axes are
    # orthogonal to both their corresponding first axes and to the Z axis.
    second_axes = np.cross([0, 0, 1], first_axes)
    # BUGFIX: the original expanded each array to (n_planes, 1, 3) before np.stack(axis=1), which
    # yields shape (n_planes, 2, 1, 3) and contradicts the documented (n_planes, 2, 3) return.
    # Stacking the raw (n_planes, 3) arrays gives the documented shape. The only in-file consumer
    # (HorizontalAdjacency) reshapes to (-1, 3), so it is unaffected by this correction.
    return np.stack([first_axes, second_axes], axis=1)
def compute_adjacencies(obj, axes, max_distance, use_aabb_center=True):
    """
    Given an object and a list of axes, find the adjacent objects in the axes'
    positive and negative directions.

    If @obj is of PrimType.CLOTH, then adjacent objects are found with respect to the
    @obj's centroid particle position

    Args:
        obj (StatefulObject): The object to check adjacencies of.
        axes (2D-array): (n_axes, 3) array defining the axes to check in.
            Note that each axis will be checked in both its positive and negative direction.
        max_distance (float): Maximum distance to shoot each adjacency-checking ray; objects
            beyond this distance are not considered adjacent.
        use_aabb_center (bool): If True and @obj is not of PrimType.CLOTH, will shoot rays from @obj's aabb center.
            Otherwise, will dynamically compute starting points based on the requested @axes
    Returns:
        list of AxisAdjacencyList: List of length len(axes) containing the adjacencies.
    """
    # Get vectors for each of the axes' directions.
    # The ordering is axes1+, axis1-, axis2+, axis2- etc.
    directions = np.empty((len(axes) * 2, 3))
    directions[0::2] = axes
    directions[1::2] = -axes
    # Prepare this object's info for ray casting.
    if obj.prim_type == PrimType.CLOTH:
        # Cloth has no AABB-centered rigid body; rays originate at the centroid particle instead
        ray_starts = np.tile(obj.root_link.centroid_particle_position, (len(directions), 1))
    else:
        aabb_lower, aabb_higher = obj.states[AABB].get_value()
        object_position = (aabb_lower + aabb_higher) / 2.0
        ray_starts = np.tile(object_position, (len(directions), 1))
        if not use_aabb_center:
            # Dynamically compute start points by iterating over the directions and pre-shooting rays from
            # which to shoot back from
            # For a given direction, we go in the negative (opposite) direction to the edge of the object extent,
            # and then proceed with an additional offset before shooting rays
            shooting_offset = 0.01
            direction_half_extent = directions * (aabb_higher - aabb_lower).reshape(1, 3) / 2.0
            pre_start = object_position.reshape(1, 3) + (direction_half_extent + directions * shooting_offset)
            pre_end = object_position.reshape(1, 3) - direction_half_extent
            idx = 0
            obj_link_paths = {link.prim_path for link in obj.links.values()}
            # NOTE(review): this callback closes over @idx, which is incremented in the loop below.
            # Correctness relies on raytest invoking the callback synchronously during each call —
            # confirm against the raytest implementation.
            def _ray_callback(hit):
                # Check for self-hit -- if so, record the position and terminate early
                should_continue = True
                if hit.rigid_body in obj_link_paths:
                    # Overwrite the default (center) start point with the surface hit position
                    ray_starts[idx] = np.array(hit.position)
                    should_continue = False
                return should_continue
            for ray_start, ray_end in zip(pre_start, pre_end):
                raytest(
                    start_point=ray_start,
                    end_point=ray_end,
                    only_closest=False,
                    callback=_ray_callback,
                )
                idx += 1
    # Prepare the rays to cast.
    ray_endpoints = ray_starts + (directions * max_distance)
    # Cast time.
    # The object's own links are excluded so rays only report other bodies
    prim_paths = obj.link_prim_paths
    ray_results = raytest_batch(
        ray_starts,
        ray_endpoints,
        only_closest=False,
        ignore_bodies=prim_paths,
        ignore_collisions=prim_paths
    )
    # Add the results to the appropriate lists
    # For now, we keep our result in the dimensionality of (direction, hit_object_order).
    # We convert the hit link into unique objects encountered
    objs_by_direction = []
    for results in ray_results:
        unique_objs = set()
        for result in results:
            # Check if the inferred hit object is not None, we add it to our set
            # The hit prim path names a link; its parent prim path identifies the owning object
            obj_prim_path = "/".join(result["rigidBody"].split("/")[:-1])
            # NOTE(review): this rebinds the @obj parameter; harmless since the original @obj is not
            # used after this point, but a distinct name would be clearer.
            obj = og.sim.scene.object_registry("prim_path", obj_prim_path, None)
            if obj is not None:
                unique_objs.add(obj)
        objs_by_direction.append(unique_objs)
    # Reshape so that these have the following indices:
    # (axis_idx, direction-one-or-zero, hit_idx)
    objs_by_axis = [
        AxisAdjacencyList(positive_neighbors, negative_neighbors)
        for positive_neighbors, negative_neighbors in zip(objs_by_direction[::2], objs_by_direction[1::2])
    ]
    return objs_by_axis
class VerticalAdjacency(AbsoluteObjectState):
    """
    State representing the object's vertical adjacencies.

    Value is a AxisAdjacencyList object.
    """
    def _get_value(self):
        # Only the world Z axis is queried; start points are computed dynamically
        # from the object extent rather than from the AABB center
        up_axis = np.array([[0, 0, 1]])
        adjacencies = compute_adjacencies(self.obj, up_axis, m.MAX_DISTANCE_VERTICAL, use_aabb_center=False)
        # A single axis was passed in, so unwrap the single-element result
        return adjacencies[0]
    @classmethod
    def get_dependencies(cls):
        # Ray start points are derived from the object's AABB
        deps = super().get_dependencies()
        deps.add(AABB)
        return deps
    # Nothing needs to be done to save/load adjacency since it will happen due to pose caching.
class HorizontalAdjacency(AbsoluteObjectState):
    """
    State representing the object's horizontal adjacencies in a preset number of directions.

    The HorizontalAdjacency state returns adjacency lists for equally spaced coordinate planes.
    Each plane consists of 2 orthogonal axes, and adjacencies are checked for both the positive
    and negative directions of each axis.

    The value of the state is List[List[AxisAdjacencyList]], where the list dimensions are
    m.HORIZONTAL_AXIS_COUNT and 2. The first index is used to choose between the different planes,
    the second index to choose between the orthogonal axes of that plane. Given a plane/axis combo,
    the item in the list is a AxisAdjacencyList containing adjacencies in both directions of the
    axis.

    If the idea of orthogonal bases is not relevant (and your use case simply requires checking
    adjacencies in each direction), the flatten_planes() function can be used on the state value
    to reduce the output to List[AxisAdjacencyList], a list of adjacency lists for all
    2 * m.HORIZONTAL_AXIS_COUNT directions.
    """
    def _get_value(self):
        planes = get_equidistant_coordinate_planes(m.HORIZONTAL_AXIS_COUNT)
        # compute_adjacencies expects a flat (n_axes, 3) array, so collapse the plane grouping
        flat_axes = planes.reshape(-1, 3)
        adjacencies = compute_adjacencies(self.obj, flat_axes, m.MAX_DISTANCE_HORIZONTAL, use_aabb_center=True)
        # Re-pair consecutive axis results back into their original coordinate planes
        return list(zip(adjacencies[::2], adjacencies[1::2]))
    @classmethod
    def get_dependencies(cls):
        # Ray start points are derived from the object's AABB center
        deps = super().get_dependencies()
        deps.add(AABB)
        return deps
    # Nothing needs to be done to save/load adjacency since it will happen due to pose caching.
| 9,467 | Python | 41.267857 | 137 | 0.672652 |
StanfordVL/OmniGibson/omnigibson/object_states/frozen.py | import numpy as np
from omnigibson.macros import create_module_macros
from omnigibson.object_states.object_state_base import AbsoluteObjectState, BooleanStateMixin
from omnigibson.object_states.temperature import Temperature
# Create settings for this module
m = create_module_macros(module_path=__file__)
# Temperature at or below which an object counts as frozen.
# NOTE(review): presumably degrees Celsius (0.0 matches water's freezing point) — confirm
# against the Temperature state's unit convention.
m.DEFAULT_FREEZE_TEMPERATURE = 0.0
# When an object is set as frozen, we will sample it between
# the freeze temperature and these offsets.
m.FROZEN_SAMPLING_RANGE_MAX = -10.0
m.FROZEN_SAMPLING_RANGE_MIN = -50.0
class Frozen(AbsoluteObjectState, BooleanStateMixin):
    """
    Boolean state reporting whether an object is frozen, defined entirely in terms of the
    object's Temperature state relative to @freeze_temperature.
    """
    def __init__(self, obj, freeze_temperature=m.DEFAULT_FREEZE_TEMPERATURE):
        super(Frozen, self).__init__(obj)
        # Threshold at or below which this object counts as frozen
        self.freeze_temperature = freeze_temperature
    @classmethod
    def get_dependencies(cls):
        # Frozen is computed from (and set via) the Temperature state
        deps = super().get_dependencies()
        deps.add(Temperature)
        return deps
    def _set_value(self, new_value):
        temperature_state = self.obj.states[Temperature]
        if not new_value:
            # We'll set the temperature just one degree above freezing. Hopefully the object
            # isn't in a fridge.
            return temperature_state.set_value(self.freeze_temperature + 1.0)
        # Sample a temperature safely below the freezing threshold
        sampled_temperature = np.random.uniform(
            self.freeze_temperature + m.FROZEN_SAMPLING_RANGE_MIN,
            self.freeze_temperature + m.FROZEN_SAMPLING_RANGE_MAX,
        )
        return temperature_state.set_value(sampled_temperature)
    def _get_value(self):
        # Frozen iff the current temperature does not exceed the threshold
        return self.obj.states[Temperature].get_value() <= self.freeze_temperature
    @staticmethod
    def get_texture_change_params():
        albedo_add = 0.3  # increase all channels by 0.3 (to make it white)
        diffuse_tint = (1.0, 1.0, 1.0)  # no final scaling
        return albedo_add, diffuse_tint
    # Nothing needs to be done to save/load Frozen since it will happen due to temperature caching.
| 1,910 | Python | 34.388888 | 99 | 0.676963 |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.