Dataset columns:

file_path: string (lengths 21-207)
content: string (lengths 5-1.02M)
size: int64 (5-1.02M)
lang: string, 9 classes
avg_line_length: float64 (2.5-98.5)
max_line_length: int64 (5-993)
alphanum_fraction: float64 (0.27-0.91)
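These columns match the usual Hugging Face `datasets` schema for code corpora. As a minimal sketch of how such a dump could be loaded and filtered, assuming the dataset is published somewhere (the identifier below is a placeholder, not the real one):

```python
# Minimal sketch: loading a code dataset with the columns listed above via the
# `datasets` library. The dataset name is hypothetical.
from datasets import load_dataset

ds = load_dataset("StanfordVL/omnigibson-source-files", split="train")  # placeholder name

# Keep only Python files and inspect a few of the columns shown above.
python_files = ds.filter(lambda row: row["lang"] == "Python")
for row in python_files.select(range(min(3, len(python_files)))):
    print(row["file_path"], row["size"], row["avg_line_length"])
```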
StanfordVL/OmniGibson/docker/README.md
# Requirements

- Modern Linux distribution (Ubuntu 20.04, Fedora 36, etc.)
- RTX-capable NVIDIA graphics card (20 series or newer)
- Up-to-date NVIDIA drivers

# Usage

**The instructions below cover OmniGibson containers built from self-built images. Please see the BEHAVIOR-1K docs for instructions on how to pull and run a cloud image.**

1. Set up the NVIDIA Docker Runtime and log in to the NVIDIA Container Registry. See [here](https://www.pugetsystems.com/labs/hpc/how-to-setup-nvidia-docker-and-ngc-registry-on-your-workstation-part-4-accessing-the-ngc-registry-1115/) for details.

2. Build the container. **From the OmniGibson root**, run: `./docker/build_docker.sh`

3. Run the container:
   * To get a shell inside a container with GUI: `sudo ./docker/run_docker_gui.sh`
   * To get a Jupyter notebook: `sudo ./docker/run_docker_notebook.sh`
   * To get a shell inside a headless container: `sudo ./docker/run_docker.sh`

# Development

To push a Docker container, run: `sudo ./docker/push_docker.sh`
size: 1,019 | lang: Markdown | avg_line_length: 45.363634 | max_line_length: 180 | alphanum_fraction: 0.758587
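The README drives everything through shell scripts. As a minimal sketch, the same steps could be scripted from Python with `subprocess`; the `REPO_ROOT` path and the sudo usage are assumptions carried over from the instructions above:

```python
# Minimal sketch: invoking the docker helper scripts named in the README.
# Assumes it is run from the OmniGibson repository root, as the README requires.
import subprocess
from pathlib import Path

REPO_ROOT = Path(".")  # adjust if running from elsewhere

def run_privileged_script(relative_path: str) -> None:
    """Run one of the docker run/push scripts with sudo and fail loudly on error."""
    subprocess.run(["sudo", str(REPO_ROOT / relative_path)], check=True)

# Build the image, then start a headless container shell.
subprocess.run([str(REPO_ROOT / "docker/build_docker.sh")], check=True)
run_privileged_script("docker/run_docker.sh")
```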
StanfordVL/OmniGibson/tests/test_symbolic_primitives.py
import os

import pytest
import yaml

from omnigibson.macros import gm

gm.USE_GPU_DYNAMICS = True
gm.USE_FLATCACHE = True

import omnigibson as og
from omnigibson import object_states
from omnigibson.action_primitives.symbolic_semantic_action_primitives import SymbolicSemanticActionPrimitiveSet, SymbolicSemanticActionPrimitives
from omnigibson.systems import get_system


def start_env():
    og.sim.stop()
    config = {
        "env": {"initial_pos_z_offset": 0.1},
        "render": {"viewer_width": 1280, "viewer_height": 720},
        "scene": {
            "type": "InteractiveTraversableScene",
            "scene_model": "Wainscott_0_int",
            "load_object_categories": ["floors", "walls", "countertop", "fridge", "sink", "stove"],
            "scene_source": "OG",
        },
        "robots": [
            {
                "type": "Fetch",
                "obs_modalities": ["scan", "rgb", "depth"],
                "scale": 1,
                "self_collisions": True,
                "action_normalize": False,
                "action_type": "continuous",
                "grasping_mode": "sticky",
                "disable_grasp_handling": True,
                "rigid_trunk": False,
                "default_trunk_offset": 0.365,
                "default_arm_pose": "diagonal30",
                "default_reset_mode": "tuck",
                "controller_config": {
                    "base": {"name": "DifferentialDriveController"},
                    "arm_0": {
                        "name": "JointController",
                        "motor_type": "position",
                        "command_input_limits": None,
                        "command_output_limits": None,
                        "use_delta_commands": False,
                    },
                    "gripper_0": {
                        "name": "JointController",
                        "motor_type": "position",
                        "command_input_limits": [-1, 1],
                        "command_output_limits": None,
                        "use_delta_commands": True,
                    },
                    "camera": {"name": "JointController", "use_delta_commands": False},
                },
            }
        ],
        "objects": [
            {
                "type": "DatasetObject",
                "name": "pan",
                "category": "frying_pan",
                "model": "mhndon",
                "position": [5.31, 10.75, 1.0],
            },
            {
                "type": "DatasetObject",
                "name": "knife",
                "category": "carving_knife",
                "model": "awvoox",
                "position": [5.31, 10.75, 1.2],
            },
            {
                "type": "DatasetObject",
                "name": "apple",
                "category": "apple",
                "model": "agveuv",
                "position": [4.75, 10.75, 1.0],
                "bounding_box": [0.098, 0.098, 0.115],
            },
            {
                "type": "DatasetObject",
                "name": "sponge",
                "category": "sponge",
                "model": "qewotb",
                "position": [4.75, 10.75, 1.0],
            },
        ],
    }
    env = og.Environment(configs=config)
    return env


@pytest.fixture(scope="module")
def shared_env():
    """Load the environment just once using module scope."""
    return start_env()


@pytest.fixture(scope="function")
def env(shared_env):
    """Reset the environment before each test function."""
    og.sim.scene.reset()
    return shared_env


@pytest.fixture
def robot(env):
    return env.robots[0]


@pytest.fixture
def prim_gen(env):
    return SymbolicSemanticActionPrimitives(env)


@pytest.fixture
def countertop(env):
    return next(iter(env.scene.object_registry("category", "countertop")))


@pytest.fixture
def fridge(env):
    return next(iter(env.scene.object_registry("category", "fridge")))


@pytest.fixture
def stove(env):
    return next(iter(env.scene.object_registry("category", "stove")))


@pytest.fixture
def sink(env):
    return next(iter(env.scene.object_registry("category", "sink")))


@pytest.fixture
def pan(env):
    return next(iter(env.scene.object_registry("category", "frying_pan")))


@pytest.fixture
def apple(env):
    return next(iter(env.scene.object_registry("category", "apple")))


@pytest.fixture
def sponge(env):
    return next(iter(env.scene.object_registry("category", "sponge")))


@pytest.fixture
def knife(env):
    return next(iter(env.scene.object_registry("category", "carving_knife")))


class TestSymbolicPrimitives:
    @pytest.mark.skip(reason="primitives are broken")
    def test_in_hand_state(self, env, robot, prim_gen, apple):
        assert not robot.states[object_states.IsGrasping].get_value(apple)
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
            env.step(action)
        assert robot.states[object_states.IsGrasping].get_value(apple)

    # def test_navigate():
    #     pass

    @pytest.mark.skip(reason="primitives are broken")
    def test_open(self, env, prim_gen, fridge):
        assert not fridge.states[object_states.Open].get_value()
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.OPEN, fridge):
            env.step(action)
        assert fridge.states[object_states.Open].get_value()

    @pytest.mark.skip(reason="primitives are broken")
    def test_close(self, env, prim_gen, fridge):
        fridge.states[object_states.Open].set_value(True)
        assert fridge.states[object_states.Open].get_value()
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.CLOSE, fridge):
            env.step(action)
        assert not fridge.states[object_states.Open].get_value()

    @pytest.mark.skip(reason="primitives are broken")
    def test_place_inside(self, env, prim_gen, apple, fridge):
        assert not apple.states[object_states.Inside].get_value(fridge)
        assert not fridge.states[object_states.Open].get_value()
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.OPEN, fridge):
            env.step(action)
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
            env.step(action)
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_INSIDE, fridge):
            env.step(action)
        assert apple.states[object_states.Inside].get_value(fridge)

    @pytest.mark.skip(reason="primitives are broken")
    def test_place_ontop(self, env, prim_gen, apple, pan):
        assert not apple.states[object_states.OnTop].get_value(pan)
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
            env.step(action)
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP, pan):
            env.step(action)
        assert apple.states[object_states.OnTop].get_value(pan)

    @pytest.mark.skip(reason="primitives are broken")
    def test_toggle_on(self, env, prim_gen, stove):
        assert not stove.states[object_states.ToggledOn].get_value()
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, stove):
            env.step(action)
        assert stove.states[object_states.ToggledOn].get_value()

    @pytest.mark.skip(reason="primitives are broken")
    def test_soak_under(self, env, prim_gen, robot, sponge, sink):
        water_system = get_system("water", force_active=True)
        assert not sponge.states[object_states.Saturated].get_value(water_system)
        assert not sink.states[object_states.ToggledOn].get_value()

        # First toggle on the sink
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, sink):
            env.step(action)
        assert sink.states[object_states.ToggledOn].get_value()

        # Then grasp the sponge
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, sponge):
            env.step(action)
        assert robot.states[object_states.IsGrasping].get_value(sponge)

        # Then soak the sponge under the water
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.SOAK_UNDER, sink):
            env.step(action)
        assert sponge.states[object_states.Saturated].get_value(water_system)

    # def test_soak_inside():
    #     pass

    @pytest.mark.skip(reason="primitives are broken")
    def test_wipe(self, env, prim_gen, robot, sponge, sink, countertop):
        # Some pre-assertions
        water_system = get_system("water", force_active=True)
        assert not sponge.states[object_states.Saturated].get_value(water_system)
        assert not sink.states[object_states.ToggledOn].get_value()

        # Dirty the countertop as the setup
        mud_system = get_system("mud", force_active=True)
        countertop.states[object_states.Covered].set_value(mud_system, True)
        assert countertop.states[object_states.Covered].get_value(mud_system)

        # First toggle on the sink
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.TOGGLE_ON, sink):
            env.step(action)
        assert sink.states[object_states.ToggledOn].get_value()

        # Then grasp the sponge
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, sponge):
            env.step(action)
        assert robot.states[object_states.IsGrasping].get_value(sponge)

        # Then soak the sponge under the water
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.SOAK_UNDER, sink):
            env.step(action)
        assert sponge.states[object_states.Saturated].get_value(water_system)

        # Wipe the countertop with the sponge
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.WIPE, countertop):
            env.step(action)
        assert not countertop.states[object_states.Covered].get_value(mud_system)

    @pytest.mark.skip(reason="primitives are broken")
    def test_cut(self, env, prim_gen, apple, knife, countertop):
        # assert not apple.states[object_states.Cut].get_value(knife)
        print("Grasping knife")
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, knife):
            env.step(action)
        for _ in range(60):
            env.step(prim_gen._empty_action())
        print("Cutting apple")
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.CUT, apple):
            env.step(action)
        for _ in range(60):
            env.step(prim_gen._empty_action())
        print("Putting knife back on countertop")
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.PLACE_ON_TOP, countertop):
            env.step(action)

    def test_persistent_sticky_grasping(self, env, robot, prim_gen, apple):
        assert not robot.states[object_states.IsGrasping].get_value(apple)
        for action in prim_gen.apply_ref(SymbolicSemanticActionPrimitiveSet.GRASP, apple):
            env.step(action)
        assert robot.states[object_states.IsGrasping].get_value(apple)
        state = og.sim.dump_state()
        og.sim.stop()
        og.sim.play()
        og.sim.load_state(state)
        assert robot.states[object_states.IsGrasping].get_value(apple)
        for _ in range(10):
            env.step(prim_gen._empty_action())
        assert robot.states[object_states.IsGrasping].get_value(apple)

    # def test_place_near_heating_element():
    #     pass

    # def test_wait_for_cooked():
    #     pass

    def teardown_class(cls):
        og.sim.clear()


def main():
    env = start_env()
    prim_gen = SymbolicSemanticActionPrimitives(env)
    apple = next(iter(env.scene.object_registry("category", "apple")))
    knife = next(iter(env.scene.object_registry("category", "carving_knife")))
    countertop = next(iter(env.scene.object_registry("category", "countertop")))
    print("Will start in 3 seconds")
    for _ in range(180):
        env.step(prim_gen._empty_action())
    # test_cut is a method of TestSymbolicPrimitives, so call it on an instance.
    TestSymbolicPrimitives().test_cut(env, prim_gen, apple, knife, countertop)
    while True:
        og.sim.step()


if __name__ == "__main__":
    main()
size: 11,284 | lang: Python | avg_line_length: 32.888889 | max_line_length: 145 | alphanum_fraction: 0.663506
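Every test above repeats the same pattern: ask the primitive generator for an action sequence and step the environment through it. A minimal helper that factors out that loop, assuming the same OmniGibson APIs the test file already imports (`settle_steps` is an illustrative parameter, not part of the library):

```python
# Minimal sketch of the "apply primitive, then step" loop used throughout the
# tests above, factored into a reusable helper.
from omnigibson.action_primitives.symbolic_semantic_action_primitives import (
    SymbolicSemanticActionPrimitives,
    SymbolicSemanticActionPrimitiveSet,
)

def execute_primitive(env, prim_gen: SymbolicSemanticActionPrimitives,
                      primitive: SymbolicSemanticActionPrimitiveSet, *args,
                      settle_steps: int = 0):
    """Step the environment through every action a primitive yields."""
    for action in prim_gen.apply_ref(primitive, *args):
        env.step(action)
    # Optionally let physics settle with empty actions, as the cut test does.
    for _ in range(settle_steps):
        env.step(prim_gen._empty_action())

# Example (mirrors test_in_hand_state):
#   execute_primitive(env, prim_gen, SymbolicSemanticActionPrimitiveSet.GRASP, apple)
```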
StanfordVL/OmniGibson/tests/test_transition_rules.py
from omnigibson.macros import macros as m from omnigibson.object_states import * from omnigibson.systems import get_system, is_physical_particle_system, is_visual_particle_system from omnigibson.utils.constants import PrimType from omnigibson.utils.physx_utils import apply_force_at_pos, apply_torque import omnigibson.utils.transform_utils as T from omnigibson.objects import DatasetObject from omnigibson.transition_rules import REGISTERED_RULES import omnigibson as og from omnigibson.macros import macros as m from scipy.spatial.transform import Rotation as R from utils import og_test, get_random_pose, place_objA_on_objB_bbox, place_obj_on_floor_plane, retrieve_obj_cfg, remove_all_systems import pytest import numpy as np @pytest.mark.skip(reason="dryer is not fillable yet.") @og_test def test_dryer_rule(): assert len(REGISTERED_RULES) > 0, "No rules registered!" clothes_dryer = og.sim.scene.object_registry("name", "clothes_dryer") remover_dishtowel = og.sim.scene.object_registry("name", "remover_dishtowel") bowl = og.sim.scene.object_registry("name", "bowl") water = get_system("water") place_obj_on_floor_plane(clothes_dryer) og.sim.step() # Place the two objects inside the dryer remover_dishtowel.set_position_orientation([0.0, 0.0, 0.4], [0, 0, 0, 1]) bowl.set_position_orientation([0.0, 0.0, 0.5], [0, 0, 0, 1]) og.sim.step() assert remover_dishtowel.states[Saturated].set_value(water, True) assert bowl.states[Covered].set_value(water, True) og.sim.step() assert remover_dishtowel.states[Saturated].get_value(water) assert clothes_dryer.states[Contains].get_value(water) # The rule will not execute if Open is True clothes_dryer.states[Open].set_value(True) og.sim.step() assert remover_dishtowel.states[Saturated].get_value(water) assert clothes_dryer.states[Contains].get_value(water) clothes_dryer.states[Open].set_value(False) clothes_dryer.states[ToggledOn].set_value(True) # The rule will execute when Open is False and ToggledOn is True og.sim.step() # Need to take one more step for the state setters to take effect og.sim.step() assert not remover_dishtowel.states[Saturated].get_value(water) assert not clothes_dryer.states[Contains].get_value(water) # Clean up remove_all_systems() @og_test def test_washer_rule(): assert len(REGISTERED_RULES) > 0, "No rules registered!" baking_sheet = og.sim.scene.object_registry("name", "baking_sheet") washer = og.sim.scene.object_registry("name", "washer") remover_dishtowel = og.sim.scene.object_registry("name", "remover_dishtowel") bowl = og.sim.scene.object_registry("name", "bowl") water = get_system("water") dust = get_system("dust") # always remove salt = get_system("salt") # always remove (not explicitly specified) rust = get_system("rust") # never remove spray_paint = get_system("spray_paint") # requires acetone acetone = get_system("acetone") # solvent for spray paint cooking_oil = get_system("cooking_oil") # requires vinegar, lemon_juice, vinegar, etc. place_obj_on_floor_plane(washer) og.sim.step() # Place the two objects inside the washer # (Hacky) use baking_sheet as a stepping stone to elevate the objects so that they are inside the container volume. 
baking_sheet.set_position_orientation([0.0, 0.0, 0.04], T.euler2quat([np.pi, 0, 0])) remover_dishtowel.set_position_orientation([0.0, 0.0, 0.05], [0, 0, 0, 1]) bowl.set_position_orientation([0.10, 0.0, 0.08], [0, 0, 0, 1]) og.sim.step() assert bowl.states[Covered].set_value(dust, True) assert bowl.states[Covered].set_value(salt, True) assert bowl.states[Covered].set_value(rust, True) assert bowl.states[Covered].set_value(spray_paint, True) assert bowl.states[Covered].set_value(acetone, True) assert bowl.states[Covered].set_value(cooking_oil, True) assert not remover_dishtowel.states[Saturated].get_value(water) assert not bowl.states[Covered].get_value(water) # The rule will not execute if Open is True washer.states[Open].set_value(True) og.sim.step() assert bowl.states[Covered].get_value(dust) assert bowl.states[Covered].get_value(salt) assert bowl.states[Covered].get_value(rust) assert bowl.states[Covered].get_value(spray_paint) assert bowl.states[Covered].get_value(acetone) assert bowl.states[Covered].get_value(cooking_oil) assert not remover_dishtowel.states[Saturated].get_value(water) assert not bowl.states[Covered].get_value(water) washer.states[Open].set_value(False) washer.states[ToggledOn].set_value(True) # The rule will execute when Open is False and ToggledOn is True og.sim.step() # Need to take one more step for the state setters to take effect og.sim.step() assert not bowl.states[Covered].get_value(dust) assert not bowl.states[Covered].get_value(salt) assert bowl.states[Covered].get_value(rust) assert not bowl.states[Covered].get_value(spray_paint) assert not bowl.states[Covered].get_value(acetone) assert bowl.states[Covered].get_value(cooking_oil) assert remover_dishtowel.states[Saturated].get_value(water) assert bowl.states[Covered].get_value(water) # Clean up remove_all_systems() @og_test def test_slicing_rule(): assert len(REGISTERED_RULES) > 0, "No rules registered!" apple = og.sim.scene.object_registry("name", "apple") table_knife = og.sim.scene.object_registry("name", "table_knife") deleted_objs = [apple] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] assert apple.states[Cooked].set_value(True) initial_half_apples = og.sim.scene.object_registry("category", "half_apple", set()).copy() place_obj_on_floor_plane(apple) og.sim.step() table_knife.set_position_orientation([-0.05, 0.0, 0.15], T.euler2quat([-np.pi / 2, 0, 0])) og.sim.step() assert not table_knife.states[Touching].get_value(apple) final_half_apples = og.sim.scene.object_registry("category", "half_apple", set()).copy() assert len(final_half_apples) == len(initial_half_apples) for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is not None table_knife.set_position_orientation([-0.05, 0.0, 0.10], T.euler2quat([-np.pi / 2, 0, 0])) og.sim.step() final_half_apples = og.sim.scene.object_registry("category", "half_apple", set()).copy() assert len(final_half_apples) > len(initial_half_apples) for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None # One more step for the half apples to be initialized og.sim.step() # All new half_apple should be cooked new_half_apples = final_half_apples - initial_half_apples for half_apple in new_half_apples: assert half_apple.states[Cooked].get_value() # Clean up og.sim.remove_object(new_half_apples) og.sim.step() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_dicing_rule_cooked(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
half_apple = og.sim.scene.object_registry("name", "half_apple") table_knife = og.sim.scene.object_registry("name", "table_knife") cooked_diced_apple = get_system("cooked__diced__apple") deleted_objs = [half_apple] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] half_apple.set_orientation(T.euler2quat([0, -np.pi / 2, 0])) place_obj_on_floor_plane(half_apple) og.sim.step() assert half_apple.states[Cooked].set_value(True) assert cooked_diced_apple.n_particles == 0 table_knife.set_position_orientation([-0.05, 0.0, 0.15], T.euler2quat([-np.pi / 2, 0, 0])) og.sim.step() assert not table_knife.states[Touching].get_value(half_apple) assert cooked_diced_apple.n_particles == 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is not None table_knife.set_position_orientation([-0.05, 0.0, 0.07], T.euler2quat([-np.pi / 2, 0, 0])) og.sim.step() assert cooked_diced_apple.n_particles > 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None # Move the knife away so that it doesn't immediately dice the half_apple again once it's imported back table_knife.set_position_orientation([-0.05, 0.0, 1.15], T.euler2quat([-np.pi / 2, 0, 0])) og.sim.step() # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_dicing_rule_uncooked(): assert len(REGISTERED_RULES) > 0, "No rules registered!" half_apple = og.sim.scene.object_registry("name", "half_apple") table_knife = og.sim.scene.object_registry("name", "table_knife") diced_apple = get_system("diced__apple") deleted_objs = [half_apple] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] half_apple.set_orientation(T.euler2quat([0, -np.pi / 2, 0])) place_obj_on_floor_plane(half_apple) og.sim.step() assert diced_apple.n_particles == 0 table_knife.set_position_orientation([-0.05, 0.0, 0.15], T.euler2quat([-np.pi / 2, 0, 0])) og.sim.step() assert not table_knife.states[Touching].get_value(half_apple) assert diced_apple.n_particles == 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is not None table_knife.set_position_orientation([-0.05, 0.0, 0.07], T.euler2quat([-np.pi / 2, 0, 0])) og.sim.step() assert diced_apple.n_particles > 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None # Move the knife away so that it doesn't immediately dice the half_apple again once it's imported back table_knife.set_position_orientation([-0.05, 0.0, 1.15], T.euler2quat([-np.pi / 2, 0, 0])) og.sim.step() # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_melting_rule(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
stove = og.sim.scene.object_registry("name", "stove") stockpot = og.sim.scene.object_registry("name", "stockpot") swiss_cheese = og.sim.scene.object_registry("name", "swiss_cheese") melted_swiss_cheese = get_system("melted__swiss_cheese") deleted_objs = [swiss_cheese] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] place_obj_on_floor_plane(stove) og.sim.step() stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) swiss_cheese.set_position_orientation([-0.24, 0.11, 0.92], [0, 0, 0, 1]) og.sim.step() assert swiss_cheese.states[Inside].get_value(stockpot) assert melted_swiss_cheese.n_particles == 0 # To save time, directly set the temperature of the swiss cheese to be below the melting point assert swiss_cheese.states[Temperature].set_value(m.transition_rules.MELTING_TEMPERATURE - 1) og.sim.step() assert melted_swiss_cheese.n_particles == 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is not None # To save time, directly set the temperature of the swiss cheese to be above the melting point assert swiss_cheese.states[Temperature].set_value(m.transition_rules.MELTING_TEMPERATURE + 1) og.sim.step() # Recipe should execute successfully: new melted swiss cheese should be created, and the ingredients should be deleted assert melted_swiss_cheese.n_particles > 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_cooking_physical_particle_rule_failure_recipe_systems(): assert len(REGISTERED_RULES) > 0, "No rules registered!" stove = og.sim.scene.object_registry("name", "stove") stockpot = og.sim.scene.object_registry("name", "stockpot") arborio_rice = get_system("arborio_rice") water = get_system("water") cooked_water = get_system("cooked__water") cooked_arborio_rice = get_system("cooked__arborio_rice") place_obj_on_floor_plane(stove) og.sim.step() stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) arborio_rice.generate_particles(positions=[[-0.25, 0.13, 0.95]]) # This fails the recipe because water (recipe system) is not in the stockpot water.generate_particles(positions=[[-0.25, 0.17, 1.95]]) assert stockpot.states[Contains].get_value(arborio_rice) assert not stockpot.states[Contains].get_value(water) assert cooked_arborio_rice.n_particles == 0 # To save time, directly set the stockpot to be heated assert stockpot.states[Heated].set_value(True) og.sim.step() # Recipe should fail: no cooked arborio rice should be created assert water.n_particles > 0 assert cooked_water.n_particles == 0 assert arborio_rice.n_particles > 0 assert cooked_arborio_rice.n_particles == 0 # Clean up remove_all_systems() @og_test def test_cooking_physical_particle_rule_success(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
stove = og.sim.scene.object_registry("name", "stove") stockpot = og.sim.scene.object_registry("name", "stockpot") arborio_rice = get_system("arborio_rice") water = get_system("water") cooked_water = get_system("cooked__water") cooked_arborio_rice = get_system("cooked__arborio_rice") place_obj_on_floor_plane(stove) og.sim.step() stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) arborio_rice.generate_particles(positions=[[-0.25, 0.13, 0.95]]) water.generate_particles(positions=[[-0.25, 0.17, 0.95]]) assert stockpot.states[Contains].get_value(arborio_rice) assert stockpot.states[Contains].get_value(water) assert cooked_arborio_rice.n_particles == 0 assert cooked_water.n_particles == 0 # To save time, directly set the stockpot to be heated assert stockpot.states[Heated].set_value(True) og.sim.step() assert water.n_particles == 0 assert cooked_water.n_particles > 0 assert arborio_rice.n_particles > 0 assert cooked_arborio_rice.n_particles == 0 # Recipe should execute successfully: new cooked arborio rice should be created, and the ingredients should be deleted og.sim.step() assert water.n_particles == 0 assert cooked_water.n_particles == 0 assert arborio_rice.n_particles == 0 assert cooked_arborio_rice.n_particles > 0 # Clean up remove_all_systems() @og_test def test_mixing_rule_failure_recipe_systems(): assert len(REGISTERED_RULES) > 0, "No rules registered!" bowl = og.sim.scene.object_registry("name", "bowl") tablespoon = og.sim.scene.object_registry("name", "tablespoon") water = get_system("water") granulated_sugar = get_system("granulated_sugar") lemon_juice = get_system("lemon_juice") lemonade = get_system("lemonade") sludge = get_system("sludge") place_obj_on_floor_plane(bowl) og.sim.step() water.generate_particles(positions=[[-0.02, 0.0, 0.02]]) granulated_sugar.generate_particles(positions=[[0.0, 0.0, 0.02]]) # This fails the recipe because lemon juice (recipe system) is not in the bowl lemon_juice.generate_particles(positions=[[0.02, 0.0, 1.02]]) assert bowl.states[Contains].get_value(water) assert bowl.states[Contains].get_value(granulated_sugar) assert not bowl.states[Contains].get_value(lemon_juice) assert lemonade.n_particles == 0 assert sludge.n_particles == 0 tablespoon.set_position_orientation([0.04, 0.0, 0.11], [0, 0, 0, 1]) og.sim.step() assert tablespoon.states[Touching].get_value(bowl) # Recipe should fail: no milkshake should be created, and sludge should be created. assert lemonade.n_particles == 0 assert sludge.n_particles > 0 assert water.n_particles == 0 assert granulated_sugar.n_particles == 0 # Clean up remove_all_systems() @og_test def test_mixing_rule_failure_nonrecipe_systems(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
bowl = og.sim.scene.object_registry("name", "bowl") tablespoon = og.sim.scene.object_registry("name", "tablespoon") water = get_system("water") granulated_sugar = get_system("granulated_sugar") lemon_juice = get_system("lemon_juice") lemonade = get_system("lemonade") salt = get_system("salt") sludge = get_system("sludge") place_obj_on_floor_plane(bowl) og.sim.step() water.generate_particles(positions=[[-0.02, 0, 0.02]]) granulated_sugar.generate_particles(positions=[[0.0, 0.0, 0.02]]) lemon_juice.generate_particles(positions=[[0.02, 0.0, 0.02]]) # This fails the recipe because salt (nonrecipe system) is in the bowl salt.generate_particles(positions=[[0.0, 0.02, 0.02]]) assert bowl.states[Contains].get_value(water) assert bowl.states[Contains].get_value(granulated_sugar) assert bowl.states[Contains].get_value(lemon_juice) assert bowl.states[Contains].get_value(salt) assert lemonade.n_particles == 0 assert sludge.n_particles == 0 tablespoon.set_position_orientation([0.04, 0.0, 0.11], [0, 0, 0, 1]) og.sim.step() assert tablespoon.states[Touching].get_value(bowl) # Recipe should fail: no milkshake should be created, and sludge should be created. assert lemonade.n_particles == 0 assert sludge.n_particles > 0 assert water.n_particles == 0 assert granulated_sugar.n_particles == 0 assert lemon_juice.n_particles == 0 assert salt.n_particles == 0 # Clean up remove_all_systems() @og_test def test_mixing_rule_success(): assert len(REGISTERED_RULES) > 0, "No rules registered!" bowl = og.sim.scene.object_registry("name", "bowl") tablespoon = og.sim.scene.object_registry("name", "tablespoon") water = get_system("water") granulated_sugar = get_system("granulated_sugar") lemon_juice = get_system("lemon_juice") lemonade = get_system("lemonade") place_obj_on_floor_plane(bowl) og.sim.step() water.generate_particles(positions=[[-0.02, 0.0, 0.02]]) granulated_sugar.generate_particles(positions=[[0.0, 0.0, 0.02]]) lemon_juice.generate_particles(positions=[[0.02, 0.0, 0.02]]) assert bowl.states[Contains].get_value(water) assert bowl.states[Contains].get_value(granulated_sugar) assert bowl.states[Contains].get_value(lemon_juice) assert lemonade.n_particles == 0 tablespoon.set_position_orientation([0.04, 0.0, 0.11], [0, 0, 0, 1]) og.sim.step() assert tablespoon.states[Touching].get_value(bowl) # Recipe should execute successfully: new lemonade should be created, and the ingredients should be deleted assert lemonade.n_particles > 0 assert water.n_particles == 0 assert granulated_sugar.n_particles == 0 assert lemon_juice.n_particles == 0 # Clean up remove_all_systems() @og_test def test_cooking_system_rule_failure_recipe_systems(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
stove = og.sim.scene.object_registry("name", "stove") stockpot = og.sim.scene.object_registry("name", "stockpot") chicken = og.sim.scene.object_registry("name", "chicken") chicken_broth = get_system("chicken_broth") diced_carrot = get_system("diced__carrot") diced_celery = get_system("diced__celery") salt = get_system("salt") rosemary = get_system("rosemary") chicken_soup = get_system("cooked__chicken_soup") place_obj_on_floor_plane(stove) og.sim.step() stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1]) # This fails the recipe because chicken broth (recipe system) is not in the stockpot chicken_broth.generate_particles(positions=[[-0.33, 0.05, 1.93]]) diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]]) diced_celery.generate_particles(positions=[[-0.23, 0.05, 0.93]]) salt.generate_particles(positions=[[-0.33, 0.15, 0.93]]) rosemary.generate_particles(positions=[[-0.28, 0.15, 0.93]]) og.sim.step() assert chicken.states[Inside].get_value(stockpot) assert not chicken.states[Cooked].get_value() assert not stockpot.states[Contains].get_value(chicken_broth) assert stockpot.states[Contains].get_value(diced_carrot) assert stockpot.states[Contains].get_value(diced_celery) assert stockpot.states[Contains].get_value(salt) assert stockpot.states[Contains].get_value(rosemary) assert chicken_soup.n_particles == 0 assert stove.states[ToggledOn].set_value(True) og.sim.step() # Recipe should fail: no chicken soup should be created assert chicken_soup.n_particles == 0 assert chicken_broth.n_particles > 0 assert diced_carrot.n_particles > 0 assert diced_celery.n_particles > 0 assert salt.n_particles > 0 assert rosemary.n_particles > 0 assert og.sim.scene.object_registry("name", "chicken") is not None # Clean up remove_all_systems() @og_test def test_cooking_system_rule_failure_nonrecipe_systems(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
stove = og.sim.scene.object_registry("name", "stove") stockpot = og.sim.scene.object_registry("name", "stockpot") chicken = og.sim.scene.object_registry("name", "chicken") water = get_system("water") chicken_broth = get_system("chicken_broth") diced_carrot = get_system("diced__carrot") diced_celery = get_system("diced__celery") salt = get_system("salt") rosemary = get_system("rosemary") chicken_soup = get_system("cooked__chicken_soup") place_obj_on_floor_plane(stove) og.sim.step() stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1]) # This fails the recipe because water (nonrecipe system) is inside the stockpot water.generate_particles(positions=[[-0.24, 0.11, 0.93]]) chicken_broth.generate_particles(positions=[[-0.33, 0.05, 0.93]]) diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]]) diced_celery.generate_particles(positions=[[-0.23, 0.05, 0.93]]) salt.generate_particles(positions=[[-0.33, 0.15, 0.93]]) rosemary.generate_particles(positions=[[-0.28, 0.15, 0.93]]) og.sim.step() assert chicken.states[Inside].get_value(stockpot) assert not chicken.states[Cooked].get_value() assert stockpot.states[Contains].get_value(water) assert stockpot.states[Contains].get_value(chicken_broth) assert stockpot.states[Contains].get_value(diced_carrot) assert stockpot.states[Contains].get_value(diced_celery) assert stockpot.states[Contains].get_value(salt) assert stockpot.states[Contains].get_value(rosemary) assert chicken_soup.n_particles == 0 assert stove.states[ToggledOn].set_value(True) og.sim.step() # Recipe should fail: no chicken soup should be created assert chicken_soup.n_particles == 0 assert chicken_broth.n_particles > 0 assert diced_carrot.n_particles > 0 assert diced_celery.n_particles > 0 assert salt.n_particles > 0 assert rosemary.n_particles > 0 assert water.n_particles > 0 assert og.sim.scene.object_registry("name", "chicken") is not None # Clean up remove_all_systems() @og_test def test_cooking_system_rule_failure_nonrecipe_objects(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
stove = og.sim.scene.object_registry("name", "stove") stockpot = og.sim.scene.object_registry("name", "stockpot") chicken = og.sim.scene.object_registry("name", "chicken") bowl = og.sim.scene.object_registry("name", "bowl") chicken_broth = get_system("chicken_broth") diced_carrot = get_system("diced__carrot") diced_celery = get_system("diced__celery") salt = get_system("salt") rosemary = get_system("rosemary") chicken_soup = get_system("cooked__chicken_soup") place_obj_on_floor_plane(stove) og.sim.step() stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1]) # This fails the recipe because the bowl (nonrecipe object) is inside the stockpot bowl.set_position_orientation([-0.20, 0.15, 1], [0, 0, 0, 1]) chicken_broth.generate_particles(positions=[[-0.33, 0.05, 0.93]]) diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]]) diced_celery.generate_particles(positions=[[-0.23, 0.05, 0.93]]) salt.generate_particles(positions=[[-0.33, 0.15, 0.93]]) rosemary.generate_particles(positions=[[-0.28, 0.15, 0.93]]) og.sim.step() assert chicken.states[Inside].get_value(stockpot) assert bowl.states[Inside].get_value(stockpot) assert not chicken.states[Cooked].get_value() assert stockpot.states[Contains].get_value(chicken_broth) assert stockpot.states[Contains].get_value(diced_carrot) assert stockpot.states[Contains].get_value(diced_celery) assert stockpot.states[Contains].get_value(salt) assert stockpot.states[Contains].get_value(rosemary) assert chicken_soup.n_particles == 0 assert stove.states[ToggledOn].set_value(True) og.sim.step() # Recipe should fail: no chicken soup should be created assert chicken_soup.n_particles == 0 assert chicken_broth.n_particles > 0 assert diced_carrot.n_particles > 0 assert diced_celery.n_particles > 0 assert salt.n_particles > 0 assert rosemary.n_particles > 0 assert og.sim.scene.object_registry("name", "chicken") is not None assert og.sim.scene.object_registry("name", "bowl") is not None # Clean up remove_all_systems() @og_test def test_cooking_system_rule_success(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
stove = og.sim.scene.object_registry("name", "stove") stockpot = og.sim.scene.object_registry("name", "stockpot") chicken = og.sim.scene.object_registry("name", "chicken") chicken_broth = get_system("chicken_broth") diced_carrot = get_system("diced__carrot") diced_celery = get_system("diced__celery") salt = get_system("salt") rosemary = get_system("rosemary") chicken_soup = get_system("cooked__chicken_soup") deleted_objs = [chicken] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] place_obj_on_floor_plane(stove) og.sim.step() stockpot.set_position_orientation([-0.24, 0.11, 0.89], [0, 0, 0, 1]) og.sim.step() assert stockpot.states[OnTop].get_value(stove) chicken.set_position_orientation([-0.24, 0.11, 0.86], [0, 0, 0, 1]) chicken_broth.generate_particles(positions=[[-0.33, 0.05, 0.93]]) diced_carrot.generate_particles(positions=[[-0.28, 0.05, 0.93]]) diced_celery.generate_particles(positions=[[-0.23, 0.05, 0.93]]) salt.generate_particles(positions=[[-0.33, 0.15, 0.93]]) rosemary.generate_particles(positions=[[-0.28, 0.15, 0.93]]) og.sim.step() assert chicken.states[Inside].get_value(stockpot) assert not chicken.states[Cooked].get_value() assert stockpot.states[Contains].get_value(chicken_broth) assert stockpot.states[Contains].get_value(diced_carrot) assert stockpot.states[Contains].get_value(diced_celery) assert stockpot.states[Contains].get_value(salt) assert stockpot.states[Contains].get_value(rosemary) assert chicken_soup.n_particles == 0 assert stove.states[ToggledOn].set_value(True) og.sim.step() # Recipe should execute successfully: new chicken soup should be created, and the ingredients should be deleted assert chicken_soup.n_particles > 0 assert chicken_broth.n_particles == 0 assert diced_carrot.n_particles == 0 assert diced_celery.n_particles == 0 assert salt.n_particles == 0 assert rosemary.n_particles == 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_cooking_object_rule_failure_wrong_container(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
oven = og.sim.scene.object_registry("name", "oven") stockpot = og.sim.scene.object_registry("name", "stockpot") bagel_dough = og.sim.scene.object_registry("name", "bagel_dough") raw_egg = og.sim.scene.object_registry("name", "raw_egg") sesame_seed = get_system("sesame_seed") initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() place_obj_on_floor_plane(oven) og.sim.step() # This fails the recipe because it requires the baking sheet to be inside the oven, not the stockpot stockpot.set_position_orientation([0, 0, 0.47], [0, 0, 0, 1]) og.sim.step() assert stockpot.states[Inside].get_value(oven) bagel_dough.set_position_orientation([0, 0, 0.45], [0, 0, 0, 1]) raw_egg.set_position_orientation([0.02, 0, 0.50], [0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[Inside].get_value(stockpot) assert raw_egg.states[OnTop].get_value(bagel_dough) assert bagel_dough.states[Cooked].set_value(False) assert raw_egg.states[Cooked].set_value(False) og.sim.step() assert bagel_dough.states[Covered].set_value(sesame_seed, True) og.sim.step() assert oven.states[ToggledOn].set_value(True) og.sim.step() final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() assert len(final_bagels) == len(initial_bagels) # Clean up remove_all_systems() @og_test def test_cooking_object_rule_failure_recipe_objects(): assert len(REGISTERED_RULES) > 0, "No rules registered!" oven = og.sim.scene.object_registry("name", "oven") baking_sheet = og.sim.scene.object_registry("name", "baking_sheet") bagel_dough = og.sim.scene.object_registry("name", "bagel_dough") raw_egg = og.sim.scene.object_registry("name", "raw_egg") sesame_seed = get_system("sesame_seed") initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() place_obj_on_floor_plane(oven) og.sim.step() baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) # This fails the recipe because it requires the bagel dough to be on top of the baking sheet bagel_dough.set_position_orientation([1, 0, 0.5], [0, 0, 0, 1]) raw_egg.set_position_orientation([1.02, 0, 0.55], [0, 0, 0, 1]) og.sim.step() assert not bagel_dough.states[OnTop].get_value(baking_sheet) assert bagel_dough.states[Cooked].set_value(False) assert raw_egg.states[Cooked].set_value(False) og.sim.step() assert bagel_dough.states[Covered].set_value(sesame_seed, True) og.sim.step() assert oven.states[ToggledOn].set_value(True) og.sim.step() final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() assert len(final_bagels) == len(initial_bagels) # Clean up remove_all_systems() @og_test def test_cooking_object_rule_failure_unary_states(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
oven = og.sim.scene.object_registry("name", "oven") baking_sheet = og.sim.scene.object_registry("name", "baking_sheet") bagel_dough = og.sim.scene.object_registry("name", "bagel_dough") raw_egg = og.sim.scene.object_registry("name", "raw_egg") sesame_seed = get_system("sesame_seed") initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() place_obj_on_floor_plane(oven) og.sim.step() baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) bagel_dough.set_position_orientation([0, 0, 0.5], [0, 0, 0, 1]) raw_egg.set_position_orientation([0.02, 0, 0.55], [0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) assert raw_egg.states[OnTop].get_value(bagel_dough) # This fails the recipe because it requires the bagel dough and the raw egg to be not cooked assert bagel_dough.states[Cooked].set_value(True) assert raw_egg.states[Cooked].set_value(True) og.sim.step() assert bagel_dough.states[Covered].set_value(sesame_seed, True) og.sim.step() assert oven.states[ToggledOn].set_value(True) og.sim.step() final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() assert len(final_bagels) == len(initial_bagels) # Clean up remove_all_systems() @og_test def test_cooking_object_rule_failure_binary_system_states(): assert len(REGISTERED_RULES) > 0, "No rules registered!" oven = og.sim.scene.object_registry("name", "oven") baking_sheet = og.sim.scene.object_registry("name", "baking_sheet") bagel_dough = og.sim.scene.object_registry("name", "bagel_dough") raw_egg = og.sim.scene.object_registry("name", "raw_egg") sesame_seed = get_system("sesame_seed") initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() place_obj_on_floor_plane(oven) og.sim.step() baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) bagel_dough.set_position_orientation([0, 0, 0.5], [0, 0, 0, 1]) raw_egg.set_position_orientation([0.02, 0, 0.55], [0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) assert raw_egg.states[OnTop].get_value(bagel_dough) assert bagel_dough.states[Cooked].set_value(False) assert raw_egg.states[Cooked].set_value(False) og.sim.step() # This fails the recipe because it requires the bagel dough to be covered with sesame seed assert bagel_dough.states[Covered].set_value(sesame_seed, False) og.sim.step() assert oven.states[ToggledOn].set_value(True) og.sim.step() final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() assert len(final_bagels) == len(initial_bagels) # Clean up remove_all_systems() @og_test def test_cooking_object_rule_failure_binary_object_states(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
oven = og.sim.scene.object_registry("name", "oven") baking_sheet = og.sim.scene.object_registry("name", "baking_sheet") bagel_dough = og.sim.scene.object_registry("name", "bagel_dough") raw_egg = og.sim.scene.object_registry("name", "raw_egg") sesame_seed = get_system("sesame_seed") initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() place_obj_on_floor_plane(oven) og.sim.step() baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) bagel_dough.set_position_orientation([0, 0, 0.5], [0, 0, 0, 1]) raw_egg.set_position_orientation([0.12, 0.15, 0.47], [0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) # This fails the recipe because it requires the raw egg to be on top of the bagel dough assert not raw_egg.states[OnTop].get_value(bagel_dough) assert bagel_dough.states[Cooked].set_value(False) assert raw_egg.states[Cooked].set_value(False) og.sim.step() assert bagel_dough.states[Covered].set_value(sesame_seed, True) og.sim.step() assert oven.states[ToggledOn].set_value(True) og.sim.step() final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() assert len(final_bagels) == len(initial_bagels) # Clean up remove_all_systems() @og_test def test_cooking_object_rule_failure_wrong_heat_source(): assert len(REGISTERED_RULES) > 0, "No rules registered!" stove = og.sim.scene.object_registry("name", "stove") baking_sheet = og.sim.scene.object_registry("name", "baking_sheet") bagel_dough = og.sim.scene.object_registry("name", "bagel_dough") raw_egg = og.sim.scene.object_registry("name", "raw_egg") sesame_seed = get_system("sesame_seed") initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() # This fails the recipe because it requires the oven to be the heat source, not the stove place_obj_on_floor_plane(stove) og.sim.step() heat_source_position = stove.states[HeatSourceOrSink].link.get_position() baking_sheet.set_position_orientation([-0.20, 0, 0.80], [0, 0, 0, 1]) og.sim.step() bagel_dough.set_position_orientation([-0.20, 0, 0.84], [0, 0, 0, 1]) raw_egg.set_position_orientation([-0.18, 0, 0.89], [0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) assert raw_egg.states[OnTop].get_value(bagel_dough) assert bagel_dough.states[Cooked].set_value(True) assert raw_egg.states[Cooked].set_value(True) og.sim.step() assert bagel_dough.states[Covered].set_value(sesame_seed, True) og.sim.step() assert stove.states[ToggledOn].set_value(True) og.sim.step() # Make sure the stove affects the baking sheet assert stove.states[HeatSourceOrSink].affects_obj(baking_sheet) final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() assert len(final_bagels) == len(initial_bagels) # Clean up remove_all_systems() @og_test def test_cooking_object_rule_success(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
oven = og.sim.scene.object_registry("name", "oven") baking_sheet = og.sim.scene.object_registry("name", "baking_sheet") bagel_dough = og.sim.scene.object_registry("name", "bagel_dough") raw_egg = og.sim.scene.object_registry("name", "raw_egg") sesame_seed = get_system("sesame_seed") deleted_objs = [bagel_dough, raw_egg] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] initial_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() place_obj_on_floor_plane(oven) og.sim.step() baking_sheet.set_position_orientation([0, 0, 0.455], [0, 0, 0, 1]) og.sim.step() assert baking_sheet.states[Inside].get_value(oven) bagel_dough.set_position_orientation([0, 0, 0.5], [0, 0, 0, 1]) raw_egg.set_position_orientation([0.02, 0, 0.55], [0, 0, 0, 1]) og.sim.step() assert bagel_dough.states[OnTop].get_value(baking_sheet) assert raw_egg.states[OnTop].get_value(bagel_dough) assert bagel_dough.states[Cooked].set_value(False) assert raw_egg.states[Cooked].set_value(False) og.sim.step() assert bagel_dough.states[Covered].set_value(sesame_seed, True) og.sim.step() assert oven.states[ToggledOn].set_value(True) og.sim.step() final_bagels = og.sim.scene.object_registry("category", "bagel", set()).copy() # Recipe should execute successfully: new bagels should be created, and the ingredients should be deleted assert len(final_bagels) > len(initial_bagels) for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None # Need to step again for the new bagels to be initialized, placed in the container, and cooked. og.sim.step() # All new bagels should be cooked new_bagels = final_bagels - initial_bagels for bagel in new_bagels: assert bagel.states[Cooked].get_value() # This assertion occasionally fails, because when four bagels are sampled on top of the baking sheet one by one, # there is no guarantee that all four of them will be on top of the baking sheet at the end. # assert bagel.states[OnTop].get_value(baking_sheet) assert bagel.states[Inside].get_value(oven) # Clean up remove_all_systems() og.sim.remove_object(new_bagels) og.sim.step() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_single_toggleable_machine_rule_output_system_failure_wrong_container(): assert len(REGISTERED_RULES) > 0, "No rules registered!" food_processor = og.sim.scene.object_registry("name", "food_processor") ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream") milk = get_system("whole_milk") chocolate_sauce = get_system("chocolate_sauce") milkshake = get_system("milkshake") sludge = get_system("sludge") deleted_objs = [ice_cream] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] # This fails the recipe because it requires the blender to be the container, not the food processor place_obj_on_floor_plane(food_processor) og.sim.step() milk.generate_particles(positions=np.array([[0.02, 0.06, 0.22]])) chocolate_sauce.generate_particles(positions=np.array([[-0.05, -0.04, 0.22]])) ice_cream.set_position_orientation([0.03, -0.02, 0.23], [0, 0, 0, 1]) og.sim.step() assert food_processor.states[Contains].get_value(milk) assert food_processor.states[Contains].get_value(chocolate_sauce) assert ice_cream.states[Inside].get_value(food_processor) assert milkshake.n_particles == 0 assert sludge.n_particles == 0 food_processor.states[ToggledOn].set_value(True) og.sim.step() # Recipe should fail: no milkshake should be created, and sludge should be created. 
assert milkshake.n_particles == 0 assert sludge.n_particles > 0 assert milk.n_particles == 0 assert chocolate_sauce.n_particles == 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_single_toggleable_machine_rule_output_system_failure_recipe_systems(): assert len(REGISTERED_RULES) > 0, "No rules registered!" blender = og.sim.scene.object_registry("name", "blender") ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream") milk = get_system("whole_milk") chocolate_sauce = get_system("chocolate_sauce") milkshake = get_system("milkshake") sludge = get_system("sludge") deleted_objs = [ice_cream] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] place_obj_on_floor_plane(blender) og.sim.step() # This fails the recipe because it requires the milk to be in the blender milk.generate_particles(positions=np.array([[0.02, 0, 1.57]])) chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]])) ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1]) og.sim.step() assert not blender.states[Contains].get_value(milk) assert blender.states[Contains].get_value(chocolate_sauce) assert ice_cream.states[Inside].get_value(blender) assert milkshake.n_particles == 0 assert sludge.n_particles == 0 blender.states[ToggledOn].set_value(True) og.sim.step() # Recipe should fail: no milkshake should be created, and sludge should be created. assert milkshake.n_particles == 0 assert sludge.n_particles > 0 assert chocolate_sauce.n_particles == 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_single_toggleable_machine_rule_output_system_failure_recipe_objects(): assert len(REGISTERED_RULES) > 0, "No rules registered!" blender = og.sim.scene.object_registry("name", "blender") ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream") milk = get_system("whole_milk") chocolate_sauce = get_system("chocolate_sauce") milkshake = get_system("milkshake") sludge = get_system("sludge") place_obj_on_floor_plane(blender) og.sim.step() milk.generate_particles(positions=np.array([[0.02, 0, 0.57]])) chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]])) # This fails the recipe because it requires the ice cream to be inside the blender ice_cream.set_position_orientation([0, 0, 1.51], [0, 0, 0, 1]) og.sim.step() assert blender.states[Contains].get_value(milk) assert blender.states[Contains].get_value(chocolate_sauce) assert not ice_cream.states[Inside].get_value(blender) assert milkshake.n_particles == 0 assert sludge.n_particles == 0 blender.states[ToggledOn].set_value(True) og.sim.step() # Recipe should fail: no milkshake should be created, and sludge should be created. assert milkshake.n_particles == 0 assert sludge.n_particles > 0 assert milk.n_particles == 0 assert chocolate_sauce.n_particles == 0 # Clean up remove_all_systems() @og_test def test_single_toggleable_machine_rule_output_system_failure_nonrecipe_systems(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
blender = og.sim.scene.object_registry("name", "blender") ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream") milk = get_system("whole_milk") chocolate_sauce = get_system("chocolate_sauce") milkshake = get_system("milkshake") sludge = get_system("sludge") water = get_system("water") deleted_objs = [ice_cream] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] place_obj_on_floor_plane(blender) og.sim.step() milk.generate_particles(positions=np.array([[0.02, 0, 0.57]])) chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]])) # This fails the recipe because water (nonrecipe system) is in the blender water.generate_particles(positions=np.array([[0, 0, 0.57]])) ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1]) og.sim.step() assert blender.states[Contains].get_value(milk) assert blender.states[Contains].get_value(chocolate_sauce) assert blender.states[Contains].get_value(water) assert ice_cream.states[Inside].get_value(blender) assert milkshake.n_particles == 0 assert sludge.n_particles == 0 blender.states[ToggledOn].set_value(True) og.sim.step() # Recipe should fail: no milkshake should be created, and sludge should be created. assert milkshake.n_particles == 0 assert sludge.n_particles > 0 assert milk.n_particles == 0 assert chocolate_sauce.n_particles == 0 assert water.n_particles == 0 # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_single_toggleable_machine_rule_output_system_failure_nonrecipe_objects(): assert len(REGISTERED_RULES) > 0, "No rules registered!" blender = og.sim.scene.object_registry("name", "blender") ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream") bowl = og.sim.scene.object_registry("name", "bowl") milk = get_system("whole_milk") chocolate_sauce = get_system("chocolate_sauce") milkshake = get_system("milkshake") sludge = get_system("sludge") deleted_objs = [ice_cream, bowl] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] place_obj_on_floor_plane(blender) og.sim.step() milk.generate_particles(positions=np.array([[0.02, 0, 0.57]])) chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]])) ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1]) # This fails the recipe because the bowl (nonrecipe object) is in the blender bowl.set_position_orientation([0, 0, 0.58], [0, 0, 0, 1]) og.sim.step() assert blender.states[Contains].get_value(milk) assert blender.states[Contains].get_value(chocolate_sauce) assert ice_cream.states[Inside].get_value(blender) assert bowl.states[Inside].get_value(blender) assert milkshake.n_particles == 0 assert sludge.n_particles == 0 blender.states[ToggledOn].set_value(True) og.sim.step() # Recipe should fail: no milkshake should be created, and sludge should be created. assert milkshake.n_particles == 0 assert sludge.n_particles > 0 assert milk.n_particles == 0 assert chocolate_sauce.n_particles == 0 # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_single_toggleable_machine_rule_output_system_success(): assert len(REGISTERED_RULES) > 0, "No rules registered!" 
blender = og.sim.scene.object_registry("name", "blender") ice_cream = og.sim.scene.object_registry("name", "scoop_of_ice_cream") milk = get_system("whole_milk") chocolate_sauce = get_system("chocolate_sauce") milkshake = get_system("milkshake") sludge = get_system("sludge") deleted_objs = [ice_cream] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] place_obj_on_floor_plane(blender) og.sim.step() milk.generate_particles(positions=np.array([[0.02, 0, 0.57]])) chocolate_sauce.generate_particles(positions=np.array([[0, -0.02, 0.57]])) ice_cream.set_position_orientation([0, 0, 0.51], [0, 0, 0, 1]) og.sim.step() assert blender.states[Contains].get_value(milk) assert blender.states[Contains].get_value(chocolate_sauce) assert ice_cream.states[Inside].get_value(blender) assert milkshake.n_particles == 0 assert sludge.n_particles == 0 blender.states[ToggledOn].set_value(True) og.sim.step() # Recipe should execute successfully: new milkshake should be created, and the ingredients should be deleted assert milkshake.n_particles > 0 assert sludge.n_particles == 0 assert milk.n_particles == 0 assert chocolate_sauce.n_particles == 0 for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_single_toggleable_machine_rule_output_object_failure_unary_states(): assert len(REGISTERED_RULES) > 0, "No rules registered!" electric_mixer = og.sim.scene.object_registry("name", "electric_mixer") raw_egg = og.sim.scene.object_registry("name", "raw_egg") another_raw_egg = og.sim.scene.object_registry("name", "another_raw_egg") flour = get_system("flour") granulated_sugar = get_system("granulated_sugar") vanilla = get_system("vanilla") melted_butter = get_system("melted__butter") baking_powder = get_system("baking_powder") salt = get_system("salt") sludge = get_system("sludge") initial_doughs = og.sim.scene.object_registry("category", "sugar_cookie_dough", set()).copy() deleted_objs = [raw_egg, another_raw_egg] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] place_obj_on_floor_plane(electric_mixer) og.sim.step() another_raw_egg.set_position_orientation([-0.01, -0.14, 0.50], [0, 0, 0, 1]) raw_egg.set_position_orientation([-0.01, -0.14, 0.47], [0, 0, 0, 1]) flour.generate_particles(positions=np.array([[-0.01, -0.15, 0.43]])) granulated_sugar.generate_particles(positions=np.array([[0.01, -0.15, 0.43]])) vanilla.generate_particles(positions=np.array([[0.03, -0.15, 0.43]])) melted_butter.generate_particles(positions=np.array([[-0.01, -0.13, 0.43]])) baking_powder.generate_particles(positions=np.array([[0.01, -0.13, 0.43]])) salt.generate_particles(positions=np.array([[0.03, -0.13, 0.43]])) # This fails the recipe because the egg should not be cooked raw_egg.states[Cooked].set_value(True) og.sim.step() assert electric_mixer.states[Contains].get_value(flour) assert electric_mixer.states[Contains].get_value(granulated_sugar) assert electric_mixer.states[Contains].get_value(vanilla) assert electric_mixer.states[Contains].get_value(melted_butter) assert electric_mixer.states[Contains].get_value(baking_powder) assert electric_mixer.states[Contains].get_value(salt) assert raw_egg.states[Inside].get_value(electric_mixer) assert raw_egg.states[Cooked].get_value() assert another_raw_egg.states[Inside].get_value(electric_mixer) assert not another_raw_egg.states[Cooked].get_value() assert sludge.n_particles == 0 
electric_mixer.states[ToggledOn].set_value(True) og.sim.step() # Recipe should fail: no dough should be created, and sludge should be created. final_doughs = og.sim.scene.object_registry("category", "sugar_cookie_dough", set()).copy() # No new dough should have been created assert len(final_doughs) == len(initial_doughs) for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None assert flour.n_particles == 0 assert granulated_sugar.n_particles == 0 assert vanilla.n_particles == 0 assert melted_butter.n_particles == 0 assert baking_powder.n_particles == 0 assert salt.n_particles == 0 assert sludge.n_particles > 0 # Clean up remove_all_systems() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step() @og_test def test_single_toggleable_machine_rule_output_object_success(): assert len(REGISTERED_RULES) > 0, "No rules registered!" electric_mixer = og.sim.scene.object_registry("name", "electric_mixer") raw_egg = og.sim.scene.object_registry("name", "raw_egg") another_raw_egg = og.sim.scene.object_registry("name", "another_raw_egg") flour = get_system("flour") granulated_sugar = get_system("granulated_sugar") vanilla = get_system("vanilla") melted_butter = get_system("melted__butter") baking_powder = get_system("baking_powder") salt = get_system("salt") sludge = get_system("sludge") initial_doughs = og.sim.scene.object_registry("category", "sugar_cookie_dough", set()).copy() deleted_objs = [raw_egg, another_raw_egg] deleted_objs_cfg = [retrieve_obj_cfg(obj) for obj in deleted_objs] place_obj_on_floor_plane(electric_mixer) og.sim.step() another_raw_egg.set_position_orientation([-0.01, -0.14, 0.50], [0, 0, 0, 1]) raw_egg.set_position_orientation([-0.01, -0.14, 0.47], [0, 0, 0, 1]) flour.generate_particles(positions=np.array([[-0.01, -0.15, 0.43]])) granulated_sugar.generate_particles(positions=np.array([[0.01, -0.15, 0.43]])) vanilla.generate_particles(positions=np.array([[0.03, -0.15, 0.43]])) melted_butter.generate_particles(positions=np.array([[-0.01, -0.13, 0.43]])) baking_powder.generate_particles(positions=np.array([[0.01, -0.13, 0.43]])) salt.generate_particles(positions=np.array([[0.03, -0.13, 0.43]])) og.sim.step() assert electric_mixer.states[Contains].get_value(flour) assert electric_mixer.states[Contains].get_value(granulated_sugar) assert electric_mixer.states[Contains].get_value(vanilla) assert electric_mixer.states[Contains].get_value(melted_butter) assert electric_mixer.states[Contains].get_value(baking_powder) assert electric_mixer.states[Contains].get_value(salt) assert raw_egg.states[Inside].get_value(electric_mixer) assert not raw_egg.states[Cooked].get_value() assert another_raw_egg.states[Inside].get_value(electric_mixer) assert not another_raw_egg.states[Cooked].get_value() assert sludge.n_particles == 0 electric_mixer.states[ToggledOn].set_value(True) og.sim.step() # Recipe should execute successfully: new dough should be created, and the ingredients should be deleted final_doughs = og.sim.scene.object_registry("category", "sugar_cookie_dough", set()).copy() assert len(final_doughs) > len(initial_doughs) for obj in deleted_objs: assert og.sim.scene.object_registry("name", obj.name) is None assert flour.n_particles == 0 assert granulated_sugar.n_particles == 0 assert vanilla.n_particles == 0 assert melted_butter.n_particles == 0
assert baking_powder.n_particles == 0 assert salt.n_particles == 0 # Need to step again for the new dough to be initialized and placed in the container. og.sim.step() # None of the new doughs should be cooked new_doughs = final_doughs - initial_doughs for dough in new_doughs: assert not dough.states[Cooked].get_value() assert dough.states[OnTop].get_value(electric_mixer) # Clean up og.sim.remove_object(new_doughs) og.sim.step() for obj_cfg in deleted_objs_cfg: obj = DatasetObject(**obj_cfg) og.sim.import_object(obj) og.sim.step()
58,046
Python
37.340158
131
0.68127
StanfordVL/OmniGibson/tests/test_object_removal.py
from omnigibson.objects import DatasetObject
import omnigibson as og
from omnigibson.utils.python_utils import NAMES

from utils import og_test

import pytest


@og_test
def test_removal_and_readdition():
    # Make a copy of NAMES
    initial_names = NAMES.copy()

    # Add an apple
    apple = DatasetObject(
        name="apple_unique",
        category="apple",
        model="agveuv",
    )

    # Import it into the scene
    og.sim.import_object(apple)

    # Check that NAMES has changed
    assert NAMES != initial_names

    # Step a few times
    for _ in range(5):
        og.sim.step()

    # Remove the apple
    og.sim.remove_object(obj=apple)

    # Check that NAMES is the same as before
    extra_names = NAMES - initial_names
    assert len(extra_names) == 0, f"Extra names: {extra_names}"

    # Importing should work now
    apple2 = DatasetObject(
        name="apple_unique",
        category="apple",
        model="agveuv",
    )
    og.sim.import_object(apple2)
    og.sim.step()

    # Clear the stuff we added
    og.sim.remove_object(apple2)


@og_test
def test_readdition():
    # Make a copy of NAMES
    initial_names = NAMES.copy()

    # Add an apple
    apple = DatasetObject(
        name="apple_unique",
        category="apple",
        model="agveuv",
    )

    # Import it into the scene
    og.sim.import_object(apple)

    # Check that NAMES has changed
    new_names = NAMES.copy()
    assert new_names != initial_names

    # Step a few times
    for _ in range(5):
        og.sim.step()

    # Creating and importing a new apple should fail
    with pytest.raises(AssertionError):
        apple2 = DatasetObject(
            name="apple_unique",
            category="apple",
            model="agveuv",
        )
        og.sim.import_object(apple2)

    # Check that NAMES has not changed
    assert NAMES == new_names

    # Clear the stuff we added
    og.sim.remove_object(apple)
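# --- Illustrative sketch (editorial addition, not part of the original test file) ---
# Both tests above repeat the same create -> import -> settle cycle for the apple.
# A helper along these lines could factor that out. The helper name and its keyword
# arguments are hypothetical; every call it makes (DatasetObject, og.sim.import_object,
# og.sim.step) already appears in the tests above.
def _import_and_settle(name, category="apple", model="agveuv", n_steps=5):
    obj = DatasetObject(name=name, category=category, model=model)
    og.sim.import_object(obj)
    for _ in range(n_steps):
        og.sim.step()
    return obj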
1,931
Python
20.954545
63
0.617815
StanfordVL/OmniGibson/tests/test_dump_load_states.py
import omnigibson as og
from omnigibson.systems import *
from omnigibson.object_states import Covered

from utils import og_test, SYSTEM_EXAMPLES

import pytest


@og_test
def test_dump_load():
    breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
    for system_name, system_class in SYSTEM_EXAMPLES.items():
        system = get_system(system_name)
        assert issubclass(system, system_class)
        if issubclass(system_class, VisualParticleSystem):
            assert breakfast_table.states[Covered].set_value(system, True)
        else:
            system.generate_particles(positions=[[0, 0, 1]])
        assert system.n_particles > 0
        system.remove_all_particles()

    state = og.sim.dump_state()
    og.sim.load_state(state)

    for system_name, system_class in SYSTEM_EXAMPLES.items():
        system = get_system(system_name)
        system.clear()


@og_test
def test_dump_load_serialized():
    breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
    for system_name, system_class in SYSTEM_EXAMPLES.items():
        system = get_system(system_name)
        assert issubclass(system, system_class)
        if issubclass(system_class, VisualParticleSystem):
            assert breakfast_table.states[Covered].set_value(system, True)
        else:
            system.generate_particles(positions=[[0, 0, 1]])
        assert system.n_particles > 0

    state = og.sim.dump_state(serialized=True)
    og.sim.load_state(state, serialized=True)

    for system_name, system_class in SYSTEM_EXAMPLES.items():
        system = get_system(system_name)
        system.clear()
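# --- Illustrative sketch (editorial addition, not part of the original test file) ---
# A minimal pose round-trip check, assuming the dump_state/load_state API used above
# also restores rigid-object poses. The helper name, the perturbation offset, and the
# tolerance are assumptions; "breakfast_table", dump_state, load_state, get_position,
# and set_position all appear elsewhere in these tests.
import numpy as np


def _check_pose_round_trip():
    obj = og.sim.scene.object_registry("name", "breakfast_table")
    pos_before = obj.get_position()
    state = og.sim.dump_state()
    # Perturb the object, then restore the saved state and verify the pose came back
    obj.set_position(pos_before + np.array([0.0, 0.0, 0.1]))
    og.sim.load_state(state)
    assert np.allclose(obj.get_position(), pos_before, atol=1e-3)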
1,636
Python
33.829787
77
0.675428
StanfordVL/OmniGibson/tests/test_envs.py
import omnigibson as og
from omnigibson.macros import gm


def task_tester(task_type):
    cfg = {
        "scene": {
            "type": "InteractiveTraversableScene",
            "scene_model": "Rs_int",
            "load_object_categories": ["floors", "breakfast_table"],
        },
        "robots": [
            {
                "type": "Fetch",
                "obs_modalities": [],
            }
        ],
        # Task kwargs
        "task": {
            "type": task_type,
            # BehaviorTask-specific
            "activity_name": "assembling_gift_baskets",
            "online_object_sampling": True
        },
    }

    # Make sure sim is stopped
    if og.sim is not None:
        og.sim.stop()

    # Make sure object states, GPU dynamics (needed for cloth), and flatcache are enabled
    gm.ENABLE_OBJECT_STATES = True
    gm.USE_GPU_DYNAMICS = True
    gm.ENABLE_FLATCACHE = True

    # Create the environment
    env = og.Environment(configs=cfg)

    env.reset()
    for _ in range(5):
        env.step(env.robots[0].action_space.sample())

    # Clear the sim
    og.sim.clear()


def test_dummy_task():
    task_tester("DummyTask")


def test_point_reaching_task():
    task_tester("PointReachingTask")


def test_point_navigation_task():
    task_tester("PointNavigationTask")


def test_behavior_task():
    task_tester("BehaviorTask")


def test_rs_int_full_load():
    cfg = {
        "scene": {
            "type": "InteractiveTraversableScene",
            "scene_model": "Rs_int",
        },
        "robots": [
            {
                "type": "Fetch",
                "obs_modalities": [],
            }
        ],
        # Task kwargs
        "task": {
            "type": "DummyTask",
        },
    }

    # Make sure sim is stopped
    og.sim.stop()

    # Make sure GPU dynamics are enabled (GPU dynamics needed for cloth)
    gm.ENABLE_OBJECT_STATES = True
    gm.USE_GPU_DYNAMICS = True
    gm.ENABLE_FLATCACHE = True

    # Create the environment
    env = og.Environment(configs=cfg)

    env.reset()
    for _ in range(5):
        env.step(env.robots[0].action_space.sample())

    # Clear the sim
    og.sim.clear()
2,175
Python
20.979798
89
0.538391
StanfordVL/OmniGibson/tests/create_tests_of_examples.py
import importlib
import os
import pkgutil
import shutil
from string import Template

import omnigibson
from omnigibson import examples
from omnigibson.utils.asset_utils import download_assets

download_assets()


def main():
    examples_list = []
    for package in pkgutil.walk_packages(examples.__path__, examples.__name__ + "."):
        if (
            not package.ispkg
            and package.name[17:] != "example_selector"
            and "web_ui" not in package.name[17:]  # The WebUI examples require additional server setup
            and "vr_" not in package.name[17:]  # The VR examples require additional dependencies
            and "ray_" not in package.name[17:]  # The Ray/RLLib example does not run in a subprocess
        ):  # Consider removing the last condition if we have runnable VR tests
            examples_list += [package.name[17:]]

    temp_folder_of_test = os.path.join("/", "tmp", "tests_of_examples")
    shutil.rmtree(temp_folder_of_test, ignore_errors=True)
    os.makedirs(temp_folder_of_test, exist_ok=True)

    for example in examples_list:
        template_file_name = os.path.join(omnigibson.__path__[0], "..", "tests", "test_of_example_template.txt")
        with open(template_file_name, "r") as f:
            substitutes = dict()
            substitutes["module"] = example
            name = example.rsplit(".", 1)[-1]
            substitutes["name"] = name
            src = Template(f.read())
            dst = src.substitute(substitutes)
            with open(os.path.join(temp_folder_of_test, name + "_test.py"), "w") as test_file:
                test_file.write(dst)


if __name__ == "__main__":
    main()
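# --- Illustrative sketch (editorial addition, not part of the original file) ---
# Standalone demonstration of the Template substitution step above, using the same
# "module" and "name" keys. The template string and module name below are made up for
# illustration; the real template is read from tests/test_of_example_template.txt.
_demo_template = Template("# auto-generated test for $module\ndef test_$name():\n    pass\n")
_demo_source = _demo_template.substitute(module="omnigibson.examples.some_example", name="some_example")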
1,687
Python
35.695651
112
0.617072
StanfordVL/OmniGibson/tests/utils.py
import omnigibson as og from omnigibson.macros import gm from omnigibson.object_states import * from omnigibson.utils.constants import PrimType, ParticleModifyCondition, ParticleModifyMethod from omnigibson.systems import * import omnigibson.utils.transform_utils as T import numpy as np TEMP_RELATED_ABILITIES = {"cookable": {}, "freezable": {}, "burnable": {}, "heatable": {}} SYSTEM_EXAMPLES = { "water": FluidSystem, "white_rice": GranularSystem, "diced__apple": MacroPhysicalParticleSystem, "stain": MacroVisualParticleSystem, } def og_test(func): def wrapper(): assert_test_scene() try: func() finally: og.sim.scene.reset() return wrapper num_objs = 0 def retrieve_obj_cfg(obj): return { "name": obj.name, "category": obj.category, "model": obj.model, "prim_type": obj.prim_type, "position": obj.get_position(), "scale": obj.scale, "abilities": obj.abilities, "visual_only": obj.visual_only, } def get_obj_cfg(name, category, model, prim_type=PrimType.RIGID, scale=None, bounding_box=None, abilities=None, visual_only=False): global num_objs num_objs += 1 return { "type": "DatasetObject", "fit_avg_dim_volume": scale is None and bounding_box is None, "name": name, "category": category, "model": model, "prim_type": prim_type, "position": [150, 150, 150 + num_objs * 5], "scale": scale, "bounding_box": bounding_box, "abilities": abilities, "visual_only": visual_only, } def assert_test_scene(): if og.sim is None or og.sim.scene is None: cfg = { "scene": { "type": "Scene", }, "objects": [ get_obj_cfg("breakfast_table", "breakfast_table", "skczfi"), get_obj_cfg("bottom_cabinet", "bottom_cabinet", "immwzb"), get_obj_cfg("dishtowel", "dishtowel", "dtfspn", prim_type=PrimType.CLOTH, abilities={"cloth": {}}), get_obj_cfg("carpet", "carpet", "ctclvd", prim_type=PrimType.CLOTH, abilities={"cloth": {}}), get_obj_cfg("bowl", "bowl", "ajzltc"), get_obj_cfg("bagel", "bagel", "zlxkry", abilities=TEMP_RELATED_ABILITIES), get_obj_cfg("cookable_dishtowel", "dishtowel", "dtfspn", prim_type=PrimType.CLOTH, abilities={**TEMP_RELATED_ABILITIES, **{"cloth": {}}}), get_obj_cfg("microwave", "microwave", "hjjxmi"), get_obj_cfg("stove", "stove", "yhjzwg"), get_obj_cfg("fridge", "fridge", "dszchb"), get_obj_cfg("plywood", "plywood", "fkmkqa", abilities={"flammable": {}}), get_obj_cfg("shelf_back_panel", "shelf_back_panel", "gjsnrt", abilities={"attachable": {}}), get_obj_cfg("shelf_shelf", "shelf_shelf", "ymtnqa", abilities={"attachable": {}}), get_obj_cfg("shelf_baseboard", "shelf_baseboard", "hlhneo", abilities={"attachable": {}}), get_obj_cfg("bracelet", "bracelet", "thqqmo"), get_obj_cfg("oyster", "oyster", "enzocs"), get_obj_cfg("sink", "sink", "egwapq", scale=np.ones(3)), get_obj_cfg("stockpot", "stockpot", "dcleem", abilities={"fillable": {}, "heatable": {}}), get_obj_cfg("applier_dishtowel", "dishtowel", "dtfspn", abilities={"particleApplier": {"method": ParticleModifyMethod.ADJACENCY, "conditions": {"water": []}}}), get_obj_cfg("remover_dishtowel", "dishtowel", "dtfspn", abilities={"particleRemover": {"method": ParticleModifyMethod.ADJACENCY, "conditions": {"water": []}}}), get_obj_cfg("spray_bottle", "spray_bottle", "asztxi", visual_only=True, abilities={"toggleable": {}, "particleApplier": {"method": ParticleModifyMethod.PROJECTION, "conditions": {"water": [(ParticleModifyCondition.TOGGLEDON, True)]}}}), get_obj_cfg("vacuum", "vacuum", "bdmsbr", visual_only=True, abilities={"toggleable": {}, "particleRemover": {"method": ParticleModifyMethod.PROJECTION, "conditions": {"water": [(ParticleModifyCondition.TOGGLEDON, 
True)]}}}), get_obj_cfg("blender", "blender", "cwkvib", bounding_box=[0.316, 0.318, 0.649], abilities={"fillable": {}, "toggleable": {}, "heatable": {}}), get_obj_cfg("oven", "oven", "cgtaer", bounding_box=[0.943, 0.837, 1.297]), get_obj_cfg("baking_sheet", "baking_sheet", "yhurut", bounding_box=[0.41607812, 0.43617093, 0.02281223]), get_obj_cfg("bagel_dough", "bagel_dough", "iuembm", scale=np.ones(3) * 0.8), get_obj_cfg("raw_egg", "raw_egg", "ydgivr"), get_obj_cfg("scoop_of_ice_cream", "scoop_of_ice_cream", "dodndj", bounding_box=[0.076, 0.077, 0.065]), get_obj_cfg("food_processor", "food_processor", "gamkbo"), get_obj_cfg("electric_mixer", "electric_mixer", "qornxa"), get_obj_cfg("another_raw_egg", "raw_egg", "ydgivr"), get_obj_cfg("chicken", "chicken", "nppsmz", scale=np.ones(3) * 0.7), get_obj_cfg("tablespoon", "tablespoon", "huudhe"), get_obj_cfg("swiss_cheese", "swiss_cheese", "hwxeto"), get_obj_cfg("apple", "apple", "agveuv"), get_obj_cfg("table_knife", "table_knife", "jxdfyy"), get_obj_cfg("half_apple", "half_apple", "sguztn"), get_obj_cfg("washer", "washer", "dobgmu"), get_obj_cfg("carpet_sweeper", "carpet_sweeper", "xboreo"), ], "robots": [ { "type": "Fetch", "obs_modalities": ["seg_semantic", "seg_instance", "seg_instance_id"], "position": [150, 150, 100], "orientation": [0, 0, 0, 1], } ] } # Make sure sim is stopped if og.sim is not None: og.sim.stop() # Make sure GPU dynamics are enabled (GPU dynamics needed for cloth) and no flatcache gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = True gm.ENABLE_FLATCACHE = False # Create the environment env = og.Environment(configs=cfg) # Additional processing for the tests to pass more deterministically og.sim.stop() bounding_box_object_names = ["bagel_dough", "raw_egg"] for name in bounding_box_object_names: obj = og.sim.scene.object_registry("name", name) for collision_mesh in obj.root_link.collision_meshes.values(): collision_mesh.set_collision_approximation("boundingCube") og.sim.play() def get_random_pose(pos_low=10.0, pos_hi=20.0): pos = np.random.uniform(pos_low, pos_hi, 3) orn = T.euler2quat(np.random.uniform(-np.pi, np.pi, 3)) return pos, orn def place_objA_on_objB_bbox(objA, objB, x_offset=0.0, y_offset=0.0, z_offset=0.001): objA.keep_still() objB.keep_still() # Reset pose if cloth object if objA.prim_type == PrimType.CLOTH: objA.root_link.reset() objA_aabb_center, objA_aabb_extent = objA.aabb_center, objA.aabb_extent objB_aabb_center, objB_aabb_extent = objB.aabb_center, objB.aabb_extent objA_aabb_offset = objA.get_position() - objA_aabb_center target_objA_aabb_pos = objB_aabb_center + np.array([0, 0, (objB_aabb_extent[2] + objA_aabb_extent[2]) / 2.0]) + \ np.array([x_offset, y_offset, z_offset]) objA.set_position(target_objA_aabb_pos + objA_aabb_offset) def place_obj_on_floor_plane(obj, x_offset=0.0, y_offset=0.0, z_offset=0.01): obj.keep_still() # Reset pose if cloth object if obj.prim_type == PrimType.CLOTH: obj.root_link.reset() obj_aabb_center, obj_aabb_extent = obj.aabb_center, obj.aabb_extent obj_aabb_offset = obj.get_position() - obj_aabb_center target_obj_aabb_pos = np.array([0, 0, obj_aabb_extent[2] / 2.0]) + np.array([x_offset, y_offset, z_offset]) obj.set_position(target_obj_aabb_pos + obj_aabb_offset) def remove_all_systems(): for system in ParticleRemover.supported_active_systems.values(): system.remove_all_particles() og.sim.step()
8,329
Python
46.329545
252
0.581102
StanfordVL/OmniGibson/tests/test_robot_teleoperation.py
import omnigibson as og
import numpy as np
from omnigibson.macros import gm
from telemoma.human_interface.teleop_core import TeleopAction
from omnigibson.utils.transform_utils import quat2euler
import pytest


@pytest.mark.skip(reason="test hangs on CI")
def test_teleop():
    cfg = {
        "env": {"action_timestep": 1 / 60., "physics_timestep": 1 / 120.},
        "scene": {"type": "Scene"},
        "robots": [
            {
                "type": "Fetch",
                "action_normalize": False,
                "controller_config": {
                    "arm_0": {
                        "name": "InverseKinematicsController",
                        "command_input_limits": None,
                    },
                }
            }
        ],
    }

    # Make sure sim is stopped
    if og.sim is not None:
        og.sim.stop()

    # Disable GPU dynamics and flatcache (neither is needed for this test)
    gm.USE_GPU_DYNAMICS = False
    gm.ENABLE_FLATCACHE = False

    # Create the environment
    env = og.Environment(configs=cfg)
    robot = env.robots[0]
    env.reset()
    teleop_action = TeleopAction()
    start_base_pose = robot.get_position_orientation()
    start_eef_pose = robot.links[robot.eef_link_names[robot.default_arm]].get_position_orientation()

    # test moving robot arm
    teleop_action.right = np.concatenate(([0.01], np.zeros(6)))
    for _ in range(50):
        action = robot.teleop_data_to_action(teleop_action)
        env.step(action)
    cur_eef_pose = robot.links[robot.eef_link_names[robot.default_arm]].get_position_orientation()
    assert cur_eef_pose[0][0] - start_eef_pose[0][0] > 0.02, "Robot arm not moving forward"

    # test moving robot base
    teleop_action.right = np.zeros(7)
    teleop_action.base = np.array([0.1, 0, 0.1])
    for _ in range(50):
        action = robot.teleop_data_to_action(teleop_action)
        env.step(action)
    cur_base_pose = robot.get_position_orientation()
    assert cur_base_pose[0][0] - start_base_pose[0][0] > 0.02, "robot base not moving forward"
    assert quat2euler(cur_base_pose[1])[2] - quat2euler(start_base_pose[1])[2] > 0.02, "robot base not rotating counter-clockwise"

    # Clear the sim
    og.sim.clear()
2,248
Python
34.698412
130
0.606317
StanfordVL/OmniGibson/tests/test_robot_states.py
import numpy as np import omnigibson as og from omnigibson.macros import gm import omnigibson.lazy as lazy from omnigibson.sensors import VisionSensor from omnigibson.object_states import ObjectsInFOVOfRobot from omnigibson.utils.transform_utils import pose2mat, mat2pose, relative_pose_transform from omnigibson.utils.usd_utils import PoseAPI from omnigibson.utils.constants import semantic_class_name_to_id def setup_environment(flatcache=True): """ Sets up the environment with or without flatcache based on the flatcache parameter. """ # Ensure any existing simulation is stopped if og.sim is not None: og.sim.stop() # Set global flags gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = True gm.ENABLE_FLATCACHE = flatcache # Set based on function parameter # Define the environment configuration config = { "scene": { "type": "Scene", }, "robots": [ { "type": "Fetch", "obs_modalities": 'all', "position": [150, 150, 100], "orientation": [0, 0, 0, 1], "controller_config": { "arm_0": { "name": "NullJointController", "motor_type": "position", }, }, } ] } env = og.Environment(configs=config) return env def camera_pose_test(flatcache): env = setup_environment(flatcache) robot = env.robots[0] env.reset() sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)] assert len(sensors) > 0 vision_sensor = sensors[0] # Get vision sensor world pose via directly calling get_position_orientation robot_world_pos, robot_world_ori = robot.get_position_orientation() sensor_world_pos, sensor_world_ori = vision_sensor.get_position_orientation() robot_to_sensor_mat = pose2mat(relative_pose_transform(sensor_world_pos, sensor_world_ori, robot_world_pos, robot_world_ori)) sensor_world_pos_gt = np.array([150.16513062, 150., 101.38952637]) sensor_world_ori_gt = np.array([-0.29444987, 0.29444981, 0.64288363, -0.64288352]) assert np.allclose(sensor_world_pos, sensor_world_pos_gt, atol=1e-3) assert np.allclose(sensor_world_ori, sensor_world_ori_gt, atol=1e-3) # Now, we want to move the robot and check if the sensor pose has been updated old_camera_local_pose = vision_sensor.get_local_pose() robot.set_position_orientation(position=[100, 100, 100]) new_camera_local_pose = vision_sensor.get_local_pose() new_camera_world_pose = vision_sensor.get_position_orientation() robot_pose_mat = pose2mat(robot.get_position_orientation()) expected_camera_world_pos, expected_camera_world_ori = mat2pose(robot_pose_mat @ robot_to_sensor_mat) assert np.allclose(old_camera_local_pose[0], new_camera_local_pose[0], atol=1e-3) assert np.allclose(new_camera_world_pose[0], expected_camera_world_pos, atol=1e-3) assert np.allclose(new_camera_world_pose[1], expected_camera_world_ori, atol=1e-3) # Then, we want to move the local pose of the camera and check # 1) if the world pose is updated 2) if the robot stays in the same position old_camera_local_pose = vision_sensor.get_local_pose() vision_sensor.set_local_pose(position=[10, 10, 10], orientation=[0, 0, 0, 1]) new_camera_world_pose = vision_sensor.get_position_orientation() camera_parent_prim = lazy.omni.isaac.core.utils.prims.get_prim_parent(vision_sensor.prim) camera_parent_path = str(camera_parent_prim.GetPath()) camera_parent_world_transform = PoseAPI.get_world_pose_with_scale(camera_parent_path) expected_new_camera_world_pos, expected_new_camera_world_ori = mat2pose(camera_parent_world_transform @ pose2mat([[10, 10, 10], [0, 0, 0, 1]])) assert np.allclose(new_camera_world_pose[0], expected_new_camera_world_pos, atol=1e-3) assert np.allclose(new_camera_world_pose[1], 
expected_new_camera_world_ori, atol=1e-3) assert np.allclose(robot.get_position(), [100, 100, 100], atol=1e-3) # Finally, we want to move the world pose of the camera and check # 1) if the local pose is updated 2) if the robot stays in the same position robot.set_position_orientation(position=[150, 150, 100]) old_camera_local_pose = vision_sensor.get_local_pose() vision_sensor.set_position_orientation([150, 150, 101.36912537], [-0.29444987, 0.29444981, 0.64288363, -0.64288352]) new_camera_local_pose = vision_sensor.get_local_pose() assert not np.allclose(old_camera_local_pose[0], new_camera_local_pose[0], atol=1e-3) assert not np.allclose(old_camera_local_pose[1], new_camera_local_pose[1], atol=1e-3) assert np.allclose(robot.get_position(), [150, 150, 100], atol=1e-3) # Another test we want to try is setting the camera's parent scale and check if the world pose is updated camera_parent_prim.GetAttribute('xformOp:scale').Set(lazy.pxr.Gf.Vec3d([2.0, 2.0, 2.0])) camera_parent_world_transform = PoseAPI.get_world_pose_with_scale(camera_parent_path) camera_local_pose = vision_sensor.get_local_pose() expected_new_camera_world_pos, _ = mat2pose(camera_parent_world_transform @ pose2mat(camera_local_pose)) new_camera_world_pose = vision_sensor.get_position_orientation() assert np.allclose(new_camera_world_pose[0], expected_new_camera_world_pos, atol=1e-3) og.sim.clear() def test_camera_pose_flatcache_on(): camera_pose_test(True) def test_camera_pose_flatcache_off(): camera_pose_test(False) def test_camera_semantic_segmentation(): env = setup_environment(False) robot = env.robots[0] env.reset() sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)] assert len(sensors) > 0 vision_sensor = sensors[0] env.reset() all_observation, all_info = vision_sensor.get_obs() seg_semantic = all_observation['seg_semantic'] seg_semantic_info = all_info['seg_semantic'] agent_label = semantic_class_name_to_id()['agent'] background_label = semantic_class_name_to_id()['background'] assert np.all(np.isin(seg_semantic, [agent_label, background_label])) assert set(seg_semantic_info.keys()) == {agent_label, background_label} og.sim.clear() def test_object_in_FOV_of_robot(): env = setup_environment(False) robot = env.robots[0] env.reset() assert robot.states[ObjectsInFOVOfRobot].get_value() == [robot] sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)] assert len(sensors) > 0 vision_sensor = sensors[0] vision_sensor.set_position_orientation(position=[100, 150, 100]) og.sim.step() og.sim.step() assert robot.states[ObjectsInFOVOfRobot].get_value() == [] og.sim.clear()
6,867
Python
43.888889
147
0.678462
StanfordVL/OmniGibson/tests/conftest.py
import omnigibson as og


def pytest_unconfigure(config):
    og.shutdown()
75
Python
14.199997
31
0.746667
StanfordVL/OmniGibson/tests/test_systems.py
import omnigibson as og
from omnigibson.systems import *
from omnigibson.object_states import Covered

from utils import og_test, SYSTEM_EXAMPLES

import pytest


@og_test
def test_system_clear():
    breakfast_table = og.sim.scene.object_registry("name", "breakfast_table")
    for system_name, system_class in SYSTEM_EXAMPLES.items():
        for _ in range(3):
            system = get_system(system_name)
            assert issubclass(system, system_class)
            if issubclass(system_class, VisualParticleSystem):
                assert breakfast_table.states[Covered].set_value(system, True)
            else:
                system.generate_particles(positions=[[0, 0, 1]])
            assert system.n_particles > 0
            og.sim.step()
            system.clear()
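# --- Illustrative sketch (editorial addition, not part of the original test file) ---
# The create -> populate -> clear cycle exercised above, shown for a single
# physical-particle system. The helper name is hypothetical; "water" comes from
# SYSTEM_EXAMPLES, and every call used here already appears in the test above.
def _water_clear_cycle():
    water = get_system("water")
    water.generate_particles(positions=[[0, 0, 1]])
    assert water.n_particles > 0
    og.sim.step()
    water.clear()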
780
Python
32.95652
78
0.644872
StanfordVL/OmniGibson/tests/test_sensors.py
from omnigibson.systems import get_system, is_physical_particle_system, is_visual_particle_system import omnigibson.utils.transform_utils as T import omnigibson as og from omnigibson.sensors import VisionSensor from utils import og_test, place_obj_on_floor_plane, SYSTEM_EXAMPLES import pytest import numpy as np @og_test def test_seg(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") dishtowel = og.sim.scene.object_registry("name", "dishtowel") robot = og.sim.scene.robots[0] place_obj_on_floor_plane(breakfast_table) dishtowel.set_position_orientation([-0.4, 0.0, 0.55], [0, 0, 0, 1]) robot.set_position_orientation([0, 0.8, 0.0], T.euler2quat([0, 0, -np.pi/2])) robot.reset() systems = [get_system(system_name) for system_name, system_class in SYSTEM_EXAMPLES.items()] for i, system in enumerate(systems): # Sample two particles for each system pos = np.array([-0.2 + i * 0.2, 0, 0.55]) if is_physical_particle_system(system_name=system.name): system.generate_particles(positions=[pos, pos + np.array([0.1, 0.0, 0.0])]) else: if system.get_group_name(breakfast_table) not in system.groups: system.create_attachment_group(breakfast_table) system.generate_group_particles( group=system.get_group_name(breakfast_table), positions=np.array([pos, pos + np.array([0.1, 0.0, 0.0])]), link_prim_paths=[breakfast_table.root_link.prim_path], ) og.sim.step() og.sim.render() sensors = [s for s in robot.sensors.values() if isinstance(s, VisionSensor)] assert len(sensors) > 0 vision_sensor = sensors[0] all_observation, all_info = vision_sensor.get_obs() seg_semantic = all_observation['seg_semantic'] seg_semantic_info = all_info['seg_semantic'] assert set(np.unique(seg_semantic)) == set(seg_semantic_info.keys()) expected_dict = { 335706086: 'diced__apple', 825831922: 'floors', 884110082: 'stain', 1949122937: 'breakfast_table', 2814990211: 'agent', 3051938632: 'white_rice', 3330677804: 'water', 4207839377: 'dishtowel' } assert set(seg_semantic_info.values()) == set(expected_dict.values()) seg_instance = all_observation['seg_instance'] seg_instance_info = all_info['seg_instance'] assert set(np.unique(seg_instance)) == set(seg_instance_info.keys()) expected_dict = { 2: 'robot0', 3: 'groundPlane', 4: 'dishtowel', 5: 'breakfast_table', 6: 'stain', 7: 'water', 8: 'white_rice', 9: 'diced__apple' } assert set(seg_instance_info.values()) == set(expected_dict.values()) seg_instance_id = all_observation['seg_instance_id'] seg_instance_id_info = all_info['seg_instance_id'] assert set(np.unique(seg_instance_id)) == set(seg_instance_id_info.keys()) expected_dict = { 3: '/World/robot0/gripper_link/visuals', 4: '/World/robot0/wrist_roll_link/visuals', 5: '/World/robot0/forearm_roll_link/visuals', 6: '/World/robot0/wrist_flex_link/visuals', 8: '/World/groundPlane/geom', 9: '/World/dishtowel/base_link_cloth', 10: '/World/robot0/r_gripper_finger_link/visuals', 11: '/World/robot0/l_gripper_finger_link/visuals', 12: '/World/breakfast_table/base_link/visuals', 13: 'stain', 14: 'white_rice', 15: 'diced__apple', 16: 'water' } # Temporarily disable this test because og_assets are outdated on CI machines # assert set(seg_instance_id_info.values()) == set(expected_dict.values()) def test_clear_sim(): og.sim.clear()
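# --- Illustrative sketch (editorial addition, not part of the original test file) ---
# Decoding a raw seg_semantic map back to readable class names without the info dict,
# assuming the pixel IDs come from the same semantic_class_name_to_id() registry used
# in test_robot_states.py. The helper name and the "unknown" fallback are assumptions.
from omnigibson.utils.constants import semantic_class_name_to_id


def _decode_semantic_ids(seg_semantic):
    id_to_name = {v: k for k, v in semantic_class_name_to_id().items()}
    return {id_to_name.get(int(i), "unknown") for i in np.unique(seg_semantic)}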
3,768
Python
37.85567
97
0.628185
StanfordVL/OmniGibson/tests/test_primitives.py
import numpy as np import pytest import omnigibson as og from omnigibson.macros import gm from omnigibson.action_primitives.starter_semantic_action_primitives import StarterSemanticActionPrimitives, StarterSemanticActionPrimitiveSet import omnigibson.utils.transform_utils as T from omnigibson.objects.dataset_object import DatasetObject def execute_controller(ctrl_gen, env): for action in ctrl_gen: env.step(action) def primitive_tester(load_object_categories, objects, primitives, primitives_args): cfg = { "scene": { "type": "InteractiveTraversableScene", "scene_model": "Rs_int", "load_object_categories": load_object_categories, }, "robots": [ { "type": "Fetch", "obs_modalities": ["scan", "rgb", "depth"], "scale": 1.0, "self_collisions": True, "action_normalize": False, "action_type": "continuous", "grasping_mode": "sticky", "rigid_trunk": False, "default_arm_pose": "diagonal30", "default_trunk_offset": 0.365, "controller_config": { "base": { "name": "DifferentialDriveController", }, "arm_0": { "name": "InverseKinematicsController", "command_input_limits": "default", "command_output_limits": [[-0.2, -0.2, -0.2, -0.5, -0.5, -0.5], [0.2, 0.2, 0.2, 0.5, 0.5, 0.5]], "mode": "pose_absolute_ori", "kp": 300.0 }, "gripper_0": { "name": "JointController", "motor_type": "position", "command_input_limits": [-1, 1], "command_output_limits": None, "use_delta_commands": True }, "camera": { "name": "JointController", "use_delta_commands": False } } } ], } # Make sure sim is stopped if og.sim is not None: og.sim.stop() # Make sure GPU dynamics are enabled (GPU dynamics needed for cloth) and no flatcache gm.ENABLE_OBJECT_STATES = True gm.USE_GPU_DYNAMICS = False gm.ENABLE_FLATCACHE = False # Create the environment env = og.Environment(configs=cfg) robot = env.robots[0] env.reset() for obj in objects: og.sim.import_object(obj['object']) obj['object'].set_position_orientation(obj['position'], obj['orientation']) og.sim.step() controller = StarterSemanticActionPrimitives(env, enable_head_tracking=False) try: for primitive, args in zip(primitives, primitives_args): try: execute_controller(controller.apply_ref(primitive, *args), env) except Exception as e: return False finally: # Clear the sim og.sim.clear() return True def test_navigate(): categories = ["floors", "ceilings", "walls"] objects = [] obj_1 = { "object": DatasetObject( name="cologne", category="bottle_of_cologne", model="lyipur" ), "position": [-0.3, -0.8, 0.5], "orientation": [0, 0, 0, 1] } objects.append(obj_1) primitives = [StarterSemanticActionPrimitiveSet.NAVIGATE_TO] primitives_args = [(obj_1['object'],)] assert primitive_tester(categories, objects, primitives, primitives_args) def test_grasp(): categories = ["floors", "ceilings", "walls", "coffee_table"] objects = [] obj_1 = { "object": DatasetObject( name="cologne", category="bottle_of_cologne", model="lyipur" ), "position": [-0.3, -0.8, 0.5], "orientation": [0, 0, 0, 1] } objects.append(obj_1) primitives = [StarterSemanticActionPrimitiveSet.GRASP] primitives_args = [(obj_1['object'],)] assert primitive_tester(categories, objects, primitives, primitives_args) def test_place(): categories = ["floors", "ceilings", "walls", "coffee_table"] objects = [] obj_1 = { "object": DatasetObject( name="table", category="breakfast_table", model="rjgmmy", scale=[0.3, 0.3, 0.3] ), "position": [-0.7, 0.5, 0.2], "orientation": [0, 0, 0, 1] } obj_2 = { "object": DatasetObject( name="cologne", category="bottle_of_cologne", model="lyipur" ), "position": [-0.3, -0.8, 0.5], "orientation": [0, 0, 0, 1] } 
objects.append(obj_1) objects.append(obj_2) primitives = [StarterSemanticActionPrimitiveSet.GRASP, StarterSemanticActionPrimitiveSet.PLACE_ON_TOP] primitives_args = [(obj_2['object'],), (obj_1['object'],)] assert primitive_tester(categories, objects, primitives, primitives_args) @pytest.mark.skip(reason="primitives are broken") def test_open_prismatic(): categories = ["floors"] objects = [] obj_1 = { "object": DatasetObject( name="bottom_cabinet", category="bottom_cabinet", model="bamfsz", scale=[0.7, 0.7, 0.7] ), "position": [-1.2, -0.4, 0.5], "orientation": [0, 0, 0, 1] } objects.append(obj_1) primitives = [StarterSemanticActionPrimitiveSet.OPEN] primitives_args = [(obj_1['object'],)] assert primitive_tester(categories, objects, primitives, primitives_args) @pytest.mark.skip(reason="primitives are broken") def test_open_revolute(): categories = ["floors"] objects = [] obj_1 = { "object": DatasetObject( name="fridge", category="fridge", model="dszchb", scale=[0.7, 0.7, 0.7] ), "position": [-1.2, -0.4, 0.5], "orientation": [0, 0, 0, 1] } objects.append(obj_1) primitives = [StarterSemanticActionPrimitiveSet.OPEN] primitives_args = [(obj_1['object'],)] assert primitive_tester(categories, objects, primitives, primitives_args)
6,423
Python
30.336585
142
0.528414
StanfordVL/OmniGibson/tests/test_object_states.py
from omnigibson.macros import macros as m from omnigibson.object_states import * from omnigibson.systems import get_system, is_physical_particle_system, is_visual_particle_system, VisualParticleSystem from omnigibson.utils.constants import PrimType from omnigibson.utils.physx_utils import apply_force_at_pos, apply_torque import omnigibson.utils.transform_utils as T import omnigibson as og from utils import og_test, get_random_pose, place_objA_on_objB_bbox, place_obj_on_floor_plane, SYSTEM_EXAMPLES import pytest import numpy as np @og_test def test_on_top(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") bowl = og.sim.scene.object_registry("name", "bowl") dishtowel = og.sim.scene.object_registry("name", "dishtowel") place_obj_on_floor_plane(breakfast_table) for i, obj in enumerate((bowl, dishtowel)): place_objA_on_objB_bbox(obj, breakfast_table) for _ in range(5): og.sim.step() assert obj.states[OnTop].get_value(breakfast_table) obj.set_position(np.ones(3) * 10 * (i + 1)) og.sim.step() assert not obj.states[OnTop].get_value(breakfast_table) assert bowl.states[OnTop].set_value(breakfast_table, True) assert dishtowel.states[OnTop].set_value(breakfast_table, True) with pytest.raises(NotImplementedError): bowl.states[OnTop].set_value(breakfast_table, False) @og_test def test_inside(): bottom_cabinet = og.sim.scene.object_registry("name", "bottom_cabinet") bowl = og.sim.scene.object_registry("name", "bowl") dishtowel = og.sim.scene.object_registry("name", "dishtowel") place_obj_on_floor_plane(bottom_cabinet) bowl.set_position([0., 0., 0.08]) dishtowel.set_position([0, 0., 0.5]) for _ in range(5): og.sim.step() assert bowl.states[Inside].get_value(bottom_cabinet) assert dishtowel.states[Inside].get_value(bottom_cabinet) bowl.set_position([10., 10., 1.]) dishtowel.set_position([20., 20., 1.]) for _ in range(5): og.sim.step() assert not bowl.states[Inside].get_value(bottom_cabinet) assert not dishtowel.states[Inside].get_value(bottom_cabinet) assert bowl.states[Inside].set_value(bottom_cabinet, True) assert dishtowel.states[Inside].set_value(bottom_cabinet, True) with pytest.raises(NotImplementedError): bowl.states[OnTop].set_value(bottom_cabinet, False) @og_test def test_under(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") bowl = og.sim.scene.object_registry("name", "bowl") dishtowel = og.sim.scene.object_registry("name", "dishtowel") place_obj_on_floor_plane(breakfast_table) for i, obj in enumerate((bowl, dishtowel)): place_obj_on_floor_plane(obj) for _ in range(5): og.sim.step() assert obj.states[Under].get_value(breakfast_table) obj.set_position(np.ones(3) * 10 * (i + 1)) og.sim.step() assert not obj.states[Under].get_value(breakfast_table) assert bowl.states[Under].set_value(breakfast_table, True) assert dishtowel.states[Under].set_value(breakfast_table, True) with pytest.raises(NotImplementedError): bowl.states[Under].set_value(breakfast_table, False) @og_test def test_touching(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") bowl = og.sim.scene.object_registry("name", "bowl") dishtowel = og.sim.scene.object_registry("name", "dishtowel") place_obj_on_floor_plane(breakfast_table) for i, obj in enumerate((bowl, dishtowel)): place_objA_on_objB_bbox(obj, breakfast_table) for _ in range(5): og.sim.step() assert obj.states[Touching].get_value(breakfast_table) assert breakfast_table.states[Touching].get_value(obj) obj.set_position(np.ones(3) * 10 * (i + 1)) og.sim.step() assert not 
obj.states[Touching].get_value(breakfast_table) assert not breakfast_table.states[Touching].get_value(obj) with pytest.raises(NotImplementedError): bowl.states[Touching].set_value(breakfast_table, None) @og_test def test_contact_bodies(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") bowl = og.sim.scene.object_registry("name", "bowl") dishtowel = og.sim.scene.object_registry("name", "dishtowel") place_obj_on_floor_plane(breakfast_table) for i, obj in enumerate((bowl, dishtowel)): place_objA_on_objB_bbox(obj, breakfast_table) for _ in range(5): og.sim.step() # TODO: rigid body's ContactBodies should include cloth if obj.prim_type != PrimType.CLOTH: assert obj.root_link in breakfast_table.states[ContactBodies].get_value() assert breakfast_table.root_link in obj.states[ContactBodies].get_value() obj.set_position(np.ones(3) * 10 * (i + 1)) og.sim.step() assert obj.root_link not in breakfast_table.states[ContactBodies].get_value() assert breakfast_table.root_link not in obj.states[ContactBodies].get_value() with pytest.raises(NotImplementedError): bowl.states[ContactBodies].set_value(None) @og_test def test_next_to(): bottom_cabinet = og.sim.scene.object_registry("name", "bottom_cabinet") bowl = og.sim.scene.object_registry("name", "bowl") dishtowel = og.sim.scene.object_registry("name", "dishtowel") place_obj_on_floor_plane(bottom_cabinet) for i, (axis, obj) in enumerate(zip(("x", "y"), (bowl, dishtowel))): place_obj_on_floor_plane(obj, **{f"{axis}_offset": 0.3}) for _ in range(5): og.sim.step() assert obj.states[NextTo].get_value(bottom_cabinet) assert bottom_cabinet.states[NextTo].get_value(obj) obj.set_position(np.ones(3) * 10 * (i + 1)) og.sim.step() assert not obj.states[NextTo].get_value(bottom_cabinet) assert not bottom_cabinet.states[NextTo].get_value(obj) with pytest.raises(NotImplementedError): bowl.states[NextTo].set_value(bottom_cabinet, None) @og_test def test_overlaid(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") carpet = og.sim.scene.object_registry("name", "carpet") place_obj_on_floor_plane(breakfast_table) place_objA_on_objB_bbox(carpet, breakfast_table) for _ in range(5): og.sim.step() assert carpet.states[Overlaid].get_value(breakfast_table) carpet.set_position(np.ones(3) * 20.0) og.sim.step() assert not carpet.states[Overlaid].get_value(breakfast_table) assert carpet.states[Overlaid].set_value(breakfast_table, True) with pytest.raises(NotImplementedError): carpet.states[Overlaid].set_value(breakfast_table, False) @og_test def test_pose(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") dishtowel = og.sim.scene.object_registry("name", "dishtowel") pos1, orn1 = get_random_pose() breakfast_table.set_position_orientation(pos1, orn1) pos2, orn2 = get_random_pose() dishtowel.set_position_orientation(pos2, orn2) assert np.allclose(breakfast_table.states[Pose].get_value()[0], pos1) assert np.allclose(breakfast_table.states[Pose].get_value()[1], orn1) or np.allclose(breakfast_table.states[Pose].get_value()[1], -orn1) assert np.allclose(dishtowel.states[Pose].get_value()[0], pos2) assert np.allclose(dishtowel.states[Pose].get_value()[1], orn2) or np.allclose(dishtowel.states[Pose].get_value()[1], -orn2) with pytest.raises(NotImplementedError): breakfast_table.states[Pose].set_value(None) @og_test def test_aabb(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") dishtowel = og.sim.scene.object_registry("name", "dishtowel") pos1, orn1 = get_random_pose() 
breakfast_table.set_position_orientation(pos1, orn1) pos2, orn2 = get_random_pose() dishtowel.set_position_orientation(pos2, orn2) # Need to take one sim step og.sim.step() assert np.allclose(breakfast_table.states[AABB].get_value(), breakfast_table.aabb) assert np.all((breakfast_table.states[AABB].get_value()[0] < pos1) & (pos1 < breakfast_table.states[AABB].get_value()[1])) pp = dishtowel.root_link.compute_particle_positions() offset = dishtowel.root_link.cloth_system.particle_contact_offset assert np.allclose(dishtowel.states[AABB].get_value(), (pp.min(axis=0) - offset, pp.max(axis=0) + offset)) assert np.all((dishtowel.states[AABB].get_value()[0] < pos2) & (pos2 < dishtowel.states[AABB].get_value()[1])) with pytest.raises(NotImplementedError): breakfast_table.states[AABB].set_value(None) @og_test def test_adjacency(): bottom_cabinet = og.sim.scene.object_registry("name", "bottom_cabinet") bowl = og.sim.scene.object_registry("name", "bowl") dishtowel = og.sim.scene.object_registry("name", "dishtowel") place_obj_on_floor_plane(bottom_cabinet) for i, (axis, obj) in enumerate(zip(("x", "y"), (bowl, dishtowel))): place_obj_on_floor_plane(obj, **{f"{axis}_offset": 0.4}) og.sim.step() assert bottom_cabinet in set.union( *(axis.positive_neighbors | axis.negative_neighbors for coordinate in obj.states[HorizontalAdjacency].get_value() for axis in coordinate) ) bowl.set_position([0., 0., 1.]) dishtowel.set_position([0., 0., 2.0]) # Need to take one sim step og.sim.step() assert bowl in bottom_cabinet.states[VerticalAdjacency].get_value().positive_neighbors # TODO: adjacency relies on raytest, which doesn't take particle systems into account # assert dishtowel in bottom_cabinet.states[VerticalAdjacency].get_value().positive_neighbors assert bottom_cabinet in bowl.states[VerticalAdjacency].get_value().negative_neighbors # TODO: adjacency relies on raytest, which doesn't take particle systems into account # assert dishtowel in bowl.states[VerticalAdjacency].get_value().positive_neighbors assert bottom_cabinet in dishtowel.states[VerticalAdjacency].get_value().negative_neighbors assert bowl in dishtowel.states[VerticalAdjacency].get_value().negative_neighbors with pytest.raises(NotImplementedError): bottom_cabinet.states[HorizontalAdjacency].set_value(None) bottom_cabinet.states[VerticalAdjacency].set_value(None) @og_test def test_temperature(): microwave = og.sim.scene.object_registry("name", "microwave") stove = og.sim.scene.object_registry("name", "stove") fridge = og.sim.scene.object_registry("name", "fridge") plywood = og.sim.scene.object_registry("name", "plywood") bagel = og.sim.scene.object_registry("name", "bagel") dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel") place_obj_on_floor_plane(microwave) place_obj_on_floor_plane(stove, x_offset=1.0) place_obj_on_floor_plane(fridge, x_offset=2.0) place_obj_on_floor_plane(plywood, x_offset=3.0) # Set the objects to be far away place_obj_on_floor_plane(bagel, x_offset=-0.5) place_obj_on_floor_plane(dishtowel, x_offset=-1.0) for _ in range(5): og.sim.step() # Not affected by any heat source assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE # Open the microwave microwave.joints["j_link_0"].set_pos(np.pi / 2) # Set the objects to be inside the microwave bagel.set_position_orientation([0, 0, 0.11], [0, 0, 0, 1]) dishtowel.set_position_orientation([-0.15, 0, 0.11], [0, 0, 0, 1]) for _ in 
range(5): og.sim.step() # Not affected by any heat source (the microwave is NOT toggled on) assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE microwave.states[ToggledOn].set_value(True) for _ in range(5): og.sim.step() # Not affected by any heat source (the microwave is open) assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE microwave.joints["j_link_0"].set_pos(0.) for _ in range(5): og.sim.step() # Affected by the microwave bagel_new_temp = bagel.states[Temperature].get_value() dishtowel_new_temp = dishtowel.states[Temperature].get_value() assert bagel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE # Set the objects to be far away place_obj_on_floor_plane(bagel, x_offset=-0.5) place_obj_on_floor_plane(dishtowel, x_offset=-1.0) for _ in range(5): og.sim.step() # Not affected by any heat source (should cool down by itself towards the default temp) assert bagel.states[Temperature].get_value() < bagel_new_temp assert dishtowel.states[Temperature].get_value() < dishtowel_new_temp # Setter should work assert bagel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE) assert dishtowel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE) assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE # Set the objects to be on top of the stove bagel.set_position_orientation([0.71, 0.11, 0.88], [0, 0, 0, 1]) dishtowel.set_position_orientation([0.84, 0.11, 0.88], [0, 0, 0, 1]) for _ in range(5): og.sim.step() # Not affected by any heat source (the stove is off) assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE stove.states[ToggledOn].set_value(True) for _ in range(5): og.sim.step() # Affected by the stove assert bagel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE # Reset assert bagel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE) assert dishtowel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE) # Set the objects to be inside the fridge bagel.set_position_orientation([1.9, 0, 0.89], [0, 0, 0, 1]) dishtowel.set_position_orientation([2.1, 0, 0.89], [0, 0, 0, 1]) for _ in range(5): og.sim.step() # Affected by the fridge assert bagel.states[Temperature].get_value() < m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() < m.object_states.temperature.DEFAULT_TEMPERATURE # Reset temp assert bagel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE) assert dishtowel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE) # Set the objects to be near the plywood place_obj_on_floor_plane(bagel, x_offset=2.9) place_obj_on_floor_plane(dishtowel, x_offset=3.1) for _ in range(5): 
og.sim.step() # Not affected by any heat source (the plywood is NOT onfire) assert bagel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE plywood.states[OnFire].set_value(True) for _ in range(5): og.sim.step() assert bagel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[Temperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE @og_test def test_max_temperature(): bagel = og.sim.scene.object_registry("name", "bagel") dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel") assert bagel.states[MaxTemperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[MaxTemperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE assert bagel.states[MaxTemperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE - 1) assert dishtowel.states[MaxTemperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE - 1) assert bagel.states[MaxTemperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE - 1 assert dishtowel.states[MaxTemperature].get_value() == m.object_states.temperature.DEFAULT_TEMPERATURE - 1 bagel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE + 1) dishtowel.states[Temperature].set_value(m.object_states.temperature.DEFAULT_TEMPERATURE + 1) og.sim.step() assert bagel.states[MaxTemperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE assert dishtowel.states[MaxTemperature].get_value() > m.object_states.temperature.DEFAULT_TEMPERATURE @og_test def test_heat_source_or_sink(): microwave = og.sim.scene.object_registry("name", "microwave") stove = og.sim.scene.object_registry("name", "stove") fridge = og.sim.scene.object_registry("name", "fridge") assert microwave.states[HeatSourceOrSink].requires_inside assert microwave.states[HeatSourceOrSink].requires_closed assert microwave.states[HeatSourceOrSink].requires_toggled_on microwave.joints["j_link_0"].set_pos(np.pi / 2) microwave.states[ToggledOn].set_value(False) og.sim.step() assert not microwave.states[HeatSourceOrSink].get_value() microwave.joints["j_link_0"].set_pos(0.0) og.sim.step() assert not microwave.states[HeatSourceOrSink].get_value() microwave.states[ToggledOn].set_value(True) og.sim.step() assert microwave.states[HeatSourceOrSink].get_value() assert fridge.states[HeatSourceOrSink].requires_inside assert fridge.states[HeatSourceOrSink].requires_closed assert not fridge.states[HeatSourceOrSink].requires_toggled_on fridge.joints["j_link_0"].set_pos(np.pi / 2) og.sim.step() assert not fridge.states[HeatSourceOrSink].get_value() fridge.joints["j_link_0"].set_pos(0.0) og.sim.step() assert fridge.states[HeatSourceOrSink].get_value() assert not stove.states[HeatSourceOrSink].requires_inside assert not stove.states[HeatSourceOrSink].requires_closed assert stove.states[HeatSourceOrSink].requires_toggled_on stove.states[ToggledOn].set_value(False) og.sim.step() assert not stove.states[HeatSourceOrSink].get_value() stove.states[ToggledOn].set_value(True) og.sim.step() assert stove.states[HeatSourceOrSink].get_value() @og_test def test_cooked(): bagel = og.sim.scene.object_registry("name", "bagel") dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel") assert not bagel.states[Cooked].get_value() assert not dishtowel.states[Cooked].get_value() 
bagel.states[MaxTemperature].set_value(bagel.states[Cooked].cook_temperature) dishtowel.states[MaxTemperature].set_value(dishtowel.states[Cooked].cook_temperature) og.sim.step() assert bagel.states[Cooked].get_value() assert dishtowel.states[Cooked].get_value() assert bagel.states[Cooked].set_value(False) assert dishtowel.states[Cooked].set_value(False) assert not bagel.states[Cooked].get_value() assert not dishtowel.states[Cooked].get_value() assert bagel.states[MaxTemperature].get_value() < bagel.states[Cooked].cook_temperature assert dishtowel.states[MaxTemperature].get_value() < dishtowel.states[Cooked].cook_temperature assert bagel.states[Cooked].set_value(True) assert dishtowel.states[Cooked].set_value(True) assert bagel.states[Cooked].get_value() assert dishtowel.states[Cooked].get_value() assert bagel.states[MaxTemperature].get_value() >= bagel.states[Cooked].cook_temperature assert dishtowel.states[MaxTemperature].get_value() >= dishtowel.states[Cooked].cook_temperature @og_test def test_burnt(): bagel = og.sim.scene.object_registry("name", "bagel") dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel") assert not bagel.states[Burnt].get_value() assert not dishtowel.states[Burnt].get_value() bagel.states[MaxTemperature].set_value(bagel.states[Burnt].burn_temperature) dishtowel.states[MaxTemperature].set_value(dishtowel.states[Burnt].burn_temperature) og.sim.step() assert bagel.states[Burnt].get_value() assert dishtowel.states[Burnt].get_value() assert bagel.states[Burnt].set_value(False) assert dishtowel.states[Burnt].set_value(False) assert not bagel.states[Burnt].get_value() assert not dishtowel.states[Burnt].get_value() assert bagel.states[MaxTemperature].get_value() < bagel.states[Burnt].burn_temperature assert dishtowel.states[MaxTemperature].get_value() < dishtowel.states[Burnt].burn_temperature assert bagel.states[Burnt].set_value(True) assert dishtowel.states[Burnt].set_value(True) assert bagel.states[Burnt].get_value() assert dishtowel.states[Burnt].get_value() assert bagel.states[MaxTemperature].get_value() >= bagel.states[Burnt].burn_temperature assert dishtowel.states[MaxTemperature].get_value() >= dishtowel.states[Burnt].burn_temperature @og_test def test_frozen(): bagel = og.sim.scene.object_registry("name", "bagel") dishtowel = og.sim.scene.object_registry("name", "cookable_dishtowel") assert not bagel.states[Frozen].get_value() assert not dishtowel.states[Frozen].get_value() bagel.states[Temperature].set_value(bagel.states[Frozen].freeze_temperature - 1) dishtowel.states[Temperature].set_value(dishtowel.states[Frozen].freeze_temperature - 1) og.sim.step() assert bagel.states[Frozen].get_value() assert dishtowel.states[Frozen].get_value() assert bagel.states[Frozen].set_value(False) assert dishtowel.states[Frozen].set_value(False) assert not bagel.states[Frozen].get_value() assert not dishtowel.states[Frozen].get_value() assert bagel.states[Temperature].get_value() > bagel.states[Frozen].freeze_temperature assert dishtowel.states[Temperature].get_value() > dishtowel.states[Frozen].freeze_temperature assert bagel.states[Frozen].set_value(True) assert dishtowel.states[Frozen].set_value(True) assert bagel.states[Frozen].get_value() assert dishtowel.states[Frozen].get_value() assert bagel.states[Temperature].get_value() <= bagel.states[Frozen].freeze_temperature assert dishtowel.states[Temperature].get_value() <= dishtowel.states[Frozen].freeze_temperature @og_test def test_heated(): bagel = og.sim.scene.object_registry("name", "bagel") dishtowel = 
og.sim.scene.object_registry("name", "cookable_dishtowel") assert not bagel.states[Heated].get_value() assert not dishtowel.states[Heated].get_value() bagel.states[Temperature].set_value(bagel.states[Heated].heat_temperature + 1) dishtowel.states[Temperature].set_value(dishtowel.states[Heated].heat_temperature + 1) og.sim.step() assert bagel.states[Heated].get_value() assert dishtowel.states[Heated].get_value() assert bagel.states[Heated].set_value(False) assert dishtowel.states[Heated].set_value(False) assert not bagel.states[Heated].get_value() assert not dishtowel.states[Heated].get_value() assert bagel.states[Temperature].get_value() < bagel.states[Heated].heat_temperature assert dishtowel.states[Temperature].get_value() < dishtowel.states[Heated].heat_temperature assert bagel.states[Heated].set_value(True) assert dishtowel.states[Heated].set_value(True) assert bagel.states[Heated].get_value() assert dishtowel.states[Heated].get_value() assert bagel.states[Temperature].get_value() >= bagel.states[Heated].heat_temperature assert dishtowel.states[Temperature].get_value() >= dishtowel.states[Heated].heat_temperature @og_test def test_on_fire(): plywood = og.sim.scene.object_registry("name", "plywood") assert not plywood.states[OnFire].get_value() plywood.states[Temperature].set_value(plywood.states[OnFire].ignition_temperature + 1) og.sim.step() assert plywood.states[OnFire].get_value() assert plywood.states[OnFire].set_value(False) assert not plywood.states[OnFire].get_value() assert plywood.states[Temperature].get_value() < plywood.states[OnFire].ignition_temperature assert plywood.states[OnFire].set_value(True) assert plywood.states[OnFire].get_value() assert plywood.states[Temperature].get_value() == plywood.states[OnFire].temperature for _ in range(5): og.sim.step() assert plywood.states[Temperature].get_value() == plywood.states[OnFire].temperature @og_test def test_toggled_on(): stove = og.sim.scene.object_registry("name", "stove") robot = og.sim.scene.object_registry("name", "robot0") stove.set_position_orientation([1.48, 0.3, 0.443], T.euler2quat([0, 0, -np.pi / 2.0])) robot.set_position_orientation([0.0, 0.38, 0.0], [0, 0, 0, 1]) assert not stove.states[ToggledOn].get_value() q = robot.get_joint_positions() jnt_idxs = {name: i for i, name in enumerate(robot.joints.keys())} q[jnt_idxs["torso_lift_joint"]] = 0.0 q[jnt_idxs["shoulder_pan_joint"]] = np.deg2rad(90.0) q[jnt_idxs["shoulder_lift_joint"]] = np.deg2rad(9.0) q[jnt_idxs["upperarm_roll_joint"]] = 0.0 q[jnt_idxs["elbow_flex_joint"]] = 0.0 q[jnt_idxs["forearm_roll_joint"]] = 0.0 q[jnt_idxs["wrist_flex_joint"]] = 0.0 q[jnt_idxs["wrist_roll_joint"]] = 0.0 q[jnt_idxs["l_gripper_finger_joint"]] = 0.0 q[jnt_idxs["r_gripper_finger_joint"]] = 0.0 robot.set_joint_positions(q, drive=False) steps = m.object_states.toggle.CAN_TOGGLE_STEPS for _ in range(steps): og.sim.step() # End-effector not close to the button, stays False assert not stove.states[ToggledOn].get_value() q[jnt_idxs["shoulder_pan_joint"]] = 0.0 robot.set_joint_positions(q, drive=False) for _ in range(steps - 1): og.sim.step() # End-effector close to the button, but not enough time has passed, still False assert not stove.states[ToggledOn].get_value() og.sim.step() # Enough time has passed, turns True assert stove.states[ToggledOn].get_value() # Setter should work assert stove.states[ToggledOn].set_value(False) assert not stove.states[ToggledOn].get_value() @pytest.mark.skip(reason="skipping attachment for now") @og_test def test_attached_to(): shelf_back_panel = 
og.sim.scene.object_registry("name", "shelf_back_panel") shelf_shelf = og.sim.scene.object_registry("name", "shelf_shelf") shelf_baseboard = og.sim.scene.object_registry("name", "shelf_baseboard") shelf_back_panel.set_position_orientation([0, 0, 0.01], [0, 0, 0, 1]) shelf_back_panel.keep_still() shelf_shelf.set_position_orientation([0, 0.03, 0.17], [0, 0, 0, 1]) shelf_shelf.keep_still() # The shelf should not be attached to the back panel (no contact yet) assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) # Let the shelf fall for _ in range(10): og.sim.step() # The shelf should be attached to the back panel assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) assert shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, True) # The shelf should still be attached to the back panel assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) assert shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, False) # The shelf should not be attached to the back panel assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) assert shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, True) # shelf should be attached to the back panel assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) force_dir = np.array([0, 0, 1]) # A small force will not break the attachment force_mag = 10 apply_force_at_pos(shelf_shelf.root_link, force_dir * force_mag, shelf_shelf.get_position()) og.sim.step() assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) # A large force will break the attachment force_mag = 1000 apply_force_at_pos(shelf_shelf.root_link, force_dir * force_mag, shelf_shelf.get_position()) og.sim.step() assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) shelf_shelf.set_position_orientation([0, 0, 10], [0, 0, 0, 1]) assert not shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, True) # The shelf should not be attached to the back panel because the alignment is wrong assert not shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) assert shelf_shelf.states[AttachedTo].set_value(shelf_back_panel, True, bypass_alignment_checking=True) # The shelf should be attached to the back panel because the alignment checking is bypassed assert shelf_shelf.states[AttachedTo].get_value(shelf_back_panel) # The shelf baseboard should NOT be attached because the attachment has the wrong type shelf_baseboard.set_position_orientation([0.37, -0.93, 0.03], [0, 0, 0, 1]) assert not shelf_baseboard.states[AttachedTo].set_value(shelf_back_panel, True, bypass_alignment_checking=True) assert not shelf_baseboard.states[AttachedTo].get_value(shelf_back_panel) @og_test def test_particle_source(): sink = og.sim.scene.object_registry("name", "sink") place_obj_on_floor_plane(sink) for _ in range(3): og.sim.step() assert not sink.states[ToggledOn].get_value() water_system = get_system("water") # Sink is toggled off, no water should be present assert water_system.n_particles == 0 sink.states[ToggledOn].set_value(True) for _ in range(sink.states[ParticleSource].n_steps_per_modification): og.sim.step() # Sink is toggled on, some water should be present assert water_system.n_particles > 0 # Cannot set this state with pytest.raises(NotImplementedError): sink.states[ParticleSource].set_value(True) water_system.remove_all_particles() @og_test def test_particle_sink(): sink = og.sim.scene.object_registry("name", "sink") place_obj_on_floor_plane(sink) for _ in range(3): og.sim.step() water_system = get_system("water") # There should 
be no water particles. assert water_system.n_particles == 0 sink_pos = sink.states[ParticleSink].link.get_position() water_system.generate_particles(positions=[sink_pos + np.array([0, 0, 0.05])]) # There should be exactly 1 water particle. assert water_system.n_particles == 1 for _ in range(sink.states[ParticleSink].n_steps_per_modification): og.sim.step() # There should be no water particles because the fluid source absorbs them. assert water_system.n_particles == 0 # Cannot set this state with pytest.raises(NotImplementedError): sink.states[ParticleSink].set_value(True) water_system.remove_all_particles() @og_test def test_particle_applier(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") spray_bottle = og.sim.scene.object_registry("name", "spray_bottle") applier_dishtowel = og.sim.scene.object_registry("name", "applier_dishtowel") # Test projection place_obj_on_floor_plane(breakfast_table) place_objA_on_objB_bbox(spray_bottle, breakfast_table, z_offset=0.1) spray_bottle.set_orientation(np.array([0.707, 0, 0, 0.707])) for _ in range(3): og.sim.step() assert not spray_bottle.states[ToggledOn].get_value() water_system = get_system("water") # Spray bottle is toggled off, no water should be present assert water_system.n_particles == 0 # Take number of steps for water to be generated, make sure there is still no water n_applier_steps = spray_bottle.states[ParticleApplier].n_steps_per_modification for _ in range(n_applier_steps): og.sim.step() assert water_system.n_particles == 0 # Turn particle applier on, and verify particles are generated after the same number of steps are taken spray_bottle.states[ToggledOn].set_value(True) for _ in range(n_applier_steps): og.sim.step() # Some water should be present assert water_system.n_particles > 0 # Test adjacency water_system.remove_all_particles() spray_bottle.set_position_orientation(position=np.ones(3) * 50.0, orientation=np.array([0, 0, 0, 1.0])) place_objA_on_objB_bbox(applier_dishtowel, breakfast_table) og.sim.step() # no water should be present assert water_system.n_particles == 0 # Take number of steps for water to be generated n_applier_steps = applier_dishtowel.states[ParticleApplier].n_steps_per_modification for _ in range(n_applier_steps): og.sim.step() # Some water should be present assert water_system.n_particles > 0 # Cannot set this state with pytest.raises(NotImplementedError): spray_bottle.states[ParticleApplier].set_value(True) water_system.remove_all_particles() @og_test def test_particle_remover(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") vacuum = og.sim.scene.object_registry("name", "vacuum") remover_dishtowel = og.sim.scene.object_registry("name", "remover_dishtowel") # Test projection place_obj_on_floor_plane(breakfast_table) place_objA_on_objB_bbox(vacuum, breakfast_table, z_offset=0.02) for _ in range(3): og.sim.step() assert not vacuum.states[ToggledOn].get_value() water_system = get_system("water") # Place single particle of water on middle of table water_system.generate_particles(positions=[np.array([0, 0, breakfast_table.aabb[1][2] + water_system.particle_radius])]) assert water_system.n_particles > 0 # Take number of steps for water to be removed, make sure there is still water n_remover_steps = vacuum.states[ParticleRemover].n_steps_per_modification for _ in range(n_remover_steps): og.sim.step() assert water_system.n_particles > 0 # Turn particle remover on, and verify particles are generated after the same number of steps are taken 
vacuum.states[ToggledOn].set_value(True) for _ in range(n_remover_steps): og.sim.step() # No water should be present assert water_system.n_particles == 0 # Test adjacency vacuum.set_position(np.ones(3) * 50.0) place_objA_on_objB_bbox(remover_dishtowel, breakfast_table, z_offset=0.03) og.sim.step() # Place single particle of water on middle of table water_system.generate_particles(positions=[np.array([0, 0, breakfast_table.aabb[1][2] + water_system.particle_radius])]) # Water should be present assert water_system.n_particles > 0 # Take number of steps for water to be removed n_remover_steps = remover_dishtowel.states[ParticleRemover].n_steps_per_modification for _ in range(n_remover_steps): og.sim.step() # No water should be present assert water_system.n_particles == 0 # Cannot set this state with pytest.raises(NotImplementedError): vacuum.states[ParticleRemover].set_value(True) water_system.remove_all_particles() @og_test def test_saturated(): remover_dishtowel = og.sim.scene.object_registry("name", "remover_dishtowel") place_obj_on_floor_plane(remover_dishtowel) for _ in range(5): og.sim.step() water_system = get_system("water") # Place single row of water above dishtowel n_particles = 5 remover_dishtowel.states[Saturated].set_limit(water_system, n_particles) water_system.generate_particles(positions=[np.array([0, 0, remover_dishtowel.aabb[1][2] + water_system.particle_radius * (1 + 2 * i)]) for i in range(n_particles)]) # Take a few steps for _ in range(20): og.sim.step() # Make sure Saturated is True, and no particles exist assert water_system.n_particles == 0 assert remover_dishtowel.states[Saturated].get_value(water_system) # Make sure we can toggle saturated to be true and false assert remover_dishtowel.states[Saturated].set_value(water_system, False) assert remover_dishtowel.states[Saturated].set_value(water_system, True) water_system.remove_all_particles() @og_test def test_open(): microwave = og.sim.scene.object_registry("name", "microwave") bottom_cabinet = og.sim.scene.object_registry("name", "bottom_cabinet") # By default, objects should not be open. assert not microwave.states[Open].get_value() assert not bottom_cabinet.states[Open].get_value() # Set the joints to their upper limits. microwave.joints["j_link_0"].set_pos(microwave.joints["j_link_0"].upper_limit) bottom_cabinet.joints["j_link_2"].set_pos(bottom_cabinet.joints["j_link_2"].upper_limit) og.sim.step() # The objects should be open. assert microwave.states[Open].get_value() assert bottom_cabinet.states[Open].get_value() # Set the joints to their lower limits. microwave.joints["j_link_0"].set_pos(microwave.joints["j_link_0"].lower_limit) bottom_cabinet.joints["j_link_2"].set_pos(bottom_cabinet.joints["j_link_2"].lower_limit) og.sim.step() # The objects should not be open. assert not microwave.states[Open].get_value() assert not bottom_cabinet.states[Open].get_value() # Setters should work. assert microwave.states[Open].set_value(True) assert bottom_cabinet.states[Open].set_value(True) # The objects should be open. assert microwave.states[Open].get_value() assert bottom_cabinet.states[Open].get_value() # Setters should work. assert microwave.states[Open].set_value(False) assert bottom_cabinet.states[Open].set_value(False) # The objects should not be open. 
assert not microwave.states[Open].get_value() assert not bottom_cabinet.states[Open].get_value() @og_test def test_folded_unfolded(): carpet = og.sim.scene.object_registry("name", "carpet") place_obj_on_floor_plane(carpet) for _ in range(5): og.sim.step() assert not carpet.states[Folded].get_value() assert carpet.states[Unfolded].get_value() pos = carpet.root_link.compute_particle_positions() x_min, x_max = np.min(pos, axis=0)[0], np.max(pos, axis=0)[0] x_extent = x_max - x_min # Get indices for the bottom 10 percent vertices in the x-axis indices = np.argsort(pos, axis=0)[:, 0][:(pos.shape[0] // 10)] start = np.copy(pos[indices]) # lift up a bit mid = np.copy(start) mid[:, 2] += x_extent * 0.2 # move towards x_max end = np.copy(mid) end[:, 0] += x_extent * 0.9 increments = 25 for ctrl_pts in np.concatenate([np.linspace(start, mid, increments), np.linspace(mid, end, increments)]): carpet.root_link.set_particle_positions(ctrl_pts, idxs=indices) og.sim.step() assert carpet.states[Folded].get_value() assert not carpet.states[Unfolded].get_value() assert carpet.states[Unfolded].set_value(True) with pytest.raises(NotImplementedError): carpet.states[Unfolded].set_value(False) with pytest.raises(NotImplementedError): carpet.states[Folded].set_value(True) @og_test def test_draped(): breakfast_table = og.sim.scene.object_registry("name", "breakfast_table") carpet = og.sim.scene.object_registry("name", "carpet") place_obj_on_floor_plane(breakfast_table) place_objA_on_objB_bbox(carpet, breakfast_table) for _ in range(10): og.sim.step() assert carpet.states[Draped].get_value(breakfast_table) carpet.set_position([20., 20., 1.]) for _ in range(5): og.sim.step() assert not carpet.states[Draped].get_value(breakfast_table) assert carpet.states[Draped].set_value(breakfast_table, True) with pytest.raises(NotImplementedError): carpet.states[Draped].set_value(breakfast_table, False) @og_test def test_filled(): stockpot = og.sim.scene.object_registry("name", "stockpot") systems = [get_system(system_name) for system_name, system_class in SYSTEM_EXAMPLES.items() if not issubclass(system_class, VisualParticleSystem)] for system in systems: stockpot.set_position_orientation(position=np.ones(3) * 50.0, orientation=[0, 0, 0, 1.0]) place_obj_on_floor_plane(stockpot) for _ in range(5): og.sim.step() assert stockpot.states[Filled].set_value(system, True) og.sim.step() assert stockpot.states[Filled].get_value(system) # Cannot set Filled state False with pytest.raises(NotImplementedError): stockpot.states[Filled].set_value(system, False) system.remove_all_particles() og.sim.step() assert not stockpot.states[Filled].get_value(system) @og_test def test_contains(): stockpot = og.sim.scene.object_registry("name", "stockpot") systems = [get_system(system_name) for system_name, system_class in SYSTEM_EXAMPLES.items()] for system in systems: print(f"Testing Contains {stockpot.name} with {system.name}") stockpot.set_position_orientation(position=np.ones(3) * 50.0, orientation=[0, 0, 0, 1.0]) place_obj_on_floor_plane(stockpot) for _ in range(5): og.sim.step() # Sample single particle if is_physical_particle_system(system_name=system.name): system.generate_particles(positions=[np.array([0, 0, stockpot.aabb[1][2] - 0.1])]) else: if system.get_group_name(stockpot) not in system.groups: system.create_attachment_group(stockpot) system.generate_group_particles( group=system.get_group_name(stockpot), positions=np.array([np.array([0, 0, stockpot.aabb[1][2] - 0.1])]), link_prim_paths=[stockpot.root_link.prim_path], ) og.sim.step() assert 
stockpot.states[Contains].get_value(system) # Remove all particles and make sure contains returns False stockpot.states[Contains].set_value(system, False) og.sim.step() assert not stockpot.states[Contains].get_value(system) # Cannot set Contains state with pytest.raises(NotImplementedError): stockpot.states[Contains].set_value(system, True) system.remove_all_particles() @og_test def test_covered(): bracelet = og.sim.scene.object_registry("name", "bracelet") bowl = og.sim.scene.object_registry("name", "bowl") microwave = og.sim.scene.object_registry("name", "microwave") systems = [get_system(system_name) for system_name, system_class in SYSTEM_EXAMPLES.items()] for obj in (bracelet, bowl, microwave): for system in systems: # bracelet is too small to sample physical particles on it sampleable = is_visual_particle_system(system.name) or obj != bracelet if sampleable: print(f"Testing Covered {obj.name} with {system.name}") obj.set_position_orientation(position=np.ones(3) * 50.0, orientation=[0, 0, 0, 1.0]) place_obj_on_floor_plane(obj) for _ in range(5): og.sim.step() assert obj.states[Covered].set_value(system, True) og.sim.step() assert obj.states[Covered].get_value(system) assert obj.states[Covered].set_value(system, False) # We don't call og.sim.step() here because it's possible for the "second" layer of particles to fall down # and make Covered to be True again. Instead, we clear the caches and check that Covered is False. obj.states[Covered].clear_cache() obj.states[ContactParticles].clear_cache() assert not obj.states[Covered].get_value(system) system.remove_all_particles() obj.set_position_orientation(position=np.ones(3) * 75.0, orientation=[0, 0, 0, 1.0]) def test_clear_sim(): og.sim.clear()
44,690
Python
37.593264
168
0.69237
StanfordVL/OmniGibson/tests/benchmark/benchmark_object_count.py
""" Script to benchmark speed vs. no. of objects in the scene. """ import os import time import matplotlib.pyplot as plt import numpy as np from omnigibson import app, launch_simulator from omnigibson.objects.primitive_object import PrimitiveObject from omnigibson.scenes.scene_base import Scene from omnigibson.utils.asset_utils import get_og_assets_version # Params to be set as needed. MAX_NUM_OBJS = 400 # Maximum no. of objects to add. NUM_OBJS_PER_ITER = 20 # No. of objects to add per iteration. NUM_STEPS_PER_ITER = 30 # No. of steps to take for each n of objects. OBJ_SCALE = 0.05 # Object scale to be set appropriately to sim collisions. RAND_POSITION = True # True to randomize positions. OUTPUT_DIR = os.path.join(os.path.expanduser("~"), "Desktop") # Internal constants. _N_PER_ROW = int(np.sqrt(MAX_NUM_OBJS)) _MIN_VAL = -2.0 _MAX_VAL = 2.0 _STEP_SIZE = (_MAX_VAL - _MIN_VAL) / _N_PER_ROW def _get_position(obj_idx, is_random=False): if is_random: pos_arange = np.arange(_MIN_VAL, _MAX_VAL, step=0.1, dtype=np.float32) x, y, z = np.random.choice(pos_arange, size=3) return x, y, z x = _MIN_VAL + _STEP_SIZE * (obj_idx % _N_PER_ROW) y = _MIN_VAL + _STEP_SIZE * (obj_idx // _N_PER_ROW) return x, y, 0.1 def benchmark_scene(sim): assets_version = get_og_assets_version() print("assets_version", assets_version) scene = Scene(floor_plane_visible=True) sim.import_scene(scene) sim.play() xs = [] ys = [] yerrs = [] for i in range(MAX_NUM_OBJS // NUM_OBJS_PER_ITER): new_objs = [] for j in range(NUM_OBJS_PER_ITER): obj_idx = i * NUM_OBJS_PER_ITER + j obj = PrimitiveObject( prim_path=f"/World/obj{obj_idx}", primitive_type="Sphere", name=f"obj{obj_idx}", scale=OBJ_SCALE, visual_only=False, ) sim.import_object(obj=obj, auto_initialize=False) # x, y, z = _get_position(obj_idx, RAND_POSITION) x, y = 0, 0 z = 0.5 + j * OBJ_SCALE * 2.25 obj.set_position(position=np.array([x, y, z])) new_objs.append(obj) # Take a step to initialize the new objects (done in _non_physics_step()). sim.step() step_freqs = [] for _ in range(NUM_STEPS_PER_ITER): start = time.time() sim.step() end = time.time() step_freqs.append(1 / (end - start)) xs.append(i * NUM_OBJS_PER_ITER) max_freq, min_freq = np.max(step_freqs), np.min(step_freqs) ys.append(np.mean((max_freq, min_freq))) yerrs.append(max_freq - ys[-1]) plt.figure(figsize=(9, 6)) ax = plt.subplot(1, 1, 1) plt.errorbar(xs, ys, yerr=yerrs, elinewidth=0.75) ax.set_xlabel("No. of objects") ax.set_ylabel("Step fps") ax.set_title(f"Version {assets_version}") plt.tight_layout() plt.savefig(os.path.join( OUTPUT_DIR, f"scene_objs_benchmark_{MAX_NUM_OBJS}_{OBJ_SCALE}.png")) def main(): assert MAX_NUM_OBJS <= 1000 sim = launch_simulator() benchmark_scene(sim) app.close() if __name__ == "__main__": main()
3,243
Python
29.603773
82
0.590811
StanfordVL/OmniGibson/tests/benchmark/profiling.py
import os import argparse import json import omnigibson as og import numpy as np import omnigibson.utils.transform_utils as T import time from omnigibson.macros import gm from omnigibson.systems import get_system from omnigibson.object_states import Covered from omnigibson.utils.profiling_utils import ProfilingEnv from omnigibson.utils.constants import PrimType parser = argparse.ArgumentParser() parser.add_argument("-r", "--robot", type=int, default=0) parser.add_argument("-s", "--scene", default="") parser.add_argument("-c", "--cloth", action='store_true') parser.add_argument("-w", "--fluids", action='store_true') parser.add_argument("-g", "--gpu_denamics", action='store_true') parser.add_argument("-p", "--macro_particle_system", action='store_true') PROFILING_FIELDS = ["FPS", "Omni step time", "Non-omni step time", "Memory usage", "Vram usage"] NUM_CLOTH = 5 NUM_SLICE_OBJECT = 3 SCENE_OFFSET = { "": [0, 0], "Rs_int": [0, 0], "Pomaria_0_garden": [0.3, 0], "grocery_store_cafe": [-3.5, 3.5], "house_single_floor": [-3, -1], "Ihlen_0_int": [-1, 2] } def main(): args = parser.parse_args() # Modify macros settings gm.ENABLE_HQ_RENDERING = args.fluids gm.ENABLE_OBJECT_STATES = True gm.ENABLE_TRANSITION_RULES = True gm.ENABLE_FLATCACHE = not args.cloth gm.USE_GPU_DYNAMICS = args.gpu_denamics cfg = { "env": { "action_frequency": 30, "physics_frequency": 120, } } if args.robot > 0: cfg["robots"] = [] for i in range(args.robot): cfg["robots"].append({ "type": "Fetch", "obs_modalities": "all", "position": [-1.3 + 0.75 * i + SCENE_OFFSET[args.scene][0], 0.5 + SCENE_OFFSET[args.scene][1], 0], "orientation": [0., 0., 0.7071, -0.7071] }) if args.scene: assert args.scene in SCENE_OFFSET, f"Scene {args.scene} not found in SCENE_OFFSET" cfg["scene"] = { "type": "InteractiveTraversableScene", "scene_model": args.scene, } else: cfg["scene"] = {"type": "Scene"} cfg["objects"] = [{ "type": "DatasetObject", "name": "table", "category": "breakfast_table", "model": "rjgmmy", "fixed_base": True, "scale": [0.75] * 3, "position": [0.5 + SCENE_OFFSET[args.scene][0], -1 + SCENE_OFFSET[args.scene][1], 0.3], "orientation": [0., 0., 0.7071, -0.7071] }] if args.cloth: cfg["objects"].extend([{ "type": "DatasetObject", "name": f"cloth_{n}", "category": "t_shirt", "model": "kvidcx", "prim_type": PrimType.CLOTH, "abilities": {"cloth": {}}, "bounding_box": [0.3, 0.5, 0.7], "position": [-0.4, -1, 0.7 + n * 0.4], "orientation": [0.7071, 0., 0.7071, 0.], } for n in range(NUM_CLOTH)]) cfg["objects"].extend([{ "type": "DatasetObject", "name": f"apple_{n}", "category": "apple", "model": "agveuv", "scale": [1.5] * 3, "position": [0.5 + SCENE_OFFSET[args.scene][0], -1.25 + SCENE_OFFSET[args.scene][1] + n * 0.2, 0.5], "abilities": {"diceable": {}} if args.macro_particle_system else {} } for n in range(NUM_SLICE_OBJECT)]) cfg["objects"].extend([{ "type": "DatasetObject", "name": f"knife_{n}", "category": "table_knife", "model": "jxdfyy", "scale": [2.5] * 3 } for n in range(NUM_SLICE_OBJECT)]) load_start = time.time() env = ProfilingEnv(configs=cfg) table = env.scene.object_registry("name", "table") apples = [env.scene.object_registry("name", f"apple_{n}") for n in range(NUM_SLICE_OBJECT)] knifes = [env.scene.object_registry("name", f"knife_{n}") for n in range(NUM_SLICE_OBJECT)] if args.cloth: clothes = [env.scene.object_registry("name", f"cloth_{n}") for n in range(NUM_CLOTH)] for cloth in clothes: cloth.root_link.mass = 1.0 env.reset() for n, knife in enumerate(knifes): knife.set_position_orientation( position=apples[n].get_position() 
+ np.array([-0.15, 0.0, 0.1 * (n + 2)]), orientation=T.euler2quat([-np.pi / 2, 0, 0]), ) knife.keep_still() if args.fluids: table.states[Covered].set_value(get_system("water"), True) output, results = [], [] # Update the simulator's viewer camera's pose so it points towards the robot og.sim.viewer_camera.set_position([SCENE_OFFSET[args.scene][0], -3 + SCENE_OFFSET[args.scene][1], 1]) # record total load time total_load_time = time.time() - load_start for i in range(300): if args.robot: result = env.step(np.array([np.random.uniform(-0.3, 0.3, env.robots[i].action_dim) for i in range(args.robot)]).flatten())[4] else: result = env.step(None)[4] results.append(result) field = f"{args.scene}" if args.scene else "Empty scene" if args.robot: field += f", with {args.robot} Fetch" if args.cloth: field += ", cloth" if args.fluids: field += ", fluids" if args.macro_particle_system: field += ", macro particles" output.append({ "name": field, "unit": "time (ms)", "value": total_load_time, "extra": ["Loading time", "Loading time"] }) results = np.array(results) for i, title in enumerate(PROFILING_FIELDS): unit = "time (ms)" if 'time' in title else "GB" value = np.mean(results[:, i]) if title == "FPS": value = 1000 / value unit = "fps" output.append({"name": field, "unit": unit, "value": value, "extra": [title, title]}) ret = [] if os.path.exists("output.json"): with open("output.json", "r") as f: ret = json.load(f) ret.extend(output) with open("output.json", "w") as f: json.dump(ret, f, indent=4) og.shutdown() if __name__ == "__main__": main()
6,127
Python
32.670329
137
0.555247
StanfordVL/OmniGibson/tests/benchmark/benchmark_interactive_scene.py
#!/usr/bin/env python import os import time import matplotlib.pyplot as plt import numpy as np import omnigibson as og from omnigibson.objects import DatasetObject from omnigibson.macros import gm from omnigibson.robots.turtlebot import Turtlebot from omnigibson.scenes.interactive_traversable_scene import InteractiveTraversableScene from omnigibson.simulator import launch_simulator from omnigibson.utils.asset_utils import get_og_assets_version from omnigibson.utils.constants import PrimType from omnigibson.systems import get_system # Params to be set as needed. SCENES = ["Rs_int"] # house_single_floor OUTPUT_DIR = os.path.join(os.path.expanduser("~"), "Desktop") NUM_STEPS = 2000 gm.HEADLESS = False gm.GUI_VIEWPORT_ONLY = True gm.RENDER_VIEWER_CAMERA = False gm.ENABLE_FLATCACHE = True gm.USE_GPU_DYNAMICS = False gm.ENABLE_OBJECT_STATES = False gm.ENABLE_TRANSITION_RULES = False gm.DEFAULT_VIEWER_WIDTH = 128 gm.DEFAULT_VIEWER_HEIGHT = 128 # Launch the simulator launch_simulator(physics_dt=1/60., rendering_dt=1/60.) def benchmark_scene(scene_name, non_rigid_simulation=False, import_robot=True): assets_version = get_og_assets_version() print("assets_version", assets_version) scene = InteractiveTraversableScene(scene_name) start = time.time() og.sim.import_scene(scene) if gm.RENDER_VIEWER_CAMERA: og.sim.viewer_camera.set_position_orientation([0, 0, 0.2], [0.5, -0.5, -0.5, 0.5]) print(time.time() - start) if import_robot: turtlebot = Turtlebot(prim_path="/World/robot", name="agent", obs_modalities=['rgb']) og.sim.import_object(turtlebot) og.sim.step() if non_rigid_simulation: cloth = DatasetObject( name="cloth", prim_path="/World/cloth", category="t_shirt", model="kvidcx", prim_type=PrimType.CLOTH, abilities={"cloth": {}}, bounding_box=[0.3, 0.5, 0.7], ) og.sim.import_object(cloth) og.sim.step() water_system = get_system("water") for i in range(100): water_system.generate_particles( positions=[np.array([0.5, 0, 0.5]) + np.random.randn(3) * 0.1] ) og.sim.step() og.sim.play() if non_rigid_simulation: cloth.set_position([1, 0, 1]) og.sim.step() fps = [] physics_fps = [] render_fps = [] print(len(og.sim.scene.objects)) for i in range(NUM_STEPS): start = time.time() if import_robot: # Apply random actions. 
turtlebot.apply_action(np.zeros(2)) og.sim.step(render=False) physics_end = time.time() og.sim.render() end = time.time() if i % 100 == 0: print("Elapsed time: ", end - start) print("Render Frequency: ", 1 / (end - physics_end)) print("Physics Frequency: ", 1 / (physics_end - start)) print("Step Frequency: ", 1 / (end - start)) fps.append(1 / (end - start)) physics_fps.append(1 / (physics_end - start)) render_fps.append(1 / (end - physics_end)) plt.figure(figsize=(7, 25)) ax = plt.subplot(6, 1, 1) plt.hist(render_fps) ax.set_xlabel("Render fps") ax.set_title( "Scene {} version {}\nnon_physics {} num_obj {}\n import_robot {}".format( scene_name, assets_version, non_rigid_simulation, scene.n_objects, import_robot ) ) ax = plt.subplot(6, 1, 2) plt.hist(physics_fps) ax.set_xlabel("Physics fps") ax = plt.subplot(6, 1, 3) plt.hist(fps) ax.set_xlabel("Step fps") ax = plt.subplot(6, 1, 4) plt.plot(render_fps) ax.set_xlabel("Render fps with time, converge to {}".format(np.mean(render_fps[-100:]))) ax.set_ylabel("fps") ax = plt.subplot(6, 1, 5) plt.plot(physics_fps) ax.set_xlabel("Physics fps with time, converge to {}".format(np.mean(physics_fps[-100:]))) ax.set_ylabel("fps") ax = plt.subplot(6, 1, 6) plt.plot(fps) ax.set_xlabel("Overall fps with time, converge to {}".format(np.mean(fps[-100:]))) ax.set_ylabel("fps") plt.tight_layout() plt.savefig(os.path.join( OUTPUT_DIR, "scene_benchmark_{}_np_{}_r_{}.pdf".format(scene_name, non_rigid_simulation, import_robot))) def main(): for scene in SCENES: benchmark_scene(scene, non_rigid_simulation=False, import_robot=True) og.shutdown() if __name__ == "__main__": main()
4,494
Python
30.215278
100
0.62016
StanfordVL/OmniGibson/docs/gen_ref_pages.py
"""Generate the code reference pages.""" from pathlib import Path import mkdocs_gen_files nav = mkdocs_gen_files.Nav() src = "omnigibson" for path in sorted(Path(src).rglob("*.py")): # module_path = path.relative_to(src).with_suffix("") # doc_path = path.relative_to(src).with_suffix(".md") # full_doc_path = Path("reference", doc_path) # parts = tuple(module_path.parts) if parts[-1] == "__init__": parts = parts[:-1] doc_path = doc_path.with_name("index.md") full_doc_path = full_doc_path.with_name("index.md") elif parts[-1] == "__main__": continue # print(f"parts: {parts}") if parts == (): continue # parts = (src,) # input(f"parts: {parts}") nav[parts] = doc_path.as_posix() with mkdocs_gen_files.open(full_doc_path, "w") as fd: ident = ".".join(parts) fd.write(f"# {parts[-1]}\n\n::: {ident}") mkdocs_gen_files.set_edit_path(full_doc_path, Path("../../") / path) with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file: nav_file.writelines(nav.build_literate_nav()) # parts = list(module_path.parts) # if parts[-1] == "__init__": # # parts = parts[:-1] # elif parts[-1] == "__main__": # continue # with mkdocs_gen_files.open(full_doc_path, "w") as fd: # # identifier = ".".join(parts) # # print("::: " + identifier, file=fd) # # mkdocs_gen_files.set_edit_path(full_doc_path, Path("../") / path) #
1,520
Python
27.698113
75
0.554605
StanfordVL/OmniGibson/docs/index.md
---
title: Title
template: index.html
---
44
Markdown
5.428571
20
0.613636
StanfordVL/OmniGibson/docs/modules/scene.md
--- icon: material/home-outline --- # 🏠 **Scene** Scenes are one level higher than objects. A scene consists of multiple objects that interact with each other. OmniGibson currently supports two types of scenes: - `EmptyScene`: This is an empty scene that can be used to create custom scenes. It does not contain any pre-defined objects. - `InteractiveTraversableScene`: This type of scene is interactive and traversable. It comes with traversable maps that enable robots to perform navigation tasks. Users can choose from the 51 predefined scenes in the OmniGibson dataset; a minimal example of loading one of these scenes is shown after the table below. Here's a list of all the `InteractiveTraversableScene` scenes available in OmniGibson: <table markdown="span"> <tr> <td valign="top" width="30%"> **`Beechwood_0_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Beechwood_0_garden.png" alt="Beechwood_0_garden"> </td> <td> <img src="../assets/scenes/scene-views/Beechwood_0_garden.png" alt="Beechwood_0_garden"> </td> </tr> <tr> <td valign="top" width="30%"> **`Beechwood_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Beechwood_0_int.png" alt="Beechwood_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Beechwood_0_int.png" alt="Beechwood_0_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Beechwood_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Beechwood_1_int.png" alt="Beechwood_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Beechwood_1_int.png" alt="Beechwood_1_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Benevolence_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Benevolence_0_int.png" alt="Benevolence_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Benevolence_0_int.png" alt="Benevolence_0_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Benevolence_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Benevolence_1_int.png" alt="Benevolence_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Benevolence_1_int.png" alt="Benevolence_1_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Benevolence_2_int`** </td> <td> <img src="../assets/scenes/birds-eye-views/Benevolence_2_int.png" alt="Benevolence_2_int"> </td> <td> <img src="../assets/scenes/scene-views/Benevolence_2_int.png" alt="Benevolence_2_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Ihlen_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Ihlen_0_int.png" alt="Ihlen_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Ihlen_0_int.png" alt="Ihlen_0_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Ihlen_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Ihlen_1_int.png" alt="Ihlen_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Ihlen_1_int.png" alt="Ihlen_1_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Merom_0_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Merom_0_garden.png" alt="Merom_0_garden"> </td> <td> <img src="../assets/scenes/scene-views/Merom_0_garden.png" alt="Merom_0_garden"> </td> </tr> <tr> <td valign="top" width="30%"> **`Merom_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Merom_0_int.png" alt="Merom_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Merom_0_int.png" alt="Merom_0_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Merom_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Merom_1_int.png" alt="Merom_1_int"> </td> <td> <img
src="../assets/scenes/scene-views/Merom_1_int.png" alt="Merom_1_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Pomaria_0_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Pomaria_0_garden.png" alt="Pomaria_0_garden"> </td> <td> <img src="../assets/scenes/scene-views/Pomaria_0_garden.png" alt="Pomaria_0_garden"> </td> </tr> <tr> <td valign="top" width="30%"> **`Pomaria_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Pomaria_0_int.png" alt="Pomaria_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Pomaria_0_int.png" alt="Pomaria_0_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Pomaria_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Pomaria_1_int.png" alt="Pomaria_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Pomaria_1_int.png" alt="Pomaria_1_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Pomaria_2_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Pomaria_2_int.png" alt="Pomaria_2_int"> </td> <td> <img src="../assets/scenes/scene-views/Pomaria_2_int.png" alt="Pomaria_2_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Rs_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Rs_garden.png" alt="Rs_garden"> </td> <td> <img src="../assets/scenes/scene-views/Rs_garden.png" alt="Rs_garden"> </td> </tr> <tr> <td valign="top" width="30%"> **`Rs_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Rs_int.png" alt="Rs_int"> </td> <td> <img src="../assets/scenes/scene-views/Rs_int.png" alt="Rs_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Wainscott_0_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Wainscott_0_garden.png" alt="Wainscott_0_garden"> </td> <td> <img src="../assets/scenes/scene-views/Wainscott_0_garden.png" alt="Wainscott_0_garden"> </td> </tr> <tr> <td valign="top" width="30%"> [**`Wainscott_0_int`**](../reference/scene/Wainscott_0_int.html)<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Wainscott_0_int.png" alt="Wainscott_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Wainscott_0_int.png" alt="Wainscott_0_int"> </td> </tr> <tr> <td valign="top" width="30%"> [**`Wainscott_1_int`**](../reference/scene/Wainscott_1_int.html)<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Wainscott_1_int.png" alt="Wainscott_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Wainscott_1_int.png" alt="Wainscott_1_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`grocery_store_asian`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/grocery_store_asian.png" alt="grocery_store_asian"> </td> <td> <img src="../assets/scenes/scene-views/grocery_store_asian.png" alt="grocery_store_asian"> </td> </tr> <tr> <td valign="top" width="30%"> **`grocery_store_cafe`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/grocery_store_cafe.png" alt="grocery_store_cafe"> </td> <td> <img src="../assets/scenes/scene-views/grocery_store_cafe.png" alt="grocery_store_cafe"> </td> </tr> <tr> <td valign="top" width="30%"> **`grocery_store_convenience`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/grocery_store_convenience.png" alt="grocery_store_convenience"> </td> <td> <img src="../assets/scenes/scene-views/grocery_store_convenience.png" alt="grocery_store_convenience"> </td> </tr> <tr> <td valign="top" width="30%"> **`grocery_store_half_stocked`**<br><br> </td> <td> <img 
src="../assets/scenes/birds-eye-views/grocery_store_half_stocked.png" alt="grocery_store_half_stocked"> </td> <td> <img src="../assets/scenes/scene-views/grocery_store_half_stocked.png" alt="grocery_store_half_stocked"> </td> </tr> <tr> <td valign="top" width="30%"> **`hall_arch_wood`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hall_arch_wood.png" alt="hall_arch_wood"> </td> <td> <img src="../assets/scenes/scene-views/hall_arch_wood.png" alt="hall_arch_wood"> </td> </tr> <tr> <td valign="top" width="30%"> **`hall_conference_large`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hall_conference_large.png" alt="hall_conference_large"> </td> <td> <img src="../assets/scenes/scene-views/hall_conference_large.png" alt="hall_conference_large"> </td> </tr> <tr> <td valign="top" width="30%"> **`hall_glass_ceiling`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hall_glass_ceiling.png " alt="hall_glass_ceiling"> </td> <td> <img src="../assets/scenes/scene-views/hall_glass_ceiling.png" alt="hall_glass_ceiling"> </td> </tr> <tr> <td valign="top" width="30%"> **`hall_train_station`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hall_train_station.png" alt="hall_train_station"> </td> <td> <img src="../assets/scenes/scene-views/hall_train_station.png" alt="hall_train_station"> </td> </tr> <tr> <td valign="top" width="30%"> **`hotel_gym_spa`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hotel_gym_spa.png" alt="hotel_gym_spa"> </td> <td> <img src="../assets/scenes/scene-views/hotel_gym_spa.png" alt="hotel_gym_spa"> </td> </tr> <tr> <td valign="top" width="30%"> **`hotel_suite_large`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hotel_suite_large.png" alt="hotel_suite_large"> </td> <td> <img src="../assets/scenes/scene-views/hotel_suite_large.png" alt="hotel_suite_large"> </td> </tr> <tr> <td valign="top" width="30%"> **`hotel_suite_small`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hotel_suite_small.png" alt="hotel_suite_small"> </td> <td> <img src="../assets/scenes/scene-views/hotel_suite_small.png" alt="hotel_suite_small"> </td> </tr> <tr> <td valign="top" width="30%"> **`house_double_floor_lower`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/house_double_floor_lower.png" alt="house_double_floor_lower"> </td> <td> <img src="../assets/scenes/scene-views/house_double_floor_lower.png" alt="house_double_floor_lower"> </td> </tr> <tr> <td valign="top" width="30%"> **`house_double_floor_upper`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/house_double_floor_upper.png" alt="house_double_floor_upper"> </td> <td> <img src="../assets/scenes/scene-views/house_double_floor_upper.png" alt="house_double_floor_upper"> </td> </tr> <tr> <td valign="top" width="30%"> **`house_single_floor`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/house_single_floor.png" alt="house_single_floor"> </td> <td> <img src="../assets/scenes/scene-views/house_single_floor.png" alt="house_single_floor"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_bike`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_bike.png" alt="office_bike"> </td> <td> <img src="../assets/scenes/scene-views/office_bike.png" alt="office_bike"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_cubicles_left`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_cubicles_left.png" alt="office_cubicles_left"> </td> <td> <img 
src="../assets/scenes/scene-views/office_cubicles_left.png" alt="office_cubicles_left"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_cubicles_right`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_cubicles_right.png" alt="office_cubicles_right"> </td> <td> <img src="../assets/scenes/scene-views/office_cubicles_right.png" alt="office_cubicles_right"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_large`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_large.png" alt="office_large"> </td> <td> <img src="../assets/scenes/scene-views/office_large.png" alt="office_large"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_vendor_machine`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_vendor_machine.png" alt="office_vendor_machine"> </td> <td> <img src="../assets/scenes/scene-views/office_vendor_machine.png" alt="office_vendor_machine"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_asian`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_asian.png" alt="restaurant_asian"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_asian.png" alt="restaurant_asian"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_brunch`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_brunch.png" alt="restaurant_brunch"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_brunch.png" alt="restaurant_brunch"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_cafeteria`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_cafeteria.png" alt="restaurant_cafeteria"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_cafeteria.png" alt="restaurant_cafeteria"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_diner`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_diner.png" alt="restaurant_diner"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_diner.png" alt="restaurant_diner"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_hotel`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_hotel.png" alt="restaurant_hotel"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_hotel.png" alt="restaurant_hotel"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_urban`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_urban.png" alt="restaurant_urban"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_urban.png" alt="restaurant_urban"> </td> </tr> <tr> <td valign="top" width="30%"> **`school_biology`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_biology.png" alt="school_biology"> </td> <td> <img src="../assets/scenes/scene-views/school_biology.png" alt="school_biology"> </td> </tr> <tr> <td valign="top" width="30%"> **`school_chemistry`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_chemistry.png" alt="school_chemistry"> </td> <td> <img src="../assets/scenes/scene-views/school_chemistry.png" alt="school_chemistry"> </td> </tr> <tr> <td valign="top" width="30%"> **`school_computer_lab_and_infirmary`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_computer_lab_and_infirmary.png" alt="school_computer_lab_and_infirmary"> </td> <td> <img src="../assets/scenes/scene-views/school_computer_lab_and_infirmary.png" alt="school_computer_lab_and_infirmary"> </td> </tr> <tr> <td valign="top" width="30%"> 
**`school_geography`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_geography.png" alt="school_geography"> </td> <td> <img src="../assets/scenes/scene-views/school_geography.png" alt="school_geography"> </td> </tr> <tr> <td valign="top" width="30%"> **`school_gym`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_gym.png" alt="school_gym"> </td> <td> <img src="../assets/scenes/scene-views/school_gym.png" alt="school_gym"> </td> </tr> </table>
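To make the two scene types described above concrete, below is a minimal, hedged sketch (not part of the original page) of how one of the scenes listed in the table might be requested through the standard config dict passed to `og.Environment`. The specific scene name (`Rs_int`) and the `load_object_categories` filter are illustrative choices only.

``` python
import omnigibson as og

# A minimal sketch: request one of the predefined interactive, traversable scenes.
# An empty scene (the EmptyScene type described above) can be requested analogously
# by swapping out the "scene" block.
cfg = {
    "scene": {
        "type": "InteractiveTraversableScene",
        "scene_model": "Rs_int",  # any scene name from the table above
        # Optionally restrict which object categories get loaded (illustrative):
        "load_object_categories": ["floors", "walls"],
    },
}

env = og.Environment(configs=cfg)

# Step the simulator a few times so the scene settles before it is used.
for _ in range(10):
    og.sim.step()
```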
19,299
Markdown
32.919156
238
0.498109
StanfordVL/OmniGibson/docs/modules/controllers.md
--- icon: material/knob --- # 🎛️ **Controllers** ## Description In **`OmniGibson`**, `Controller`s convert high-level actions into low-level joint motor (position, velocity, or effort) controls for a subset of an individual [`Robot`](./robots.md)'s joints. In an [`Environment`](./environment.md) instance, actions are passed to controllers via the `env.step(action)` call, resulting in the following behavior: <div class="annotate" markdown> - When `env.step(action)` is called, actions are parsed and passed to the respective robot owned by the environment (`env.robots`) via `robot.apply_action(action)` - For a given robot, its `action` is parsed and passed to the respective controllers owned by the robot (`robot.controllers`) via `controller.update_goal(command)` - For a given controller, the inputted `command` is preprocessed (re-scaled and shifted) and then converted into an internally tracked `goal` - Each time a physics step occurs (1), all controllers compute and deploy their desired joint controls via `controller.compute_control()` towards reaching their respective `goal`s </div> 1. Note that because environments operate at `action_frequency <= physics_frequency`, this means that a controller may take _multiple_ control steps per single `env.step(action)` call! **`OmniGibson`** supports multiple types of controllers, which are intended to control a specific subset of a robot's set of joints. Some are more general (such as the `JointController`, which can broadly be applied to any part of a robot), while others are more specific to a robot's morphology (such as the `InverseKinematicsController`, which is intended to be used to control a manipulation robot's end-effector pose). It is important to note that a single robot can potentially own multiple controllers. For example, `Turtlebot` only owns a single controller (to control its two-wheeled base), whereas the mobile-manipulator `Fetch` robot owns four (one each to control its base, head, trunk + arm, and gripper). This allows for modular action space composition, where fine-grained modification of the action space can be achieved by modifying / swapping out individual controllers. For more information about the specific number of controllers each robot has, please see our [list of robots](./robots.md#models). ## Usage ### Definition Controllers can be specified in the config that is passed to the `Environment` constructor via the `['robots'][i]['controller_config']` key. This is expected to be a nested dictionary, mapping controller name (1) to the desired configuration for that specific controller. For each individual controller dict, the `name` key is required and specifies the desired controller class. Additional keys can be specified and will be passed directly to the specific controller class constructor. An example of a robot controller configuration is shown below in `.yaml` form: { .annotate } 1. See `robot.controller_order` for the full list of expected controller names for a given robot ???
code "single_fetch_controller_config_example.yaml" ``` yaml linenums="1" robots: - type: Fetch controller_config: base: name: DifferentialDriveController arm_0: name: InverseKinematicsController kv: 2.0 gripper_0: name: MultiFingerGripperController mode: binary camera: name: JointController use_delta_commands: False ``` ### Runtime Usually, actions are passed to robots, parsed, and passed to individual controllers via `env.step(action)` --> `robot.apply_action(action)` --> `controller.update_goal(command)`. However, specific controller commands can be directly deployed with this API outside of the `env.step()` loop. A controller's internal state can be cleared by calling `controller.reset()`, and no-op actions can computed via `compute_no_op_goal`. Relevant properties, such as `control_type`, `control_dim`, `command_dim`, etc. are all queryable at runtime as well. ## Types **`OmniGibson`** currently supports 6 controllers, consisting of 2 general joint controllers, 1 locomotion-specific controller, 2 arm manipulation-specific controllers, and 1 gripper-specific controller. Below, we provide a brief overview of each controller type: ### General Controllers These are general-purpose controllers that are agnostic to a robot's morphology, and therefore can be used on any robot. <table markdown="span"> <tr> <td valign="top"> [**`JointController`**](../reference/controllers/joint_controller.html)<br><br> Directly controls individual joints. Either outputs low-level joint position or velocity controls if `use_impedance=False`, otherwise will internally compensate the desired gains with the robot's mass matrix and output joint effort controls.<br><br> <ul> <li>_Command Dim_: n_joints</li> <li>_Command Description_: desired per-joint `[q_0, q_1, ...q_n]` position / velocity / effort setpoints, which are assumed to be absolute joint values unless `use_delta` is set</li> <li>_Control Dim_: n_joints</li> <li>_Control Type_: position / velocity / effort</li> </ul> </td> </tr> <tr> <td valign="top"> [**`NullJointController`**](../reference/controllers/null_joint_controller.html)<br><br> Directly controls individual joints via an internally stored `default_command`. Inputted commands will be ignored unless `default_command` is updated.<br><br> <ul> <li>_Command Dim_: n_joints</li> <li>_Command Description_: `[q_0, ..., q_n]` N/A </li> <li>_Control Dim_: n_joints</li> <li>_Control Type_: position / velocity / effort</li> </ul> </td> </tr> </table> ### Locomotion Controllers These are controllers specifically meant for robots with navigation capabilities. 
<table markdown="span" width="100%"> <tr> <td valign="top" width="100%"> [**`DifferentialDriveController`**](../reference/controllers/dd_controller.html)<br><br> Commands 2-wheeled robots by setting linear / angular velocity setpoints and converting them into per-joint velocity control.<br><br> <ul> <li>_Command Dim_: n_joints</li> <li>_Command Description_: desired `[lin_vel, ang_vel]` setpoints </li> <li>_Control Dim_: 2</li> <li>_Control Type_: velocity</li> </ul> </td> </tr> </table> ### Manipulation Arm Controllers These are controllers specifically meant for robots with manipulation capabilities, and are intended to control a robot's end-effector pose <table markdown="span"> <tr> <td valign="top"> [**`InverseKinematicsController`**](../reference/controllers/ik_controller.html)<br><br> Controls a robot's end-effector by iteratively solving inverse kinematics to output a desired joint configuration to reach the desired end effector pose, and then runs an underlying `JointController` to reach the target joint configuration. Multiple modes are available, and dictate both the command dimension and behavior of the controller. `condition_on_current_position` can be set to seed the IK solver with the robot's current joint state, and `use_impedance` can be set if the robot's per-joint inertia should be taken into account when attempting to reach the target joint configuration.<br><br> Note: Orientation convention is axis-angle `[ax,ay,az]` representation, and commands are expressed in the robot base frame unless otherwise noted.<br><br> <ul> <li>_Command Dim_: 3 / 6</li> <li>_Command Description_: desired pose command, depending on `mode`: <ul> <li>`absolute_pose`: 6DOF `[x,y,z,ax,ay,az]` absolute position, absolute orientation</li> <li>`pose_absolute_ori`: 6DOF `[dx,dy,dz,ax,ay,az]` delta position, absolute orientation</li> <li>`pose_delta_ori`: 6DOF `[dx,dy,dz,dax,day,daz]` delta position, delta orientation</li> <li>`position_fixed_ori`: 3DOF `[dx,dy,dz]` delta position, orientation setpoint is kept as fixed initial absolute orientation</li> <li>`position_compliant_ori`: 3DOF `[dx,dy,dz]` delta position, delta orientation setpoint always kept as 0s (so can drift over time)</li> </ul></li> <li>_Control Dim_: n_arm_joints</li> <li>_Control Type_: position / effort</li> </ul> </td> </tr> <tr> <td valign="top"> [**`OperationalSpaceController`**](../reference/controllers/osc_controller.html)<br><br> Controls a robot's end-effector by applying the [operational space control](https://khatib.stanford.edu/publications/pdfs/Khatib_1987_RA.pdf) algorithm to apply per-joint efforts to perturb the robot's end effector with impedances ("force") along all six (x,y,z,ax,ay,az) axes. Unlike `InverseKinematicsController`, this controller is inherently compliant and especially useful for contact-rich tasks or settings where fine-grained forces are required. 
For robots with >6 arm joints, an additional null command is used as a secondary objective and is defined as joint state `reset_joint_pos`.<br><br> Note: Orientation convention is axis-angle `[ax,ay,az]` representation, and commands are expressed in the robot base frame unless otherwise noted.<br><br> <ul> <li>_Command Dim_: 3 / 6</li> <li>_Command Description_: desired pose command, depending on `mode`: <ul> <li>`absolute_pose`: 6DOF `[x,y,z,ax,ay,az]` absolute position, absolute orientation</li> <li>`pose_absolute_ori`: 6DOF `[dx,dy,dz,ax,ay,az]` delta position, absolute orientation</li> <li>`pose_delta_ori`: 6DOF `[dx,dy,dz,dax,day,daz]` delta position, delta orientation</li> <li>`position_fixed_ori`: 3DOF `[dx,dy,dz]` delta position, orientation setpoint is kept as fixed initial absolute orientation</li> <li>`position_compliant_ori`: 3DOF `[dx,dy,dz]` delta position, delta orientation setpoint always kept as 0s (so can drift over time)</li> </ul></li> <li>_Control Dim_: n_arm_joints</li> <li>_Control Type_: effort</li> </ul> </td> </tr> </table> ### Manipulation Gripper Controllers These are controllers specifically meant for robots with manipulation capabilities, and are intended to control a robot's end-effector gripper <table markdown="span" width="100%"> <tr> <td valign="top" width="100%"> [**`MultiFingerGripperController`**](../reference/controllers/multi_finger_gripper_controller.html)<br><br> Commands a robot's gripper joints, with behavior defined via `mode`. By default, &lt;closed, open&gt; is assumed to correspond to &lt;q_lower_limit, q_upper_limit&gt; for each joint, though this can be manually set via the `closed_qpos` and `open_qpos` arguments.<br><br> <ul> <li>_Command Dim_: 1 / n_gripper_joints</li> <li>_Command Description_: desired gripper command, depending on `mode`: <ul> <li>`binary`: 1DOF `[open / close]` binary command, where &gt;0 corresponds to open unless `inverted` is set, in which case &lt;0 corresponds to open</li> <li>`smooth`: 1DOF `[q]` command, which gets broadcasted across all finger joints</li> <li>`independent`: NDOF `[q_0, ..., q_n]` per-finger joint commands</li> </ul></li> <li>_Control Dim_: n_gripper_joints</li> <li>_Control Type_: position / velocity / effort</li> </ul> </td> </tr> </table>
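As a complement to the Runtime notes above, here is a short, hypothetical sketch (not part of the original page) of how the per-controller API could be exercised directly, outside of the usual `env.step(action)` loop. The `Fetch` robot, the `"base"` controller name, and the bare-bones config are illustrative assumptions rather than a recommended setup.

``` python
import numpy as np
import omnigibson as og

# A minimal sketch: create an environment with a single Fetch robot, then interact
# with its controllers directly instead of going through env.step(action).
cfg = {
    "scene": {"type": "Scene"},
    "robots": [{"type": "Fetch", "obs_modalities": ["rgb"]}],
}
env = og.Environment(configs=cfg)
robot = env.robots[0]

# Inspect the modular controller decomposition (e.g. base / camera / arm_0 / gripper_0).
for name, controller in robot.controllers.items():
    print(name, controller.control_type, controller.command_dim, controller.control_dim)

# Send a zero command to a single controller and clear its internal state afterwards;
# compute_control() is invoked internally each time the simulation takes a physics step.
base = robot.controllers["base"]
base.update_goal(np.zeros(base.command_dim))
og.sim.step()
base.reset()
```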
12,148
Markdown
66.871508
616
0.670975
StanfordVL/OmniGibson/docs/modules/object.md
--- icon: material/food-apple-outline --- # 🍎 **Object** Objects, such as furnitures, are essential to building manipulation environments. We designed the MujocoObject interfaces to standardize and simplify the procedure for importing 3D models into the scene or procedurally generate new objects. MuJoCo defines models via the MJCF XML format. These MJCF files can either be stored as XML files on disk and loaded into simulator, or be created on-the-fly by code prior to simulation. ## Usage ### Importing Objects Objects can be added to a given `Environment` instance by specifying them in the config that is passed to the environment constructor via the `objects` key. This is expected to be a list of dictionaries, each of which specifies the desired configuration for a single object to be created. For each dict, the `type` key is required and specifies the desired object class, and global `position` and `orientation` (in (x,y,z,w) quaternion form) can also be specified. Additional keys can be specified and will be passed directly to the specific robot class constructor. An example of a robot configuration is shown below in `.yaml` form: ??? code "single_object_config_example.yaml" ``` yaml linenums="1" robots: - type: USDObject name: some_usd_object usd_path: your_path_to_model.usd visual_only: False position: [0, 0, 0] orientation: [0, 0, 0, 1] scale: [0.5, 0.6, 0.7] ``` `OmniGibson` supports 6 types of objects shown as follows: - `ControllableObject`: This class represents objects that can be controller through joint controllers. It is used as the parent class of the robot classes and provide functionalities to apply control actions to the objects. In general, users should not create object of this class, but rather directly spawn the desired robot type in the `robots` section of the config. - `StatefulObject`: This class represents objects that comes with object states. For more information regarding object states please take a look at `object_states`. This is also meant to be a parent class, and should generally not be instantiated directly. - `PrimitiveObject`: This class represents primitive shape objects (Cubes, Spheres, Cones, etc.) This are usually used as visual objects in the scene. For example, users can instantiate a sphere object to visualize the target location of a robot reaching task, and set it's property `visual_only` to true to disable it's kinematics and collision with other objects. - `LightObject`: This class specifically represents lights in the scene, and provide funtionalities to modify the properties of lights. There are 6 types of lights users can instantiate in OmniGibson, cylinder light, disk light, distant light, dome light, geometry ligtht, rectangle light, and sphere light. Users can choose whichever type of light that works for the best, and set the `intensity` property to control the brightness of it. - `USDObject`: This class represents objects loaded through a USD file. This is useful when users want to load a custom USD asset into the simulator. Users should specify the `usd_path` parameter of the `USDObject` in order to load the desired file of their choice. - `DatasetObject`: This class inherits from `USDObject` and represents object from the OmniGibson dataset. Users should specify the category of objects they want to load, as well as the model id, which is a 6 character string unique to each dataset object. 
For the possible categories and models, please refer to our [Knowledgebase Dashboard](https://behavior.stanford.edu/knowledgebase/) ### Runtime Usually, objects are instantiated upon startup. We can also modify certain properties of an object while the simulator is running. For example, to teleport an object from one place to another, simply call `object.set_position_orientation(new_pos, new_orn)`. Another example is highlighting an object by setting `object.highlighted = True`; the object will then be highlighted in the scene. To access objects from the environment, one can call `env.scene.object_registry`. Here are a couple of examples, with a short usage sketch following the list: - `env.scene.object_registry("name", OBJECT_NAME)`: get the object by its name - `env.scene.object_registry("category", CATEGORY)`: get the object by its category - `env.scene.object_registry("prim_path", PRIM_PATH)`: get the object by its prim path
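The snippet below is a minimal sketch of this runtime workflow; it assumes an `Environment` has already been created and that it contains an object registered under the (illustrative) name `delicious_apple`.

``` python
# Minimal sketch: look up an object at runtime and modify it.
# Assumes `env` is an existing og.Environment; the object name is illustrative.
apple = env.scene.object_registry("name", "delicious_apple")

# Teleport the object to a new pose: (x, y, z) position and (x, y, z, w) orientation
apple.set_position_orientation([0.5, 0.5, 1.0], [0, 0, 0, 1.0])

# Toggle the highlight visuals on the object
apple.highlighted = True
```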
4,438
Markdown
79.709089
634
0.770392
StanfordVL/OmniGibson/docs/modules/robots.md
--- icon: material/robot-outline --- # 🤖 **Robots** ## Description In **`OmniGibson`**, `Robot`s define agents that can interact with other objects in a given environment. Each robot can _interact_ by deploying joint commands via its set of [`Controller`](./controllers.md)s, and can _perceive_ its surroundings via its set of [`Sensor`](./sensor.md)s. **`OmniGibson`** supports both navigation and manipulation robots, and allows for modular specification of individual controllers for controlling the different components of a given robot. For example, the `Fetch` robot is a mobile manipulator composed of a mobile (two-wheeled) base, two head joints, a trunk, seven arm joints, and two gripper finger joints. `Fetch` owns 4 controllers, one each for controlling the base, the head, the trunk + arm, and the gripper. There are multiple options for each controller depending on the desired action space. For more information, check out our [robot examples](../getting_started/examples.md#robots). It is important to note that robots are full-fledged `StatefulObject`s, and thus leverage the same APIs as normal scene objects and can be treated as such. Robots can be thought of as `StatefulObject`s that additionally own controllers (`robot.controllers`) and sensors (`robot.sensors`). ## Usage ### Importing Robots can be added to a given `Environment` instance by specifying them in the config that is passed to the environment constructor via the `robots` key. This is expected to be a list of dictionaries, where each dictionary specifies the desired configuration for a single robot to be created. For each dict, the `type` key is required and specifies the desired robot class, and global `position` and `orientation` (in (x,y,z,w) quaternion form) can also be specified. Additional keys can be specified and will be passed directly to the specific robot class constructor. An example of a robot configuration is shown below in `.yaml` form: ??? code "single_fetch_config_example.yaml" ``` yaml linenums="1" robots: - type: Fetch position: [0, 0, 0] orientation: [0, 0, 0, 1] obs_modalities: [scan, rgb, depth] scale: 1.0 self_collisions: false action_normalize: true action_type: continuous grasping_mode: physical rigid_trunk: false default_trunk_offset: 0.365 default_arm_pose: diagonal30 reset_joint_pos: tuck sensor_config: VisionSensor: sensor_kwargs: image_height: 128 image_width: 128 ScanSensor: sensor_kwargs: min_range: 0.05 max_range: 10.0 controller_config: base: name: DifferentialDriveController arm_0: name: InverseKinematicsController kv: 2.0 gripper_0: name: MultiFingerGripperController mode: binary camera: name: JointController use_delta_commands: False ``` ### Runtime Usually, actions are passed to robots and observations retrieved via `obs, reward, done, info = env.step(action)`. However, actions can be directly deployed and observations retrieved from the robot using the following APIs: <div class="annotate" markdown> - **Applying actions**: `robot.apply_action(action)` (1) - **Retrieving observations**: `obs, info = robot.get_obs()` (2) </div> 1. `action` is a 1D numpy array. For more information, please see the [Controller](./controllers.md) section! 2. `obs` is a dict mapping observation name to observation data, and `info` is a dict of relevant metadata about the observations. For more information, please see the [Sensor](./sensor.md) section! Controllers and sensors can be accessed directly via the `controllers` and `sensors` properties, respectively. A minimal sketch of this runtime API is shown below. 
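It assumes an `Environment` with at least one robot has already been created; note that the simulator still needs to be stepped separately for applied actions to take effect.

``` python
import numpy as np

# Minimal sketch: drive a robot directly instead of going through env.step().
# Assumes `env` is an existing og.Environment with at least one robot loaded.
robot = env.robots[0]

# Deploy a (zero) action through the robot's own controllers
robot.apply_action(np.zeros(robot.action_dim))

# Retrieve this robot's sensor observations and their metadata
obs, info = robot.get_obs()
print(obs.keys())

# Controllers and sensors are exposed as dictionaries on the robot
print(list(robot.controllers.keys()))
print(list(robot.sensors.keys()))
```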
And, like all objects in **`OmniGibson`**, common information such as joint data and object states can also be directly accessed from the `robot` class. ## Models **`OmniGibson`** currently supports 9 robots, consisting of 4 mobile robots, 2 manipulation robots, 2 mobile manipulation robots, and 1 anthropomorphic "robot" (a bimanual agent proxy used for VR teleoperation). Below, we provide a brief overview of each model: ### Mobile Robots These are navigation-only robots (an instance of [`LocomotionRobot`](../reference/robots/locomotion_robot.html)) that solely consist of a base that can move. <table markdown="span"> <tr> <td valign="top" width="60%"> [**`Turtlebot`**](../reference/robots/turtlebot.html)<br><br> The two-wheeled <a href="https://www.turtlebot.com/turtlebot2/">Turtlebot 2</a> model with the Kobuki base.<br><br> <ul> <li>_Controllers_: Base</li> <li>_Sensors_: Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Turtlebot.png" alt="rgb"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Locobot`**](../reference/robots/locobot.html)<br><br> The two-wheeled, open-source <a href="http://www.locobot.org/">LoCoBot</a> model.<br><br> Note that in our model the arm is disabled and is fixed to the base.<br><br> <ul> <li>_Controllers_: Base</li> <li>_Sensors_: Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Locobot.png" alt="rgb"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Husky`**](../reference/robots/husky.html)<br><br> The four-wheeled <a href="https://clearpathrobotics.com/husky-unmanned-ground-vehicle-robot/">Husky UAV</a> model from Clearpath Robotics.<br><br> <ul> <li>_Controllers_: Base</li> <li>_Sensors_: Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Husky.png" alt="rgb"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Freight`**](../reference/robots/freight.html)<br><br> The two-wheeled <a href="https://docs.fetchrobotics.com/">Freight</a> model which serves as the base for the Fetch robot.<br><br> <ul> <li>_Controllers_: Base</li> <li>_Sensors_: Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Freight.png" alt="rgb"> </td> </tr> </table> ### Manipulation Robots These are manipulation-only robots (an instance of [`ManipulationRobot`](../reference/robots/manipulation_robot.html)) that cannot move and solely consist of an actuated arm with a gripper attached to its end effector. <table markdown="span"> <tr> <td valign="top" width="60%"> [**`Franka`**](../reference/robots/franka.html)<br><br> The popular 7-DOF <a href="https://franka.de/">Franka Research 3</a> model equipped with a parallel jaw gripper. 
Note that OmniGibson also includes two alternative versions of Franka: FrankaAllegro (equipped with an Allegro hand) and FrankaLeap (equipped with a Leap hand).<br><br> <ul> <li>_Controllers_: Arm, Gripper</li> <li>_Sensors_: Wrist Camera</li> </ul> </td> <td> <img src="../assets/robots/FrankaPanda.png" alt="rgb"> </td> </tr> <tr> <td valign="top" width="60%"> [**`VX300S`**](../reference/robots/vx300s.html)<br><br> The 6-DOF <a href="https://www.trossenrobotics.com/viperx-300-robot-arm-6dof.aspx">ViperX 300 6DOF</a> model from Trossen Robotics equipped with a parallel jaw gripper.<br><br> <ul> <li>_Controllers_: Arm, Gripper</li> <li>_Sensors_: Wrist Camera</li> </ul> </td> <td> <img src="../assets/robots/VX300S.png" alt="rgb"> </td> </tr> </table> ### Mobile Manipulation Robots These are robots that can both navigate and manipulate (and inherit from both [`LocomotionRobot`](../reference/robots/locomotion_robot.html) and [`ManipulationRobot`](../reference/robots/manipulation_robot.html)), and are equipped with both a base that can move as well as one or more gripper-equipped arms that can actuate. <table markdown="span"> <tr> <td valign="top" width="60%"> [**`Fetch`**](../reference/robots/fetch.html)<br><br> The <a href="https://docs.fetchrobotics.com/">Fetch</a> model, composed of a two-wheeled base, linear trunk, 2-DOF head, 7-DOF arm, and 2-DOF parallel jaw gripper.<br><br> <ul> <li>_Controllers_: Base, Head, Arm, Gripper</li> <li>_Sensors_: Head Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Fetch.png" alt="rgb"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Tiago`**](../reference/robots/tiago.html)<br><br> The bimanual <a href="https://pal-robotics.com/robots/tiago/">Tiago</a> model from PAL robotics, composed of a holonomic base (which we model as a 3-DOF (x,y,rz) set of joints), linear trunk, 2-DOF head, x2 7-DOF arm, and x2 2-DOF parallel jaw grippers.<br><br> <ul> <li>_Controllers_: Base, Head, Left Arm, Right Arm, Left Gripper, Right Gripper</li> <li>_Sensors_: Head Camera, Rear LIDAR, Front LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Tiago.png" alt="rgb"> </td> </tr> </table> ### Additional Robots <table markdown="span"> <tr> <td valign="top" width="60%"> [**`BehaviorRobot`**](../reference/robots/behavior_robot.html#robots.behavior_robot.BehaviorRobot)<br><br> A hand-designed model intended to be used exclusively for VR teleoperation.<br><br> <ul> <li>_Controllers_: Base, Head, Left Arm, Right Arm, Left Gripper, Right Gripper</li> <li>_Sensors_: Head Camera</li> </ul> </td> <td> <img src="../assets/robots/BehaviorRobot.png" alt="rgb"> </td> </tr> </table>
10,379
Markdown
45.756757
640
0.613161
StanfordVL/OmniGibson/docs/modules/prim.md
--- icon: material/cube-outline --- # 🧱 **Prim** A Prim, short for "primitive," is a fundamental building block of a scene, representing an individual object or entity within the scene's hierarchy. It is essentially a container that encapsulates data, attributes, and relationships, allowing it to represent various scene components like models, cameras, lights, or groups of prims. These prims are systematically organized into a hierarchical framework, creating a scene graph that depicts the relationships and transformations between objects. Every prim is uniquely identified by a path, which serves as a locator within the scene graph. This path includes the names of all parent prims leading up to it. For example, a prim's path might be `/World/robot0/gripper_link`, indicating that the `gripper_link` is a child of `robot0`. Additionally, prims carry a range of attributes, including position, rotation, scale, and material properties. These attributes define the properties and characteristics of the objects they represent.
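To make this concrete, here is a minimal sketch of inspecting a prim through the standard USD Python API; it assumes the simulator exposes the current USD stage as `og.sim.stage` and that a prim exists at the example path used above.

``` python
import omnigibson as og

# Minimal sketch: look up a prim by its path and inspect it.
# Assumes og.sim.stage exposes the underlying USD stage and that the example
# path from the text above exists in the loaded scene.
stage = og.sim.stage
prim = stage.GetPrimAtPath("/World/robot0/gripper_link")

if prim.IsValid():
    print(prim.GetName(), prim.GetTypeName())                  # prim name and schema type
    print([child.GetName() for child in prim.GetChildren()])   # children in the scene graph
    for attr in prim.GetAttributes()[:5]:                      # a few of its authored attributes
        print(attr.GetName(), attr.Get())
```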
1,040
Markdown
79.076917
496
0.796154
StanfordVL/OmniGibson/docs/modules/sensor.md
--- icon: material/camera-outline --- # 📷 **Sensor** ## Description Sensors play a crucial role in OmniGibson, as they facilitate the robots' observation of their environment. We offer two main classes of sensors: - `ScanSensor`: This includes a 2D LiDAR range sensor and an occupancy grid sensor. - `VisionSensor`: This sensor type features a camera equipped with various modalities, including RGB, depth, normals, three types of segmentation, optical flow, 2D and 3D bounding boxes. ## Usage To obtain sensor readings, the `get_obs()` function can be invoked at multiple levels within our hierarchy: - From `Environment`: Provides 1. All observations from all robots 2. All task-related observations 3. Observations from external sensors, if available - From `Robot`: Provides 1. Readings from all sensors associated with the robot 2. Proprioceptive observations for the robot (e.g., base pose, joint position, joint velocity) - From `Sensor`: Delivers all sensor readings based on the sensor's modalities. Additionally, our API allows for the simulation of real-world sensor behaviors by: 1. Adding noise 2. Dropping out sensor values to emulate missing data in sensor readings Besides the actual data, `get_obs()` also returns a secondary dictionary containing information about the data, such as segmentation labels for vision sensors. For instance, calling `get_obs()` on an environment with a single robot, which has all modalities enabled, might produce results similar to this: <details> <summary>Click to see code!</summary> <pre><code> data: { "robot0": { "robot0:laser_link:Lidar:0": { "scan": np.array(...), "occupancy_grid": np.array(...) }, "robot0:eyes:Camera:0": { "rgb": np.array(...), "depth": np.array(...), "depth_linear": np.array(...), "normal": np.array(...), "flow": np.array(...), "bbox_2d_tight": np.array(...), "bbox_2d_loose": np.array(...), "bbox_3d": np.array(...), "seg_semantic": np.array(...), "seg_instance": np.array(...), "seg_instance_id": np.array(...) }, "proprio": np.array(...) } "task": { "low_dim": np.array(...) 
} } info: { 'robot0': { 'robot0:laser_link:Lidar:0': {}, 'robot0:eyes:Camera:0': { 'seg_semantic': {'298104422': 'object', '764121901': 'background', '2814990211': 'agent'}, 'seg_instance': {...}, 'seg_instance_id': {...} }, 'proprio': {} } } </code></pre> </details> ## Observations ### Vision Sensor <table markdown="span"> <tr> <td valign="top" width="60%"> <strong>RGB</strong><br><br> RGB image of the scene from the camera perspective.<br><br> Size: (height, width, 4), numpy.uint8<br><br> </td> <td> <img src="../assets/sensor_asset/rgb.png" alt="rgb"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Depth</strong><br><br> Distance between the camera and everything else in the scene.<br><br> Size: (height, width), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/depth.png" alt="Depth Map"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Depth Linear</strong><br><br> Distance between the camera and everything else in the scene, where distance measurement is linearly proportional to the actual distance.<br><br> Size: (height, width), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/depth_linear.png" alt="Depth Map Linear"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Normal</strong><br><br> Surface normals - vectors perpendicular to the surface of objects in the scene.<br><br> Size: (height, width, 4), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/normal.png" alt="Normal"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Semantic Segmentation</strong><br><br> Each pixel is assigned a label, indicating the object category it belongs to (e.g., table, chair).<br><br> Size: (height, width), numpy.uint32<br><br> We also provide a dictionary containing the mapping of semantic IDs to object categories. 
You can get this here: <br><br> from omnigibson.utils.constants import semantic_class_id_to_name </td> <td> <img src="../assets/sensor_asset/seg_semantic.png" alt="Semantic Segmentation"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Instance Segmentation</strong><br><br> Each pixel is assigned a label, indicating the specific object instance it belongs to (e.g., table1, chair2).<br><br> Size: (height, width), numpy.uint32<br><br> </td> <td> <img src="../assets/sensor_asset/seg_instance.png" alt="Instance Segmentation"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Instance Segmentation ID</strong><br><br> Each pixel is assigned a label, indicating the specific object instance it belongs to (e.g., /World/table1/visuals, /World/chair2/visuals).<br><br> Size: (height, width), numpy.uint32<br><br> </td> <td> <img src="../assets/sensor_asset/seg_instance_id.png" alt="Instance Segmentation ID"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Optical Flow</strong><br><br> Optical flow - motion of pixels belonging to objects caused by the relative motion between the camera and the scene.<br><br> Size: (height, width, 4), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/optical_flow.png" alt="Optical Flow"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>2D Bounding Box Tight</strong><br><br> 2D bounding boxes wrapping individual objects, excluding any parts that are occluded.<br><br> Size: a list of <br> semanticID, numpy.uint32;<br> x_min, numpy.int32;<br> y_min, numpy.int32;<br> x_max, numpy.int32;<br> y_max, numpy.int32;<br> occlusion_ratio, numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/bbox_2d_tight.png" alt="2D Bounding Box Tight"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>2D Bounding Box Loose</strong><br><br> 2D bounding boxes wrapping individual objects, including occluded parts.<br><br> Size: a list of <br> semanticID, numpy.uint32;<br> x_min, numpy.int32;<br> y_min, numpy.int32;<br> x_max, numpy.int32;<br> y_max, numpy.int32;<br> occlusion_ratio, numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/bbox_2d_loose.png" alt="2D Bounding Box Loose"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>3D Bounding Box</strong><br><br> 3D bounding boxes wrapping individual objects.<br><br> Size: a list of <br> semanticID, numpy.uint32;<br> x_min, numpy.float32;<br> y_min, numpy.float32;<br> z_min, numpy.float32;<br> x_max, numpy.float32;<br> y_max, numpy.float32;<br> z_max, numpy.float32;<br> transform (4x4), numpy.float32;<br> occlusion_ratio, numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/bbox_3d.png" alt="3D Bounding Box"> </td> </tr> </table> ### Range Sensor <table markdown="span"> <tr> <td valign="top" width="60%"> <strong>2D LiDAR</strong><br><br> Distances to surrounding objects by emitting laser beams and detecting the reflected light.<br><br> Size: # of rays, numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/lidar.png" alt="2D LiDAR"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Occupancy Grid</strong><br><br> A representation of the environment as a 2D grid where each cell indicates the presence (or absence) of an obstacle.<br><br> Size: (grid resolution, grid resolution), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/occupancy_grid.png" alt="Occupancy Grid"> </td> </tr> </table> ### Proprioception <table markdown="span"> <tr> <td valign="top" width="100%"> <strong>Joint Positions</strong><br><br> Joint positions.<br><br> 
Size: # of joints, numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Joint Velocities</strong><br><br> Joint velocities.<br><br> Size: # of joints, numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Joint Efforts</strong><br><br> Torque measured at each joint.<br><br> Size: # of joints, numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Robot Position</strong><br><br> Robot position in the world frame.<br><br> Size: (x, y, z), numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Robot Orientation</strong><br><br> Robot global euler orientation.<br><br> Size: (roll, pitch, yaw), numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td> <strong>Robot 2D Orientation</strong><br><br> Robot orientation on the XY plane of the world frame.<br><br> Size: angle, numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Robot Linear Velocity</strong><br><br> Robot linear velocity.<br><br> Size: (x_vel, y_vel, z_vel), numpy.float64<br><br> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Robot Angular Velocity</strong><br><br> Robot angular velocity.<br><br> Size: (x_vel, y_vel, z_vel), numpy.float64<br><br> </td> <td> </td> </tr> </table> ### Task Observation <table markdown="span" style="width: 100%;"> <tr> <td valign="top" width="100%"> <strong>Low-dim task observation</strong><br><br> Task-specific observation, e.g. navigation goal position.<br><br> Size: # of low-dim observation, numpy.float64<br><br> </td> <td> </td> </tr> </table>
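To tie the above together, here is a minimal sketch of querying `get_obs()` at the three levels described in the Usage section; it assumes an `Environment` with one camera-equipped robot has already been created.

``` python
# Minimal sketch: observations at the environment, robot, and sensor levels.
# Assumes `env` is an existing og.Environment with one camera-equipped robot.
obs, info = env.get_obs()                  # robots + task + external sensors

robot = env.robots[0]
robot_obs, robot_info = robot.get_obs()    # all of this robot's sensors + proprioception

# Grab the robot's camera by name (vision sensor names contain "Camera", as shown above)
camera = next(sensor for name, sensor in robot.sensors.items() if "Camera" in name)
cam_obs, cam_info = camera.get_obs()       # just this sensor's modalities, e.g. "rgb", "depth"
print(cam_obs.keys())
```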
11,560
Markdown
33.822289
187
0.54109
StanfordVL/OmniGibson/docs/modules/overview.md
--- icon: material/graph-outline --- # **Overview** <figure markdown="span"> ![OmniGibson architecture overview](../assets/architecture_overview.png){ width="100%" } </figure> **`OmniGibson`**'s framework provides **modular APIs** for (a) quickly interacting with different components of a created environment and (b) prototyping and developing custom environments. **`OmniGibson`** is built upon NVIDIA's [IsaacSim](https://docs.omniverse.nvidia.com/isaacsim/latest/index.html), a powerful simulation platform that uses [PhysX](https://nvidia-omniverse.github.io/PhysX/physx/5.3.1/index.html) as the physics backend. We build upon IsaacSim's `Simulator` interface to construct our `Environment` class, which is an [OpenAI gym-compatible](https://gymnasium.farama.org/content/gym_compatibility/) interface and the main entry point into **`OmniGibson`**. An `Environment` instance generally consists of the following: - A [`Scene`](./scene.md) instance, which by default is a "dummy" (empty) or a fully-populated (`InteractiveTraversableScene`) instance, - A [`BaseTask`](./task.md) instance, which can range from a complex `BehaviorTask` to a navigation `PointNavigationTask` or a no-op `DummyTask`, - Optionally, one or more [`BaseRobot`](./robots.md)s, which define the action space for the given environment instance, - Optionally, one or more additional [`BaseObject`](./object.md)s, which are additional object models not explicitly defined in the environment's scene The above figure describes **`OmniGibson`**'s simulation loop: 1. **Action Execution:** An externally defined `action` is passed to `Robot` instances in the `Environment`, which is processed by each robot's own set of `Controller`s and converted into low-level joint commands that are then deployed on the robot. 2. **Simulation Stepping:** The simulator takes at least one (and potentially multiple) physics steps, updating its internal state. 3. **Observation Retrieval:** Sensors on each `Robot` instance grab observations from the updated simulator state, and the loaded `Task` instance also computes its task-relevant observations and updates its internal state. The observations as well as task-relevant data are then returned from the `Environment` instance. Each of the modules in **`OmniGibson`** can be extended by the user, allowing custom subclass implementations to be used without needing to directly modify **`OmniGibson`** source code. This section provides high-level overviews of each of the modules, as well as general insight into the purpose and intended use-cases of each module.
2,584
Markdown
88.137928
340
0.772446
StanfordVL/OmniGibson/docs/modules/environment.md
--- icon: material/earth --- # 🌎 **Environment** The OpenAI Gym Environment serves as a top-level simulation object, offering a suite of common interfaces. These include methods such as `step`, `reset`, `render`, and properties like `observation_space` and `action_space`. The OmniGibson Environment builds upon this foundation by also supporting the loading of scenes, robots, and tasks. Following the OpenAI Gym interface, the OmniGibson environment further provides access to both the action space and observation space of the robots and external sensors. Creating a minimal environment requires the definition of a config dictionary. This dictionary should contain details about the scene, objects, robots, and specific characteristics of the environment: <details> <summary>Click to see code!</summary> <pre><code> import omnigibson as og cfg = { "env": { "action_frequency": 10, "physics_frequency": 120, }, "scene": { "type": "Scene", }, "objects": [], "robots": [ { "type": "Fetch", "obs_modalities": 'all', "controller_config": { "arm_0": { "name": "NullJointController", "motor_type": "position", }, }, } ] } env = og.Environment(configs=cfg) </code></pre> </details>
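Once created, the environment can be driven through the Gym-style interface described above; the following is a minimal sketch assuming `env` is the instance built from the config in the example.

``` python
# Minimal sketch: interacting with the environment through its Gym-style interface.
# Assumes `env` is the og.Environment instance created in the example above.
print(env.action_space)        # combined action space of all loaded robots
print(env.observation_space)   # observation space of robots / sensors / task

env.reset()
for _ in range(100):
    action = env.action_space.sample()            # random actions, purely for illustration
    obs, reward, done, info = env.step(action)
    if done:
        env.reset()
```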
1,373
Markdown
30.953488
509
0.631464
StanfordVL/OmniGibson/docs/tutorials/demo_collection.md
--- icon: octicons/rocket-16 --- # 🕹️ **Collecting Demonstrations** ## Devices I/O Devices can be used to read user input and teleoperate simulated robots in real-time. OmniGibson leverages [TeleMoMa](https://robin-lab.cs.utexas.edu/telemoma-web/), a modular and versatile library for manipulating mobile robots in the scene. This is achieved by using devices such as keyboards, SpaceMouse, cameras, VR devices, mobile phones, or any combination thereof. More generally, we support any interface that implements the `telemoma.human_interface.teleop_core.BaseTeleopInterface` class. In order to support your own custom device, simply subclass this base class and implement the required methods. For more information on this, check out the [TeleMoMa codebase](https://github.com/UT-Austin-RobIn/telemoma). ## Teleoperation The following section will go through `robot_teleoperation_example.py`, which lets users choose a robot to complete a simple pick and place task. Users are also encouraged to take a look at `vr_simple_demo.py`, which shows how to actually render to a VR headset and teleoperate the `BehaviorRobot` with VR controllers (HTC VIVE). We assume that the scene and task are already set up. To initialize a teleoperation system, we first need to specify its configuration. After defining the config, simply instantiate the teleoperation system: ``` teleop_sys = TeleopSystem(config=teleop_config, robot=robot, show_control_marker=True) ``` `TeleopSystem` takes in the config dictionary, which we just created. It also takes in the robot instance we want to teleoperate, as well as `show_control_marker`, which, if set to `True`, will also create a green visual marker indicating the desired end-effector pose that the user wants the robot to reach. After the `TeleopSystem` is created, start by calling ``` teleop_sys.start() ``` Then, within the simulation loop, simply call ``` action = teleop_sys.get_action(teleop_sys.get_obs()) ``` to get the action based on the user teleoperation input, and pass the action to the `env.step` function. A complete loop assembled from these pieces is sketched at the end of this page. ## (Optional) Saving and Loading Simulation State You can save the current state of the simulator to a json file by calling `save`: ``` og.sim.save(JSON_PATH) ``` To restore any saved state, simply call `restore`: ``` og.sim.restore(JSON_PATH) ``` Alternatively, if you just want to save all the scene and object info at the current timestep, you can also call `self.scene.dump_state(serialized=True)`, which will return a numpy array containing all the relevant information. You can then stack these arrays together to get the full trajectory of states.
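The sketch below assembles the snippets above into one loop; it assumes `TeleopSystem`, `teleop_config`, `robot`, and `env` are already available as described, and the save path is an arbitrary illustrative choice.

``` python
import omnigibson as og

# Minimal sketch: a full teleoperation loop assembled from the snippets above.
# Assumes TeleopSystem, teleop_config, robot, and env already exist as described.
teleop_sys = TeleopSystem(config=teleop_config, robot=robot, show_control_marker=True)
teleop_sys.start()

for _ in range(1000):
    # Convert the user's device input into a robot action, then step the simulation
    action = teleop_sys.get_action(teleop_sys.get_obs())
    env.step(action)

# Optionally snapshot the simulator state so the demonstration can be restored later
og.sim.save("teleop_demo_state.json")  # illustrative path
```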
2,630
Markdown
49.596153
722
0.774525
StanfordVL/OmniGibson/docs/miscellaneous/known_issues.md
# **Known Issues & Troubleshooting** ## 🤔 **Known Issues** ??? question "How can I parallelize running multiple scenes in OmniGibson?" Currently, to run multiple scenes in parallel, you will need to launch separate instances of the OmniGibson environment. While this introduces some overhead due to running multiple instances of IsaacSim, we are actively working on implementing parallelization capabilities. Our goal is to enable running multiple scenes within a single instance, streamlining the process and reducing the associated overhead. ## 🧯 **Troubleshooting** ??? question "I cannot open Omniverse Launcher AppImage on Linux" You probably need to [install FUSE](https://github.com/AppImage/AppImageKit/wiki/FUSE) to run the Omniverse Launcher AppImage. ??? question "OmniGibson is stuck at `HydraEngine rtx failed creating scene renderer.`" `OmniGibson` is likely using an unsupported GPU (default is id 0). Run `nvidia-smi` to see the active list of GPUs, and select an NVIDIA-supported GPU and set its corresponding ID when running `OmniGibson` with `export OMNIGIBSON_GPU_ID=<ID NUMBER>`.
1,120
Markdown
64.941173
412
0.776786
StanfordVL/OmniGibson/docs/miscellaneous/contact.md
# **Contact** If you have any questions, comments, or concerns, please feel free to reach out to us by joining our Discord server: <a href="https://discord.gg/bccR5vGFEx"><img src="https://discordapp.com/api/guilds/1166422812160966707/widget.png?style=banner3"></a>
268
Markdown
52.799989
134
0.757463
StanfordVL/OmniGibson/docs/getting_started/examples.md
--- icon: material/laptop --- # 💻 **Examples** **`OmniGibson`** ships with many demo scripts highlighting its modularity and diverse feature set intended as a set of building blocks enabling your research. Let's try them out! *** ## ⚙️ **A quick word about macros** ??? question annotate "Why macros?" Macros enforce global behavior that is consistent within an individual python process but can differ between processes. This is useful because globally enabling all of **`OmniGibson`**'s features can cause unnecessary slowdowns, and so configuring the macros for your specific use case can optimize performance. For example, Omniverse provides a so-called `flatcache` feature which provides significant performance boosts, but cannot be used when fluids or soft bodies are present. So, we ideally should always have `gm.USE_FLATCACHE=True` unless we have fluids or soft bodies in our environment. `macros` define a globally available set of magic numbers or flags set throughout **`OmniGibson`**. These can either be directly set in `omnigibson.macros.py`, or can be programmatically modified at runtime via: ```{.python .annotate} from omnigibson.macros import gm, macros gm.<GLOBAL_MACRO> = <VALUE> # (1)! macros.<OG_DIRECTORY>.<OG_MODULE>.<MODULE_MACRO> = <VALUE> # (2)! ``` 1. `gm` refers to the "global" macros -- i.e.: settings that generally impact the entire **`OmniGibson`** stack. These are usually the only settings you may need to modify. 2. `macros` captures all remaining macros defined throughout **`OmniGibson`**'s codebase -- these are often hardcoded default settings or magic numbers defined in a specific module. These can also be overridden, but we recommend inspecting the module first to understand how it is used. Many of our examples set various `macros` settings at the beginning of the script, and is a good way to understand use cases for modifying them! *** ## 🌎 **Environments** These examples showcase the full **`OmniGibson`** stack in use, and the types of environments immediately supported. ### **BEHAVIOR Task Demo** !!! abstract "This demo is useful for..." * Understanding how to instantiate a BEHAVIOR task * Understanding how a pre-defined configuration file is used ```{.python .annotate} python -m omnigibson.examples.environments.behavior_env_demo ``` This demo instantiates one of our BEHAVIOR tasks (and optionally sampling object locations online) in a fully-populated scene and loads a `Fetch` robot. The robot executes random actions and the environment is reset periodically. ??? code "behavior_env_demo.py" ``` py linenums="1" --8<-- "examples/environments/behavior_env_demo.py" ``` ### **Navigation Task Demo** !!! abstract "This demo is useful for..." * Understanding how to instantiate a navigation task * Understanding how a pre-defined configuration file is used ```{.python .annotate} python -m omnigibson.examples.environments.navigation_env_demo ``` This demo instantiates one of our navigation tasks in a fully-populated scene and loads a `Turtlebot` robot. The robot executes random actions and the environment is reset periodically. ??? code "navigation_env_demo.py" ``` py linenums="1" --8<-- "examples/environments/navigation_env_demo.py" ``` ## 🧑‍🏫 **Learning** These examples showcase how **`OmniGibson`** can be used to train embodied AI agents. ### **Reinforcement Learning Demo** !!! abstract "This demo is useful for..." 
* Understanding how to hook up **`OmniGibson`** to an external algorithm * Understanding how to train and evaluate a policy ```{.python .annotate} python -m omnigibson.examples.learning.navigation_policy_demo ``` This demo loads a BEHAVIOR task with a `Fetch` robot, and trains / evaluates the agent using [Stable Baseline3](https://stable-baselines3.readthedocs.io/en/master/)'s PPO algorithm. ??? code "navigation_policy_demo.py" ``` py linenums="1" --8<-- "examples/learning/navigation_policy_demo.py" ``` ## 🏔️ **Scenes** These examples showcase how to leverage **`OmniGibson`**'s large-scale, diverse scenes shipped with the BEHAVIOR dataset. ### **Scene Selector Demo** !!! abstract "This demo is useful for..." * Understanding how to load a scene into **`OmniGibson`** * Accessing all BEHAVIOR dataset scenes ```{.python .annotate} python -m omnigibson.examples.scenes.scene_selector ``` This demo lets you choose a scene from the BEHAVIOR dataset, loads it along with a `Turtlebot` robot, and cycles the resulting environment periodically. ??? code "scene_selector.py" ``` py linenums="1" --8<-- "examples/scenes/scene_selector.py" ``` ### **Scene Tour Demo** !!! abstract "This demo is useful for..." * Understanding how to load a scene into **`OmniGibson`** * Understanding how to generate a trajectory from a set of waypoints ```{.python .annotate} python -m omnigibson.examples.scenes.scene_tour_demo ``` This demo lets you choose a scene from the BEHAVIOR dataset. It allows you to move the camera using the keyboard, select waypoints, and then programmatically generates a video trajectory from the selected waypoints ??? code "scene_tour_demo.py" ``` py linenums="1" --8<-- "examples/scenes/scene_tour_demo.py" ``` ### **Traversability Map Demo** !!! abstract "This demo is useful for..." * Understanding how to leverage traversability map information from BEHAVIOR dataset scenes ```{.python .annotate} python -m omnigibson.examples.scenes.traversability_map_example ``` This demo lets you choose a scene from the BEHAVIOR dataset, and generates its corresponding traversability map. ??? code "traversability_map_example.py" ``` py linenums="1" --8<-- "examples/scenes/traversability_map_example.py" ``` ## 🍎 **Objects** These examples showcase how to leverage objects in **`OmniGibson`**. ### **Load Object Demo** !!! abstract "This demo is useful for..." * Understanding how to load an object into **`OmniGibson`** * Accessing all BEHAVIOR dataset asset categories and models ```{.python .annotate} python -m omnigibson.examples.objects.load_object_selector ``` This demo lets you choose a specific object from the BEHAVIOR dataset, and loads the requested object into an environment. ??? code "load_object_selector.py" ``` py linenums="1" --8<-- "examples/objects/load_object_selector.py" ``` ### **Object Visualizer Demo** !!! abstract "This demo is useful for..." * Viewing objects' textures as rendered in **`OmniGibson`** * Viewing articulated objects' range of motion * Understanding how to reference object instances from the environment * Understanding how to set object poses and joint states ```{.python .annotate} python -m omnigibson.examples.objects.visualize_object ``` This demo lets you choose a specific object from the BEHAVIOR dataset, and rotates the object in-place. If the object is articulated, it additionally moves its joints through its full range of motion. ??? code "visualize_object.py" ``` py linenums="1" --8<-- "examples/objects/visualize_object.py" ``` ### **Highlight Object** !!! 
abstract "This demo is useful for..." * Understanding how to highlight individual objects within a cluttered scene * Understanding how to access groups of objects from the environment ```{.python .annotate} python -m omnigibson.examples.objects.highlight_objects ``` This demo loads the Rs_int scene and highlights windows on/off repeatedly. ??? code "highlight_objects.py" ``` py linenums="1" --8<-- "examples/objects/highlight_objects.py" ``` ### **Draw Object Bounding Box Demo** !!! abstract annotate "This demo is useful for..." * Understanding how to access observations from a `GymObservable` object * Understanding how to access objects' bounding box information * Understanding how to dynamically modify vision modalities *[GymObservable]: [`Environment`](../reference/envs/env_base.md), all sensors extending from [`BaseSensor`](../reference/sensors/sensor_base.md), and all objects extending from [`BaseObject`](../reference/objects/object_base.md) (which includes all robots extending from [`BaseRobot`](../reference/robots/robot_base.md)!) are [`GymObservable`](../reference/utils/gym_utils.md#utils.gym_utils.GymObservable) objects! ```{.python .annotate} python -m omnigibson.examples.objects.draw_bounding_box ``` This demo loads a door object and banana object, and partially obscures the banana with the door. It generates both "loose" and "tight" bounding boxes (where the latter respects occlusions) for both objects, and dumps them to an image on disk. ??? code "draw_bounding_box.py" ``` py linenums="1" --8<-- "examples/objects/draw_bounding_box.py" ``` ## 🌡️ **Object States** These examples showcase **`OmniGibson`**'s powerful object states functionality, which captures both individual and relational kinematic and non-kinematic states. ### **Slicing Demo** !!! abstract "This demo is useful for..." * Understanding how slicing works in **`OmniGibson`** * Understanding how to access individual objects once the environment is created ```{.python .annotate} python -m omnigibson.examples.object_states.slicing_demo ``` This demo spawns an apple on a table with a knife above it, and lets the knife fall to "cut" the apple in half. ??? code "slicing_demo.py" ``` py linenums="1" --8<-- "examples/object_states/slicing_demo.py" ``` ### **Dicing Demo** !!! abstract "This demo is useful for..." * Understanding how to leverage the `Dicing` state * Understanding how to enable objects to be `diceable` ```{.python .annotate} python -m omnigibson.examples.object_states.dicing_demo ``` This demo loads an apple and a knife, and showcases how apple can be diced into smaller chunks with the knife. ??? code "dicing_demo.py" ``` py linenums="1" --8<-- "examples/object_states/dicing_demo.py" ``` ### **Folded and Unfolded Demo** !!! abstract "This demo is useful for..." * Understanding how to load a softbody (cloth) version of a BEHAVIOR dataset object * Understanding how to enable cloth objects to be `foldable` * Understanding the current heuristics used for gauging a cloth's "foldness" ```{.python .annotate} python -m omnigibson.examples.object_states.folded_unfolded_state_demo ``` This demo loads in three different cloth objects, and allows you to manipulate them while printing out their `Folded` state status in real-time. Try manipulating the object by holding down **`Shift`** and then **`Left-click + Drag`**! ??? code "folded_unfolded_state_demo.py" ``` py linenums="1" --8<-- "examples/object_states/folded_unfolded_state_demo.py" ``` ### **Overlaid Demo** !!! abstract "This demo is useful for..." 
* Understanding how cloth objects can be overlaid on rigid objects * Understanding current heuristics used for gauging a cloth's "overlaid" status ```{.python .annotate} python -m omnigibson.examples.object_states.overlaid_demo ``` This demo loads in a carpet on top of a table. The demo allows you to manipulate the carpet while printing out their `Overlaid` state status in real-time. Try manipulating the object by holding down **`Shift`** and then **`Left-click + Drag`**! ??? code "overlaid_demo.py" ``` py linenums="1" --8<-- "examples/object_states/overlaid_demo.py" ``` ### **Heat Source or Sink Demo** !!! abstract "This demo is useful for..." * Understanding how a heat source (or sink) is visualized in **`OmniGibson`** * Understanding how dynamic fire visuals are generated in real-time ```{.python .annotate} python -m omnigibson.examples.object_states.heat_source_or_sink_demo ``` This demo loads in a stove and toggles its `HeatSource` on and off, showcasing the dynamic fire visuals available in **`OmniGibson`**. ??? code "heat_source_or_sink_demo.py" ``` py linenums="1" --8<-- "examples/object_states/heat_source_or_sink_demo.py" ``` ### **Temperature Demo** !!! abstract "This demo is useful for..." * Understanding how to dynamically sample kinematic states for BEHAVIOR dataset objects * Understanding how temperature changes are propagated to individual objects from individual heat sources or sinks ```{.python .annotate} python -m omnigibson.examples.object_states.temperature_demo ``` This demo loads in various heat sources and sinks, and places an apple within close proximity to each of them. As the environment steps, each apple's temperature is printed in real-time, showcasing **`OmniGibson`**'s rudimentary temperature dynamics. ??? code "temperature_demo.py" ``` py linenums="1" --8<-- "examples/object_states/temperature_demo.py" ``` ### **Heated Demo** !!! abstract "This demo is useful for..." * Understanding how temperature modifications can cause objects' visual changes * Understanding how dynamic steam visuals are generated in real-time ```{.python .annotate} python -m omnigibson.examples.object_states.heated_state_demo ``` This demo loads in three bowls, and immediately sets their temperatures past their `Heated` threshold. Steam is generated in real-time from these objects, and then disappears once the temperature of the objects drops below their `Heated` threshold. ??? code "heated_state_demo.py" ``` py linenums="1" --8<-- "examples/object_states/heated_state_demo.py" ``` ### **Onfire Demo** !!! abstract "This demo is useful for..." * Understanding how changing onfire state can cause objects' visual changes * Understanding how onfire can be triggered by nearby onfire objects ```{.python .annotate} python -m omnigibson.examples.object_states.onfire_demo ``` This demo loads in a stove (toggled on) and two apples. The first apple will be ignited by the stove first, then the second apple will be ignited by the first apple. ??? code "onfire_demo.py" ``` py linenums="1" --8<-- "examples/object_states/onfire_demo.py" ``` ### **Particle Applier and Remover Demo** !!! abstract "This demo is useful for..." 
* Understanding how a `ParticleRemover` or `ParticleApplier` object can be generated * Understanding how particles can be dynamically generated on objects * Understanding different methods for applying and removing particles via the `ParticleRemover` or `ParticleApplier` object ```{.python .annotate} python -m omnigibson.examples.object_states.particle_applier_remover_demo ``` This demo loads in a washtowel and table and lets you choose the ability configuration to enable the washtowel with. The washtowel will then proceed to either remove and generate particles dynamically on the table while moving. ??? code "particle_applier_remover_demo.py" ``` py linenums="1" --8<-- "examples/object_states/particle_applier_remover_demo.py" ``` ### **Particle Source and Sink Demo** !!! abstract "This demo is useful for..." * Understanding how a `ParticleSource` or `ParticleSink` object can be generated * Understanding how particles can be dynamically generated and destroyed via such objects ```{.python .annotate} python -m omnigibson.examples.object_states.particle_source_sink_demo ``` This demo loads in a sink, which is enabled with both the ParticleSource and ParticleSink states. The sink's particle source is located at the faucet spout and spawns a continuous stream of water particles, which is then destroyed ("sunk") by the sink's particle sink located at the drain. ??? note "Difference between `ParticleApplier/Removers` and `ParticleSource/Sinks`" The key difference between `ParticleApplier/Removers` and `ParticleSource/Sinks` is that `Applier/Removers` requires contact (if using `ParticleProjectionMethod.ADJACENCY`) or overlap (if using `ParticleProjectionMethod.PROJECTION`) in order to spawn / remove particles, and generally only spawn particles at the contact points. `ParticleSource/Sinks` are special cases of `ParticleApplier/Removers` that always use `ParticleProjectionMethod.PROJECTION` and always spawn / remove particles within their projection volume, irregardless of overlap with other objects. ??? code "particle_source_sink_demo.py" ``` py linenums="1" --8<-- "examples/object_states/particle_source_sink_demo.py" ``` ### **Kinematics Demo** !!! abstract "This demo is useful for..." * Understanding how to dynamically sample kinematic states for BEHAVIOR dataset objects * Understanding how to import additional objects after the environment is created ```{.python .annotate} python -m omnigibson.examples.object_states.sample_kinematics_demo ``` This demo procedurally generates a mini populated scene, spawning in a cabinet and placing boxes in its shelves, and then generating a microwave on a cabinet with a plate and apples sampled both inside and on top of it. ??? code "sample_kinematics_demo.py" ``` py linenums="1" --8<-- "examples/object_states/sample_kinematics_demo.py" ``` ### **Attachment Demo** !!! abstract "This demo is useful for..." * Understanding how to leverage the `Attached` state * Understanding how to enable objects to be `attachable` ```{.python .annotate} python -m omnigibson.examples.object_states.attachment_demo ``` This demo loads an assembled shelf, and showcases how it can be manipulated to attach and detach parts. ??? code "attachment_demo.py" ``` py linenums="1" --8<-- "examples/object_states/attachment_demo.py" ``` ### **Object Texture Demo** !!! abstract "This demo is useful for..." 
* Understanding how different object states can result in texture changes * Understanding how to enable objects with texture-changing states * Understanding how to dynamically modify object states ```{.python .annotate} python -m omnigibson.examples.object_states.object_state_texture_demo ``` This demo loads in a single object, and then dynamically modifies its state so that its texture changes with each modification. ??? code "object_state_texture_demo.py" ``` py linenums="1" --8<-- "examples/object_states/object_state_texture_demo.py" ``` ## 🤖 **Robots** These examples showcase how to interact and leverage robot objects in **`OmniGibson`**. ### **Robot Visualizer Demo** !!! abstract "This demo is useful for..." * Understanding how to load a robot into **`OmniGibson`** after an environment is created * Accessing all **`OmniGibson`** robot models * Viewing robots' low-level joint motion ```{.python .annotate} python -m omnigibson.examples.robots.all_robots_visualizer ``` This demo iterates over all robots in **`OmniGibson`**, loading each one into an empty scene and randomly moving its joints for a brief amount of time. ??? code "all_robots_visualizer.py" ``` py linenums="1" --8<-- "examples/robots/all_robots_visualizer.py" ``` ### **Robot Control Demo** !!! abstract "This demo is useful for..." * Understanding how different controllers can be used to control robots * Understanding how to teleoperate a robot through external commands ```{.python .annotate} python -m omnigibson.examples.robots.robot_control_example ``` This demo lets you choose a robot and the set of controllers to control the robot, and then lets you teleoperate the robot using your keyboard. ??? code "robot_control_example.py" ``` py linenums="1" --8<-- "examples/robots/robot_control_example.py" ``` ### **Robot Grasping Demo** !!! abstract annotate "This demo is useful for..." * Understanding the difference between `physical` and `sticky` grasping * Understanding how to teleoperate a robot through external commands ```{.python .annotate} python -m omnigibson.examples.robots.grasping_mode_example ``` This demo lets you choose a grasping mode and then loads a `Fetch` robot and a cube on a table. You can then teleoperate the robot to grasp the cube, observing the difference is grasping behavior based on the grasping mode chosen. Here, `physical` means natural friction is required to hold objects, while `sticky` means that objects are constrained to the robot's gripper once contact is made. ??? code "grasping_mode_example.py" ``` py linenums="1" --8<-- "examples/robots/grasping_mode_example.py" ``` ### **Advanced: IK Demo** !!! abstract "This demo is useful for..." * Understanding how to construct your own IK functionality using omniverse's native lula library without explicitly utilizing all of OmniGibson's class abstractions * Understanding how to manipulate the simulator at a lower-level than the main Environment entry point ```{.python .annotate} python -m omnigibson.examples.robots.advanced.ik_example ``` This demo loads in `Fetch` robot and a IK solver to control the robot, and then lets you teleoperate the robot using your keyboard. ??? code "ik_example.py" ``` py linenums="1" --8<-- "examples/robots/advanced/ik_example.py" ``` ## 🧰 **Simulator** These examples showcase useful functionality from **`OmniGibson`**'s monolithic `Simulator` object. ??? question "What's the difference between `Environment` and `Simulator`?" 
The [`Simulator`](../../reference/simulator) class is a lower-level object that: * handles importing scenes and objects into the actual simulation * directly interfaces with the underlying physics engine The [`Environment`](../../reference/environemnts/base_env) class thinly wraps the `Simulator`'s core functionality, by: * providing convenience functions for automatically importing a predefined scene, object(s), and robot(s) (via the `cfg` argument), as well as a [`task`](../../reference/tasks/task_base) * providing a OpenAI Gym interface for stepping through the simulation While most of the core functionality in `Environment` (as well as more fine-grained physics control) can be replicated via direct calls to `Simulator` (`og.sim`), it requires deeper understanding of **`OmniGibson`**'s infrastructure and is not recommended for new users. ### **State Saving and Loading Demo** !!! abstract "This demo is useful for..." * Understanding how to interact with objects using the mouse * Understanding how to save the active simulator state to a file * Understanding how to restore the simulator state from a given file ```{.python .annotate} python -m omnigibson.examples.simulator.sim_save_load_example ``` This demo loads a stripped-down scene with the `Turtlebot` robot, and lets you interact with objects to modify the scene. The state is then saved, written to a `.json` file, and then restored in the simulation. ??? code "sim_save_load_example.py" ``` py linenums="1" --8<-- "examples/simulator/sim_save_load_example.py" ``` ## 🖼️ **Rendering** These examples showcase how to change renderer settings in **`OmniGibson`**. ### **Renderer Settings Demo** !!! abstract "This demo is useful for..." * Understanding how to use RendererSettings class ```{.python .annotate} python -m omnigibson.examples.renderer_settings.renderer_settings_example ``` This demo iterates over different renderer settings of and shows how they can be programmatically set with **`OmniGibson`** interface. ??? code "renderer_settings_example.py" ``` py linenums="1" --8<-- "examples/renderer_settings/renderer_settings_example.py" ```
23,454
Markdown
37.45082
415
0.725377
StanfordVL/OmniGibson/docs/getting_started/quickstart.md
--- icon: octicons/rocket-16 --- # 🚀 **Quickstart** Let's quickly create an environment programmatically! **`OmniGibson`**'s workflow is straightforward: define the configuration of scene, object(s), robot(s), and task you'd like to load, and then instantiate our `Environment` class with that config. Let's start with the following: ```{.python .annotate} import omnigibson as og # (1)! from omnigibson.macros import gm # (2)! # Start with an empty configuration cfg = dict() ``` 1. All python scripts should start with this line! This allows access to key global variables through the top-level package. 2. Global macros (`gm`) can always be accessed directly and modified on the fly! ## 🏔️ **Defining a scene** Next, let's define a scene: ```{.python .annotate} cfg["scene"] = { "type": "Scene", # (1)! "floor_plane_visible": True, # (2)! } ``` 1. Our configuration gets parsed automatically and generates the appropriate class instance based on `type` (the string form of the class name). In this case, we're generating the most basic scene, which only consists of a floor plane. Check out [all of our available `Scene` classes](../reference/scenes/scene_base.md)! 2. In addition to specifying `type`, the remaining keyword-arguments get passed directly into the class constructor. So for the base [`Scene`](../reference/scenes/scene_base.md) class, you could optionally specify `"use_floor_plane"` and `"floor_plane_visible"`, whereas for the more powerful [`InteractiveTraversableScene`](../reference/scenes/interactive_traversable_scene.md) class (which loads a curated, preconfigured scene) you can additionally specify options for filtering objects, such as `"load_object_categories"` and `"load_room_types"`. You can see all available keyword-arguments by viewing the [individual `Scene` class](../reference/scenes/scene_base.md) you'd like to load! ## 🎾 **Defining objects** We can optionally define some objects to load into our scene: ```{.python .annotate} cfg["objects"] = [ # (1)! { "type": "USDObject", # (2)! "name": "ghost_stain", # (3)! "usd_path": f"{gm.ASSET_PATH}/models/stain/stain.usd", "category": "stain", # (4)! "visual_only": True, # (5)! "scale": [1.0, 1.0, 1.0], # (6)! "position": [1.0, 2.0, 0.001], # (7)! "orientation": [0, 0, 0, 1.0], # (8)! }, { "type": "DatasetObject", # (9)! "name": "delicious_apple", "category": "apple", "model": "agveuv", # (10)! "position": [0, 0, 1.0], }, { "type": "PrimitiveObject", # (11)! "name": "incredible_box", "primitive_type": "Cube", # (12)! "rgba": [0, 1.0, 1.0, 1.0], # (13)! "scale": [0.5, 0.5, 0.1], "fixed_base": True, # (14)! "position": [-1.0, 0, 1.0], "orientation": [0, 0, 0.707, 0.707], }, { "type": "LightObject", # (15)! "name": "brilliant_light", "light_type": "Sphere", # (16)! "intensity": 50000, # (17)! "radius": 0.1, # (18)! "position": [3.0, 3.0, 4.0], }, ] ``` 1. Unlike the `"scene"` sub-config, we can define an arbitrary number of objects to load, so this is a `list` of `dict` istead of a single nested `dict`. 2. **`OmniGibson`** supports multiple object classes, and we showcase an instance of each core class here. A [`USDObject`](../reference/objects/usd_object.md) is our most generic object class, and generates an object sourced from the `usd_path` argument. 3. All objects **must** define the `name` argument! This is because **`OmniGibson`** enforces a global unique naming scheme, and so any created objects must have unique names assigned to them. 4. `category` is used by all object classes to assign semantic segmentation IDs. 5. 
`visual_only` is used by all object classes and defines whether the object is subject to both gravity and collisions. 6. `scale` is used by all object classes and defines the global (x,y,z) relative scale of the object. 7. `position` is used by all object classes and defines the initial (x,y,z) position of the object in the global frame. 8. `orientation` is used by all object classes and defines the initial (x,y,z,w) quaternion orientation of the object in the global frame. 9. A [`DatasetObject`](../reference/objects/dataset_object.md) is an object pulled directly from our **BEHAVIOR** dataset. It includes metadata and annotations not found on a generic `USDObject`. Note that these assets are encrypted, and thus cannot be created via the `USDObject` class. 10. Instead of explicitly defining the hardcoded path to the dataset USD model, `model` (in conjunction with `category`) is used to infer the exact dataset object to load. In this case this is the exact same underlying raw USD asset that was loaded above as a `USDObject`! 11. A [`PrimitiveObject`](../reference/objects/primitive_object.md) is a programmatically generated object defining a convex primitive shape. 12. `primitive_type` defines what primitive shape to load -- see [`PrimitiveObject`](../reference/objects/primitive_object.md) for available options! 13. Because this object is programmatically generated, we can also specify the color to assign to this primitive object. 14. `fixed_base` is used by all object classes and determines whether the generated object is fixed relative to the world frame. Useful for fixing in place large objects, such as furniture or structures. 15. A [`LightObject`](../reference/objects/light_object.md) is a programmatically generated light source. It is used to directly illuminate the given scene. 16. `light_type` defines what light shape to load -- see [`LightObject`](../reference/objects/light_object.md) for available options! 17. `intensity` defines how bright the generated light source should be. 18. `radius` is used by `Sphere` lights and determines their relative size. ## 🤖 **Defining robots** We can also optionally define robots to load into our scene: ```{.python .annotate} cfg["robots"] = [ # (1)! { "type": "Fetch", # (2)! "name": "baby_robot", "obs_modalities": ["scan", "rgb", "depth"], # (3)! }, ] ``` 1. Like the `"objects"` sub-config, we can define an arbitrary number of robots to load, so this is a `list` of `dict`. 2. **`OmniGibson`** supports multiple robot classes, where each class represents a specific robot model. Check out our [`robots`](../reference/robots/robot_base.md) to view all available robot classes! 3. Execute `print(og.ALL_SENSOR_MODALITIES)` for a list of all available observation modalities! ## 📋 **Defining a task** Lastly, we can optionally define a task to load into our scene. Since we're just getting started, let's load a "Dummy" task (which is the task that is loaded anyways even if we don't explicitly define a task in our config): ```{.python .annotate} cfg["task"] = { "type": "DummyTask", # (1)! "termination_config": dict(), # (2)! "reward_config": dict(), # (3)! } ``` 1. Check out all of **`OmniGibson`**'s [available tasks](../reference/tasks/task_base.md)! 2. `termination_config` configures the termination conditions for this task. It maps specific [`TerminationCondition`](../reference/termination_conditions/termination_condition_base.md) arguments to their corresponding values to set. 3. `reward_config` configures the reward functions for this task. 
It maps specific [`RewardFunction`](../reference/reward_functions/reward_function_base.md) arguments to their corresponding values to set. ## 🌀 **Creating the environment** We're all set! Let's load the config and create our environment: ```{.python .annotate} env = og.Environment(cfg) ``` Once the environment loads, we can interface with our environment similar to OpenAI's Gym interface: ```{.python .annotate} obs, rew, done, info = env.step(env.action_space.sample()) ``` ??? question "What happens if we have no robot loaded?" Even if we have no robot loaded, we still need to define an "action" to pass into the environment. In this case, our action space is 0, so you can simply pass `[]` or `np.array([])` into the `env.step()` call! ??? code "my_first_env.py" ``` py linenums="1" import omnigibson as og from omnigibson.macros import gm cfg = dict() # Define scene cfg["scene"] = { "type": "Scene", "floor_plane_visible": True, } # Define objects cfg["objects"] = [ { "type": "USDObject", "name": "ghost_stain", "usd_path": f"{gm.ASSET_PATH}/models/stain/stain.usd", "category": "stain", "visual_only": True, "scale": [1.0, 1.0, 1.0], "position": [1.0, 2.0, 0.001], "orientation": [0, 0, 0, 1.0], }, { "type": "DatasetObject", "name": "delicious_apple", "category": "apple", "model": "agveuv", "position": [0, 0, 1.0], }, { "type": "PrimitiveObject", "name": "incredible_box", "primitive_type": "Cube", "rgba": [0, 1.0, 1.0, 1.0], "scale": [0.5, 0.5, 0.1], "fixed_base": True, "position": [-1.0, 0, 1.0], "orientation": [0, 0, 0.707, 0.707], }, { "type": "LightObject", "name": "brilliant_light", "light_type": "Sphere", "intensity": 50000, "radius": 0.1, "position": [3.0, 3.0, 4.0], }, ] # Define robots cfg["robots"] = [ { "type": "Fetch", "name": "skynet_robot", "obs_modalities": ["scan", "rgb", "depth"], }, ] # Define task cfg["task"] = { "type": "DummyTask", "termination_config": dict(), "reward_config": dict(), } # Create the environment env = og.Environment(cfg) # Allow camera teleoperation og.sim.enable_viewer_camera_teleoperation() # Step! for _ in range(10000): obs, rew, done, info = env.step(env.action_space.sample()) og.shutdown() ``` ## 👀 **Looking around** Look around by: * `Left-CLICK + Drag`: Tilt * `Scroll-Wheel-CLICK + Drag`: Pan * `Scroll-Wheel UP / DOWN`: Zoom Interact with objects by: * `Shift + Left-CLICK + Drag`: Apply force on selected object Or, for more fine-grained control, run: ```{.python .annotate} og.sim.enable_viewer_camera_teleoperation() # (1)! ``` 1. This allows you to move the camera precisely with your keyboard, record camera poses, and dynamically modify lights! Or, for programmatic control, directly set the viewer camera's global pose: ```{.python .annotate} og.sim.viewer_camera.set_position_orientation(<POSITION>, <ORIENTATION>) ``` *** **Next:** Check out some of **`OmniGibson`**'s breadth of features from our [Building Block](./building_blocks.md) examples!
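For completeness, a concrete version of the camera-pose call shown above might look like the following. This is a minimal sketch with arbitrary example values (they are not taken from any particular scene); positions are (x, y, z) and orientations are (x, y, z, w) quaternions:

```{.python .annotate}
import numpy as np

# Example values only: place the viewer camera at an arbitrary pose
og.sim.viewer_camera.set_position_orientation(
    np.array([1.5, -1.5, 2.0]),      # position (x, y, z)
    np.array([0.0, 0.0, 0.0, 1.0]),  # orientation (identity quaternion, (x, y, z, w))
)
```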
10,980
Markdown
41.727626
690
0.643443
StanfordVL/OmniGibson/docs/getting_started/installation.md
--- icon: material/hammer-wrench --- # 🛠️ **Installation** ## 🗒️ **Requirements** Please make sure your system meets the following specs: - [x] **OS:** Ubuntu 20.04+ / Windows 10+ - [x] **RAM:** 32GB+ - [x] **GPU:** NVIDIA RTX 2070+ - [x] **VRAM:** 8GB+ ??? question "Why these specs?" **`OmniGibson`** is built upon NVIDIA's [Omniverse](https://www.nvidia.com/en-us/omniverse/) and [Isaac Sim](https://developer.nvidia.com/isaac-sim) platforms, so we inherit their dependencies. For more information, please see [Isaac Sim's Requirements](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/requirements.html). ## 💻 **Setup** There are two ways to setup **`OmniGibson`**: - **🐳 Install with Docker (Linux only)**: You can quickly get **`OmniGibson`** immediately up and running from our pre-built docker image. - **🧪 Install from source (Linux / Windows)**: This method is recommended for deeper users looking to develop upon **`OmniGibson`** or use it extensively for research. !!! tip "" === "🐳 Install with Docker (Linux only)" Install **`OmniGibson`** with Docker is supported for **🐧 Linux** only. ??? info "Need to install docker or NVIDIA docker?" ```{.shell .annotate} # Install docker curl https://get.docker.com | sh && sudo systemctl --now enable docker # Install nvidia-docker runtime distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \ && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | \ sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \ sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list sudo apt-get update sudo apt-get install -y nvidia-docker2 # install sudo systemctl restart docker # restart docker engine ``` 1. Install our docker launching scripts: ```shell curl -LJO https://raw.githubusercontent.com/StanfordVL/OmniGibson/main/docker/run_docker.sh chmod a+x run_docker.sh ``` ??? question annotate "What is being installed?" Our docker image automatically ships with a pre-configured conda virtual environment named `omnigibson` with Isaac Sim and **`OmniGibson`** pre-installed. Upon running the first time, our scene and object assets will automatically be downloaded as well. 2. Then, simply launch the shell script: === "Headless" ```{.shell .annotate} sudo ./run_docker.sh -h <ABS_DATA_PATH> # (1)! ``` 1. `<ABS_DATA_PATH>` specifies the **absolute** path data will be stored on your machine (if no `<ABS_DATA_PATH>` is specified, it defaults to `./omnigibson_data`). This needs to be called each time the docker container is run! === "GUI" ```{.shell .annotate} sudo ./run_docker.sh <ABS_DATA_PATH> # (1)! ``` 1. `<ABS_DATA_PATH>` specifies the **absolute** path data will be stored on your machine (if no `<ABS_DATA_PATH>` is specified, it defaults to `./omnigibson_data`). This needs to be called each time the docker container is run! ??? warning annotate "Are you using NFS or AFS?" Docker containers are unable to access NFS or AFS drives, so if `run_docker.sh` are located on an NFS / AFS partition, please set `<DATA_PATH>` to an alternative data directory located on a non-NFS / AFS partition. === "🧪 Install from source (Linux / Windows)" Install **`OmniGibson`** from source is supported for both **🐧 Linux (bash)** and **📁 Windows (powershell/cmd)**. !!! example "" === "🐧 Linux (bash)" <div class="annotate" markdown> 1. 
Install [Conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) and NVIDIA's [Omniverse Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html) !!! warning "Please make sure you have the latest version of Isaac Sim (2023.1.1) installed." For Ubuntu 22.04, you need to [install FUSE](https://github.com/AppImage/AppImageKit/wiki/FUSE) to run the Omniverse Launcher AppImage. 2. Clone [**`OmniGibson`**](https://github.com/StanfordVL/OmniGibson) and move into the directory: ```shell git clone https://github.com/StanfordVL/OmniGibson.git cd OmniGibson ``` ??? note "Nightly build" The main branch contains the stable version of **`OmniGibson`**. For our latest developed (yet not fully tested) features and bug fixes, please clone from the `og-develop` branch. 3. Setup a virtual conda environment to run **`OmniGibson`**: ```{.shell .annotate} ./scripts/setup.sh # (1)! ``` 1. The script will ask you which Isaac Sim to use. If you installed it in the default location, it should be `~/.local/share/ov/pkg/isaac_sim-2023.1.1` This will create a conda env with `omnigibson` installed. Simply call `conda activate` to activate it. 4. Download **`OmniGibson`** dataset (within the conda env): ```shell python scripts/download_datasets.py ``` </div> === "📁 Windows (powershell/cmd)" <div class="annotate" markdown> 1. Install [Conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) and NVIDIA's [Omniverse Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html) !!! warning "Please make sure you have the latest version of Isaac Sim (2023.1.1) installed." 2. Clone [**`OmniGibson`**](https://github.com/StanfordVL/OmniGibson) and move into the directory: ```shell git clone https://github.com/StanfordVL/OmniGibson.git cd OmniGibson ``` ??? note "Nightly build" The main branch contains the stable version of **`OmniGibson`**. For our latest developed (yet not fully tested) features and bug fixes, please clone from the `og-develop` branch. 3. Setup a virtual conda environment to run **`OmniGibson`**: ```{.powershell .annotate} .\scripts\setup.bat # (1)! ``` 1. The script will ask you which Isaac Sim to use. If you installed it in the default location, it should be `C:\Users\<USER_NAME>\AppData\Local\ov\pkg\isaac_sim-2023.1.1` This will create a conda env with `omnigibson` installed. Simply call `conda activate` to activate it. 4. Download **`OmniGibson`** dataset (within the conda env): ```powershell python scripts\download_datasets.py ``` </div> ## 🌎 **Explore `OmniGibson`!** !!! warning annotate "Expect slowdown during first execution" Omniverse requires some one-time startup setup when **`OmniGibson`** is imported for the first time. The process could take up to 5 minutes. This is expected behavior, and should only occur once! **`OmniGibson`** is now successfully installed! Try exploring some of our new scenes interactively: ```{.shell .annotate} python -m omnigibson.examples.scenes.scene_selector # (1)! ``` 1. This demo lets you choose a scene and interactively move around using your keyboard and mouse. Hold down **`Shift`** and then **`Left-click + Drag`** an object to apply forces! You can also try teleoperating one of our robots: ```{.shell .annotate} python -m omnigibson.examples.robots.robot_control_example # (1)! ``` 1. This demo lets you choose a scene, robot, and set of controllers, and then teleoperate the robot using your keyboard. 
*** **Next:** Get quickly familiarized with **`OmniGibson`** from our [Quickstart Guide](./quickstart.md)! ## 🧯 **Troubleshooting** ??? question "I cannot open Omniverse Launcher AppImage on Linux" You probably need to [install FUSE](https://github.com/AppImage/AppImageKit/wiki/FUSE) to run the Omniverse Launcher AppImage. ??? question "OmniGibson is stuck at `HydraEngine rtx failed creating scene renderer.`" `OmniGibson` is likely using an unsupported GPU (default is id 0). Run `nvidia-smi` to see the active list of GPUs, and select an NVIDIA-supported GPU and set its corresponding ID when running `OmniGibson` with `export OMNIGIBSON_GPU_ID=<ID NUMBER>`.
9,239
Markdown
44.294117
337
0.613486
StanfordVL/OmniGibson/docs/getting_started/slurm.md
--- icon: material/server-network --- # 🔌 **Running on a SLURM cluster** _This documentation is a work in progress._ OmniGibson can be run on a SLURM cluster using the _enroot_ container software, which is a replacement for Docker that allows containers to be run as the current user rather than as root. _enroot_ needs to be installed on your SLURM cluster by an administrator. With enroot installed, you can follow the below steps to run OmniGibson on SLURM: 1. Download the dataset to a location that is accessible by cluster nodes. To do this, you can use the download_dataset.py script inside OmniGibson's scripts directory, and move it to the right spot later. In the below example, /cvgl/ is a networked drive that is accessible by the cluster nodes. **For Stanford users, this step is already done for SVL and Viscam nodes** ```{.shell .annotate} OMNIGIBSON_NO_OMNIVERSE=1 python scripts/download_dataset.py mv omnigibson/data /cvgl/group/Gibson/og-data-0-2-1 ``` 2. (Optional) Distribute the dataset to the individual nodes. This will make load times much better than reading from a network drive. To do this, run the below command on your SLURM head node (replace `svl` with your partition name and `cvgl` with your account name, as well as the paths with the respective network and local paths). Confirm via `squeue -u $USER` that all jobs have finished. **This step is already done for SVL and Viscam nodes** ```{.shell .annotate} sinfo -p svl -o "%N,%n" -h | \ sed s/,.*//g | \ xargs -L1 -I{} \ sbatch \ --account=cvgl --partition=svl --nodelist={} --mem=8G --cpus-per-task=4 \ --wrap 'cp -R /cvgl/group/Gibson/og-data-0-2-1 /scr-ssd/og-data-0-2-1' ``` 3. Download your desired image to a location that is accessible by the cluster nodes. (Replace the path with your own path, and feel free to replace `latest` with your desired branch tag). You have the option to mount code (meaning you don't need the container to come with all the code you want to run, just the right dependencies / environment setup) ```{.shell .annotate} enroot import --output /cvgl2/u/cgokmen/omnigibson.sqsh docker://stanfordvl/omnigibson:action-primitives ``` 4. (Optional) If you intend to mount code onto the container, make it available at a location that is accessible by the cluster nodes. You can mount arbitrary code, and you can also mount a custom version of OmniGibson (for the latter, you need to make sure you mount your copy of OmniGibson at /omnigibson-src inside the container). For example: ```{.shell .annotate} git clone https://github.com/StanfordVL/OmniGibson.git /cvgl2/u/cgokmen/OmniGibson ``` 5. Create your launch script. You can start with a copy of the script below. If you want to launch multiple workers, increase the job array option. You should keep the setting at at least 1 GPU per node, but can feel free to edit other settings. You can mount any additional code as you'd like, and you can change the entrypoint such that the container runs your mounted code upon launch. See the mounts section for an example. 
A copy of this script can be found in docker/sbatch_example.sh ```{.shell .annotate} #!/usr/bin/env bash #SBATCH --account=cvgl #SBATCH --partition=svl --qos=normal #SBATCH --nodes=1 #SBATCH --cpus-per-task=8 #SBATCH --mem=30G #SBATCH --gres=gpu:2080ti:1 IMAGE_PATH="/cvgl2/u/cgokmen/omnigibson.sqsh" GPU_ID=$(nvidia-smi -L | grep -oP '(?<=GPU-)[a-fA-F0-9\-]+' | head -n 1) ISAAC_CACHE_PATH="/scr-ssd/${SLURM_JOB_USER}/isaac_cache_${GPU_ID}" # Define env kwargs to pass declare -A ENVS=( [NVIDIA_DRIVER_CAPABILITIES]=all [NVIDIA_VISIBLE_DEVICES]=0 [DISPLAY]="" [OMNIGIBSON_HEADLESS]=1 ) for env_var in "${!ENVS[@]}"; do # Add to env kwargs we'll pass to enroot command later ENV_KWARGS="${ENV_KWARGS} --env ${env_var}=${ENVS[${env_var}]}" done # Define mounts to create (maps local directory to container directory) declare -A MOUNTS=( [/scr-ssd/og-data-0-2-1]=/data [${ISAAC_CACHE_PATH}/isaac-sim/kit/cache/Kit]=/isaac-sim/kit/cache/Kit [${ISAAC_CACHE_PATH}/isaac-sim/cache/ov]=/root/.cache/ov [${ISAAC_CACHE_PATH}/isaac-sim/cache/pip]=/root/.cache/pip [${ISAAC_CACHE_PATH}/isaac-sim/cache/glcache]=/root/.cache/nvidia/GLCache [${ISAAC_CACHE_PATH}/isaac-sim/cache/computecache]=/root/.nv/ComputeCache [${ISAAC_CACHE_PATH}/isaac-sim/logs]=/root/.nvidia-omniverse/logs [${ISAAC_CACHE_PATH}/isaac-sim/config]=/root/.nvidia-omniverse/config [${ISAAC_CACHE_PATH}/isaac-sim/data]=/root/.local/share/ov/data [${ISAAC_CACHE_PATH}/isaac-sim/documents]=/root/Documents # Feel free to include lines like the below to mount a workspace or a custom OG version # [/cvgl2/u/cgokmen/OmniGibson]=/omnigibson-src # [/cvgl2/u/cgokmen/my-project]=/my-project ) MOUNT_KWARGS="" for mount in "${!MOUNTS[@]}"; do # Verify mount path in local directory exists, otherwise, create it if [ ! -e "$mount" ]; then mkdir -p ${mount} fi # Add to mount kwargs we'll pass to enroot command later MOUNT_KWARGS="${MOUNT_KWARGS} --mount ${mount}:${MOUNTS[${mount}]}" done # Create the image if it doesn't already exist CONTAINER_NAME=omnigibson_${GPU_ID} enroot create --force --name ${CONTAINER_NAME} ${IMAGE_PATH} # Remove leading space in string ENV_KWARGS="${ENV_KWARGS:1}" MOUNT_KWARGS="${MOUNT_KWARGS:1}" # The last line here is the command you want to run inside the container. # Here I'm running some unit tests. enroot start \ --root \ --rw \ ${ENV_KWARGS} \ ${MOUNT_KWARGS} \ ${CONTAINER_NAME} \ source /isaac-sim/setup_conda_env.sh && pytest tests/test_object_states.py # Clean up the image if possible. enroot remove -f ${CONTAINER_NAME} ``` 6. Launch your job using `sbatch your_script.sh` - and profit!
5,782
Markdown
46.01626
490
0.717226
lucasapchagas/Omniverse/README.md
# OmniVerse API 🌌

OmniVerse API is a straightforward API that exposes only the basic CRUD routes, enabling efficient and consistent data manipulation.

It relies on the ViaCEP API, a well-known service that returns the address data for a given Brazilian postal code (CEP).

## Setup 🔧
OmniVerse API is built on top of the Java Spring Boot framework and is designed to be easy to install and deploy. For a quick setup you'll need a MySQL server, but the API itself is prepared to work with any database you want.

Follow the [MySQL Documentation](https://dev.mysql.com/doc/mysql-getting-started/en) in order to set up a working server.

1. Once your MySQL server is running, configure the API so it can connect to it. You'll need to modify the [**application.properties**](https://github.com/lucasapchagas/Omniverse/blob/main/src/main/resources/application.properties) file to your own needs.

    - `spring.datasource.url`, you must provide your MySQL server URL.
    - `spring.datasource.username`, you must provide your MySQL server username.
    - `spring.datasource.password`, you must provide your MySQL server password.

    ❗**If you provide a URL for a database that has not been created yet, the API will not start. Use `CREATE database <db_name>;` in order to properly create it.**

2. Building it 🔨

    To build the project, you need to have Java 17 installed, but you can easily change the version by modifying the application's [**pom.xml**](https://github.com/lucasapchagas/Omniverse/blob/main/pom.xml) file. The project uses Maven as its build platform, with all the conveniences that Maven brings.

    - You can build it just by running `./mvnw package` in the project root folder; the resulting JAR will be generated in the `/target/` folder.

3. Using it 😯

    Using the API is as simple as building it. Since Java runs on the JVM, deployment is straightforward: simply run the compiled JAR locally or on any cloud service.

    - You can also use a [RELEASE](https://github.com/lucasapchagas/Omniverse/releases/tag/RELEASE) instead of compiling it. Please, always use the latest one.
    - To run it, use the following command: `java -jar OmniVerse-0.0.1-SNAPSHOT.jar`. By default it will serve the API at [`http://localhost:8080/`](http://localhost:8080/).
    - Use the OmniverseCLI to test the API: https://github.com/lucasapchagas/OmniverseCLI

## Features 🪶
- Uses the **ViaCEP API** in order to register users' addresses.
- Migrations with the Flyway library.
- Data validation with Spring Boot data validation.
- JPA design pattern.

## API Usage 🍪
The OmniVerse API is user-friendly and comprises only 5 routes that align with the CRUD standard. You can use popular API testing tools like Insomnia. We have created a configuration that can be accessed on pastebin by [clicking here](https://pastebin.com/f1rBDfZP). Import it into your Insomnia to streamline your testing process. A minimal Python request sketch is also included at the end of this README.

### What is a user?
Example:
```json
{
	"id": 8,
	"name": "Lucas",
	"email": "[email protected]",
	"address": {
		"cep": "69050500",
		"place": "Rua Peru",
		"complement": "",
		"neighborhood": "Parque 10 de Novembro",
		"locality": "Manaus",
		"uf": "AM"
	}
}
```

#### Register a user

```http
POST /user
```

| Parameter | Type     | Description              |
| :-------- | :------- | :----------------------- |
| `name`    | `string` | User name                |
| `email`   | `string` | Valid email              |
| `cep`     | `string` | Valid CEP, just numbers. |

#### Returns a user

```http
GET /user/{id}
```

#### Returns all users

```http
GET /user
```

#### Delete a user

```http
DELETE /user/{id}
```

#### Update a user
Only the fields you want to modify need to be sent as parameters. The user `id` is **required**.

```http
PUT /user
```

| Parameter | Type     | Description              |
| :-------- | :------- | :----------------------- |
| `id`      | `int`    | User id                  |
| `name`    | `string` | User name                |
| `email`   | `string` | Valid email              |
| `cep`     | `string` | Valid CEP, just numbers. |

## Roadmap

- [x] Implement the JPA pattern.
- [x] Usage of the **ViaCEP API** in order to generate users' addresses.
- [x] Implement Flyway migrations for our database.
- [x] Implement Spring Boot data validation.
- [ ] Implement the Spring Boot security module.
- [ ] Implement JSON Web Token usage.
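For a quick smoke test of the routes described in **API Usage**, here is a minimal sketch using Python's `requests` library (an assumption; any HTTP client works). It assumes the API is running locally on the default `http://localhost:8080`, and the field values are placeholders:

```python
import requests

BASE_URL = "http://localhost:8080"

# Register a user (POST /user); the payload fields match the table in "API Usage"
payload = {"name": "Lucas", "email": "lucas@example.com", "cep": "69050500"}
created = requests.post(f"{BASE_URL}/user", json=payload).json()

# Fetch the same user back by id (GET /user/{id})
user = requests.get(f"{BASE_URL}/user/{created['id']}").json()
print(user)
```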
4,499
Markdown
35.290322
302
0.673038
Toni-SM/skrl/pyproject.toml
[project] name = "skrl" version = "1.1.0" description = "Modular and flexible library for reinforcement learning on PyTorch and JAX" readme = "README.md" requires-python = ">=3.6" license = {text = "MIT License"} authors = [ {name = "Toni-SM"}, ] maintainers = [ {name = "Toni-SM"}, ] keywords = ["reinforcement-learning", "machine-learning", "reinforcement", "machine", "learning", "rl"] classifiers = [ "License :: OSI Approved :: MIT License", "Intended Audience :: Science/Research", "Topic :: Scientific/Engineering", "Topic :: Scientific/Engineering :: Artificial Intelligence", "Programming Language :: Python :: 3", "Operating System :: OS Independent", ] # dependencies / optional-dependencies dependencies = [ "gym", "gymnasium", "tqdm", "packaging", "tensorboard", ] [project.optional-dependencies] torch = [ "torch>=1.9", ] jax = [ "jax>=0.4.3", "jaxlib>=0.4.3", "flax", "optax", ] all = [ "torch>=1.9", "jax>=0.4.3", "jaxlib>=0.4.3", "flax", "optax", ] # urls [project.urls] "Homepage" = "https://github.com/Toni-SM/skrl" "Documentation" = "https://skrl.readthedocs.io" "Discussions" = "https://github.com/Toni-SM/skrl/discussions" "Bug Reports" = "https://github.com/Toni-SM/skrl/issues" "Say Thanks!" = "https://github.com/Toni-SM" "Source" = "https://github.com/Toni-SM/skrl" [tool.yapf] # run: yapf -p -m -i -r <folder> based_on_style = "pep8" blank_line_before_nested_class_or_def = false blank_lines_between_top_level_imports_and_variables = 2 column_limit = 120 join_multiple_lines = false space_between_ending_comma_and_closing_bracket = false spaces_around_power_operator = true split_all_top_level_comma_separated_values = true split_before_arithmetic_operator = true split_before_dict_set_generator = false split_before_dot = true split_complex_comprehension = true coalesce_brackets = true [tool.codespell] # run: codespell <folder> skip = "./docs/_build,./docs/source/_static" quiet-level = 3 count = "" [tool.isort] use_parentheses = false line_length = 120 multi_line_output = 3 lines_after_imports = 2 known_annotation = ["typing"] known_framework = [ "torch", "jax", "jaxlib", "flax", "optax", "numpy", ] sections = [ "FUTURE", "ANNOTATION", "STDLIB", "THIRDPARTY", "FRAMEWORK", "FIRSTPARTY", "LOCALFOLDER", ] no_lines_before = "THIRDPARTY" skip = ["docs"]
2,365
TOML
21.112149
103
0.671036
Toni-SM/skrl/CONTRIBUTING.md
First of all, **thank you**... For what? Because you are dedicating some time to reading these guidelines and possibly thinking about contributing

<hr>

### I just want to ask a question!

If you have a question, please do not open an issue for it. Instead, use the following resources (you will get a faster response):

- [skrl's GitHub discussions](https://github.com/Toni-SM/skrl/discussions), a place to ask questions and discuss the project
- [Isaac Gym's forum](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/isaac-gym/322), a place to post your questions, find past answers, or just chat with other members of the community about Isaac Gym topics
- [Omniverse Isaac Sim's forum](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/simulation/69), a place to post your questions, find past answers, or just chat with other members of the community about Omniverse Isaac Sim/Gym topics

### I have found a (good) bug. What can I do?

Open an issue on [skrl's GitHub issues](https://github.com/Toni-SM/skrl/issues) and describe the bug. If possible, please provide some of the following items:

- Minimum code that reproduces the bug...
- or the exact steps to reproduce it
- The error log or a screenshot of it
- A link to the source code of the library that you are using (some problems may be due to the use of older versions. If possible, always use the latest version)
- Any other information that you think may be useful or help to reproduce/describe the problem

### I want to contribute, but I don't know how

There is a [board](https://github.com/users/Toni-SM/projects/2/views/8) containing relevant future implementations which can be a good starting place to identify contributions. Please consider the following points.

#### Notes about contributing

- Try to **communicate your change first** to [discuss](https://github.com/Toni-SM/skrl/discussions) the implementation if you want to add a new feature or change an existing one
- Modify only the minimum amount of code required and the files needed to make the change
- Use the provided [pre-commit](https://pre-commit.com/) hooks to format the code. Install them by running `pre-commit install` in the root of the repository; running them periodically using `pre-commit run --all` helps reduce commit errors
- Changes that are cosmetic in nature (code formatting, removing whitespace, etc.) or that correct grammatical, spelling or typo errors, and that do not add anything substantial to the functionality of the library, will generally not be accepted as a pull request
  - The only exception is changes that result from the use of the pre-commit hooks

#### Coding conventions

**skrl** is designed with a focus on modularity, readability, simplicity and transparency of algorithm implementation.

The file system structure groups components according to their functionality. Library components only inherit (and must inherit) from a single base class (no multilevel or multiple inheritance) that provides a uniform interface and implements common functionality that is not tied to the implementation details of the algorithms.

Read the code a little bit and you will understand it at first glance...

Also - Use 4 indentation spaces - Follow, as much as possible, the PEP8 Style Guide for Python code - Document each module, class, function or method using the reStructuredText format - Annotate all functions, both for the parameters and for the return value - Follow the commit message style guide for Git described in https://commit.style - Capitalize (the first letter) and omit any trailing punctuation - Write it in the imperative tense - Aim for about 50 (or 72) characters - Add import statements at the top of each module as follows: ```ini function annotation (e.g. typing) # insert an empty line python libraries and other libraries (e.g. gym, numpy, time, etc.) # insert an empty line machine learning framework modules (e.g. torch, torch.nn) # insert an empty line skrl components ``` <hr> Thank you once again, Toni
4,086
Markdown
58.231883
447
0.773128
Toni-SM/skrl/CHANGELOG.md
# Changelog

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).

## [1.1.0] - 2024-02-12
### Added
- MultiCategorical mixin to operate MultiDiscrete action spaces

### Changed (breaking changes)
- Rename the `ManualTrainer` to `StepTrainer`
- Output training/evaluation progress messages to system's stdout
- Get single observation/action spaces for vectorized environments
- Update Isaac Orbit environment wrapper

## [1.0.0] - 2023-08-16

Transition from pre-release versions (`1.0.0-rc.1` and `1.0.0-rc.2`) to a stable version.

This release also announces the publication of the **skrl** paper in the Journal of Machine Learning Research (JMLR): https://www.jmlr.org/papers/v24/23-0112.html

Summary of the most relevant features:
- JAX support
- New documentation theme and structure
- Multi-agent Reinforcement Learning (MARL)

## [1.0.0-rc.2] - 2023-08-11
### Added
- Get truncation from `time_outs` info in Isaac Gym, Isaac Orbit and Omniverse Isaac Gym environments
- Time-limit (truncation) bootstrapping in on-policy actor-critic agents
- Model instantiators `initial_log_std` parameter to set the log standard deviation's initial value

### Changed (breaking changes)
- Structure environment loaders and wrappers file hierarchy coherently
  Import statements now follow the convention below:
  - Wrappers (e.g.):
    - `from skrl.envs.wrappers.torch import wrap_env`
    - `from skrl.envs.wrappers.jax import wrap_env`
  - Loaders (e.g.):
    - `from skrl.envs.loaders.torch import load_omniverse_isaacgym_env`
    - `from skrl.envs.loaders.jax import load_omniverse_isaacgym_env`

### Changed
- Drop support for versions prior to PyTorch 1.9 (1.8.0 and 1.8.1)

## [1.0.0-rc.1] - 2023-07-25
### Added
- JAX support (with Flax and Optax)
- RPO agent
- IPPO and MAPPO multi-agent
- Multi-agent base class
- Bi-DexHands environment loader
- Wrapper for PettingZoo and Bi-DexHands environments
- Parameters `num_envs`, `headless` and `cli_args` for configuring Isaac Gym, Isaac Orbit and Omniverse Isaac Gym environments when they are loaded

### Changed
- Migrate to `pyproject.toml` Python package development
- Define ML framework dependencies as optional dependencies in the library installer
- Move agent implementations with recurrent models to a separate file
- Allow closing the environment at the end of execution instead of after training/evaluation
- Documentation theme from *sphinx_rtd_theme* to *furo*
- Update documentation structure and examples

### Fixed
- Compatibility for Isaac Sim or OmniIsaacGymEnvs (2022.2.0 or earlier)
- Disable PyTorch gradient computation during the environment stepping
- Get categorical models' entropy
- Typo in `KLAdaptiveLR` learning rate scheduler (keep the old name for compatibility with the examples of previous versions. 
The old name will be removed in future releases) ## [0.10.2] - 2023-03-23 ### Changed - Update loader and utils for OmniIsaacGymEnvs 2022.2.1.0 - Update Omniverse Isaac Gym real-world examples ## [0.10.1] - 2023-01-26 ### Fixed - Tensorboard writer instantiation when `write_interval` is zero ## [0.10.0] - 2023-01-22 ### Added - Isaac Orbit environment loader - Wrap an Isaac Orbit environment - Gaussian-Deterministic shared model instantiator ## [0.9.1] - 2023-01-17 ### Added - Utility for downloading models from Hugging Face Hub ### Fixed - Initialization of agent components if they have not been defined - Manual trainer `train`/`eval` method default arguments ## [0.9.0] - 2023-01-13 ### Added - Support for Farama Gymnasium interface - Wrapper for robosuite environments - Weights & Biases integration - Set the running mode (training or evaluation) of the agents - Allow clipping the gradient norm for DDPG, TD3 and SAC agents - Initialize model biases - Add RNN (RNN, LSTM, GRU and any other variant) support for A2C, DDPG, PPO, SAC, TD3 and TRPO agents - Allow disabling training/evaluation progressbar - Farama Shimmy and robosuite examples - KUKA LBR iiwa real-world example ### Changed (breaking changes) - Forward model inputs as a Python dictionary - Returns a Python dictionary with extra output values in model calls ### Changed - Adopt the implementation of `terminated` and `truncated` over `done` for all environments ### Fixed - Omniverse Isaac Gym simulation speed for the Franka Emika real-world example - Call agents' method `record_transition` instead of parent method to allow storing samples in memories during evaluation - Move TRPO policy optimization out of the value optimization loop - Access to the categorical model distribution - Call reset only once for Gym/Gymnasium vectorized environments ### Removed - Deprecated method `start` in trainers ## [0.8.0] - 2022-10-03 ### Added - AMP agent for physics-based character animation - Manual trainer - Gaussian model mixin - Support for creating shared models - Parameter `role` to model methods - Wrapper compatibility with the new OpenAI Gym environment API - Internal library colored logger - Migrate checkpoints/models from other RL libraries to skrl models/agents - Configuration parameter `store_separately` to agent configuration dict - Save/load agent modules (models, optimizers, preprocessors) - Set random seed and configure deterministic behavior for reproducibility - Benchmark results for Isaac Gym and Omniverse Isaac Gym on the GitHub discussion page - Franka Emika real-world example ### Changed (breaking changes) - Models implementation as Python mixin ### Changed - Multivariate Gaussian model (`GaussianModel` until 0.7.0) to `MultivariateGaussianMixin` - Trainer's `cfg` parameter position and default values - Show training/evaluation display progress using `tqdm` - Update Isaac Gym and Omniverse Isaac Gym examples ### Fixed - Missing recursive arguments during model weights initialization - Tensor dimension when computing preprocessor parallel variance - Models' clip tensors dtype to `float32` ### Removed - Parameter `inference` from model methods - Configuration parameter `checkpoint_policy_only` from agent configuration dict ## [0.7.0] - 2022-07-11 ### Added - A2C agent - Isaac Gym (preview 4) environment loader - Wrap an Isaac Gym (preview 4) environment - Support for OpenAI Gym vectorized environments - Running standard scaler for input preprocessing - Installation from PyPI (`pip install skrl`) ## [0.6.0] - 2022-06-09 ### Added 
- Omniverse Isaac Gym environment loader - Wrap an Omniverse Isaac Gym environment - Save best models during training ## [0.5.0] - 2022-05-18 ### Added - TRPO agent - DeepMind environment wrapper - KL Adaptive learning rate scheduler - Handle `gym.spaces.Dict` observation spaces (OpenAI Gym and DeepMind environments) - Forward environment info to agent `record_transition` method - Expose and document the random seeding mechanism - Define rewards shaping function in agents' config - Define learning rate scheduler in agents' config - Improve agent's algorithm description in documentation (PPO and TRPO at the moment) ### Changed - Compute the Generalized Advantage Estimation (GAE) in agent `_update` method - Move noises definition to `resources` folder - Update the Isaac Gym examples ### Removed - `compute_functions` for computing the GAE from memory base class ## [0.4.1] - 2022-03-22 ### Added - Examples of all Isaac Gym environments (preview 3) - Tensorboard file iterator for data post-processing ### Fixed - Init and evaluate agents in ParallelTrainer ## [0.4.0] - 2022-03-09 ### Added - CEM, SARSA and Q-learning agents - Tabular model - Parallel training using multiprocessing - Isaac Gym utilities ### Changed - Initialize agents in a separate method - Change the name of the `networks` argument to `models` ### Fixed - Reset environments after post-processing ## [0.3.0] - 2022-02-07 ### Added - DQN and DDQN agents - Export memory to files - Postprocessing utility to iterate over memory files - Model instantiator utility to allow fast development - More examples and contents in the documentation ### Fixed - Clip actions using the whole space's limits ## [0.2.0] - 2022-01-18 ### Added - First official release
8,132
Markdown
34.207792
162
0.764142
Toni-SM/skrl/README.md
[![pypi](https://img.shields.io/pypi/v/skrl)](https://pypi.org/project/skrl) [<img src="https://img.shields.io/badge/%F0%9F%A4%97%20models-hugging%20face-F8D521">](https://huggingface.co/skrl) ![discussions](https://img.shields.io/github/discussions/Toni-SM/skrl) <br> [![license](https://img.shields.io/github/license/Toni-SM/skrl)](https://github.com/Toni-SM/skrl) <span>&nbsp;&nbsp;&nbsp;&nbsp;</span> [![docs](https://readthedocs.org/projects/skrl/badge/?version=latest)](https://skrl.readthedocs.io/en/latest/?badge=latest) [![pytest](https://github.com/Toni-SM/skrl/actions/workflows/python-test.yml/badge.svg)](https://github.com/Toni-SM/skrl/actions/workflows/python-test.yml) [![pre-commit](https://github.com/Toni-SM/skrl/actions/workflows/pre-commit.yml/badge.svg)](https://github.com/Toni-SM/skrl/actions/workflows/pre-commit.yml) <br> <p align="center"> <a href="https://skrl.readthedocs.io"> <img width="300rem" src="https://raw.githubusercontent.com/Toni-SM/skrl/main/docs/source/_static/data/logo-light-mode.png"> </a> </p> <h2 align="center" style="border-bottom: 0 !important;">SKRL - Reinforcement Learning library</h2> <br> **skrl** is an open-source modular library for Reinforcement Learning written in Python (on top of [PyTorch](https://pytorch.org/) and [JAX](https://jax.readthedocs.io)) and designed with a focus on modularity, readability, simplicity, and transparency of algorithm implementation. In addition to supporting the OpenAI [Gym](https://www.gymlibrary.dev) / Farama [Gymnasium](https://gymnasium.farama.org) and [DeepMind](https://github.com/deepmind/dm_env) and other environment interfaces, it allows loading and configuring [NVIDIA Isaac Gym](https://developer.nvidia.com/isaac-gym/), [NVIDIA Isaac Orbit](https://isaac-orbit.github.io/orbit/index.html) and [NVIDIA Omniverse Isaac Gym](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_gym_isaac_gym.html) environments, enabling agents' simultaneous training by scopes (subsets of environments among all available environments), which may or may not share resources, in the same run. <br> ### Please, visit the documentation for usage details and examples <strong>https://skrl.readthedocs.io</strong> <br> > **Note:** This project is under **active continuous development**. Please make sure you always have the latest version. Visit the [develop](https://github.com/Toni-SM/skrl/tree/develop) branch or its [documentation](https://skrl.readthedocs.io/en/develop) to access the latest updates to be released. <br> ### Citing this library To cite this library in publications, please use the following reference: ```bibtex @article{serrano2023skrl, author = {Antonio Serrano-Muñoz and Dimitrios Chrysostomou and Simon Bøgh and Nestor Arana-Arexolaleiba}, title = {skrl: Modular and Flexible Library for Reinforcement Learning}, journal = {Journal of Machine Learning Research}, year = {2023}, volume = {24}, number = {254}, pages = {1--9}, url = {http://jmlr.org/papers/v24/23-0112.html} } ```
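As a minimal taste of the library (a sketch, not an official example; see the documentation for complete training scripts), a Gymnasium environment can be wrapped for use with skrl's PyTorch components as follows. It assumes `gymnasium` and the PyTorch optional dependencies are installed:

```python
import gymnasium as gym

from skrl.envs.wrappers.torch import wrap_env

# wrap_env auto-detects the environment type (Gym/Gymnasium, Isaac Gym, Omniverse Isaac Gym, ...)
env = wrap_env(gym.make("Pendulum-v1"))
print(env.observation_space, env.action_space)
```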
3,043
Markdown
59.879999
942
0.744003
Toni-SM/skrl/skrl/__init__.py
from typing import Union import logging import sys import numpy as np __all__ = ["__version__", "logger", "config"] # read library version from metadata try: import importlib.metadata __version__ = importlib.metadata.version("skrl") except ImportError: __version__ = "unknown" # logger with format class _Formatter(logging.Formatter): _format = "[%(name)s:%(levelname)s] %(message)s" _formats = {logging.DEBUG: f"\x1b[38;20m{_format}\x1b[0m", logging.INFO: f"\x1b[38;20m{_format}\x1b[0m", logging.WARNING: f"\x1b[33;20m{_format}\x1b[0m", logging.ERROR: f"\x1b[31;20m{_format}\x1b[0m", logging.CRITICAL: f"\x1b[31;1m{_format}\x1b[0m"} def format(self, record): return logging.Formatter(self._formats.get(record.levelno)).format(record) _handler = logging.StreamHandler() _handler.setLevel(logging.DEBUG) _handler.setFormatter(_Formatter()) logger = logging.getLogger("skrl") logger.setLevel(logging.DEBUG) logger.addHandler(_handler) # machine learning framework configuration class _Config(object): def __init__(self) -> None: """Machine learning framework specific configuration """ class JAX(object): def __init__(self) -> None: """JAX configuration """ self._backend = "numpy" self._key = np.array([0, 0], dtype=np.uint32) @property def backend(self) -> str: """Backend used by the different components to operate and generate arrays This configuration excludes models and optimizers. Supported backend are: ``"numpy"`` and ``"jax"`` """ return self._backend @backend.setter def backend(self, value: str) -> None: if value not in ["numpy", "jax"]: raise ValueError("Invalid jax backend. Supported values are: numpy, jax") self._backend = value @property def key(self) -> "jax.Array": """Pseudo-random number generator (PRNG) key """ if isinstance(self._key, np.ndarray): try: import jax self._key = jax.random.PRNGKey(self._key[1]) except ImportError: pass return self._key @key.setter def key(self, value: Union[int, "jax.Array"]) -> None: if type(value) is int: # don't import JAX if it has not been imported before if "jax" in sys.modules: import jax value = jax.random.PRNGKey(value) else: value = np.array([0, value], dtype=np.uint32) self._key = value self.jax = JAX() config = _Config()
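# Example usage of the configuration above (a sketch, not part of the original module):
#
#   from skrl import config, logger
#
#   config.jax.backend = "jax"   # switch the array backend from the default "numpy"
#   config.jax.key = 42          # stored as a NumPy array until JAX is imported, then promoted to a PRNG key
#   logger.info(f"jax backend: {config.jax.backend}")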
2,993
Python
30.851064
93
0.529569
Toni-SM/skrl/skrl/envs/jax.py
# TODO: Delete this file in future releases from skrl import logger # isort: skip logger.warning("Using `from skrl.envs.jax import ...` is deprecated and will be removed in future versions.") logger.warning(" - Import loaders using `from skrl.envs.loaders.jax import ...`") logger.warning(" - Import wrappers using `from skrl.envs.wrappers.jax import ...`") from skrl.envs.loaders.jax import ( load_bidexhands_env, load_isaac_orbit_env, load_isaacgym_env_preview2, load_isaacgym_env_preview3, load_isaacgym_env_preview4, load_omniverse_isaacgym_env ) from skrl.envs.wrappers.jax import MultiAgentEnvWrapper, Wrapper, wrap_env
654
Python
35.388887
109
0.740061
Toni-SM/skrl/skrl/envs/torch.py
# TODO: Delete this file in future releases from skrl import logger # isort: skip logger.warning("Using `from skrl.envs.torch import ...` is deprecated and will be removed in future versions.") logger.warning(" - Import loaders using `from skrl.envs.loaders.torch import ...`") logger.warning(" - Import wrappers using `from skrl.envs.wrappers.torch import ...`") from skrl.envs.loaders.torch import ( load_bidexhands_env, load_isaac_orbit_env, load_isaacgym_env_preview2, load_isaacgym_env_preview3, load_isaacgym_env_preview4, load_omniverse_isaacgym_env ) from skrl.envs.wrappers.torch import MultiAgentEnvWrapper, Wrapper, wrap_env
664
Python
35.944442
111
0.743976
Toni-SM/skrl/skrl/envs/loaders/torch/bidexhands_envs.py
from typing import Optional, Sequence import os import sys from contextlib import contextmanager from skrl import logger __all__ = ["load_bidexhands_env"] @contextmanager def cwd(new_path: str) -> None: """Context manager to change the current working directory This function restores the current working directory after the context manager exits :param new_path: The new path to change to :type new_path: str """ current_path = os.getcwd() os.chdir(new_path) try: yield finally: os.chdir(current_path) def _print_cfg(d, indent=0) -> None: """Print the environment configuration :param d: The dictionary to print :type d: dict :param indent: The indentation level (default: ``0``) :type indent: int, optional """ for key, value in d.items(): if isinstance(value, dict): _print_cfg(value, indent + 1) else: print(" | " * indent + f" |-- {key}: {value}") def load_bidexhands_env(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], bidexhands_path: str = "", show_cfg: bool = True): """Load a Bi-DexHands environment :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``--task TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: Isaac Gym environment configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param bidexhands_path: The path to the ``bidexhands`` directory (default: ``""``). If empty, the path will obtained from bidexhands package metadata :type bidexhands_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The bidexhands package is not installed or the path is wrong :return: Bi-DexHands environment (preview 4) :rtype: isaacgymenvs.tasks.base.vec_task.VecTask """ import isaacgym # isort:skip import bidexhands # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("--task"): defined = True break # get task name from command line arguments if defined: arg_index = sys.argv.index("--task") + 1 if arg_index >= len(sys.argv): raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument") if task_name and task_name != sys.argv[arg_index]: logger.warning(f"Overriding task ({task_name}) with command line argument ({sys.argv[arg_index]})") # get task name from function arguments else: if task_name: sys.argv.append("--task") sys.argv.append(task_name) else: raise ValueError("No task name defined. 
Set the task_name parameter or use --task <task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("--num_envs"): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None: logger.warning("Overriding num_envs with command line argument --num_envs") # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append("--num_envs") sys.argv.append(str(num_envs)) # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("--headless"): defined = True break # get headless from command line arguments if defined: if headless is not None: logger.warning("Overriding headless with command line argument --headless") # get headless from function arguments elif headless is not None: sys.argv.append("--headless") # others command line arguments sys.argv += cli_args # get bidexhands path from bidexhands package metadata if not bidexhands_path: if not hasattr(bidexhands, "__path__"): raise RuntimeError("bidexhands package is not installed") path = list(bidexhands.__path__)[0] else: path = bidexhands_path sys.path.append(path) status = True try: from utils.config import get_args, load_cfg, parse_sim_params # type: ignore from utils.parse_task import parse_task # type: ignore from utils.process_marl import get_AgentIndex # type: ignore except Exception as e: status = False logger.error(f"Failed to import required packages: {e}") if not status: raise RuntimeError(f"The path ({path}) is not valid") args = get_args() # print config if show_cfg: print(f"\nBi-DexHands environment ({args.task})") _print_cfg(vars(args)) # update task arguments args.task_type = "MultiAgent" # TODO: get from parameters args.cfg_train = os.path.join(path, args.cfg_train) args.cfg_env = os.path.join(path, args.cfg_env) # load environment with cwd(path): cfg, cfg_train, _ = load_cfg(args) agent_index = get_AgentIndex(cfg) sim_params = parse_sim_params(args, cfg, cfg_train) task, env = parse_task(args, cfg, cfg_train, sim_params, agent_index) return env
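# Example usage (a sketch; the task name below is an assumption taken from the Bi-DexHands task list):
#
#   from skrl.envs.loaders.torch import load_bidexhands_env
#
#   env = load_bidexhands_env(task_name="ShadowHandOver", num_envs=64, headless=True)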
6,552
Python
36.445714
132
0.628205
Toni-SM/skrl/skrl/envs/loaders/torch/__init__.py
from skrl.envs.loaders.torch.bidexhands_envs import load_bidexhands_env from skrl.envs.loaders.torch.isaac_orbit_envs import load_isaac_orbit_env from skrl.envs.loaders.torch.isaacgym_envs import ( load_isaacgym_env_preview2, load_isaacgym_env_preview3, load_isaacgym_env_preview4 ) from skrl.envs.loaders.torch.omniverse_isaacgym_envs import load_omniverse_isaacgym_env
383
Python
41.666662
87
0.804178
Toni-SM/skrl/skrl/envs/loaders/torch/isaacgym_envs.py
from typing import Optional, Sequence import os import sys from contextlib import contextmanager from skrl import logger __all__ = ["load_isaacgym_env_preview2", "load_isaacgym_env_preview3", "load_isaacgym_env_preview4"] @contextmanager def cwd(new_path: str) -> None: """Context manager to change the current working directory This function restores the current working directory after the context manager exits :param new_path: The new path to change to :type new_path: str """ current_path = os.getcwd() os.chdir(new_path) try: yield finally: os.chdir(current_path) def _omegaconf_to_dict(config) -> dict: """Convert OmegaConf config to dict :param config: The OmegaConf config :type config: OmegaConf.Config :return: The config as dict :rtype: dict """ # return config.to_container(dict) from omegaconf import DictConfig d = {} for k, v in config.items(): d[k] = _omegaconf_to_dict(v) if isinstance(v, DictConfig) else v return d def _print_cfg(d, indent=0) -> None: """Print the environment configuration :param d: The dictionary to print :type d: dict :param indent: The indentation level (default: ``0``) :type indent: int, optional """ for key, value in d.items(): if isinstance(value, dict): _print_cfg(value, indent + 1) else: print(" | " * indent + f" |-- {key}: {value}") def load_isaacgym_env_preview2(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], isaacgymenvs_path: str = "", show_cfg: bool = True): """Load an Isaac Gym environment (preview 2) :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``--task TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: Isaac Gym environment configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param isaacgymenvs_path: The path to the ``rlgpu`` directory (default: ``""``). If empty, the path will obtained from isaacgym package metadata :type isaacgymenvs_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The isaacgym package is not installed or the path is wrong :return: Isaac Gym environment (preview 2) :rtype: tasks.base.vec_task.VecTask """ import isaacgym # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("--task"): defined = True break # get task name from command line arguments if defined: arg_index = sys.argv.index("--task") + 1 if arg_index >= len(sys.argv): raise ValueError("No task name defined. 
Set the task_name parameter or use --task <task_name> as command line argument") if task_name and task_name != sys.argv[arg_index]: logger.warning(f"Overriding task ({task_name}) with command line argument ({sys.argv[arg_index]})") # get task name from function arguments else: if task_name: sys.argv.append("--task") sys.argv.append(task_name) else: raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("--num_envs"): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None: logger.warning("Overriding num_envs with command line argument --num_envs") # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append("--num_envs") sys.argv.append(str(num_envs)) # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("--headless"): defined = True break # get headless from command line arguments if defined: if headless is not None: logger.warning("Overriding headless with command line argument --headless") # get headless from function arguments elif headless is not None: sys.argv.append("--headless") # others command line arguments sys.argv += cli_args # get isaacgym envs path from isaacgym package metadata if not isaacgymenvs_path: if not hasattr(isaacgym, "__path__"): raise RuntimeError("isaacgym package is not installed or could not be accessed by the current Python environment") path = isaacgym.__path__ path = os.path.join(path[0], "..", "rlgpu") else: path = isaacgymenvs_path # import required packages sys.path.append(path) status = True try: from utils.config import get_args, load_cfg, parse_sim_params # type: ignore from utils.parse_task import parse_task # type: ignore except Exception as e: status = False logger.error(f"Failed to import required packages: {e}") if not status: raise RuntimeError(f"Path ({path}) is not valid or the isaacgym package is not installed in editable mode (pip install -e .)") args = get_args() # print config if show_cfg: print(f"\nIsaac Gym environment ({args.task})") _print_cfg(vars(args)) # update task arguments args.cfg_train = os.path.join(path, args.cfg_train) args.cfg_env = os.path.join(path, args.cfg_env) # load environment with cwd(path): cfg, cfg_train, _ = load_cfg(args) sim_params = parse_sim_params(args, cfg, cfg_train) task, env = parse_task(args, cfg, cfg_train, sim_params) return env def load_isaacgym_env_preview3(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], isaacgymenvs_path: str = "", show_cfg: bool = True): """Load an Isaac Gym environment (preview 3) Isaac Gym benchmark environments: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``task=TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). 
If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: IsaacGymEnvs configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param isaacgymenvs_path: The path to the ``isaacgymenvs`` directory (default: ``""``). If empty, the path will obtained from isaacgymenvs package metadata :type isaacgymenvs_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The isaacgymenvs package is not installed or the path is wrong :return: Isaac Gym environment (preview 3) :rtype: isaacgymenvs.tasks.base.vec_task.VecTask """ import isaacgym import isaacgymenvs from hydra._internal.hydra import Hydra from hydra._internal.utils import create_automatic_config_search_path, get_args_parser from hydra.types import RunMode from omegaconf import OmegaConf # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("task="): defined = True break # get task name from command line arguments if defined: if task_name and task_name != arg.split("task=")[1].split(" ")[0]: logger.warning("Overriding task name ({}) with command line argument ({})" \ .format(task_name, arg.split("task=")[1].split(" ")[0])) # get task name from function arguments else: if task_name: sys.argv.append(f"task={task_name}") else: raise ValueError("No task name defined. Set task_name parameter or use task=<task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("num_envs="): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None and num_envs != int(arg.split("num_envs=")[1].split(" ")[0]): logger.warning("Overriding num_envs ({}) with command line argument (num_envs={})" \ .format(num_envs, arg.split("num_envs=")[1].split(" ")[0])) # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append(f"num_envs={num_envs}") # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("headless="): defined = True break # get headless from command line arguments if defined: if headless is not None and str(headless).lower() != arg.split("headless=")[1].split(" ")[0].lower(): logger.warning("Overriding headless ({}) with command line argument (headless={})" \ .format(headless, arg.split("headless=")[1].split(" ")[0])) # get headless from function arguments elif headless is not None: sys.argv.append(f"headless={headless}") # others command line arguments sys.argv += cli_args # get isaacgymenvs path from isaacgymenvs package metadata if isaacgymenvs_path == "": if not hasattr(isaacgymenvs, "__path__"): raise RuntimeError("isaacgymenvs package is not installed") isaacgymenvs_path = list(isaacgymenvs.__path__)[0] config_path = os.path.join(isaacgymenvs_path, "cfg") # set omegaconf resolvers try: OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower()) except Exception as e: pass try: OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower()) except Exception as e: pass try: OmegaConf.register_new_resolver('if', lambda condition, a, b: a if condition else b) except Exception as e: pass try: 
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg) except Exception as e: pass # get hydra config without use @hydra.main config_file = "config" args = get_args_parser().parse_args() search_path = create_automatic_config_search_path(config_file, None, config_path) hydra_object = Hydra.create_main_hydra2(task_name='load_isaacgymenv', config_search_path=search_path) config = hydra_object.compose_config(config_file, args.overrides, run_mode=RunMode.RUN) cfg = _omegaconf_to_dict(config.task) # print config if show_cfg: print(f"\nIsaac Gym environment ({config.task.name})") _print_cfg(cfg) # load environment sys.path.append(isaacgymenvs_path) from tasks import isaacgym_task_map # type: ignore try: env = isaacgym_task_map[config.task.name](cfg=cfg, sim_device=config.sim_device, graphics_device_id=config.graphics_device_id, headless=config.headless) except TypeError as e: env = isaacgym_task_map[config.task.name](cfg=cfg, rl_device=config.rl_device, sim_device=config.sim_device, graphics_device_id=config.graphics_device_id, headless=config.headless, virtual_screen_capture=config.capture_video, # TODO: check force_render=config.force_render) return env def load_isaacgym_env_preview4(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], isaacgymenvs_path: str = "", show_cfg: bool = True): """Load an Isaac Gym environment (preview 4) Isaac Gym benchmark environments: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``task=TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: IsaacGymEnvs configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param isaacgymenvs_path: The path to the ``isaacgymenvs`` directory (default: ``""``). If empty, the path will obtained from isaacgymenvs package metadata :type isaacgymenvs_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The isaacgymenvs package is not installed or the path is wrong :return: Isaac Gym environment (preview 4) :rtype: isaacgymenvs.tasks.base.vec_task.VecTask """ return load_isaacgym_env_preview3(task_name, num_envs, headless, cli_args, isaacgymenvs_path, show_cfg)
16,639
Python
42.446475
134
0.615241
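A minimal usage sketch for the IsaacGymEnvs loaders defined above, assuming isaacgym and IsaacGymEnvs are installed; the task name "Cartpole" and the argument values are illustrative only, and any of them can instead be supplied on the command line, which takes priority.

from skrl.envs.loaders.torch import load_isaacgym_env_preview4

# "Cartpole" is an example IsaacGymEnvs task name; CLI arguments (task=..., num_envs=...)
# override these function parameters, as documented above
env = load_isaacgym_env_preview4(task_name="Cartpole",
                                 num_envs=64,
                                 headless=True,
                                 show_cfg=True)
print(env.observation_space, env.action_space)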
Toni-SM/skrl/skrl/envs/loaders/torch/isaac_orbit_envs.py
from typing import Optional, Sequence import os import sys from skrl import logger __all__ = ["load_isaac_orbit_env"] def _print_cfg(d, indent=0) -> None: """Print the environment configuration :param d: The dictionary to print :type d: dict :param indent: The indentation level (default: ``0``) :type indent: int, optional """ for key, value in d.items(): if isinstance(value, dict): _print_cfg(value, indent + 1) else: print(" | " * indent + f" |-- {key}: {value}") def load_isaac_orbit_env(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], show_cfg: bool = True): """Load an Isaac Orbit environment Isaac Orbit: https://isaac-orbit.github.io/orbit/index.html This function includes the definition and parsing of command line arguments used by Isaac Orbit: - ``--headless``: Force display off at all times - ``--cpu``: Use CPU pipeline - ``--num_envs``: Number of environments to simulate - ``--task``: Name of the task - ``--seed``: Seed used for the environment :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``--task TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: Isaac Orbit configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :return: Isaac Orbit environment :rtype: gym.Env """ import argparse import atexit import gym # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("--task"): defined = True break # get task name from command line arguments if defined: arg_index = sys.argv.index("--task") + 1 if arg_index >= len(sys.argv): raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument") if task_name and task_name != sys.argv[arg_index]: logger.warning(f"Overriding task ({task_name}) with command line argument ({sys.argv[arg_index]})") # get task name from function arguments else: if task_name: sys.argv.append("--task") sys.argv.append(task_name) else: raise ValueError("No task name defined. 
Set the task_name parameter or use --task <task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("--num_envs"): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None: logger.warning("Overriding num_envs with command line argument (--num_envs)") # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append("--num_envs") sys.argv.append(str(num_envs)) # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("--headless"): defined = True break # get headless from command line arguments if defined: if headless is not None: logger.warning("Overriding headless with command line argument (--headless)") # get headless from function arguments elif headless is not None: sys.argv.append("--headless") # others command line arguments sys.argv += cli_args # parse arguments parser = argparse.ArgumentParser("Welcome to Orbit: Omniverse Robotics Environments!") parser.add_argument("--headless", action="store_true", default=False, help="Force display off at all times.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment") args = parser.parse_args() # load the most efficient kit configuration in headless mode if args.headless: app_experience = f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.gym.headless.kit" else: app_experience = f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit" # launch the simulator from omni.isaac.kit import SimulationApp # type: ignore config = {"headless": args.headless} simulation_app = SimulationApp(config, experience=app_experience) @atexit.register def close_the_simulator(): simulation_app.close() # import orbit extensions import omni.isaac.contrib_envs # type: ignore import omni.isaac.orbit_envs # type: ignore from omni.isaac.orbit_envs.utils import parse_env_cfg # type: ignore cfg = parse_env_cfg(args.task, use_gpu=not args.cpu, num_envs=args.num_envs) # print config if show_cfg: print(f"\nIsaac Orbit environment ({args.task})") try: _print_cfg(cfg) except AttributeError as e: pass # load environment env = gym.make(args.task, cfg=cfg, headless=args.headless) return env
6,481
Python
37.814371
132
0.636013
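A short sketch for the Isaac Orbit loader above. It assumes an Isaac Orbit (Isaac Sim) installation with the ``EXP_PATH`` environment variable set, and uses "Isaac-Cartpole-v0" purely as an example task id.

from skrl.envs.loaders.torch import load_isaac_orbit_env

# the function parses its own --task/--num_envs/--headless CLI arguments,
# so the equivalent values can also be passed on the command line
env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0", num_envs=512, headless=True)
print(env.action_space)   # a gym.Env, as stated in the docstring above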
Toni-SM/skrl/skrl/envs/loaders/torch/omniverse_isaacgym_envs.py
from typing import Optional, Sequence, Union import os import queue import sys from skrl import logger __all__ = ["load_omniverse_isaacgym_env"] def _omegaconf_to_dict(config) -> dict: """Convert OmegaConf config to dict :param config: The OmegaConf config :type config: OmegaConf.Config :return: The config as dict :rtype: dict """ # return config.to_container(dict) from omegaconf import DictConfig d = {} for k, v in config.items(): d[k] = _omegaconf_to_dict(v) if isinstance(v, DictConfig) else v return d def _print_cfg(d, indent=0) -> None: """Print the environment configuration :param d: The dictionary to print :type d: dict :param indent: The indentation level (default: ``0``) :type indent: int, optional """ for key, value in d.items(): if isinstance(value, dict): _print_cfg(value, indent + 1) else: print(" | " * indent + f" |-- {key}: {value}") def load_omniverse_isaacgym_env(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], omniisaacgymenvs_path: str = "", show_cfg: bool = True, multi_threaded: bool = False, timeout: int = 30) -> Union["VecEnvBase", "VecEnvMT"]: """Load an Omniverse Isaac Gym environment (OIGE) Omniverse Isaac Gym benchmark environments: https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``task=TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: OIGE configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param omniisaacgymenvs_path: The path to the ``omniisaacgymenvs`` directory (default: ``""``). 
If empty, the path will obtained from omniisaacgymenvs package metadata :type omniisaacgymenvs_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :param multi_threaded: Whether to use multi-threaded environment (default: ``False``) :type multi_threaded: bool, optional :param timeout: Seconds to wait for data when queue is empty in multi-threaded environment (default: ``30``) :type timeout: int, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The omniisaacgymenvs package is not installed or the path is wrong :return: Omniverse Isaac Gym environment :rtype: omni.isaac.gym.vec_env.vec_env_base.VecEnvBase or omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT """ import omegaconf import omniisaacgymenvs # type: ignore from hydra._internal.hydra import Hydra from hydra._internal.utils import create_automatic_config_search_path, get_args_parser from hydra.types import RunMode from omegaconf import OmegaConf from omni.isaac.gym.vec_env import TaskStopException, VecEnvBase, VecEnvMT # type: ignore from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT # type: ignore import torch # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("task="): defined = True break # get task name from command line arguments if defined: if task_name and task_name != arg.split("task=")[1].split(" ")[0]: logger.warning("Overriding task name ({}) with command line argument (task={})" \ .format(task_name, arg.split("task=")[1].split(" ")[0])) # get task name from function arguments else: if task_name: sys.argv.append(f"task={task_name}") else: raise ValueError("No task name defined. 
Set task_name parameter or use task=<task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("num_envs="): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None and num_envs != int(arg.split("num_envs=")[1].split(" ")[0]): logger.warning("Overriding num_envs ({}) with command line argument (num_envs={})" \ .format(num_envs, arg.split("num_envs=")[1].split(" ")[0])) # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append(f"num_envs={num_envs}") # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("headless="): defined = True break # get headless from command line arguments if defined: if headless is not None and str(headless).lower() != arg.split("headless=")[1].split(" ")[0].lower(): logger.warning("Overriding headless ({}) with command line argument (headless={})" \ .format(headless, arg.split("headless=")[1].split(" ")[0])) # get headless from function arguments elif headless is not None: sys.argv.append(f"headless={headless}") # others command line arguments sys.argv += cli_args # get omniisaacgymenvs path from omniisaacgymenvs package metadata if omniisaacgymenvs_path == "": if not hasattr(omniisaacgymenvs, "__path__"): raise RuntimeError("omniisaacgymenvs package is not installed") omniisaacgymenvs_path = list(omniisaacgymenvs.__path__)[0] config_path = os.path.join(omniisaacgymenvs_path, "cfg") # set omegaconf resolvers OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower()) OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower()) OmegaConf.register_new_resolver('if', lambda condition, a, b: a if condition else b) OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg) # get hydra config without use @hydra.main config_file = "config" args = get_args_parser().parse_args() search_path = create_automatic_config_search_path(config_file, None, config_path) hydra_object = Hydra.create_main_hydra2(task_name='load_omniisaacgymenv', config_search_path=search_path) config = hydra_object.compose_config(config_file, args.overrides, run_mode=RunMode.RUN) del config.hydra cfg = _omegaconf_to_dict(config) cfg["train"] = {} # print config if show_cfg: print(f"\nOmniverse Isaac Gym environment ({config.task.name})") _print_cfg(cfg) # internal classes class _OmniIsaacGymVecEnv(VecEnvBase): def step(self, actions): actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone() self._task.pre_physics_step(actions) for _ in range(self._task.control_frequency_inv): self._world.step(render=self._render) self.sim_frame_count += 1 observations, rewards, dones, info = self._task.post_physics_step() return {"obs": torch.clamp(observations, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()}, \ rewards.to(self._task.rl_device).clone(), dones.to(self._task.rl_device).clone(), info.copy() def reset(self): self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device) return self.step(actions)[0] class _OmniIsaacGymTrainerMT(TrainerMT): def run(self): pass def stop(self): pass class _OmniIsaacGymVecEnvMT(VecEnvMT): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.action_queue = queue.Queue(1) self.data_queue = queue.Queue(1) def run(self, trainer=None): 
super().run(_OmniIsaacGymTrainerMT() if trainer is None else trainer) def _parse_data(self, data): self._observations = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone() self._rewards = data["rew"].to(self._task.rl_device).clone() self._dones = data["reset"].to(self._task.rl_device).clone() self._info = data["extras"].copy() def step(self, actions): if self._stop: raise TaskStopException() actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).clone() self.send_actions(actions) data = self.get_data() return {"obs": self._observations}, self._rewards, self._dones, self._info def reset(self): self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device) return self.step(actions)[0] def close(self): # end stop signal to main thread self.send_actions(None) self.stop = True # load environment sys.path.append(omniisaacgymenvs_path) from utils.task_util import initialize_task # type: ignore try: if config.multi_gpu: rank = int(os.getenv("LOCAL_RANK", "0")) config.device_id = rank config.rl_device = f"cuda:{rank}" except omegaconf.errors.ConfigAttributeError: logger.warning("Using an older version of OmniIsaacGymEnvs (2022.2.0 or earlier)") enable_viewport = "enable_cameras" in config.task.sim and config.task.sim.enable_cameras if multi_threaded: try: env = _OmniIsaacGymVecEnvMT(headless=config.headless, sim_device=config.device_id, enable_livestream=config.enable_livestream, enable_viewport=enable_viewport) except (TypeError, omegaconf.errors.ConfigAttributeError): logger.warning("Using an older version of Isaac Sim or OmniIsaacGymEnvs (2022.2.0 or earlier)") env = _OmniIsaacGymVecEnvMT(headless=config.headless) # Isaac Sim 2022.2.0 and earlier task = initialize_task(cfg, env, init_sim=False) env.initialize(env.action_queue, env.data_queue, timeout=timeout) else: try: env = _OmniIsaacGymVecEnv(headless=config.headless, sim_device=config.device_id, enable_livestream=config.enable_livestream, enable_viewport=enable_viewport) except (TypeError, omegaconf.errors.ConfigAttributeError): logger.warning("Using an older version of Isaac Sim or OmniIsaacGymEnvs (2022.2.0 or earlier)") env = _OmniIsaacGymVecEnv(headless=config.headless) # Isaac Sim 2022.2.0 and earlier task = initialize_task(cfg, env, init_sim=True) return env
12,134
Python
42.808664
133
0.619829
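A sketch of both the single-threaded and multi-threaded code paths of the loader above; it assumes OmniIsaacGymEnvs and Isaac Sim are installed, and "Cartpole" is only an example task name.

from skrl.envs.loaders.torch import load_omniverse_isaacgym_env

# single-threaded: returns a VecEnvBase-derived environment
env = load_omniverse_isaacgym_env(task_name="Cartpole", num_envs=256, headless=True)

# multi-threaded: returns a VecEnvMT-derived environment whose action/data queues
# are exchanged with a trainer thread (see _OmniIsaacGymVecEnvMT above)
# env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30)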
Toni-SM/skrl/skrl/envs/loaders/jax/bidexhands_envs.py
# since Bi-DexHands environments are implemented on top of PyTorch, the loader is the same from skrl.envs.loaders.torch import load_bidexhands_env
148
Python
36.249991
90
0.810811
Toni-SM/skrl/skrl/envs/loaders/jax/__init__.py
from skrl.envs.loaders.jax.bidexhands_envs import load_bidexhands_env from skrl.envs.loaders.jax.isaac_orbit_envs import load_isaac_orbit_env from skrl.envs.loaders.jax.isaacgym_envs import ( load_isaacgym_env_preview2, load_isaacgym_env_preview3, load_isaacgym_env_preview4 ) from skrl.envs.loaders.jax.omniverse_isaacgym_envs import load_omniverse_isaacgym_env
375
Python
40.777773
85
0.8
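Because these modules only re-export the PyTorch implementations, the JAX-facing loader API is identical; a one-line sketch (the task name is an example):

from skrl.envs.loaders.jax import load_isaacgym_env_preview4   # same function as the torch loader

env = load_isaacgym_env_preview4(task_name="Cartpole")   # "Cartpole" is an example task name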
Toni-SM/skrl/skrl/envs/loaders/jax/isaacgym_envs.py
# since Isaac Gym (preview) environments are implemented on top of PyTorch, the loaders are the same from skrl.envs.loaders.torch import ( # isort:skip load_isaacgym_env_preview2, load_isaacgym_env_preview3, load_isaacgym_env_preview4, )
252
Python
30.624996
100
0.746032
Toni-SM/skrl/skrl/envs/loaders/jax/isaac_orbit_envs.py
# since Isaac Orbit environments are implemented on top of PyTorch, the loader is the same from skrl.envs.loaders.torch import load_isaac_orbit_env
149
Python
36.499991
90
0.805369
Toni-SM/skrl/skrl/envs/loaders/jax/omniverse_isaacgym_envs.py
# since Omniverse Isaac Gym environments are implemented on top of PyTorch, the loader is the same from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
164
Python
40.24999
98
0.817073
Toni-SM/skrl/skrl/envs/wrappers/torch/gym_envs.py
from typing import Any, Optional, Tuple import gym from packaging import version import numpy as np import torch from skrl import logger from skrl.envs.wrappers.torch.base import Wrapper class GymWrapper(Wrapper): def __init__(self, env: Any) -> None: """OpenAI Gym environment wrapper :param env: The environment to wrap :type env: Any supported OpenAI Gym environment """ super().__init__(env) self._vectorized = False try: if isinstance(env, gym.vector.SyncVectorEnv) or isinstance(env, gym.vector.AsyncVectorEnv): self._vectorized = True self._reset_once = True self._obs_tensor = None self._info_dict = None except Exception as e: logger.warning(f"Failed to check for a vectorized environment: {e}") self._deprecated_api = version.parse(gym.__version__) < version.parse("0.25.0") if self._deprecated_api: logger.warning(f"Using a deprecated version of OpenAI Gym's API: {gym.__version__}") @property def state_space(self) -> gym.Space: """State space An alias for the ``observation_space`` property """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def observation_space(self) -> gym.Space: """Observation space """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def action_space(self) -> gym.Space: """Action space """ if self._vectorized: return self._env.single_action_space return self._env.action_space def _observation_to_tensor(self, observation: Any, space: Optional[gym.Space] = None) -> torch.Tensor: """Convert the OpenAI Gym observation to a flat tensor :param observation: The OpenAI Gym observation to convert to a tensor :type observation: Any supported OpenAI Gym observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ observation_space = self._env.observation_space if self._vectorized else self.observation_space space = space if space is not None else observation_space if self._vectorized and isinstance(space, gym.spaces.MultiDiscrete): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, int): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, np.ndarray): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gym.spaces.Discrete): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gym.spaces.Box): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gym.spaces.Dict): tmp = torch.cat([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], dim=-1).view(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: torch.Tensor) -> Any: """Convert the action to the OpenAI Gym expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the OpenAI Gym format :rtype: Any supported OpenAI Gym action space """ space = self._env.action_space if self._vectorized else self.action_space if self._vectorized: if isinstance(space, gym.spaces.MultiDiscrete): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) elif isinstance(space, gym.spaces.Tuple): if isinstance(space[0], gym.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(space.shape) elif isinstance(space[0], gym.spaces.Discrete): return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(-1) elif isinstance(space, gym.spaces.Discrete): return actions.item() elif isinstance(space, gym.spaces.MultiDiscrete): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) elif isinstance(space, gym.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ if self._deprecated_api: observation, reward, terminated, info = self._env.step(self._tensor_to_action(actions)) # truncated: https://gymnasium.farama.org/tutorials/handling_time_limits if type(info) is list: truncated = np.array([d.get("TimeLimit.truncated", False) for d in info], dtype=terminated.dtype) terminated *= np.logical_not(truncated) else: truncated = info.get("TimeLimit.truncated", False) if truncated: terminated = False else: observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions)) # convert response to torch observation = self._observation_to_tensor(observation) reward = torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1) terminated = torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) truncated = torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) # save observation and info for vectorized envs if self._vectorized: self._obs_tensor = observation self._info_dict = info return observation, reward, terminated, truncated, info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: Observation, info :rtype: torch.Tensor and any other info """ # handle vectorized envs if self._vectorized: if not self._reset_once: return self._obs_tensor, self._info_dict self._reset_once = False # reset the env/envs if self._deprecated_api: observation = self._env.reset() info = {} else: observation, info = self._env.reset() return self._observation_to_tensor(observation), info def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
7,739
Python
40.612903
113
0.625275
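A minimal sketch of the wrapper above applied to a standard Gym environment; "CartPole-v1" is used only as an example id.

import gym
import torch

from skrl.envs.wrappers.torch import wrap_env

env = wrap_env(gym.make("CartPole-v1"), wrapper="gym")   # -> GymWrapper

observation, info = env.reset()                          # torch.Tensor with shape (num_envs, -1)
# actions are torch tensors; for a Discrete space the wrapper converts them with .item()
action = torch.randint(env.action_space.n, (1, 1), device=env.device)
observation, reward, terminated, truncated, info = env.step(action)
env.close()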
Toni-SM/skrl/skrl/envs/wrappers/torch/bidexhands_envs.py
from typing import Any, Mapping, Sequence, Tuple import gym import torch from skrl.envs.wrappers.torch.base import MultiAgentEnvWrapper class BiDexHandsWrapper(MultiAgentEnvWrapper): def __init__(self, env: Any) -> None: """Bi-DexHands wrapper :param env: The environment to wrap :type env: Any supported Bi-DexHands environment """ super().__init__(env) self._reset_once = True self._obs_buf = None self._shared_obs_buf = None self.possible_agents = [f"agent_{i}" for i in range(self.num_agents)] @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ return self.possible_agents @property def observation_spaces(self) -> Mapping[str, gym.Space]: """Observation spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.observation_space)} @property def action_spaces(self) -> Mapping[str, gym.Space]: """Action spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.action_space)} @property def shared_observation_spaces(self) -> Mapping[str, gym.Space]: """Shared observation spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.share_observation_space)} def step(self, actions: Mapping[str, torch.Tensor]) -> \ Tuple[Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dictionary of torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of dictionaries torch.Tensor and any other info """ actions = [actions[uid] for uid in self.possible_agents] obs_buf, shared_obs_buf, reward_buf, terminated_buf, info, _ = self._env.step(actions) self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} reward = {uid: reward_buf[:,i].view(-1, 1) for i, uid in enumerate(self.possible_agents)} terminated = {uid: terminated_buf[:,i].view(-1, 1) for i, uid in enumerate(self.possible_agents)} truncated = {uid: torch.zeros_like(value) for uid, value in terminated.items()} info = {"shared_states": self._shared_obs_buf} return self._obs_buf, reward, terminated, truncated, info def reset(self) -> Tuple[Mapping[str, torch.Tensor], Mapping[str, Any]]: """Reset the environment :return: Observation, info :rtype: tuple of dictionaries of torch.Tensor and any other info """ if self._reset_once: obs_buf, shared_obs_buf, _ = self._env.reset() self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._reset_once = False return self._obs_buf, {"shared_states": self._shared_obs_buf}
3,394
Python
38.476744
107
0.629641
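A sketch of one step through the multi-agent wrapper above. Creating the underlying environment is specific to the Bi-DexHands package and is omitted, so ``bidexhands_env`` is a placeholder for an already-constructed environment; Box action spaces are also an assumption.

import torch

from skrl.envs.wrappers.torch import wrap_env

env = wrap_env(bidexhands_env, wrapper="bidexhands")   # bidexhands_env: placeholder, see note above

observations, infos = env.reset()        # dicts keyed by "agent_0", "agent_1", ...
actions = {uid: torch.zeros((env.num_envs, env.action_spaces[uid].shape[0]), device=env.device)
           for uid in env.agents}
observations, rewards, terminated, truncated, infos = env.step(actions)
shared_states = infos["shared_states"]   # per-agent shared observations, as built in step()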
Toni-SM/skrl/skrl/envs/wrappers/torch/robosuite_envs.py
from typing import Any, Optional, Tuple import collections import gym import numpy as np import torch from skrl.envs.wrappers.torch.base import Wrapper class RobosuiteWrapper(Wrapper): def __init__(self, env: Any) -> None: """Robosuite environment wrapper :param env: The environment to wrap :type env: Any supported robosuite environment """ super().__init__(env) # observation and action spaces self._observation_space = self._spec_to_space(self._env.observation_spec()) self._action_space = self._spec_to_space(self._env.action_spec) @property def state_space(self) -> gym.Space: """State space An alias for the ``observation_space`` property """ return self._observation_space @property def observation_space(self) -> gym.Space: """Observation space """ return self._observation_space @property def action_space(self) -> gym.Space: """Action space """ return self._action_space def _spec_to_space(self, spec: Any) -> gym.Space: """Convert the robosuite spec to a Gym space :param spec: The robosuite spec to convert :type spec: Any supported robosuite spec :raises: ValueError if the spec type is not supported :return: The Gym space :rtype: gym.Space """ if type(spec) is tuple: return gym.spaces.Box(shape=spec[0].shape, dtype=np.float32, low=spec[0], high=spec[1]) elif isinstance(spec, np.ndarray): return gym.spaces.Box(shape=spec.shape, dtype=np.float32, low=np.full(spec.shape, float("-inf")), high=np.full(spec.shape, float("inf"))) elif isinstance(spec, collections.OrderedDict): return gym.spaces.Dict({k: self._spec_to_space(v) for k, v in spec.items()}) else: raise ValueError(f"Spec type {type(spec)} not supported. Please report this issue") def _observation_to_tensor(self, observation: Any, spec: Optional[Any] = None) -> torch.Tensor: """Convert the observation to a flat tensor :param observation: The observation to convert to a tensor :type observation: Any supported observation :raises: ValueError if the observation spec type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ spec = spec if spec is not None else self._env.observation_spec() if isinstance(spec, np.ndarray): return torch.tensor(observation, device=self.device, dtype=torch.float32).reshape(self.num_envs, -1) elif isinstance(spec, collections.OrderedDict): return torch.cat([self._observation_to_tensor(observation[k], spec[k]) \ for k in sorted(spec.keys())], dim=-1).reshape(self.num_envs, -1) else: raise ValueError(f"Observation spec type {type(spec)} not supported. Please report this issue") def _tensor_to_action(self, actions: torch.Tensor) -> Any: """Convert the action to the robosuite expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the robosuite expected format :rtype: Any supported robosuite action """ spec = self._env.action_spec if type(spec) is tuple: return np.array(actions.cpu().numpy(), dtype=np.float32).reshape(spec[0].shape) else: raise ValueError(f"Action spec type {type(spec)} not supported. 
Please report this issue") def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ observation, reward, terminated, info = self._env.step(self._tensor_to_action(actions)) truncated = False info = {} # convert response to torch return self._observation_to_tensor(observation), \ torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1), \ torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \ torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \ info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: The state of the environment :rtype: torch.Tensor """ observation = self._env.reset() return self._observation_to_tensor(observation), {} def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
5,343
Python
35.108108
112
0.600786
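A sketch of the wrapper above on a robosuite environment. It assumes robosuite is installed; the "Lift" task and "Panda" robot are example names and the keyword arguments are illustrative.

import robosuite
import torch

from skrl.envs.wrappers.torch import wrap_env

# example robosuite environment (task/robot names are illustrative)
env = wrap_env(robosuite.make("Lift", robots="Panda", has_renderer=False), wrapper="robosuite")

observation, info = env.reset()
action = torch.zeros((1, env.action_space.shape[0]), device=env.device)
observation, reward, terminated, truncated, info = env.step(action)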
Toni-SM/skrl/skrl/envs/wrappers/torch/base.py
from typing import Any, Mapping, Sequence, Tuple import gym import torch class Wrapper(object): def __init__(self, env: Any) -> None: """Base wrapper class for RL environments :param env: The environment to wrap :type env: Any supported RL environment """ self._env = env # device (faster than @property) if hasattr(self._env, "device"): self.device = torch.device(self._env.device) else: self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # spaces try: self._action_space = self._env.single_action_space self._observation_space = self._env.single_observation_space except AttributeError: self._action_space = self._env.action_space self._observation_space = self._env.observation_space self._state_space = self._env.state_space if hasattr(self._env, "state_space") else self._observation_space def __getattr__(self, key: str) -> Any: """Get an attribute from the wrapped environment :param key: The attribute name :type key: str :raises AttributeError: If the attribute does not exist :return: The attribute value :rtype: Any """ if hasattr(self._env, key): return getattr(self._env, key) raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'") def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :raises NotImplementedError: Not implemented :return: Observation, info :rtype: torch.Tensor and any other info """ raise NotImplementedError def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :raises NotImplementedError: Not implemented :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ raise NotImplementedError def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass @property def num_envs(self) -> int: """Number of environments If the wrapped environment does not have the ``num_envs`` property, it will be set to 1 """ return self._env.num_envs if hasattr(self._env, "num_envs") else 1 @property def num_agents(self) -> int: """Number of agents If the wrapped environment does not have the ``num_agents`` property, it will be set to 1 """ return self._env.num_agents if hasattr(self._env, "num_agents") else 1 @property def state_space(self) -> gym.Space: """State space If the wrapped environment does not have the ``state_space`` property, the value of the ``observation_space`` property will be used """ return self._state_space @property def observation_space(self) -> gym.Space: """Observation space """ return self._observation_space @property def action_space(self) -> gym.Space: """Action space """ return self._action_space class MultiAgentEnvWrapper(object): def __init__(self, env: Any) -> None: """Base wrapper class for multi-agent environments :param env: The multi-agent environment to wrap :type env: Any supported multi-agent environment """ self._env = env # device (faster than @property) if hasattr(self._env, "device"): self.device = torch.device(self._env.device) else: self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.possible_agents = [] def __getattr__(self, key: str) -> Any: """Get an attribute from the wrapped environment :param key: The attribute name :type key: str :raises AttributeError: If the attribute does not exist :return: The attribute value :rtype: Any """ if 
hasattr(self._env, key): return getattr(self._env, key) raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'") def reset(self) -> Tuple[Mapping[str, torch.Tensor], Mapping[str, Any]]: """Reset the environment :raises NotImplementedError: Not implemented :return: Observation, info :rtype: tuple of dictionaries of torch.Tensor and any other info """ raise NotImplementedError def step(self, actions: Mapping[str, torch.Tensor]) -> \ Tuple[Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dictionary of torch.Tensor :raises NotImplementedError: Not implemented :return: Observation, reward, terminated, truncated, info :rtype: tuple of dictionaries of torch.Tensor and any other info """ raise NotImplementedError def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass @property def num_envs(self) -> int: """Number of environments If the wrapped environment does not have the ``num_envs`` property, it will be set to 1 """ return self._env.num_envs if hasattr(self._env, "num_envs") else 1 @property def num_agents(self) -> int: """Number of agents If the wrapped environment does not have the ``num_agents`` property, it will be set to 1 """ return self._env.num_agents if hasattr(self._env, "num_agents") else 1 @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ raise NotImplementedError @property def state_spaces(self) -> Mapping[str, gym.Space]: """State spaces An alias for the ``observation_spaces`` property """ return self.observation_spaces @property def observation_spaces(self) -> Mapping[str, gym.Space]: """Observation spaces """ raise NotImplementedError @property def action_spaces(self) -> Mapping[str, gym.Space]: """Action spaces """ raise NotImplementedError @property def shared_state_spaces(self) -> Mapping[str, gym.Space]: """Shared state spaces An alias for the ``shared_observation_spaces`` property """ return self.shared_observation_spaces @property def shared_observation_spaces(self) -> Mapping[str, gym.Space]: """Shared observation spaces """ raise NotImplementedError def state_space(self, agent: str) -> gym.Space: """State space :param agent: Name of the agent :type agent: str :return: The state space for the specified agent :rtype: gym.Space """ return self.state_spaces[agent] def observation_space(self, agent: str) -> gym.Space: """Observation space :param agent: Name of the agent :type agent: str :return: The observation space for the specified agent :rtype: gym.Space """ return self.observation_spaces[agent] def action_space(self, agent: str) -> gym.Space: """Action space :param agent: Name of the agent :type agent: str :return: The action space for the specified agent :rtype: gym.Space """ return self.action_spaces[agent] def shared_state_space(self, agent: str) -> gym.Space: """Shared state space :param agent: Name of the agent :type agent: str :return: The shared state space for the specified agent :rtype: gym.Space """ return self.shared_state_spaces[agent] def shared_observation_space(self, agent: str) -> gym.Space: """Shared observation space :param agent: Name of the agent :type agent: str :return: The shared observation space for the specified agent :rtype: 
gym.Space """ return self.shared_observation_spaces[agent]
8,836
Python
28.85473
117
0.601517
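The base classes above are the extension point for new environment types; the sketch below implements a custom single-agent wrapper on top of ``Wrapper``, assuming a hypothetical simulator whose reset()/step() exchange NumPy arrays.

from typing import Any, Tuple

import torch

from skrl.envs.wrappers.torch.base import Wrapper


class MySimWrapper(Wrapper):
    """Wrapper for a hypothetical simulator exposing reset()/step() with NumPy arrays"""
    def reset(self) -> Tuple[torch.Tensor, Any]:
        observation = self._env.reset()   # assumed to return a NumPy array
        return torch.as_tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1), {}

    def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
        # assumed simulator API: step(ndarray) -> (observation, reward, terminated, info)
        observation, reward, terminated, info = self._env.step(actions.cpu().numpy())
        truncated = False
        return (torch.as_tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1),
                torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1),
                torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1),
                torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1),
                info)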
Toni-SM/skrl/skrl/envs/wrappers/torch/__init__.py
from typing import Any, Union import gym import gymnasium from skrl import logger from skrl.envs.wrappers.torch.base import MultiAgentEnvWrapper, Wrapper from skrl.envs.wrappers.torch.bidexhands_envs import BiDexHandsWrapper from skrl.envs.wrappers.torch.deepmind_envs import DeepMindWrapper from skrl.envs.wrappers.torch.gym_envs import GymWrapper from skrl.envs.wrappers.torch.gymnasium_envs import GymnasiumWrapper from skrl.envs.wrappers.torch.isaac_orbit_envs import IsaacOrbitWrapper from skrl.envs.wrappers.torch.isaacgym_envs import IsaacGymPreview2Wrapper, IsaacGymPreview3Wrapper from skrl.envs.wrappers.torch.omniverse_isaacgym_envs import OmniverseIsaacGymWrapper from skrl.envs.wrappers.torch.pettingzoo_envs import PettingZooWrapper from skrl.envs.wrappers.torch.robosuite_envs import RobosuiteWrapper __all__ = ["wrap_env", "Wrapper", "MultiAgentEnvWrapper"] def wrap_env(env: Any, wrapper: str = "auto", verbose: bool = True) -> Union[Wrapper, MultiAgentEnvWrapper]: """Wrap an environment to use a common interface Example:: >>> from skrl.envs.wrappers.torch import wrap_env >>> >>> # assuming that there is an environment called "env" >>> env = wrap_env(env) :param env: The environment to be wrapped :type env: gym.Env, gymnasium.Env, dm_env.Environment or VecTask :param wrapper: The type of wrapper to use (default: ``"auto"``). If ``"auto"``, the wrapper will be automatically selected based on the environment class. The supported wrappers are described in the following table: +--------------------+-------------------------+ |Environment |Wrapper tag | +====================+=========================+ |OpenAI Gym |``"gym"`` | +--------------------+-------------------------+ |Gymnasium |``"gymnasium"`` | +--------------------+-------------------------+ |Petting Zoo |``"pettingzoo"`` | +--------------------+-------------------------+ |DeepMind |``"dm"`` | +--------------------+-------------------------+ |Robosuite |``"robosuite"`` | +--------------------+-------------------------+ |Bi-DexHands |``"bidexhands"`` | +--------------------+-------------------------+ |Isaac Gym preview 2 |``"isaacgym-preview2"`` | +--------------------+-------------------------+ |Isaac Gym preview 3 |``"isaacgym-preview3"`` | +--------------------+-------------------------+ |Isaac Gym preview 4 |``"isaacgym-preview4"`` | +--------------------+-------------------------+ |Omniverse Isaac Gym |``"omniverse-isaacgym"`` | +--------------------+-------------------------+ |Isaac Sim (orbit) |``"isaac-orbit"`` | +--------------------+-------------------------+ :type wrapper: str, optional :param verbose: Whether to print the wrapper type (default: ``True``) :type verbose: bool, optional :raises ValueError: Unknown wrapper type :return: Wrapped environment :rtype: Wrapper or MultiAgentEnvWrapper """ if verbose: logger.info("Environment class: {}".format(", ".join([str(base).replace("<class '", "").replace("'>", "") \ for base in env.__class__.__bases__]))) if wrapper == "auto": base_classes = [str(base) for base in env.__class__.__bases__] if "<class 'omni.isaac.gym.vec_env.vec_env_base.VecEnvBase'>" in base_classes or \ "<class 'omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT'>" in base_classes: if verbose: logger.info("Environment wrapper: Omniverse Isaac Gym") return OmniverseIsaacGymWrapper(env) elif isinstance(env, gym.core.Env) or isinstance(env, gym.core.Wrapper): # isaac-orbit if hasattr(env, "sim") and hasattr(env, "env_ns"): if verbose: logger.info("Environment wrapper: Isaac Orbit") return IsaacOrbitWrapper(env) # gym 
if verbose: logger.info("Environment wrapper: Gym") return GymWrapper(env) elif isinstance(env, gymnasium.core.Env) or isinstance(env, gymnasium.core.Wrapper): if verbose: logger.info("Environment wrapper: Gymnasium") return GymnasiumWrapper(env) elif "<class 'pettingzoo.utils.env" in base_classes[0] or "<class 'pettingzoo.utils.wrappers" in base_classes[0]: if verbose: logger.info("Environment wrapper: Petting Zoo") return PettingZooWrapper(env) elif "<class 'dm_env._environment.Environment'>" in base_classes: if verbose: logger.info("Environment wrapper: DeepMind") return DeepMindWrapper(env) elif "<class 'robosuite.environments." in base_classes[0]: if verbose: logger.info("Environment wrapper: Robosuite") return RobosuiteWrapper(env) elif "<class 'rlgpu.tasks.base.vec_task.VecTask'>" in base_classes: if verbose: logger.info("Environment wrapper: Isaac Gym (preview 2)") return IsaacGymPreview2Wrapper(env) if verbose: logger.info("Environment wrapper: Isaac Gym (preview 3/4)") return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3 elif wrapper == "gym": if verbose: logger.info("Environment wrapper: Gym") return GymWrapper(env) elif wrapper == "gymnasium": if verbose: logger.info("Environment wrapper: gymnasium") return GymnasiumWrapper(env) elif wrapper == "pettingzoo": if verbose: logger.info("Environment wrapper: Petting Zoo") return PettingZooWrapper(env) elif wrapper == "dm": if verbose: logger.info("Environment wrapper: DeepMind") return DeepMindWrapper(env) elif wrapper == "robosuite": if verbose: logger.info("Environment wrapper: Robosuite") return RobosuiteWrapper(env) elif wrapper == "bidexhands": if verbose: logger.info("Environment wrapper: Bi-DexHands") return BiDexHandsWrapper(env) elif wrapper == "isaacgym-preview2": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 2)") return IsaacGymPreview2Wrapper(env) elif wrapper == "isaacgym-preview3": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 3)") return IsaacGymPreview3Wrapper(env) elif wrapper == "isaacgym-preview4": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 4)") return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3 elif wrapper == "omniverse-isaacgym": if verbose: logger.info("Environment wrapper: Omniverse Isaac Gym") return OmniverseIsaacGymWrapper(env) elif wrapper == "isaac-orbit": if verbose: logger.info("Environment wrapper: Isaac Orbit") return IsaacOrbitWrapper(env) else: raise ValueError(f"Unknown wrapper type: {wrapper}")
7,723
Python
46.975155
121
0.537356
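A short sketch of the two ways to use ``wrap_env``: automatic detection from the environment's base classes, or explicit selection with one of the wrapper tags listed in the docstring table above. "Pendulum-v1" is an example Gymnasium id.

import gymnasium

from skrl.envs.wrappers.torch import wrap_env

# automatic detection based on the environment's base classes
env = wrap_env(gymnasium.make("Pendulum-v1"))                      # -> GymnasiumWrapper

# or select the wrapper explicitly by its tag
env = wrap_env(gymnasium.make("Pendulum-v1"), wrapper="gymnasium", verbose=False)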
Toni-SM/skrl/skrl/envs/wrappers/torch/isaacgym_envs.py
from typing import Any, Tuple import torch from skrl.envs.wrappers.torch.base import Wrapper class IsaacGymPreview2Wrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Gym environment (preview 2) wrapper :param env: The environment to wrap :type env: Any supported Isaac Gym environment (preview 2) environment """ super().__init__(env) self._reset_once = True self._obs_buf = None def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ self._obs_buf, reward, terminated, info = self._env.step(actions) truncated = info["time_outs"] if "time_outs" in info else torch.zeros_like(terminated) return self._obs_buf, reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: Observation, info :rtype: torch.Tensor and any other info """ if self._reset_once: self._obs_buf = self._env.reset() self._reset_once = False return self._obs_buf, {} def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass class IsaacGymPreview3Wrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Gym environment (preview 3) wrapper :param env: The environment to wrap :type env: Any supported Isaac Gym environment (preview 3) environment """ super().__init__(env) self._reset_once = True self._obs_dict = None def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ self._obs_dict, reward, terminated, info = self._env.step(actions) truncated = info["time_outs"] if "time_outs" in info else torch.zeros_like(terminated) return self._obs_dict["obs"], reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: Observation, info :rtype: torch.Tensor and any other info """ if self._reset_once: self._obs_dict = self._env.reset() self._reset_once = False return self._obs_dict["obs"], {} def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass
3,182
Python
30.83
112
0.595223
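A sketch combining the preview-4 loader with the wrapper above for a random-action rollout; "Cartpole" is an example task name and a Box action space is assumed.

import torch

from skrl.envs.loaders.torch import load_isaacgym_env_preview4
from skrl.envs.wrappers.torch import wrap_env

env = wrap_env(load_isaacgym_env_preview4(task_name="Cartpole"), wrapper="isaacgym-preview4")

observations, infos = env.reset()
for _ in range(100):
    # random actions in [-1, 1] (Box action space assumed)
    actions = 2 * torch.rand((env.num_envs, env.action_space.shape[0]), device=env.device) - 1
    observations, rewards, terminated, truncated, infos = env.step(actions)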
Toni-SM/skrl/skrl/envs/wrappers/torch/isaac_orbit_envs.py
from typing import Any, Tuple import torch from skrl.envs.wrappers.torch.base import Wrapper class IsaacOrbitWrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Orbit environment wrapper :param env: The environment to wrap :type env: Any supported Isaac Orbit environment """ super().__init__(env) self._reset_once = True self._obs_dict = None def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ self._obs_dict, reward, terminated, truncated, info = self._env.step(actions) return self._obs_dict["policy"], reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: Observation, info :rtype: torch.Tensor and any other info """ if self._reset_once: self._obs_dict, info = self._env.reset() self._reset_once = False return self._obs_dict["policy"], info def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ self._env.close()
1,553
Python
28.884615
112
0.597553
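A sketch pairing the Isaac Orbit loader with the wrapper above; "Isaac-Cartpole-v0" is an assumed example task id.

from skrl.envs.loaders.torch import load_isaac_orbit_env
from skrl.envs.wrappers.torch import wrap_env

env = wrap_env(load_isaac_orbit_env(task_name="Isaac-Cartpole-v0"), wrapper="isaac-orbit")
observations, infos = env.reset()   # the wrapper returns the "policy" observation group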
Toni-SM/skrl/skrl/envs/wrappers/torch/gymnasium_envs.py
from typing import Any, Optional, Tuple import gymnasium import numpy as np import torch from skrl import logger from skrl.envs.wrappers.torch.base import Wrapper class GymnasiumWrapper(Wrapper): def __init__(self, env: Any) -> None: """Gymnasium environment wrapper :param env: The environment to wrap :type env: Any supported Gymnasium environment """ super().__init__(env) self._vectorized = False try: if isinstance(env, gymnasium.vector.SyncVectorEnv) or isinstance(env, gymnasium.vector.AsyncVectorEnv): self._vectorized = True self._reset_once = True self._obs_tensor = None self._info_dict = None except Exception as e: logger.warning(f"Failed to check for a vectorized environment: {e}") @property def state_space(self) -> gymnasium.Space: """State space An alias for the ``observation_space`` property """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def observation_space(self) -> gymnasium.Space: """Observation space """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def action_space(self) -> gymnasium.Space: """Action space """ if self._vectorized: return self._env.single_action_space return self._env.action_space def _observation_to_tensor(self, observation: Any, space: Optional[gymnasium.Space] = None) -> torch.Tensor: """Convert the Gymnasium observation to a flat tensor :param observation: The Gymnasium observation to convert to a tensor :type observation: Any supported Gymnasium observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ observation_space = self._env.observation_space if self._vectorized else self.observation_space space = space if space is not None else observation_space if self._vectorized and isinstance(space, gymnasium.spaces.MultiDiscrete): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, int): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, np.ndarray): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Discrete): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Box): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Dict): tmp = torch.cat([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], dim=-1).view(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: torch.Tensor) -> Any: """Convert the action to the Gymnasium expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the Gymnasium format :rtype: Any supported Gymnasium action space """ space = self._env.action_space if self._vectorized else self.action_space if self._vectorized: if isinstance(space, gymnasium.spaces.MultiDiscrete): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) elif isinstance(space, gymnasium.spaces.Tuple): if isinstance(space[0], gymnasium.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(space.shape) elif isinstance(space[0], gymnasium.spaces.Discrete): return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(-1) if isinstance(space, gymnasium.spaces.Discrete): return actions.item() elif isinstance(space, gymnasium.spaces.MultiDiscrete): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) elif isinstance(space, gymnasium.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions)) # convert response to torch observation = self._observation_to_tensor(observation) reward = torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1) terminated = torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) truncated = torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) # save observation and info for vectorized envs if self._vectorized: self._obs_tensor = observation self._info_dict = info return observation, reward, terminated, truncated, info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: Observation, info :rtype: torch.Tensor and any other info """ # handle vectorized envs if self._vectorized: if not self._reset_once: return self._obs_tensor, self._info_dict self._reset_once = False # reset the env/envs observation, info = self._env.reset() return self._observation_to_tensor(observation), info def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
6,882
Python
40.463855
115
0.639494
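A short usage sketch for the torch GymnasiumWrapper above. It assumes `gymnasium`, `torch` and `skrl` are installed, that the torch wrappers package exposes a `wrap_env` helper analogous to the jax one further down in this listing, and that `CartPole-v1` is available; all of these are assumptions, not requirements of the file itself.

```python
import gymnasium
import torch

from skrl.envs.wrappers.torch import wrap_env

# auto-detection picks the GymnasiumWrapper for a gymnasium.Env
env = wrap_env(gymnasium.make("CartPole-v1"))

observation, info = env.reset()      # torch.Tensor of shape (num_envs, obs_dim)
action = torch.tensor([[0]])         # Discrete action, one row for the single env
observation, reward, terminated, truncated, info = env.step(action)
print(observation.shape, reward.shape, terminated.shape)  # (1, 4) (1, 1) (1, 1)
env.close()
```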
Toni-SM/skrl/skrl/envs/wrappers/torch/pettingzoo_envs.py
from typing import Any, Mapping, Sequence, Tuple import collections import gymnasium import numpy as np import torch from skrl.envs.wrappers.torch.base import MultiAgentEnvWrapper class PettingZooWrapper(MultiAgentEnvWrapper): def __init__(self, env: Any) -> None: """PettingZoo (parallel) environment wrapper :param env: The environment to wrap :type env: Any supported PettingZoo (parallel) environment """ super().__init__(env) self.possible_agents = self._env.possible_agents self._shared_observation_space = self._compute_shared_observation_space(self._env.observation_spaces) def _compute_shared_observation_space(self, observation_spaces): space = next(iter(observation_spaces.values())) shape = (len(self.possible_agents),) + space.shape return gymnasium.spaces.Box(low=np.stack([space.low for _ in self.possible_agents], axis=0), high=np.stack([space.high for _ in self.possible_agents], axis=0), dtype=space.dtype, shape=shape) @property def num_agents(self) -> int: """Number of agents """ return len(self.possible_agents) @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ return self._env.agents @property def observation_spaces(self) -> Mapping[str, gymnasium.Space]: """Observation spaces """ return {uid: self._env.observation_space(uid) for uid in self.possible_agents} @property def action_spaces(self) -> Mapping[str, gymnasium.Space]: """Action spaces """ return {uid: self._env.action_space(uid) for uid in self.possible_agents} @property def shared_observation_spaces(self) -> Mapping[str, gymnasium.Space]: """Shared observation spaces """ return {uid: self._shared_observation_space for uid in self.possible_agents} def _observation_to_tensor(self, observation: Any, space: gymnasium.Space) -> torch.Tensor: """Convert the Gymnasium observation to a flat tensor :param observation: The Gymnasium observation to convert to a tensor :type observation: Any supported Gymnasium observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ if isinstance(observation, int): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, np.ndarray): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Discrete): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Box): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Dict): tmp = torch.cat([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], dim=-1).view(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: torch.Tensor, space: gymnasium.Space) -> Any: """Convert the action to the Gymnasium expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the Gymnasium format :rtype: Any supported Gymnasium action space """ if isinstance(space, gymnasium.spaces.Discrete): return actions.item() elif isinstance(space, gymnasium.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: Mapping[str, torch.Tensor]) -> \ Tuple[Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dictionary of torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of dictionaries torch.Tensor and any other info """ actions = {uid: self._tensor_to_action(action, self._env.action_space(uid)) for uid, action in actions.items()} observations, rewards, terminated, truncated, infos = self._env.step(actions) # build shared observation shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0) shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space) infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents} # convert response to torch observations = {uid: self._observation_to_tensor(value, self._env.observation_space(uid)) for uid, value in observations.items()} rewards = {uid: torch.tensor(value, device=self.device, dtype=torch.float32).view(self.num_envs, -1) for uid, value in rewards.items()} terminated = {uid: torch.tensor(value, device=self.device, dtype=torch.bool).view(self.num_envs, -1) for uid, value in terminated.items()} truncated = {uid: torch.tensor(value, device=self.device, dtype=torch.bool).view(self.num_envs, -1) for uid, value in truncated.items()} return observations, rewards, terminated, truncated, infos def reset(self) -> Tuple[Mapping[str, torch.Tensor], Mapping[str, Any]]: """Reset the environment :return: Observation, info :rtype: tuple of dictionaries of torch.Tensor and any other info """ outputs = self._env.reset() if isinstance(outputs, collections.abc.Mapping): observations = outputs infos = {uid: {} for uid in self.possible_agents} else: observations, infos = outputs # build shared observation shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0) shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space) infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents} # convert response to torch observations = {uid: self._observation_to_tensor(observation, self._env.observation_space(uid)) for uid, observation in observations.items()} return observations, infos def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
7,391
Python
44.07317
149
0.652686
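The shared observation space built by `_compute_shared_observation_space` above simply stacks each agent's Box bounds along a new leading axis, one row per possible agent. A standalone sketch with a made-up three-agent Box space (only `gymnasium` and `numpy` required):

```python
import gymnasium
import numpy as np

possible_agents = ["agent_0", "agent_1", "agent_2"]
per_agent_space = gymnasium.spaces.Box(low=-1.0, high=1.0, shape=(4,), dtype=np.float32)

# stack the per-agent bounds -> one shared Box of shape (num_agents, obs_dim)
shared_space = gymnasium.spaces.Box(
    low=np.stack([per_agent_space.low for _ in possible_agents], axis=0),
    high=np.stack([per_agent_space.high for _ in possible_agents], axis=0),
    dtype=per_agent_space.dtype,
    shape=(len(possible_agents),) + per_agent_space.shape,
)
print(shared_space)  # Box(-1.0, 1.0, (3, 4), float32)
```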
Toni-SM/skrl/skrl/envs/wrappers/torch/omniverse_isaacgym_envs.py
from typing import Any, Optional, Tuple

import torch

from skrl.envs.wrappers.torch.base import Wrapper


class OmniverseIsaacGymWrapper(Wrapper):
    def __init__(self, env: Any) -> None:
        """Omniverse Isaac Gym environment wrapper

        :param env: The environment to wrap
        :type env: Any supported Omniverse Isaac Gym environment
        """
        super().__init__(env)

        self._reset_once = True
        self._obs_dict = None

    def run(self, trainer: Optional["omni.isaac.gym.vec_env.vec_env_mt.TrainerMT"] = None) -> None:
        """Run the simulation in the main thread

        This method is valid only for the Omniverse Isaac Gym multi-threaded environments

        :param trainer: Trainer which should implement a ``run`` method that initiates the RL loop on a new thread
        :type trainer: omni.isaac.gym.vec_env.vec_env_mt.TrainerMT, optional
        """
        self._env.run(trainer)

    def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
        """Perform a step in the environment

        :param actions: The actions to perform
        :type actions: torch.Tensor

        :return: Observation, reward, terminated, truncated, info
        :rtype: tuple of torch.Tensor and any other info
        """
        self._obs_dict, reward, terminated, info = self._env.step(actions)
        truncated = info["time_outs"] if "time_outs" in info else torch.zeros_like(terminated)
        return self._obs_dict["obs"], reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info

    def reset(self) -> Tuple[torch.Tensor, Any]:
        """Reset the environment

        :return: Observation, info
        :rtype: torch.Tensor and any other info
        """
        if self._reset_once:
            self._obs_dict = self._env.reset()
            self._reset_once = False
        return self._obs_dict["obs"], {}

    def render(self, *args, **kwargs) -> None:
        """Render the environment
        """
        pass

    def close(self) -> None:
        """Close the environment
        """
        self._env.close()
2,133
Python
32.873015
114
0.619316
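A real Omniverse Isaac Gym environment cannot be created outside Isaac Sim, so the sketch below exercises the wrapper with a hand-written stub that mimics the interface it consumes (dict observations under `"obs"`, optional `"time_outs"` in info). The stub class, its shapes and its spaces are all invented for illustration; only `skrl`, `torch` and `gymnasium` are assumed to be installed.

```python
import gymnasium
import torch

from skrl.envs.wrappers.torch.omniverse_isaacgym_envs import OmniverseIsaacGymWrapper


class FakeOmniverseEnv:
    """Stand-in exposing the attributes the base Wrapper reads."""
    device = "cpu"
    num_envs = 4
    observation_space = gymnasium.spaces.Box(-1.0, 1.0, shape=(8,))
    action_space = gymnasium.spaces.Box(-1.0, 1.0, shape=(2,))

    def reset(self):
        return {"obs": torch.zeros(self.num_envs, 8)}

    def step(self, actions):
        obs_dict = {"obs": torch.zeros(self.num_envs, 8)}
        reward = torch.zeros(self.num_envs)
        terminated = torch.zeros(self.num_envs, dtype=torch.bool)
        info = {"time_outs": torch.zeros(self.num_envs, dtype=torch.bool)}
        return obs_dict, reward, terminated, info

    def close(self):
        pass


env = OmniverseIsaacGymWrapper(FakeOmniverseEnv())
obs, info = env.reset()
obs, reward, terminated, truncated, info = env.step(torch.zeros(4, 2))
print(obs.shape, reward.shape, terminated.shape, truncated.shape)  # (4, 8) (4, 1) (4, 1) (4, 1)
```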
Toni-SM/skrl/skrl/envs/wrappers/torch/deepmind_envs.py
from typing import Any, Optional, Tuple import collections import gym import numpy as np import torch from skrl.envs.wrappers.torch.base import Wrapper class DeepMindWrapper(Wrapper): def __init__(self, env: Any) -> None: """DeepMind environment wrapper :param env: The environment to wrap :type env: Any supported DeepMind environment """ super().__init__(env) from dm_env import specs self._specs = specs # observation and action spaces self._observation_space = self._spec_to_space(self._env.observation_spec()) self._action_space = self._spec_to_space(self._env.action_spec()) @property def state_space(self) -> gym.Space: """State space An alias for the ``observation_space`` property """ return self._observation_space @property def observation_space(self) -> gym.Space: """Observation space """ return self._observation_space @property def action_space(self) -> gym.Space: """Action space """ return self._action_space def _spec_to_space(self, spec: Any) -> gym.Space: """Convert the DeepMind spec to a Gym space :param spec: The DeepMind spec to convert :type spec: Any supported DeepMind spec :raises: ValueError if the spec type is not supported :return: The Gym space :rtype: gym.Space """ if isinstance(spec, self._specs.DiscreteArray): return gym.spaces.Discrete(spec.num_values) elif isinstance(spec, self._specs.BoundedArray): return gym.spaces.Box(shape=spec.shape, dtype=spec.dtype, low=spec.minimum if spec.minimum.ndim else np.full(spec.shape, spec.minimum), high=spec.maximum if spec.maximum.ndim else np.full(spec.shape, spec.maximum)) elif isinstance(spec, self._specs.Array): return gym.spaces.Box(shape=spec.shape, dtype=spec.dtype, low=np.full(spec.shape, float("-inf")), high=np.full(spec.shape, float("inf"))) elif isinstance(spec, collections.OrderedDict): return gym.spaces.Dict({k: self._spec_to_space(v) for k, v in spec.items()}) else: raise ValueError(f"Spec type {type(spec)} not supported. Please report this issue") def _observation_to_tensor(self, observation: Any, spec: Optional[Any] = None) -> torch.Tensor: """Convert the DeepMind observation to a flat tensor :param observation: The DeepMind observation to convert to a tensor :type observation: Any supported DeepMind observation :raises: ValueError if the observation spec type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ spec = spec if spec is not None else self._env.observation_spec() if isinstance(spec, self._specs.DiscreteArray): return torch.tensor(observation, device=self.device, dtype=torch.float32).reshape(self.num_envs, -1) elif isinstance(spec, self._specs.Array): # includes BoundedArray return torch.tensor(observation, device=self.device, dtype=torch.float32).reshape(self.num_envs, -1) elif isinstance(spec, collections.OrderedDict): return torch.cat([self._observation_to_tensor(observation[k], spec[k]) \ for k in sorted(spec.keys())], dim=-1).reshape(self.num_envs, -1) else: raise ValueError(f"Observation spec type {type(spec)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: torch.Tensor) -> Any: """Convert the action to the DeepMind expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the DeepMind expected format :rtype: Any supported DeepMind action """ spec = self._env.action_spec() if isinstance(spec, self._specs.DiscreteArray): return np.array(actions.item(), dtype=spec.dtype) elif isinstance(spec, self._specs.Array): # includes BoundedArray return np.array(actions.cpu().numpy(), dtype=spec.dtype).reshape(spec.shape) else: raise ValueError(f"Action spec type {type(spec)} not supported. Please report this issue") def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ timestep = self._env.step(self._tensor_to_action(actions)) observation = timestep.observation reward = timestep.reward if timestep.reward is not None else 0 terminated = timestep.last() truncated = False info = {} # convert response to torch return self._observation_to_tensor(observation), \ torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1), \ torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \ torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \ info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: The state of the environment :rtype: torch.Tensor """ timestep = self._env.reset() return self._observation_to_tensor(timestep.observation), {} def render(self, *args, **kwargs) -> None: """Render the environment OpenCV is used to render the environment. Install OpenCV with ``pip install opencv-python`` """ frame = self._env.physics.render(480, 640, camera_id=0) # render the frame using OpenCV import cv2 cv2.imshow("env", cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) cv2.waitKey(1) def close(self) -> None: """Close the environment """ self._env.close()
6,468
Python
37.278106
112
0.615646
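The `_spec_to_space` branches above can be reproduced in isolation. The snippet below converts a bounded and a discrete dm_env spec by hand; it requires `dm_env` to be installed, and the concrete shapes and bounds are made up.

```python
import numpy as np
import gym
from dm_env import specs

bounded = specs.BoundedArray(shape=(3,), dtype=np.float32, minimum=-1.0, maximum=1.0)
discrete = specs.DiscreteArray(num_values=5)

# BoundedArray -> gym.spaces.Box (scalar bounds are broadcast to the full shape)
box = gym.spaces.Box(shape=bounded.shape, dtype=bounded.dtype,
                     low=np.full(bounded.shape, bounded.minimum),
                     high=np.full(bounded.shape, bounded.maximum))
# DiscreteArray -> gym.spaces.Discrete
disc = gym.spaces.Discrete(discrete.num_values)
print(box, disc)
```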
Toni-SM/skrl/skrl/envs/wrappers/jax/gym_envs.py
from typing import Any, Optional, Tuple, Union import gym from packaging import version import jax import numpy as np from skrl import logger from skrl.envs.wrappers.jax.base import Wrapper class GymWrapper(Wrapper): def __init__(self, env: Any) -> None: """OpenAI Gym environment wrapper :param env: The environment to wrap :type env: Any supported OpenAI Gym environment """ super().__init__(env) self._vectorized = False try: if isinstance(env, gym.vector.SyncVectorEnv) or isinstance(env, gym.vector.AsyncVectorEnv): self._vectorized = True self._reset_once = True self._obs_tensor = None self._info_dict = None except Exception as e: logger.warning(f"Failed to check for a vectorized environment: {e}") self._deprecated_api = version.parse(gym.__version__) < version.parse("0.25.0") if self._deprecated_api: logger.warning(f"Using a deprecated version of OpenAI Gym's API: {gym.__version__}") @property def state_space(self) -> gym.Space: """State space An alias for the ``observation_space`` property """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def observation_space(self) -> gym.Space: """Observation space """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def action_space(self) -> gym.Space: """Action space """ if self._vectorized: return self._env.single_action_space return self._env.action_space def _observation_to_tensor(self, observation: Any, space: Optional[gym.Space] = None) -> np.ndarray: """Convert the OpenAI Gym observation to a flat tensor :param observation: The OpenAI Gym observation to convert to a tensor :type observation: Any supported OpenAI Gym observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: np.ndarray """ observation_space = self._env.observation_space if self._vectorized else self.observation_space space = space if space is not None else observation_space if self._vectorized and isinstance(space, gym.spaces.MultiDiscrete): return observation.reshape(self.num_envs, -1).astype(np.int32) elif isinstance(observation, int): return np.array(observation, dtype=np.int32).reshape(self.num_envs, -1) elif isinstance(observation, np.ndarray): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gym.spaces.Discrete): return np.array(observation, dtype=np.float32).reshape(self.num_envs, -1) elif isinstance(space, gym.spaces.Box): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gym.spaces.Dict): tmp = np.concatenate([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], axis=-1).reshape(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: np.ndarray) -> Any: """Convert the action to the OpenAI Gym expected format :param actions: The actions to perform :type actions: np.ndarray :raise ValueError: If the action space type is not supported :return: The action in the OpenAI Gym format :rtype: Any supported OpenAI Gym action space """ space = self._env.action_space if self._vectorized else self.action_space if self._vectorized: if isinstance(space, gym.spaces.MultiDiscrete): return actions.astype(space.dtype).reshape(space.shape) elif isinstance(space, gym.spaces.Tuple): if isinstance(space[0], gym.spaces.Box): return actions.astype(space[0].dtype).reshape(space.shape) elif isinstance(space[0], gym.spaces.Discrete): return actions.astype(space[0].dtype).reshape(-1) elif isinstance(space, gym.spaces.Discrete): return actions.item() elif isinstance(space, gym.spaces.MultiDiscrete): return actions.astype(space.dtype).reshape(space.shape) elif isinstance(space, gym.spaces.Box): return actions.astype(space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ if self._jax: actions = jax.device_get(actions) if self._deprecated_api: observation, reward, terminated, info = self._env.step(self._tensor_to_action(actions)) # truncated: https://gymnasium.farama.org/tutorials/handling_time_limits if type(info) is list: truncated = np.array([d.get("TimeLimit.truncated", False) for d in info], dtype=terminated.dtype) terminated *= np.logical_not(truncated) else: truncated = info.get("TimeLimit.truncated", False) if truncated: terminated = False else: observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions)) # convert response to numpy or jax observation = self._observation_to_tensor(observation) reward = np.array(reward, dtype=np.float32).reshape(self.num_envs, -1) terminated = np.array(terminated, dtype=np.int8).reshape(self.num_envs, -1) truncated = np.array(truncated, dtype=np.int8).reshape(self.num_envs, -1) # save observation and info for vectorized envs if self._vectorized: self._obs_tensor = observation self._info_dict = info return observation, reward, terminated, truncated, info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ # handle vectorized envs if self._vectorized: if not self._reset_once: return self._obs_tensor, self._info_dict self._reset_once = False # reset the env/envs if self._deprecated_api: observation = self._env.reset() info = {} else: observation, info = self._env.reset() return self._observation_to_tensor(observation), info def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
7,637
Python
39.2
113
0.618437
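Usage sketch for the jax backend's Gym wrapper (assumes `gym`, `jax` and `skrl` are installed; `CartPole-v1` is an arbitrary task id). Data comes back as NumPy arrays with int8 termination flags, matching the conversions above.

```python
import gym
import numpy as np

from skrl.envs.wrappers.jax import wrap_env

env = wrap_env(gym.make("CartPole-v1"), wrapper="gym")

observation, info = env.reset()
action = np.array([[0]])                     # Discrete action for the single env
observation, reward, terminated, truncated, info = env.step(action)
print(observation.dtype, reward.shape, terminated.dtype)  # float32 (1, 1) int8
env.close()
```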
Toni-SM/skrl/skrl/envs/wrappers/jax/bidexhands_envs.py
from typing import Any, Mapping, Sequence, Tuple, Union import gym import jax import jax.dlpack import numpy as np try: import torch import torch.utils.dlpack except: pass # TODO: show warning message from skrl.envs.wrappers.jax.base import MultiAgentEnvWrapper def _jax2torch(array, device, from_jax=True): return torch.utils.dlpack.from_dlpack(jax.dlpack.to_dlpack(array)) if from_jax else torch.tensor(array, device=device) def _torch2jax(tensor, to_jax=True): return jax.dlpack.from_dlpack(torch.utils.dlpack.to_dlpack(tensor.contiguous())) if to_jax else tensor.cpu().numpy() class BiDexHandsWrapper(MultiAgentEnvWrapper): def __init__(self, env: Any) -> None: """Bi-DexHands wrapper :param env: The environment to wrap :type env: Any supported Bi-DexHands environment """ super().__init__(env) self._reset_once = True self._obs_buf = None self._shared_obs_buf = None self.possible_agents = [f"agent_{i}" for i in range(self.num_agents)] @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ return self.possible_agents @property def observation_spaces(self) -> Mapping[str, gym.Space]: """Observation spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.observation_space)} @property def action_spaces(self) -> Mapping[str, gym.Space]: """Action spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.action_space)} @property def shared_observation_spaces(self) -> Mapping[str, gym.Space]: """Shared observation spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.share_observation_space)} def step(self, actions: Mapping[str, Union[np.ndarray, jax.Array]]) -> \ Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dict of nd.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of dict of nd.ndarray or jax.Array and any other info """ actions = [_jax2torch(actions[uid], self.device, self._jax) for uid in self.possible_agents] with torch.no_grad(): obs_buf, shared_obs_buf, reward_buf, terminated_buf, info, _ = self._env.step(actions) obs_buf = _torch2jax(obs_buf, self._jax) shared_obs_buf = _torch2jax(shared_obs_buf, self._jax) reward_buf = _torch2jax(reward_buf, self._jax) terminated_buf = _torch2jax(terminated_buf.to(dtype=torch.int8), self._jax) self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} reward = {uid: reward_buf[:,i].reshape(-1, 1) for i, uid in enumerate(self.possible_agents)} terminated = {uid: terminated_buf[:,i].reshape(-1, 1) for i, uid in enumerate(self.possible_agents)} truncated = terminated info = {"shared_states": self._shared_obs_buf} return self._obs_buf, reward, terminated, truncated, info def reset(self) -> Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Reset the environment :return: Observation, info :rtype: tuple of dict of np.ndarray of jax.Array and any other info """ if self._reset_once: obs_buf, shared_obs_buf, _ = self._env.reset() obs_buf = _torch2jax(obs_buf, self._jax) shared_obs_buf = _torch2jax(shared_obs_buf, self._jax) self._obs_buf = {uid: 
obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._reset_once = False return self._obs_buf, {"shared_states": self._shared_obs_buf}
4,383
Python
37.45614
122
0.635866
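The `_jax2torch` / `_torch2jax` helpers above move data between frameworks through DLPack. A self-contained round-trip looks like this (CPU is fine; the tensor contents are arbitrary):

```python
import jax
import jax.dlpack
import torch
import torch.utils.dlpack

x_torch = torch.arange(6, dtype=torch.float32).reshape(2, 3)

# torch -> jax
x_jax = jax.dlpack.from_dlpack(torch.utils.dlpack.to_dlpack(x_torch.contiguous()))

# jax -> torch
x_back = torch.utils.dlpack.from_dlpack(jax.dlpack.to_dlpack(x_jax))

print(x_jax.shape, torch.equal(x_torch, x_back))  # (2, 3) True
```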
Toni-SM/skrl/skrl/envs/wrappers/jax/base.py
from typing import Any, Mapping, Sequence, Tuple, Union import gym import jax import numpy as np from skrl import config class Wrapper(object): def __init__(self, env: Any) -> None: """Base wrapper class for RL environments :param env: The environment to wrap :type env: Any supported RL environment """ self._jax = config.jax.backend == "jax" self._env = env # device (faster than @property) self.device = jax.devices()[0] if hasattr(self._env, "device"): try: self.device = jax.devices(self._env.device.split(':')[0] if type(self._env.device) == str else self._env.device.type)[0] except RuntimeError: pass # spaces try: self._action_space = self._env.single_action_space self._observation_space = self._env.single_observation_space except AttributeError: self._action_space = self._env.action_space self._observation_space = self._env.observation_space self._state_space = self._env.state_space if hasattr(self._env, "state_space") else self._observation_space def __getattr__(self, key: str) -> Any: """Get an attribute from the wrapped environment :param key: The attribute name :type key: str :raises AttributeError: If the attribute does not exist :return: The attribute value :rtype: Any """ if hasattr(self._env, key): return getattr(self._env, key) raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'") def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :raises NotImplementedError: Not implemented :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ raise NotImplementedError def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :raises NotImplementedError: Not implemented :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ raise NotImplementedError def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass @property def num_envs(self) -> int: """Number of environments If the wrapped environment does not have the ``num_envs`` property, it will be set to 1 """ return self._env.num_envs if hasattr(self._env, "num_envs") else 1 @property def num_agents(self) -> int: """Number of agents If the wrapped environment does not have the ``num_agents`` property, it will be set to 1 """ return self._env.num_agents if hasattr(self._env, "num_agents") else 1 @property def state_space(self) -> gym.Space: """State space If the wrapped environment does not have the ``state_space`` property, the value of the ``observation_space`` property will be used """ return self._state_space @property def observation_space(self) -> gym.Space: """Observation space """ return self._observation_space @property def action_space(self) -> gym.Space: """Action space """ return self._action_space class MultiAgentEnvWrapper(object): def __init__(self, env: Any) -> None: """Base wrapper class for multi-agent environments :param env: The multi-agent environment to wrap :type env: Any supported multi-agent environment """ self._jax = config.jax.backend == "jax" self._env = env # device (faster than @property) self.device = jax.devices()[0] if hasattr(self._env, "device"): try: self.device = jax.devices(self._env.device.split(':')[0] 
if type(self._env.device) == str else self._env.device.type)[0] except RuntimeError: pass self.possible_agents = [] def __getattr__(self, key: str) -> Any: """Get an attribute from the wrapped environment :param key: The attribute name :type key: str :raises AttributeError: If the attribute does not exist :return: The attribute value :rtype: Any """ if hasattr(self._env, key): return getattr(self._env, key) raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'") def reset(self) -> Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Reset the environment :raises NotImplementedError: Not implemented :return: Observation, info :rtype: tuple of dict of np.ndarray or jax.Array and any other info """ raise NotImplementedError def step(self, actions: Mapping[str, Union[np.ndarray, jax.Array]]) -> \ Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dict of np.ndarray or jax.Array :raises NotImplementedError: Not implemented :return: Observation, reward, terminated, truncated, info :rtype: tuple of dict of np.ndarray or jax.Array and any other info """ raise NotImplementedError def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass @property def num_envs(self) -> int: """Number of environments If the wrapped environment does not have the ``num_envs`` property, it will be set to 1 """ return self._env.num_envs if hasattr(self._env, "num_envs") else 1 @property def num_agents(self) -> int: """Number of agents If the wrapped environment does not have the ``num_agents`` property, it will be set to 1 """ return self._env.num_agents if hasattr(self._env, "num_agents") else 1 @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. 
agents can be added or removed) """ raise NotImplementedError @property def state_spaces(self) -> Mapping[str, gym.Space]: """State spaces An alias for the ``observation_spaces`` property """ return self.observation_spaces @property def observation_spaces(self) -> Mapping[str, gym.Space]: """Observation spaces """ raise NotImplementedError @property def action_spaces(self) -> Mapping[str, gym.Space]: """Action spaces """ raise NotImplementedError @property def shared_state_spaces(self) -> Mapping[str, gym.Space]: """Shared state spaces An alias for the ``shared_observation_spaces`` property """ return self.shared_observation_spaces @property def shared_observation_spaces(self) -> Mapping[str, gym.Space]: """Shared observation spaces """ raise NotImplementedError def state_space(self, agent: str) -> gym.Space: """State space :param agent: Name of the agent :type agent: str :return: The state space for the specified agent :rtype: gym.Space """ return self.state_spaces[agent] def observation_space(self, agent: str) -> gym.Space: """Observation space :param agent: Name of the agent :type agent: str :return: The observation space for the specified agent :rtype: gym.Space """ return self.observation_spaces[agent] def action_space(self, agent: str) -> gym.Space: """Action space :param agent: Name of the agent :type agent: str :return: The action space for the specified agent :rtype: gym.Space """ return self.action_spaces[agent] def shared_state_space(self, agent: str) -> gym.Space: """Shared state space :param agent: Name of the agent :type agent: str :return: The shared state space for the specified agent :rtype: gym.Space """ return self.shared_state_spaces[agent] def shared_observation_space(self, agent: str) -> gym.Space: """Shared observation space :param agent: Name of the agent :type agent: str :return: The shared observation space for the specified agent :rtype: gym.Space """ return self.shared_observation_spaces[agent]
9,431
Python
29.425806
136
0.597498
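Subclassing the base `Wrapper` only requires overriding `reset` and `step`; device, spaces and `num_envs` are resolved in `__init__`. A minimal sketch with an invented single-environment toy task (assumes `gym`, `jax` and `skrl` are installed):

```python
import gym
import numpy as np

from skrl.envs.wrappers.jax.base import Wrapper


class ToyEnv:
    """Invented environment: 3-dim observation, 1-dim continuous action."""
    observation_space = gym.spaces.Box(-1.0, 1.0, shape=(3,), dtype=np.float32)
    action_space = gym.spaces.Box(-1.0, 1.0, shape=(1,), dtype=np.float32)

    def compute(self, action):
        return np.zeros(3, dtype=np.float32), 0.0, False


class ToyWrapper(Wrapper):
    def reset(self):
        return np.zeros((self.num_envs, 3), dtype=np.float32), {}

    def step(self, actions):
        observation, reward, done = self._env.compute(actions)
        observation = observation.reshape(self.num_envs, -1)
        reward = np.array([[reward]], dtype=np.float32)
        terminated = np.array([[done]], dtype=np.int8)
        truncated = np.zeros_like(terminated)
        return observation, reward, terminated, truncated, {}


env = ToyWrapper(ToyEnv())
print(env.num_envs, env.observation_space, env.action_space)
obs, info = env.reset()
obs, reward, terminated, truncated, info = env.step(np.zeros((1, 1), dtype=np.float32))
print(obs.shape, reward.shape)  # (1, 3) (1, 1)
```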
Toni-SM/skrl/skrl/envs/wrappers/jax/__init__.py
from typing import Any, Union import gym import gymnasium from skrl import logger from skrl.envs.wrappers.jax.base import MultiAgentEnvWrapper, Wrapper from skrl.envs.wrappers.jax.bidexhands_envs import BiDexHandsWrapper from skrl.envs.wrappers.jax.gym_envs import GymWrapper from skrl.envs.wrappers.jax.gymnasium_envs import GymnasiumWrapper from skrl.envs.wrappers.jax.isaac_orbit_envs import IsaacOrbitWrapper from skrl.envs.wrappers.jax.isaacgym_envs import IsaacGymPreview2Wrapper, IsaacGymPreview3Wrapper from skrl.envs.wrappers.jax.omniverse_isaacgym_envs import OmniverseIsaacGymWrapper from skrl.envs.wrappers.jax.pettingzoo_envs import PettingZooWrapper __all__ = ["wrap_env", "Wrapper", "MultiAgentEnvWrapper"] def wrap_env(env: Any, wrapper: str = "auto", verbose: bool = True) -> Union[Wrapper, MultiAgentEnvWrapper]: """Wrap an environment to use a common interface Example:: >>> from skrl.envs.wrappers.jax import wrap_env >>> >>> # assuming that there is an environment called "env" >>> env = wrap_env(env) :param env: The environment to be wrapped :type env: gym.Env, gymnasium.Env, dm_env.Environment or VecTask :param wrapper: The type of wrapper to use (default: ``"auto"``). If ``"auto"``, the wrapper will be automatically selected based on the environment class. The supported wrappers are described in the following table: +--------------------+-------------------------+ |Environment |Wrapper tag | +====================+=========================+ |OpenAI Gym |``"gym"`` | +--------------------+-------------------------+ |Gymnasium |``"gymnasium"`` | +--------------------+-------------------------+ |Petting Zoo |``"pettingzoo"`` | +--------------------+-------------------------+ |Bi-DexHands |``"bidexhands"`` | +--------------------+-------------------------+ |Isaac Gym preview 2 |``"isaacgym-preview2"`` | +--------------------+-------------------------+ |Isaac Gym preview 3 |``"isaacgym-preview3"`` | +--------------------+-------------------------+ |Isaac Gym preview 4 |``"isaacgym-preview4"`` | +--------------------+-------------------------+ |Omniverse Isaac Gym |``"omniverse-isaacgym"`` | +--------------------+-------------------------+ |Isaac Sim (orbit) |``"isaac-orbit"`` | +--------------------+-------------------------+ :type wrapper: str, optional :param verbose: Whether to print the wrapper type (default: ``True``) :type verbose: bool, optional :raises ValueError: Unknown wrapper type :return: Wrapped environment :rtype: Wrapper or MultiAgentEnvWrapper """ if verbose: logger.info("Environment class: {}".format(", ".join([str(base).replace("<class '", "").replace("'>", "") \ for base in env.__class__.__bases__]))) if wrapper == "auto": base_classes = [str(base) for base in env.__class__.__bases__] if "<class 'omni.isaac.gym.vec_env.vec_env_base.VecEnvBase'>" in base_classes or \ "<class 'omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT'>" in base_classes: if verbose: logger.info("Environment wrapper: Omniverse Isaac Gym") return OmniverseIsaacGymWrapper(env) elif isinstance(env, gym.core.Env) or isinstance(env, gym.core.Wrapper): # isaac-orbit if hasattr(env, "sim") and hasattr(env, "env_ns"): if verbose: logger.info("Environment wrapper: Isaac Orbit") return IsaacOrbitWrapper(env) # gym if verbose: logger.info("Environment wrapper: Gym") return GymWrapper(env) elif isinstance(env, gymnasium.core.Env) or isinstance(env, gymnasium.core.Wrapper): if verbose: logger.info("Environment wrapper: Gymnasium") return GymnasiumWrapper(env) elif "<class 'pettingzoo.utils.env" in base_classes[0] or 
"<class 'pettingzoo.utils.wrappers" in base_classes[0]: if verbose: logger.info("Environment wrapper: Petting Zoo") return PettingZooWrapper(env) elif "<class 'dm_env._environment.Environment'>" in base_classes: if verbose: logger.info("Environment wrapper: DeepMind") return DeepMindWrapper(env) elif "<class 'robosuite.environments." in base_classes[0]: if verbose: logger.info("Environment wrapper: Robosuite") return RobosuiteWrapper(env) elif "<class 'rlgpu.tasks.base.vec_task.VecTask'>" in base_classes: if verbose: logger.info("Environment wrapper: Isaac Gym (preview 2)") return IsaacGymPreview2Wrapper(env) if verbose: logger.info("Environment wrapper: Isaac Gym (preview 3/4)") return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3 elif wrapper == "gym": if verbose: logger.info("Environment wrapper: Gym") return GymWrapper(env) elif wrapper == "gymnasium": if verbose: logger.info("Environment wrapper: gymnasium") return GymnasiumWrapper(env) elif wrapper == "pettingzoo": if verbose: logger.info("Environment wrapper: Petting Zoo") return PettingZooWrapper(env) elif wrapper == "dm": if verbose: logger.info("Environment wrapper: DeepMind") return DeepMindWrapper(env) elif wrapper == "robosuite": if verbose: logger.info("Environment wrapper: Robosuite") return RobosuiteWrapper(env) elif wrapper == "bidexhands": if verbose: logger.info("Environment wrapper: Bi-DexHands") return BiDexHandsWrapper(env) elif wrapper == "isaacgym-preview2": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 2)") return IsaacGymPreview2Wrapper(env) elif wrapper == "isaacgym-preview3": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 3)") return IsaacGymPreview3Wrapper(env) elif wrapper == "isaacgym-preview4": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 4)") return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3 elif wrapper == "omniverse-isaacgym": if verbose: logger.info("Environment wrapper: Omniverse Isaac Gym") return OmniverseIsaacGymWrapper(env) elif wrapper == "isaac-orbit": if verbose: logger.info("Environment wrapper: Isaac Orbit") return IsaacOrbitWrapper(env) else: raise ValueError(f"Unknown wrapper type: {wrapper}")
7,293
Python
46.058064
121
0.546551
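`wrap_env` with the default `wrapper="auto"` inspects the environment's class hierarchy to pick a wrapper and logs its choice when `verbose=True`. A short sketch (assumes `gymnasium` is installed; the task id is arbitrary):

```python
import gymnasium

from skrl.envs.wrappers.jax import wrap_env

env = wrap_env(gymnasium.make("Pendulum-v1"))  # wrapper="auto", verbose=True by default
print(type(env).__name__)                      # GymnasiumWrapper
env.close()
```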
Toni-SM/skrl/skrl/envs/wrappers/jax/isaacgym_envs.py
from typing import Any, Tuple, Union import jax import jax.dlpack as jax_dlpack import numpy as np try: import torch import torch.utils.dlpack as torch_dlpack except: pass # TODO: show warning message from skrl import logger from skrl.envs.wrappers.jax.base import Wrapper # ML frameworks conversion utilities # jaxlib.xla_extension.XlaRuntimeError: INVALID_ARGUMENT: DLPack tensor is on GPU, but no GPU backend was provided. _CPU = jax.devices()[0].device_kind.lower() == "cpu" if _CPU: logger.warning("IsaacGymEnvs runs on GPU, but there is no GPU backend for JAX. JAX operations will run on CPU.") def _jax2torch(array, device, from_jax=True): if from_jax: return torch_dlpack.from_dlpack(jax_dlpack.to_dlpack(array)).to(device=device) return torch.tensor(array, device=device) def _torch2jax(tensor, to_jax=True): if to_jax: return jax_dlpack.from_dlpack(torch_dlpack.to_dlpack(tensor.contiguous().cpu() if _CPU else tensor.contiguous())) return tensor.cpu().numpy() class IsaacGymPreview2Wrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Gym environment (preview 2) wrapper :param env: The environment to wrap :type env: Any supported Isaac Gym environment (preview 2) environment """ super().__init__(env) self._reset_once = True self._obs_buf = None def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ actions = _jax2torch(actions, self._env.device, self._jax) with torch.no_grad(): self._obs_buf, reward, terminated, info = self._env.step(actions) terminated = terminated.to(dtype=torch.int8) truncated = info["time_outs"].to(dtype=torch.int8) if "time_outs" in info else torch.zeros_like(terminated) return _torch2jax(self._obs_buf, self._jax), \ _torch2jax(reward.view(-1, 1), self._jax), \ _torch2jax(terminated.view(-1, 1), self._jax), \ _torch2jax(truncated.view(-1, 1), self._jax), \ info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ if self._reset_once: self._obs_buf = self._env.reset() self._reset_once = False return _torch2jax(self._obs_buf, self._jax), {} def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass class IsaacGymPreview3Wrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Gym environment (preview 3) wrapper :param env: The environment to wrap :type env: Any supported Isaac Gym environment (preview 3) environment """ super().__init__(env) self._reset_once = True self._obs_dict = None def step(self, actions: Union[np.ndarray, jax.Array]) ->\ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ actions = _jax2torch(actions, self._env.device, self._jax) with torch.no_grad(): self._obs_dict, reward, terminated, info = self._env.step(actions) terminated = terminated.to(dtype=torch.int8) 
truncated = info["time_outs"].to(dtype=torch.int8) if "time_outs" in info else torch.zeros_like(terminated) return _torch2jax(self._obs_dict["obs"], self._jax), \ _torch2jax(reward.view(-1, 1), self._jax), \ _torch2jax(terminated.view(-1, 1), self._jax), \ _torch2jax(truncated.view(-1, 1), self._jax), \ info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ if self._reset_once: self._obs_dict = self._env.reset() self._reset_once = False return _torch2jax(self._obs_dict["obs"], self._jax), {} def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass
5,142
Python
33.059602
121
0.608129
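The `_CPU` flag computed at import time above decides whether tensors are staged through host memory before the DLPack hand-off. The same check can be run directly:

```python
import jax

# True when JAX has no accelerator backend and falls back to CPU
_CPU = jax.devices()[0].device_kind.lower() == "cpu"
print(jax.devices()[0].device_kind, "| CPU fallback:", _CPU)
```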
Toni-SM/skrl/skrl/envs/wrappers/jax/isaac_orbit_envs.py
from typing import Any, Tuple, Union import jax import jax.dlpack as jax_dlpack import numpy as np try: import torch import torch.utils.dlpack as torch_dlpack except: pass # TODO: show warning message from skrl import logger from skrl.envs.wrappers.jax.base import Wrapper # ML frameworks conversion utilities # jaxlib.xla_extension.XlaRuntimeError: INVALID_ARGUMENT: DLPack tensor is on GPU, but no GPU backend was provided. _CPU = jax.devices()[0].device_kind.lower() == "cpu" if _CPU: logger.warning("Isaac Orbit runs on GPU, but there is no GPU backend for JAX. JAX operations will run on CPU.") def _jax2torch(array, device, from_jax=True): if from_jax: return torch_dlpack.from_dlpack(jax_dlpack.to_dlpack(array)).to(device=device) return torch.tensor(array, device=device) def _torch2jax(tensor, to_jax=True): if to_jax: return jax_dlpack.from_dlpack(torch_dlpack.to_dlpack(tensor.contiguous().cpu() if _CPU else tensor.contiguous())) return tensor.cpu().numpy() class IsaacOrbitWrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Orbit environment wrapper :param env: The environment to wrap :type env: Any supported Isaac Orbit environment """ super().__init__(env) self._reset_once = True self._obs_dict = None def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ actions = _jax2torch(actions, self._env.device, self._jax) with torch.no_grad(): self._obs_dict, reward, terminated, truncated, info = self._env.step(actions) terminated = terminated.to(dtype=torch.int8) truncated = truncated.to(dtype=torch.int8) return _torch2jax(self._obs_dict["policy"], self._jax), \ _torch2jax(reward.view(-1, 1), self._jax), \ _torch2jax(terminated.view(-1, 1), self._jax), \ _torch2jax(truncated.view(-1, 1), self._jax), \ info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ if self._reset_once: self._obs_dict, info = self._env.reset() self._reset_once = False return _torch2jax(self._obs_dict["policy"], self._jax), info def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ self._env.close()
3,029
Python
31.934782
121
0.626279
Toni-SM/skrl/skrl/envs/wrappers/jax/gymnasium_envs.py
from typing import Any, Optional, Tuple, Union import gymnasium import jax import numpy as np from skrl import logger from skrl.envs.wrappers.jax.base import Wrapper class GymnasiumWrapper(Wrapper): def __init__(self, env: Any) -> None: """Gymnasium environment wrapper :param env: The environment to wrap :type env: Any supported Gymnasium environment """ super().__init__(env) self._vectorized = False try: if isinstance(env, gymnasium.vector.SyncVectorEnv) or isinstance(env, gymnasium.vector.AsyncVectorEnv): self._vectorized = True self._reset_once = True self._obs_tensor = None self._info_dict = None except Exception as e: logger.warning(f"Failed to check for a vectorized environment: {e}") @property def state_space(self) -> gymnasium.Space: """State space An alias for the ``observation_space`` property """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def observation_space(self) -> gymnasium.Space: """Observation space """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def action_space(self) -> gymnasium.Space: """Action space """ if self._vectorized: return self._env.single_action_space return self._env.action_space def _observation_to_tensor(self, observation: Any, space: Optional[gymnasium.Space] = None) -> np.ndarray: """Convert the Gymnasium observation to a flat tensor :param observation: The Gymnasium observation to convert to a tensor :type observation: Any supported Gymnasium observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: np.ndarray """ observation_space = self._env.observation_space if self._vectorized else self.observation_space space = space if space is not None else observation_space if self._vectorized and isinstance(space, gymnasium.spaces.MultiDiscrete): return observation.reshape(self.num_envs, -1).astype(np.int32) elif isinstance(observation, int): return np.array(observation, dtype=np.int32).reshape(self.num_envs, -1) elif isinstance(observation, np.ndarray): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gymnasium.spaces.Discrete): return np.array(observation, dtype=np.float32).reshape(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Box): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gymnasium.spaces.Dict): tmp = np.concatenate([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], axis=-1).reshape(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: np.ndarray) -> Any: """Convert the action to the Gymnasium expected format :param actions: The actions to perform :type actions: np.ndarray :raise ValueError: If the action space type is not supported :return: The action in the Gymnasium format :rtype: Any supported Gymnasium action space """ space = self._env.action_space if self._vectorized else self.action_space if self._vectorized: if isinstance(space, gymnasium.spaces.MultiDiscrete): return actions.astype(space.dtype).reshape(space.shape) elif isinstance(space, gymnasium.spaces.Tuple): if isinstance(space[0], gymnasium.spaces.Box): return actions.astype(space[0].dtype).reshape(space.shape) elif isinstance(space[0], gymnasium.spaces.Discrete): return actions.astype(space[0].dtype).reshape(-1) if isinstance(space, gymnasium.spaces.Discrete): return actions.item() elif isinstance(space, gymnasium.spaces.MultiDiscrete): return actions.astype(space.dtype).reshape(space.shape) elif isinstance(space, gymnasium.spaces.Box): return actions.astype(space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ if self._jax: actions = jax.device_get(actions) observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions)) # convert response to numpy or jax observation = self._observation_to_tensor(observation) reward = np.array(reward, dtype=np.float32).reshape(self.num_envs, -1) terminated = np.array(terminated, dtype=np.int8).reshape(self.num_envs, -1) truncated = np.array(truncated, dtype=np.int8).reshape(self.num_envs, -1) # if self._jax: # HACK: jax.device_put(...).block_until_ready() # observation = jax.device_put(observation) # reward = jax.device_put(reward) # terminated = jax.device_put(terminated) # truncated = jax.device_put(truncated) # save observation and info for vectorized envs if self._vectorized: self._obs_tensor = observation self._info_dict = info return observation, reward, terminated, truncated, info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ # handle vectorized envs if self._vectorized: if not self._reset_once: return self._obs_tensor, self._info_dict self._reset_once = False # reset the env/envs observation, info = self._env.reset() # convert response to numpy or jax observation = self._observation_to_tensor(observation) # if self._jax: # HACK: jax.device_put(...).block_until_ready() # observation = jax.device_put(observation) return observation, info def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
7,269
Python
39.165746
115
0.628422
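For `Dict` observation spaces, `_observation_to_tensor` above flattens each sub-observation to `(num_envs, -1)` and concatenates the pieces in sorted-key order. The same transformation, written out against an invented two-key space:

```python
import gymnasium
import numpy as np

num_envs = 1
space = gymnasium.spaces.Dict({
    "position": gymnasium.spaces.Box(-1.0, 1.0, shape=(3,), dtype=np.float32),
    "velocity": gymnasium.spaces.Box(-1.0, 1.0, shape=(2,), dtype=np.float32),
})
observation = space.sample()

# flatten each sub-observation, then concatenate in sorted-key order
flat = np.concatenate(
    [observation[k].reshape(num_envs, -1).astype(np.float32) for k in sorted(space.keys())],
    axis=-1,
).reshape(num_envs, -1)
print(flat.shape)  # (1, 5)
```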
Toni-SM/skrl/skrl/envs/wrappers/jax/pettingzoo_envs.py
from typing import Any, Mapping, Sequence, Tuple, Union import collections import gymnasium import jax import numpy as np from skrl.envs.wrappers.jax.base import MultiAgentEnvWrapper class PettingZooWrapper(MultiAgentEnvWrapper): def __init__(self, env: Any) -> None: """PettingZoo (parallel) environment wrapper :param env: The environment to wrap :type env: Any supported PettingZoo (parallel) environment """ super().__init__(env) self.possible_agents = self._env.possible_agents self._shared_observation_space = self._compute_shared_observation_space(self._env.observation_spaces) def _compute_shared_observation_space(self, observation_spaces): space = next(iter(observation_spaces.values())) shape = (len(self.possible_agents),) + space.shape return gymnasium.spaces.Box(low=np.stack([space.low for _ in self.possible_agents], axis=0), high=np.stack([space.high for _ in self.possible_agents], axis=0), dtype=space.dtype, shape=shape) @property def num_agents(self) -> int: """Number of agents """ return len(self.possible_agents) @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ return self._env.agents @property def observation_spaces(self) -> Mapping[str, gymnasium.Space]: """Observation spaces """ return {uid: self._env.observation_space(uid) for uid in self.possible_agents} @property def action_spaces(self) -> Mapping[str, gymnasium.Space]: """Action spaces """ return {uid: self._env.action_space(uid) for uid in self.possible_agents} @property def shared_observation_spaces(self) -> Mapping[str, gymnasium.Space]: """Shared observation spaces """ return {uid: self._shared_observation_space for uid in self.possible_agents} def _observation_to_tensor(self, observation: Any, space: gymnasium.Space) -> np.ndarray: """Convert the Gymnasium observation to a flat tensor :param observation: The Gymnasium observation to convert to a tensor :type observation: Any supported Gymnasium observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: np.ndarray """ if isinstance(observation, int): return np.array(observation, dtype=np.int32).view(self.num_envs, -1) elif isinstance(observation, np.ndarray): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gymnasium.spaces.Discrete): return np.array(observation, dtype=np.float32).reshape(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Box): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gymnasium.spaces.Dict): tmp = np.concatenate([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], axis=-1).view(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. Please report this issue") def _tensor_to_action(self, actions: np.ndarray, space: gymnasium.Space) -> Any: """Convert the action to the Gymnasium expected format :param actions: The actions to perform :type actions: np.ndarray :raise ValueError: If the action space type is not supported :return: The action in the Gymnasium format :rtype: Any supported Gymnasium action space """ if isinstance(space, gymnasium.spaces.Discrete): return actions.item() elif isinstance(space, gymnasium.spaces.Box): return actions.astype(space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. 
Please report this issue") def step(self, actions: Mapping[str, Union[np.ndarray, jax.Array]]) -> \ Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dict of np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of dict of np.ndarray or jax.Array and any other info """ if self._jax: actions = jax.device_get(actions) actions = {uid: self._tensor_to_action(action, self._env.action_space(uid)) for uid, action in actions.items()} observations, rewards, terminated, truncated, infos = self._env.step(actions) # build shared observation shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0) shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space) infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents} # convert response to numpy or jax observations = {uid: self._observation_to_tensor(value, self._env.observation_space(uid)) for uid, value in observations.items()} rewards = {uid: np.array(value, dtype=np.float32).reshape(self.num_envs, -1) for uid, value in rewards.items()} terminated = {uid: np.array(value, dtype=np.int8).reshape(self.num_envs, -1) for uid, value in terminated.items()} truncated = {uid: np.array(value, dtype=np.int8).reshape(self.num_envs, -1) for uid, value in truncated.items()} return observations, rewards, terminated, truncated, infos def reset(self) -> Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Reset the environment :return: Observation, info :rtype: tuple of dict of np.ndarray or jax.Array and any other info """ outputs = self._env.reset() if isinstance(outputs, collections.abc.Mapping): observations = outputs infos = {uid: {} for uid in self.possible_agents} else: observations, infos = outputs # build shared observation shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0) shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space) infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents} # convert response to numpy or jax observations = {uid: self._observation_to_tensor(observation, self._env.observation_space(uid)) for uid, observation in observations.items()} return observations, infos def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
7,386
Python
43.233533
149
0.645681
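A usage sketch for the PettingZoo wrapper. It needs a parallel-API environment with Box observation spaces; the MPE task below (and its versioned module name) depends on the installed pettingzoo release, so treat that choice as an assumption.

```python
import numpy as np
from pettingzoo.mpe import simple_spread_v3

from skrl.envs.wrappers.jax import wrap_env

env = wrap_env(simple_spread_v3.parallel_env(), wrapper="pettingzoo")

observations, infos = env.reset()
actions = {uid: np.array([0]) for uid in env.agents}  # one Discrete action per agent
observations, rewards, terminated, truncated, infos = env.step(actions)
print(sorted(observations.keys()), rewards[env.possible_agents[0]].shape)  # agent ids, (1, 1)
env.close()
```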
Toni-SM/skrl/skrl/envs/wrappers/jax/omniverse_isaacgym_envs.py
from typing import Any, Optional, Tuple, Union

import jax
import jax.dlpack as jax_dlpack
import numpy as np

try:
    import torch
    import torch.utils.dlpack as torch_dlpack
except:
    pass  # TODO: show warning message

from skrl import logger
from skrl.envs.wrappers.jax.base import Wrapper


# ML frameworks conversion utilities
# jaxlib.xla_extension.XlaRuntimeError: INVALID_ARGUMENT: DLPack tensor is on GPU, but no GPU backend was provided.
_CPU = jax.devices()[0].device_kind.lower() == "cpu"
if _CPU:
    logger.warning("OmniIsaacGymEnvs runs on GPU, but there is no GPU backend for JAX. JAX operations will run on CPU.")

def _jax2torch(array, device, from_jax=True):
    if from_jax:
        return torch_dlpack.from_dlpack(jax_dlpack.to_dlpack(array)).to(device=device)
    return torch.tensor(array, device=device)

def _torch2jax(tensor, to_jax=True):
    if to_jax:
        return jax_dlpack.from_dlpack(torch_dlpack.to_dlpack(tensor.contiguous().cpu() if _CPU else tensor.contiguous()))
    return tensor.cpu().numpy()


class OmniverseIsaacGymWrapper(Wrapper):
    def __init__(self, env: Any) -> None:
        """Omniverse Isaac Gym environment wrapper

        :param env: The environment to wrap
        :type env: Any supported Omniverse Isaac Gym environment
        """
        super().__init__(env)

        self._reset_once = True
        self._obs_dict = None

    def run(self, trainer: Optional["omni.isaac.gym.vec_env.vec_env_mt.TrainerMT"] = None) -> None:
        """Run the simulation in the main thread

        This method is valid only for the Omniverse Isaac Gym multi-threaded environments

        :param trainer: Trainer which should implement a ``run`` method that initiates the RL loop on a new thread
        :type trainer: omni.isaac.gym.vec_env.vec_env_mt.TrainerMT, optional
        """
        self._env.run(trainer)

    def step(self, actions: Union[np.ndarray, jax.Array]) -> \
        Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array],
              Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]:
        """Perform a step in the environment

        :param actions: The actions to perform
        :type actions: np.ndarray or jax.Array

        :return: Observation, reward, terminated, truncated, info
        :rtype: tuple of np.ndarray or jax.Array and any other info
        """
        actions = _jax2torch(actions, self._env._task.device, self._jax)

        with torch.no_grad():
            self._obs_dict, reward, terminated, info = self._env.step(actions)

        terminated = terminated.to(dtype=torch.int8)
        truncated = info["time_outs"].to(dtype=torch.int8) if "time_outs" in info else torch.zeros_like(terminated)

        return _torch2jax(self._obs_dict["obs"], self._jax), \
               _torch2jax(reward.view(-1, 1), self._jax), \
               _torch2jax(terminated.view(-1, 1), self._jax), \
               _torch2jax(truncated.view(-1, 1), self._jax), \
               info

    def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]:
        """Reset the environment

        :return: Observation, info
        :rtype: np.ndarray or jax.Array and any other info
        """
        if self._reset_once:
            self._obs_dict = self._env.reset()
            self._reset_once = False
        return _torch2jax(self._obs_dict["obs"], self._jax), {}

    def render(self, *args, **kwargs) -> None:
        """Render the environment
        """
        pass

    def close(self) -> None:
        """Close the environment
        """
        self._env.close()
3,590
Python
34.205882
121
0.635655
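The `_jax2torch`/`_torch2jax` helpers above hinge on DLPack for (mostly) zero-copy tensor exchange between frameworks. A standalone sketch of that round trip follows; it assumes both torch and a CPU build of jax are importable, and it mirrors the helpers rather than being part of the wrapper.

```python
# Minimal sketch of the DLPack bridge the helpers above rely on
# (assumption: torch and a CPU build of jax are installed).
import jax.dlpack as jax_dlpack
import jax.numpy as jnp
import torch
import torch.utils.dlpack as torch_dlpack

x = jnp.arange(6, dtype=jnp.float32).reshape(2, 3)

# jax -> torch: export a DLPack capsule and import it on the torch side
t = torch_dlpack.from_dlpack(jax_dlpack.to_dlpack(x))

# torch -> jax: the reverse direction (tensors must be contiguous, as in _torch2jax)
y = jax_dlpack.from_dlpack(torch_dlpack.to_dlpack(t.contiguous()))

assert y.shape == (2, 3)
```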
Toni-SM/skrl/skrl/agents/torch/base.py
from typing import Any, Mapping, Optional, Tuple, Union import collections import copy import datetime import os import gym import gymnasium import numpy as np import torch from torch.utils.tensorboard import SummaryWriter from skrl import logger from skrl.memories.torch import Memory from skrl.models.torch import Model class Agent: def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Base class that represent a RL agent :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict """ self.models = models self.observation_space = observation_space self.action_space = action_space self.cfg = cfg if cfg is not None else {} self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device) if type(memory) is list: self.memory = memory[0] self.secondary_memories = memory[1:] else: self.memory = memory self.secondary_memories = [] # convert the models to their respective device for model in self.models.values(): if model is not None: model.to(model.device) self.tracking_data = collections.defaultdict(list) self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000) self._track_rewards = collections.deque(maxlen=100) self._track_timesteps = collections.deque(maxlen=100) self._cumulative_rewards = None self._cumulative_timesteps = None self.training = True # checkpoint self.checkpoint_modules = {} self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000) self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False) self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": False, "modules": {}} # experiment directory directory = self.cfg.get("experiment", {}).get("directory", "") experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "") if not directory: directory = os.path.join(os.getcwd(), "runs") if not experiment_name: experiment_name = "{}_{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), self.__class__.__name__) self.experiment_dir = os.path.join(directory, experiment_name) def __str__(self) -> str: """Generate a representation of the agent as string :return: Representation of the agent as string :rtype: str """ string = f"Agent: {repr(self)}" for k, v in self.cfg.items(): if type(v) is dict: string += f"\n |-- {k}" for k1, v1 in 
v.items(): string += f"\n | |-- {k1}: {v1}" else: string += f"\n |-- {k}: {v}" return string def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any: """Empty preprocess method This method is defined because PyTorch multiprocessing can't pickle lambdas :param _input: Input to preprocess :type _input: Any :return: Preprocessed input :rtype: Any """ return _input def _get_internal_value(self, _module: Any) -> Any: """Get internal module/variable state/value :param _module: Module or variable :type _module: Any :return: Module/variable state/value :rtype: Any """ return _module.state_dict() if hasattr(_module, "state_dict") else _module def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent This method should be called before the agent is used. It will initialize the TensoBoard writer (and optionally Weights & Biases) and create the checkpoints directory :param trainer_cfg: Trainer configuration :type trainer_cfg: dict, optional """ # setup Weights & Biases if self.cfg.get("experiment", {}).get("wandb", False): # save experiment config trainer_cfg = trainer_cfg if trainer_cfg is not None else {} try: models_cfg = {k: v.net._modules for (k, v) in self.models.items()} except AttributeError: models_cfg = {k: v._modules for (k, v) in self.models.items()} config={**self.cfg, **trainer_cfg, **models_cfg} # set default values wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {})) wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1]) wandb_kwargs.setdefault("sync_tensorboard", True) wandb_kwargs.setdefault("config", {}) wandb_kwargs["config"].update(config) # init Weights & Biases import wandb wandb.init(**wandb_kwargs) # main entry to log data for consumption and visualization by TensorBoard if self.write_interval > 0: self.writer = SummaryWriter(log_dir=self.experiment_dir) if self.checkpoint_interval > 0: os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True) def track_data(self, tag: str, value: float) -> None: """Track data to TensorBoard Currently only scalar data are supported :param tag: Data identifier (e.g. 'Loss / policy loss') :type tag: str :param value: Value to track :type value: float """ self.tracking_data[tag].append(value) def write_tracking_data(self, timestep: int, timesteps: int) -> None: """Write tracking data to TensorBoard :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ for k, v in self.tracking_data.items(): if k.endswith("(min)"): self.writer.add_scalar(k, np.min(v), timestep) elif k.endswith("(max)"): self.writer.add_scalar(k, np.max(v), timestep) else: self.writer.add_scalar(k, np.mean(v), timestep) # reset data containers for next iteration self._track_rewards.clear() self._track_timesteps.clear() self.tracking_data.clear() def write_checkpoint(self, timestep: int, timesteps: int) -> None: """Write checkpoint (modules) to disk The checkpoints are saved in the directory 'checkpoints' in the experiment directory. The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time. 
:param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f")) # separated modules if self.checkpoint_store_separately: for name, module in self.checkpoint_modules.items(): torch.save(self._get_internal_value(module), os.path.join(self.experiment_dir, "checkpoints", f"{name}_{tag}.pt")) # whole agent else: modules = {} for name, module in self.checkpoint_modules.items(): modules[name] = self._get_internal_value(module) torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pt")) # best modules if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]: # separated modules if self.checkpoint_store_separately: for name, module in self.checkpoint_modules.items(): torch.save(self.checkpoint_best_modules["modules"][name], os.path.join(self.experiment_dir, "checkpoints", f"best_{name}.pt")) # whole agent else: modules = {} for name, module in self.checkpoint_modules.items(): modules[name] = self.checkpoint_best_modules["modules"][name] torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", "best_agent.pt")) self.checkpoint_best_modules["saved"] = True def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes :return: Actions :rtype: torch.Tensor """ raise NotImplementedError def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory (to be implemented by the inheriting classes) Inheriting classes must call this method to record episode information (rewards, timesteps, etc.). In addition to recording environment transition (such as states, rewards, etc.), agent information can be recorded. 
:param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if self.write_interval > 0: # compute the cumulative sum of the rewards and timesteps if self._cumulative_rewards is None: self._cumulative_rewards = torch.zeros_like(rewards, dtype=torch.float32) self._cumulative_timesteps = torch.zeros_like(rewards, dtype=torch.int32) self._cumulative_rewards.add_(rewards) self._cumulative_timesteps.add_(1) # check ended episodes finished_episodes = (terminated + truncated).nonzero(as_tuple=False) if finished_episodes.numel(): # storage cumulative rewards and timesteps self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist()) self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist()) # reset the cumulative rewards and timesteps self._cumulative_rewards[finished_episodes] = 0 self._cumulative_timesteps[finished_episodes] = 0 # record data self.tracking_data["Reward / Instantaneous reward (max)"].append(torch.max(rewards).item()) self.tracking_data["Reward / Instantaneous reward (min)"].append(torch.min(rewards).item()) self.tracking_data["Reward / Instantaneous reward (mean)"].append(torch.mean(rewards).item()) if len(self._track_rewards): track_rewards = np.array(self._track_rewards) track_timesteps = np.array(self._track_timesteps) self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards)) self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards)) self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards)) self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps)) self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps)) self.tracking_data["Episode / Total timesteps (mean)"].append(np.mean(track_timesteps)) def set_mode(self, mode: str) -> None: """Set the model mode (training or evaluation) :param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ for model in self.models.values(): if model is not None: model.set_mode(mode) def set_running_mode(self, mode: str) -> None: """Set the current running mode (training or evaluation) This method sets the value of the ``training`` property (boolean). This property can be used to know if the agent is running in training or evaluation mode. 
:param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ self.training = mode == "train" def save(self, path: str) -> None: """Save the agent to the specified path :param path: Path to save the model to :type path: str """ modules = {} for name, module in self.checkpoint_modules.items(): modules[name] = self._get_internal_value(module) torch.save(modules, path) def load(self, path: str) -> None: """Load the model from the specified path The final storage device is determined by the constructor of the model :param path: Path to load the model from :type path: str """ modules = torch.load(path, map_location=self.device) if type(modules) is dict: for name, data in modules.items(): module = self.checkpoint_modules.get(name, None) if module is not None: if hasattr(module, "load_state_dict"): module.load_state_dict(data) if hasattr(module, "eval"): module.eval() else: raise NotImplementedError else: logger.warning(f"Cannot load the {name} module. The agent doesn't have such an instance") def migrate(self, path: str, name_map: Mapping[str, Mapping[str, str]] = {}, auto_mapping: bool = True, verbose: bool = False) -> bool: """Migrate the specified extrernal checkpoint to the current agent The final storage device is determined by the constructor of the agent. Only files generated by the *rl_games* library are supported at the moment For ambiguous models (where 2 or more parameters, for source or current model, have equal shape) it is necessary to define the ``name_map``, at least for those parameters, to perform the migration successfully :param path: Path to the external checkpoint to migrate from :type path: str :param name_map: Name map to use for the migration (default: ``{}``). Keys are the current parameter names and values are the external parameter names :type name_map: Mapping[str, Mapping[str, str]], optional :param auto_mapping: Automatically map the external state dict to the current state dict (default: ``True``) :type auto_mapping: bool, optional :param verbose: Show model names and migration (default: ``False``) :type verbose: bool, optional :raises ValueError: If the correct file type cannot be identified from the ``path`` parameter :return: True if the migration was successful, False otherwise. 
Migration is successful if all parameters of the current model are found in the external model :rtype: bool Example:: # migrate a rl_games checkpoint with ambiguous state_dict >>> agent.migrate(path="./runs/Cartpole/nn/Cartpole.pth", verbose=False) [skrl:WARNING] Ambiguous match for net.0.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.2.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.4.weight <- [a2c_network.value.weight, a2c_network.mu.weight] [skrl:WARNING] Ambiguous match for net.4.bias <- [a2c_network.value.bias, a2c_network.mu.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.0.bias -> [net.0.bias, net.2.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.2.bias -> [net.0.bias, net.2.bias] [skrl:WARNING] Ambiguous match for net.0.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.2.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.4.weight <- [a2c_network.value.weight, a2c_network.mu.weight] [skrl:WARNING] Ambiguous match for net.4.bias <- [a2c_network.value.bias, a2c_network.mu.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.0.bias -> [net.0.bias, net.2.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.2.bias -> [net.0.bias, net.2.bias] False >>> name_map = {"policy": {"net.0.bias": "a2c_network.actor_mlp.0.bias", ... "net.2.bias": "a2c_network.actor_mlp.2.bias", ... "net.4.weight": "a2c_network.mu.weight", ... "net.4.bias": "a2c_network.mu.bias"}, ... "value": {"net.0.bias": "a2c_network.actor_mlp.0.bias", ... "net.2.bias": "a2c_network.actor_mlp.2.bias", ... "net.4.weight": "a2c_network.value.weight", ... 
"net.4.bias": "a2c_network.value.bias"}} >>> model.migrate(path="./runs/Cartpole/nn/Cartpole.pth", name_map=name_map, verbose=True) [skrl:INFO] Modules [skrl:INFO] |-- current [skrl:INFO] | |-- policy (Policy) [skrl:INFO] | | |-- log_std_parameter : [1] [skrl:INFO] | | |-- net.0.weight : [32, 4] [skrl:INFO] | | |-- net.0.bias : [32] [skrl:INFO] | | |-- net.2.weight : [32, 32] [skrl:INFO] | | |-- net.2.bias : [32] [skrl:INFO] | | |-- net.4.weight : [1, 32] [skrl:INFO] | | |-- net.4.bias : [1] [skrl:INFO] | |-- value (Value) [skrl:INFO] | | |-- net.0.weight : [32, 4] [skrl:INFO] | | |-- net.0.bias : [32] [skrl:INFO] | | |-- net.2.weight : [32, 32] [skrl:INFO] | | |-- net.2.bias : [32] [skrl:INFO] | | |-- net.4.weight : [1, 32] [skrl:INFO] | | |-- net.4.bias : [1] [skrl:INFO] | |-- optimizer (Adam) [skrl:INFO] | | |-- state (dict) [skrl:INFO] | | |-- param_groups (list) [skrl:INFO] | |-- state_preprocessor (RunningStandardScaler) [skrl:INFO] | | |-- running_mean : [4] [skrl:INFO] | | |-- running_variance : [4] [skrl:INFO] | | |-- current_count : [] [skrl:INFO] | |-- value_preprocessor (RunningStandardScaler) [skrl:INFO] | | |-- running_mean : [1] [skrl:INFO] | | |-- running_variance : [1] [skrl:INFO] | | |-- current_count : [] [skrl:INFO] |-- source [skrl:INFO] | |-- model (OrderedDict) [skrl:INFO] | | |-- value_mean_std.running_mean : [1] [skrl:INFO] | | |-- value_mean_std.running_var : [1] [skrl:INFO] | | |-- value_mean_std.count : [] [skrl:INFO] | | |-- running_mean_std.running_mean : [4] [skrl:INFO] | | |-- running_mean_std.running_var : [4] [skrl:INFO] | | |-- running_mean_std.count : [] [skrl:INFO] | | |-- a2c_network.sigma : [1] [skrl:INFO] | | |-- a2c_network.actor_mlp.0.weight : [32, 4] [skrl:INFO] | | |-- a2c_network.actor_mlp.0.bias : [32] [skrl:INFO] | | |-- a2c_network.actor_mlp.2.weight : [32, 32] [skrl:INFO] | | |-- a2c_network.actor_mlp.2.bias : [32] [skrl:INFO] | | |-- a2c_network.value.weight : [1, 32] [skrl:INFO] | | |-- a2c_network.value.bias : [1] [skrl:INFO] | | |-- a2c_network.mu.weight : [1, 32] [skrl:INFO] | | |-- a2c_network.mu.bias : [1] [skrl:INFO] | |-- epoch (int) [skrl:INFO] | |-- optimizer (dict) [skrl:INFO] | |-- frame (int) [skrl:INFO] | |-- last_mean_rewards (float32) [skrl:INFO] | |-- env_state (NoneType) [skrl:INFO] Migration [skrl:INFO] Model: policy (Policy) [skrl:INFO] Models [skrl:INFO] |-- current: 7 items [skrl:INFO] | |-- log_std_parameter : [1] [skrl:INFO] | |-- net.0.weight : [32, 4] [skrl:INFO] | |-- net.0.bias : [32] [skrl:INFO] | |-- net.2.weight : [32, 32] [skrl:INFO] | |-- net.2.bias : [32] [skrl:INFO] | |-- net.4.weight : [1, 32] [skrl:INFO] | |-- net.4.bias : [1] [skrl:INFO] |-- source: 9 items [skrl:INFO] | |-- a2c_network.sigma : [1] [skrl:INFO] | |-- a2c_network.actor_mlp.0.weight : [32, 4] [skrl:INFO] | |-- a2c_network.actor_mlp.0.bias : [32] [skrl:INFO] | |-- a2c_network.actor_mlp.2.weight : [32, 32] [skrl:INFO] | |-- a2c_network.actor_mlp.2.bias : [32] [skrl:INFO] | |-- a2c_network.value.weight : [1, 32] [skrl:INFO] | |-- a2c_network.value.bias : [1] [skrl:INFO] | |-- a2c_network.mu.weight : [1, 32] [skrl:INFO] | |-- a2c_network.mu.bias : [1] [skrl:INFO] Migration [skrl:INFO] |-- auto: log_std_parameter <- a2c_network.sigma [skrl:INFO] |-- auto: net.0.weight <- a2c_network.actor_mlp.0.weight [skrl:INFO] |-- map: net.0.bias <- a2c_network.actor_mlp.0.bias [skrl:INFO] |-- auto: net.2.weight <- a2c_network.actor_mlp.2.weight [skrl:INFO] |-- map: net.2.bias <- a2c_network.actor_mlp.2.bias [skrl:INFO] |-- map: net.4.weight <- 
a2c_network.mu.weight [skrl:INFO] |-- map: net.4.bias <- a2c_network.mu.bias [skrl:INFO] Model: value (Value) [skrl:INFO] Models [skrl:INFO] |-- current: 6 items [skrl:INFO] | |-- net.0.weight : [32, 4] [skrl:INFO] | |-- net.0.bias : [32] [skrl:INFO] | |-- net.2.weight : [32, 32] [skrl:INFO] | |-- net.2.bias : [32] [skrl:INFO] | |-- net.4.weight : [1, 32] [skrl:INFO] | |-- net.4.bias : [1] [skrl:INFO] |-- source: 9 items [skrl:INFO] | |-- a2c_network.sigma : [1] [skrl:INFO] | |-- a2c_network.actor_mlp.0.weight : [32, 4] [skrl:INFO] | |-- a2c_network.actor_mlp.0.bias : [32] [skrl:INFO] | |-- a2c_network.actor_mlp.2.weight : [32, 32] [skrl:INFO] | |-- a2c_network.actor_mlp.2.bias : [32] [skrl:INFO] | |-- a2c_network.value.weight : [1, 32] [skrl:INFO] | |-- a2c_network.value.bias : [1] [skrl:INFO] | |-- a2c_network.mu.weight : [1, 32] [skrl:INFO] | |-- a2c_network.mu.bias : [1] [skrl:INFO] Migration [skrl:INFO] |-- auto: net.0.weight <- a2c_network.actor_mlp.0.weight [skrl:INFO] |-- map: net.0.bias <- a2c_network.actor_mlp.0.bias [skrl:INFO] |-- auto: net.2.weight <- a2c_network.actor_mlp.2.weight [skrl:INFO] |-- map: net.2.bias <- a2c_network.actor_mlp.2.bias [skrl:INFO] |-- map: net.4.weight <- a2c_network.value.weight [skrl:INFO] |-- map: net.4.bias <- a2c_network.value.bias True """ # load state_dict from path if path is not None: # rl_games checkpoint if path.endswith(".pt") or path.endswith(".pth"): checkpoint = torch.load(path, map_location=self.device) else: raise ValueError("Cannot identify file type") # show modules if verbose: logger.info("Modules") logger.info(" |-- current") for name, module in self.checkpoint_modules.items(): logger.info(f" | |-- {name} ({type(module).__name__})") if hasattr(module, "state_dict"): for k, v in module.state_dict().items(): if hasattr(v, "shape"): logger.info(f" | | |-- {k} : {list(v.shape)}") else: logger.info(f" | | |-- {k} ({type(v).__name__})") logger.info(" |-- source") for name, module in checkpoint.items(): logger.info(f" | |-- {name} ({type(module).__name__})") if name == "model": for k, v in module.items(): logger.info(f" | | |-- {k} : {list(v.shape)}") else: if hasattr(module, "state_dict"): for k, v in module.state_dict().items(): if hasattr(v, "shape"): logger.info(f" | | |-- {k} : {list(v.shape)}") else: logger.info(f" | | |-- {k} ({type(v).__name__})") logger.info("Migration") if "optimizer" in self.checkpoint_modules: # loaded state dict contains a parameter group that doesn't match the size of optimizer's group # self.checkpoint_modules["optimizer"].load_state_dict(checkpoint["optimizer"]) pass # state_preprocessor if "state_preprocessor" in self.checkpoint_modules: if "running_mean_std.running_mean" in checkpoint["model"]: state_dict = copy.deepcopy(self.checkpoint_modules["state_preprocessor"].state_dict()) state_dict["running_mean"] = checkpoint["model"]["running_mean_std.running_mean"] state_dict["running_variance"] = checkpoint["model"]["running_mean_std.running_var"] state_dict["current_count"] = checkpoint["model"]["running_mean_std.count"] self.checkpoint_modules["state_preprocessor"].load_state_dict(state_dict) del checkpoint["model"]["running_mean_std.running_mean"] del checkpoint["model"]["running_mean_std.running_var"] del checkpoint["model"]["running_mean_std.count"] # value_preprocessor if "value_preprocessor" in self.checkpoint_modules: if "value_mean_std.running_mean" in checkpoint["model"]: state_dict = copy.deepcopy(self.checkpoint_modules["value_preprocessor"].state_dict()) state_dict["running_mean"] = 
checkpoint["model"]["value_mean_std.running_mean"] state_dict["running_variance"] = checkpoint["model"]["value_mean_std.running_var"] state_dict["current_count"] = checkpoint["model"]["value_mean_std.count"] self.checkpoint_modules["value_preprocessor"].load_state_dict(state_dict) del checkpoint["model"]["value_mean_std.running_mean"] del checkpoint["model"]["value_mean_std.running_var"] del checkpoint["model"]["value_mean_std.count"] # TODO: AMP state preprocessor # model status = True for name, module in self.checkpoint_modules.items(): if module not in ["state_preprocessor", "value_preprocessor", "optimizer"] and hasattr(module, "migrate"): if verbose: logger.info(f"Model: {name} ({type(module).__name__})") status *= module.migrate(state_dict=checkpoint["model"], name_map=name_map.get(name, {}), auto_mapping=auto_mapping, verbose=verbose) self.set_mode("eval") return bool(status) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ timestep += 1 # update best models and write checkpoints if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval: # update best models reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31)) if reward > self.checkpoint_best_modules["reward"]: self.checkpoint_best_modules["timestep"] = timestep self.checkpoint_best_modules["reward"] = reward self.checkpoint_best_modules["saved"] = False self.checkpoint_best_modules["modules"] = {k: copy.deepcopy(self._get_internal_value(v)) for k, v in self.checkpoint_modules.items()} # write checkpoints self.write_checkpoint(timestep, timesteps) # write to tensorboard if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval: self.write_tracking_data(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes """ raise NotImplementedError
33,314
Python
49.097744
149
0.555082
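The `Agent` class above is an abstract base: concrete agents override `act` and `_update`, call the base `record_transition`/`post_interaction` hooks, and register their modules for checkpointing. A minimal, hypothetical subclass follows as a sketch; the `policy` model it expects and its no-op update are placeholders, not anything defined in skrl.

```python
# A minimal, hypothetical subclass of the Agent base class above.
# Assumptions: models["policy"] is a user-defined skrl Model; the no-op update is a placeholder.
import torch

from skrl.agents.torch import Agent


class MyAgent(Agent):
    def __init__(self, models, memory=None, observation_space=None, action_space=None,
                 device=None, cfg=None):
        super().__init__(models=models, memory=memory, observation_space=observation_space,
                         action_space=action_space, device=device, cfg=cfg)
        self.policy = self.models["policy"]
        # modules registered here are picked up by write_checkpoint(), save() and load()
        self.checkpoint_modules["policy"] = self.policy

    def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor:
        # delegate action selection to the policy model (returns actions, log_prob, outputs)
        return self.policy.act({"states": states}, role="policy")

    def post_interaction(self, timestep: int, timesteps: int) -> None:
        self._update(timestep, timesteps)
        # the base implementation handles checkpointing and TensorBoard writing
        super().post_interaction(timestep, timesteps)

    def _update(self, timestep: int, timesteps: int) -> None:
        # placeholder: a real agent would sample transitions from memory and optimize here
        self.track_data("Loss / Policy loss", 0.0)
```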
Toni-SM/skrl/skrl/agents/torch/__init__.py
from skrl.agents.torch.base import Agent
41
Python
19.99999
40
0.829268
Toni-SM/skrl/skrl/agents/torch/trpo/__init__.py
from skrl.agents.torch.trpo.trpo import TRPO, TRPO_DEFAULT_CONFIG from skrl.agents.torch.trpo.trpo_rnn import TRPO_RNN
119
Python
38.999987
65
0.815126
Toni-SM/skrl/skrl/agents/torch/trpo/trpo.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.convert_parameters import parameters_to_vector, vector_to_parameters from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] TRPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "value_learning_rate": 1e-3, # value learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "value_loss_scale": 1.0, # value loss scaling factor "damping": 0.1, # damping coefficient for computing the Hessian-vector product "max_kl_divergence": 0.01, # maximum KL divergence between old and new policy "conjugate_gradient_steps": 10, # maximum number of iterations for the conjugate gradient algorithm "max_backtrack_steps": 10, # maximum number of backtracking steps during line search "accept_ratio": 0.5, # accept ratio for the line search loss improvement "step_fraction": 1.0, # fraction of the step size for the line search "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class TRPO(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Trust Region Policy Optimization (TRPO) https://arxiv.org/abs/1502.05477 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(TRPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) self.backup_policy = copy.deepcopy(self.policy) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._value_loss_scale = self.cfg["value_loss_scale"] self._max_kl_divergence = self.cfg["max_kl_divergence"] self._damping = self.cfg["damping"] self._conjugate_gradient_steps = self.cfg["conjugate_gradient_steps"] self._max_backtrack_steps = self.cfg["max_backtrack_steps"] self._accept_ratio = self.cfg["accept_ratio"] self._step_fraction = self.cfg["step_fraction"] self._value_learning_rate = self.cfg["value_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: self.value_optimizer = torch.optim.Adam(self.value.parameters(), lr=self._value_learning_rate) if self._learning_rate_scheduler is not None: self.value_scheduler = self._learning_rate_scheduler(self.value_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["value_optimizer"] = self.value_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ 
super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) self._tensors_names_policy = ["states", "actions", "log_prob", "advantages"] self._tensors_names_value = ["states", "returns"] # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions # TODO: fix for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, 
truncated=truncated, log_prob=self._current_log_prob, values=values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages def surrogate_loss(policy: Model, states: torch.Tensor, actions: torch.Tensor, log_prob: torch.Tensor, advantages: torch.Tensor) -> torch.Tensor: """Compute the surrogate objective (policy loss) :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param actions: Actions :type actions: torch.Tensor :param log_prob: Log probability :type log_prob: torch.Tensor :param advantages: Advantages :type advantages: torch.Tensor :return: Surrogate loss :rtype: torch.Tensor """ _, new_log_prob, _ = policy.act({"states": states, "taken_actions": actions}, role="policy") return (advantages * torch.exp(new_log_prob - log_prob.detach())).mean() def conjugate_gradient(policy: Model, states: torch.Tensor, b: torch.Tensor, num_iterations: float = 10, residual_tolerance: float = 1e-10) -> torch.Tensor: """Conjugate gradient algorithm to solve Ax = b using the iterative method 
https://en.wikipedia.org/wiki/Conjugate_gradient_method#As_an_iterative_method :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param b: Vector b :type b: torch.Tensor :param num_iterations: Number of iterations (default: ``10``) :type num_iterations: float, optional :param residual_tolerance: Residual tolerance (default: ``1e-10``) :type residual_tolerance: float, optional :return: Conjugate vector :rtype: torch.Tensor """ x = torch.zeros_like(b) r = b.clone() p = b.clone() rr_old = torch.dot(r, r) for _ in range(num_iterations): hv = fisher_vector_product(policy, states, p, damping=self._damping) alpha = rr_old / torch.dot(p, hv) x += alpha * p r -= alpha * hv rr_new = torch.dot(r, r) if rr_new < residual_tolerance: break p = r + rr_new / rr_old * p rr_old = rr_new return x def fisher_vector_product(policy: Model, states: torch.Tensor, vector: torch.Tensor, damping: float = 0.1) -> torch.Tensor: """Compute the Fisher vector product (direct method) https://www.telesens.co/2018/06/09/efficiently-computing-the-fisher-vector-product-in-trpo/ :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param vector: Vector :type vector: torch.Tensor :param damping: Damping (default: ``0.1``) :type damping: float, optional :return: Hessian vector product :rtype: torch.Tensor """ kl = kl_divergence(policy, policy, states) kl_gradient = torch.autograd.grad(kl, policy.parameters(), create_graph=True) flat_kl_gradient = torch.cat([gradient.view(-1) for gradient in kl_gradient]) hessian_vector_gradient = torch.autograd.grad((flat_kl_gradient * vector).sum(), policy.parameters()) flat_hessian_vector_gradient = torch.cat([gradient.contiguous().view(-1) for gradient in hessian_vector_gradient]) return flat_hessian_vector_gradient + damping * vector def kl_divergence(policy_1: Model, policy_2: Model, states: torch.Tensor) -> torch.Tensor: """Compute the KL divergence between two distributions https://en.wikipedia.org/wiki/Normal_distribution#Other_properties :param policy_1: First policy :type policy_1: Model :param policy_2: Second policy :type policy_2: Model :param states: States :type states: torch.Tensor :return: KL divergence :rtype: torch.Tensor """ mu_1 = policy_1.act({"states": states}, role="policy")[2]["mean_actions"] logstd_1 = policy_1.get_log_std(role="policy") mu_1, logstd_1 = mu_1.detach(), logstd_1.detach() mu_2 = policy_2.act({"states": states}, role="policy")[2]["mean_actions"] logstd_2 = policy_2.get_log_std(role="policy") kl = logstd_1 - logstd_2 + 0.5 * (torch.square(logstd_1.exp()) + torch.square(mu_1 - mu_2)) \ / torch.square(logstd_2.exp()) - 0.5 return torch.sum(kl, dim=-1).mean() # compute returns and advantages with torch.no_grad(): self.value.train(False) last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float())}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample all from 
memory sampled_states, sampled_actions, sampled_log_prob, sampled_advantages \ = self.memory.sample_all(names=self._tensors_names_policy, mini_batches=1)[0] sampled_states = self._state_preprocessor(sampled_states, train=True) # compute policy loss gradient policy_loss = surrogate_loss(self.policy, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages) policy_loss_gradient = torch.autograd.grad(policy_loss, self.policy.parameters()) flat_policy_loss_gradient = torch.cat([gradient.view(-1) for gradient in policy_loss_gradient]) # compute the search direction using the conjugate gradient algorithm search_direction = conjugate_gradient(self.policy, sampled_states, flat_policy_loss_gradient.data, num_iterations=self._conjugate_gradient_steps) # compute step size and full step xHx = (search_direction * fisher_vector_product(self.policy, sampled_states, search_direction, self._damping)) \ .sum(0, keepdim=True) step_size = torch.sqrt(2 * self._max_kl_divergence / xHx)[0] full_step = step_size * search_direction # backtracking line search restore_policy_flag = True self.backup_policy.update_parameters(self.policy) params = parameters_to_vector(self.policy.parameters()) expected_improvement = (flat_policy_loss_gradient * full_step).sum(0, keepdim=True) for alpha in [self._step_fraction * 0.5 ** i for i in range(self._max_backtrack_steps)]: new_params = params + alpha * full_step vector_to_parameters(new_params, self.policy.parameters()) expected_improvement *= alpha kl = kl_divergence(self.backup_policy, self.policy, sampled_states) loss = surrogate_loss(self.policy, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages) if kl < self._max_kl_divergence and (loss - policy_loss) / expected_improvement > self._accept_ratio: restore_policy_flag = False break if restore_policy_flag: self.policy.update_parameters(self.backup_policy) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names_value, mini_batches=self._mini_batches) cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): # mini-batches loop for sampled_states, sampled_returns in sampled_batches: sampled_states = self._state_preprocessor(sampled_states, train=not epoch) # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states}, role="value") value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # optimization step (value) self.value_optimizer.zero_grad() value_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.value.parameters(), self._grad_norm_clip) self.value_optimizer.step() # update cumulative losses cumulative_value_loss += value_loss.item() # update learning rate if self._learning_rate_scheduler: self.value_scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Value learning rate", self.value_scheduler.get_last_lr()[0])
26,328
Python
45.682624
136
0.598184
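A minimal setup sketch for this agent follows, under the assumption that gymnasium's `Pendulum-v1` task and skrl's standard utilities (`wrap_env`, `RandomMemory`, the Gaussian/Deterministic model mixins) are available; the network sizes and configuration overrides are illustrative only, not prescribed by the file above.

```python
# Minimal setup sketch for the TRPO agent above.
# Assumptions: gymnasium's Pendulum-v1 is installed; wrap_env, RandomMemory and the model
# mixins are skrl's standard utilities; layer sizes and overrides are illustrative.
import copy

import gymnasium as gym
import torch
import torch.nn as nn

from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model


class Policy(GaussianMixin, Model):
    def __init__(self, observation_space, action_space, device):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions=False)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ELU(),
                                 nn.Linear(64, self.num_actions))
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), self.log_std_parameter, {}


class Value(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ELU(),
                                 nn.Linear(64, 1))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


env = wrap_env(gym.make("Pendulum-v1"))
device = env.device

models = {"policy": Policy(env.observation_space, env.action_space, device),
          "value": Value(env.observation_space, env.action_space, device)}

# one memory slot per rollout step; TRPO samples the whole buffer at every update
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)

cfg = copy.deepcopy(TRPO_DEFAULT_CONFIG)
cfg["rollouts"] = 16  # should match the memory size above
cfg["experiment"]["write_interval"] = 500

agent = TRPO(models=models, memory=memory, cfg=cfg,
             observation_space=env.observation_space, action_space=env.action_space, device=device)
```

A skrl trainer (for example `SequentialTrainer`) would then drive the interaction loop between this agent and the wrapped environment.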
Toni-SM/skrl/skrl/agents/torch/trpo/trpo_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.convert_parameters import parameters_to_vector, vector_to_parameters from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] TRPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "value_learning_rate": 1e-3, # value learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "value_loss_scale": 1.0, # value loss scaling factor "damping": 0.1, # damping coefficient for computing the Hessian-vector product "max_kl_divergence": 0.01, # maximum KL divergence between old and new policy "conjugate_gradient_steps": 10, # maximum number of iterations for the conjugate gradient algorithm "max_backtrack_steps": 10, # maximum number of backtracking steps during line search "accept_ratio": 0.5, # accept ratio for the line search loss improvement "step_fraction": 1.0, # fraction of the step size for the line search "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class TRPO_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Trust Region Policy Optimization (TRPO) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1502.05477 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(TRPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) self.backup_policy = copy.deepcopy(self.policy) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._value_loss_scale = self.cfg["value_loss_scale"] self._max_kl_divergence = self.cfg["max_kl_divergence"] self._damping = self.cfg["damping"] self._conjugate_gradient_steps = self.cfg["conjugate_gradient_steps"] self._max_backtrack_steps = self.cfg["max_backtrack_steps"] self._accept_ratio = self.cfg["accept_ratio"] self._step_fraction = self.cfg["step_fraction"] self._value_learning_rate = self.cfg["value_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: self.value_optimizer = torch.optim.Adam(self.value.parameters(), lr=self._value_learning_rate) if self._learning_rate_scheduler is not None: self.value_scheduler = self._learning_rate_scheduler(self.value_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["value_optimizer"] = self.value_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ 
super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) self._tensors_names_policy = ["states", "actions", "terminated", "log_prob", "advantages"] self._tensors_names_value = ["states", "terminated", "returns"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": [], "value": []} self._rnn_initial_states = {"policy": [], "value": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # value if self.value is not None: if self.policy is self.value: self._rnn_initial_states["value"] = self._rnn_initial_states["policy"] else: for i, size in enumerate(self.value.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_value_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_value_{i}") # default RNN states self._rnn_initial_states["value"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions # TODO: fix for stochasticity, rnn and log_prob if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") self._current_log_prob = log_prob if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, 
timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} values, _, outputs = self.value.act({"states": self._state_preprocessor(states), **rnn}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) if self.policy is not self.value: rnn_states.update({f"rnn_value_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["value"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) # update RNN states if self._rnn: self._rnn_final_states["value"] = self._rnn_final_states["policy"] if self.policy is self.value else outputs.get("rnn", []) # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 if self.policy is not self.value: for rnn_state in self._rnn_final_states["value"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) 
self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages def surrogate_loss(policy: Model, states: torch.Tensor, actions: torch.Tensor, log_prob: torch.Tensor, advantages: torch.Tensor) -> torch.Tensor: """Compute the surrogate objective (policy loss) :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param actions: Actions :type actions: torch.Tensor :param log_prob: Log probability :type log_prob: torch.Tensor :param advantages: Advantages :type advantages: torch.Tensor :return: Surrogate loss :rtype: torch.Tensor """ _, new_log_prob, _ = policy.act({"states": states, "taken_actions": actions, **rnn_policy}, role="policy") return (advantages * torch.exp(new_log_prob - log_prob.detach())).mean() def conjugate_gradient(policy: Model, states: torch.Tensor, b: torch.Tensor, num_iterations: float = 10, residual_tolerance: float = 1e-10) -> torch.Tensor: """Conjugate gradient algorithm to solve Ax = b using the iterative method https://en.wikipedia.org/wiki/Conjugate_gradient_method#As_an_iterative_method :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param b: Vector b :type b: torch.Tensor :param num_iterations: Number of iterations (default: ``10``) :type num_iterations: float, optional :param residual_tolerance: Residual tolerance (default: ``1e-10``) :type residual_tolerance: float, optional :return: Conjugate vector :rtype: torch.Tensor """ x = torch.zeros_like(b) r = b.clone() p = b.clone() rr_old = torch.dot(r, r) for _ in range(num_iterations): hv = fisher_vector_product(policy, states, p, damping=self._damping) alpha = rr_old / torch.dot(p, hv) x += alpha * p r -= alpha * hv rr_new = torch.dot(r, r) if rr_new < residual_tolerance: break p = r + rr_new / rr_old * p rr_old = rr_new return x def fisher_vector_product(policy: Model, states: torch.Tensor, vector: torch.Tensor, damping: float = 0.1) -> torch.Tensor: """Compute the Fisher 
vector product (direct method) https://www.telesens.co/2018/06/09/efficiently-computing-the-fisher-vector-product-in-trpo/ :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param vector: Vector :type vector: torch.Tensor :param damping: Damping (default: ``0.1``) :type damping: float, optional :return: Hessian vector product :rtype: torch.Tensor """ kl = kl_divergence(policy, policy, states) kl_gradient = torch.autograd.grad(kl, policy.parameters(), create_graph=True) flat_kl_gradient = torch.cat([gradient.view(-1) for gradient in kl_gradient]) hessian_vector_gradient = torch.autograd.grad((flat_kl_gradient * vector).sum(), policy.parameters()) flat_hessian_vector_gradient = torch.cat([gradient.contiguous().view(-1) for gradient in hessian_vector_gradient]) return flat_hessian_vector_gradient + damping * vector def kl_divergence(policy_1: Model, policy_2: Model, states: torch.Tensor) -> torch.Tensor: """Compute the KL divergence between two distributions https://en.wikipedia.org/wiki/Normal_distribution#Other_properties :param policy_1: First policy :type policy_1: Model :param policy_2: Second policy :type policy_2: Model :param states: States :type states: torch.Tensor :return: KL divergence :rtype: torch.Tensor """ mu_1 = policy_1.act({"states": states, **rnn_policy}, role="policy")[2]["mean_actions"] logstd_1 = policy_1.get_log_std(role="policy") mu_1, logstd_1 = mu_1.detach(), logstd_1.detach() with torch.backends.cudnn.flags(enabled=not self._rnn): mu_2 = policy_2.act({"states": states, **rnn_policy}, role="policy")[2]["mean_actions"] logstd_2 = policy_2.get_log_std(role="policy") kl = logstd_1 - logstd_2 + 0.5 * (torch.square(logstd_1.exp()) + torch.square(mu_1 - mu_2)) \ / torch.square(logstd_2.exp()) - 0.5 return torch.sum(kl, dim=-1).mean() # compute returns and advantages with torch.no_grad(): self.value.train(False) rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), **rnn}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample all from memory sampled_states, sampled_actions, sampled_dones, sampled_log_prob, sampled_advantages \ = self.memory.sample_all(names=self._tensors_names_policy, mini_batches=1, sequence_length=self._rnn_sequence_length)[0] sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=1, sequence_length=self._rnn_sequence_length)[0] rnn_policy = {} if self._rnn: if self.policy is self.value: rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches], "terminated": sampled_dones} else: rnn_policy = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches, self._rnn_tensors_names) if "policy" in n], "terminated": sampled_dones} sampled_states = self._state_preprocessor(sampled_states, train=True) # compute policy loss gradient policy_loss = surrogate_loss(self.policy, 
sampled_states, sampled_actions, sampled_log_prob, sampled_advantages) policy_loss_gradient = torch.autograd.grad(policy_loss, self.policy.parameters()) flat_policy_loss_gradient = torch.cat([gradient.view(-1) for gradient in policy_loss_gradient]) # compute the search direction using the conjugate gradient algorithm search_direction = conjugate_gradient(self.policy, sampled_states, flat_policy_loss_gradient.data, num_iterations=self._conjugate_gradient_steps) # compute step size and full step xHx = (search_direction * fisher_vector_product(self.policy, sampled_states, search_direction, self._damping)) \ .sum(0, keepdim=True) step_size = torch.sqrt(2 * self._max_kl_divergence / xHx)[0] full_step = step_size * search_direction # backtracking line search restore_policy_flag = True self.backup_policy.update_parameters(self.policy) params = parameters_to_vector(self.policy.parameters()) expected_improvement = (flat_policy_loss_gradient * full_step).sum(0, keepdim=True) for alpha in [self._step_fraction * 0.5 ** i for i in range(self._max_backtrack_steps)]: new_params = params + alpha * full_step vector_to_parameters(new_params, self.policy.parameters()) expected_improvement *= alpha kl = kl_divergence(self.backup_policy, self.policy, sampled_states) loss = surrogate_loss(self.policy, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages) if kl < self._max_kl_divergence and (loss - policy_loss) / expected_improvement > self._accept_ratio: restore_policy_flag = False break if restore_policy_flag: self.policy.update_parameters(self.backup_policy) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names_value, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) rnn_value = {} if self._rnn: sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): # mini-batches loop for i, (sampled_states, sampled_dones, sampled_returns) in enumerate(sampled_batches): if self._rnn: if self.policy is self.value: rnn_value = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches[i]], "terminated": sampled_dones} else: rnn_value = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "value" in n], "terminated": sampled_dones} sampled_states = self._state_preprocessor(sampled_states, train=not epoch) # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states, **rnn_value}, role="value") value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # optimization step (value) self.value_optimizer.zero_grad() value_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.value.parameters(), self._grad_norm_clip) self.value_optimizer.step() # update cumulative losses cumulative_value_loss += value_loss.item() # update learning rate if self._learning_rate_scheduler: self.value_scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Value learning rate", self.value_scheduler.get_last_lr()[0])
31,186
Python
47.427019
174
0.594145
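The conjugate_gradient helper in trpo_rnn.py above solves H x = g for the search direction using only Hessian-vector products. Below is a rough standalone sketch of that iteration (not skrl code; the explicit matrix A and the sizes are made-up stand-ins for fisher_vector_product), checked against a direct solve:

# Minimal sketch of the conjugate gradient iteration TRPO uses to solve A x = b.
# Here A is an explicit symmetric positive-definite matrix; in the agent above the
# product A @ p is replaced by fisher_vector_product(policy, states, p).
import torch

def conjugate_gradient(matvec, b, num_iterations=10, residual_tolerance=1e-10):
    x = torch.zeros_like(b)
    r = b.clone()            # residual r = b - A x (x starts at zero)
    p = b.clone()            # search direction
    rr_old = torch.dot(r, r)
    for _ in range(num_iterations):
        Ap = matvec(p)
        alpha = rr_old / torch.dot(p, Ap)
        x += alpha * p
        r -= alpha * Ap
        rr_new = torch.dot(r, r)
        if rr_new < residual_tolerance:
            break
        p = r + (rr_new / rr_old) * p
        rr_old = rr_new
    return x

# quick check against a direct solve (A must be symmetric positive-definite)
torch.manual_seed(0)
M = torch.randn(5, 5)
A = M @ M.T + 5 * torch.eye(5)
b = torch.randn(5)
x = conjugate_gradient(lambda v: A @ v, b, num_iterations=50)
print(torch.allclose(x, torch.linalg.solve(A, b), atol=1e-4))  # expected: True

In the agent, matvec(p) is the damped Fisher-vector product, so the Hessian of the KL divergence never has to be formed explicitly.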
Toni-SM/skrl/skrl/agents/torch/q_learning/__init__.py
from skrl.agents.torch.q_learning.q_learning import Q_LEARNING, Q_LEARNING_DEFAULT_CONFIG
90
Python
44.499978
89
0.822222
Toni-SM/skrl/skrl/agents/torch/q_learning/q_learning.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] Q_LEARNING_DEFAULT_CONFIG = { "discount_factor": 0.99, # discount factor (gamma) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "learning_rate": 0.5, # learning rate (alpha) "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class Q_LEARNING(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Q-learning https://www.academia.edu/3294050/Learning_from_delayed_rewards :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(Q_LEARNING_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy # configuration self._discount_factor = self.cfg["discount_factor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._learning_rate = self.cfg["learning_rate"] self._rewards_shaper = self.cfg["rewards_shaper"] # create temporary variables needed for storage and computation self._current_states = None self._current_actions = None self._current_rewards = None self._current_next_states = None self._current_dones = None def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": states}, role="policy") # sample actions from policy return self.policy.act({"states": states}, role="policy") def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self._current_states = states self._current_actions = actions self._current_rewards = rewards self._current_next_states = next_states self._current_dones = terminated + truncated if self.memory is not None: self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, 
rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self._update(timestep, timesteps) # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ q_table = self.policy.table() env_ids = torch.arange(self._current_rewards.shape[0]).view(-1, 1) # compute next actions next_actions = torch.argmax(q_table[env_ids, self._current_next_states], dim=-1, keepdim=True).view(-1,1) # update Q-table q_table[env_ids, self._current_states, self._current_actions] += self._learning_rate \ * (self._current_rewards + self._discount_factor * self._current_dones.logical_not() \ * q_table[env_ids, self._current_next_states, next_actions] \ - q_table[env_ids, self._current_states, self._current_actions])
9,186
Python
40.759091
123
0.609514
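The _update method in q_learning.py above applies the tabular rule Q(s, a) += alpha * (r + gamma * max_a' Q(s', a') - Q(s, a)) in batch over the table returned by policy.table(). A minimal single-transition sketch of the same rule follows (the table sizes and the transition are illustrative, not skrl data):

# Standalone sketch of the tabular Q-learning update used by the agent above.
import torch

num_states, num_actions = 4, 2    # illustrative sizes
q_table = torch.zeros(num_states, num_actions)
alpha, gamma = 0.5, 0.99          # learning rate and discount factor (cfg defaults above)

# one hypothetical transition (s, a, r, s', done)
s, a, r, s_next, done = 0, 1, 1.0, 2, False

td_target = r + gamma * (not done) * q_table[s_next].max()
q_table[s, a] += alpha * (td_target - q_table[s, a])
print(q_table[s, a])  # tensor(0.5000) on the first update, since Q(s', .) is still zero

The (not done) factor mirrors the logical_not() mask used above, so terminal transitions bootstrap from zero instead of the next state's value.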
Toni-SM/skrl/skrl/agents/torch/cem/cem.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn.functional as F from skrl import logger from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] CEM_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "percentile": 0.70, # percentile to compute the reward bound [0, 1] "discount_factor": 0.99, # discount factor (gamma) "learning_rate": 1e-2, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class CEM(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Cross-Entropy Method (CEM) https://ieeexplore.ieee.org/abstract/document/6796865/ :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(CEM_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy # configuration: self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._percentile = self.cfg["percentile"] self._discount_factor = self.cfg["discount_factor"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._episode_tracking = [] # set up optimizer and learning rate scheduler if self.policy is not None: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.int64) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ states = self._state_preprocessor(states) # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": states}, role="policy") # sample stochastic actions return self.policy.act({"states": states}, role="policy") def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type 
actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) if self.memory is not None: self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) # track episodes internally if self._rollout: indexes = torch.nonzero(terminated + truncated) if indexes.numel(): for i in indexes[:, 0]: self._episode_tracking[i.item()].append(self._rollout + 1) else: self._episode_tracking = [[0] for _ in range(rewards.size(-1))] def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self._rollout = 0 self._update(timestep, timesteps) # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample all memory sampled_states, sampled_actions, sampled_rewards, _, _ = self.memory.sample_all(names=self.tensors_names)[0] sampled_states = self._state_preprocessor(sampled_states, train=True) with torch.no_grad(): # compute discounted return threshold limits = [] returns = [] for e in range(sampled_rewards.size(-1)): for i, j in zip(self._episode_tracking[e][:-1], self._episode_tracking[e][1:]): limits.append([e + i, e + j]) rewards = sampled_rewards[e + i: e + j] returns.append(torch.sum(rewards * self._discount_factor ** \ torch.arange(rewards.size(0), device=rewards.device).flip(-1).view(rewards.size()))) if not len(returns): logger.warning("No returns to update. 
Consider increasing the number of rollouts") return returns = torch.tensor(returns) return_threshold = torch.quantile(returns, self._percentile, dim=-1) # get elite states and actions indexes = torch.nonzero(returns >= return_threshold) elite_states = torch.cat([sampled_states[limits[i][0]:limits[i][1]] for i in indexes[:, 0]], dim=0) elite_actions = torch.cat([sampled_actions[limits[i][0]:limits[i][1]] for i in indexes[:, 0]], dim=0) # compute scores for the elite states _, _, outputs = self.policy.act({"states": elite_states}, role="policy") scores = outputs["net_output"] # compute policy loss policy_loss = F.cross_entropy(scores, elite_actions.view(-1)) # optimization step self.optimizer.zero_grad() policy_loss.backward() self.optimizer.step() # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Coefficient / Return threshold", return_threshold.item()) self.track_data("Coefficient / Mean discounted returns", torch.mean(returns).item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
13,279
Python
42.398693
124
0.609308
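The CEM update above keeps only episodes whose discounted return reaches the configured percentile and fits the policy to their actions with cross-entropy. A small sketch of that elite-selection step on made-up per-episode returns (illustrative tensors, not skrl data):

# Sketch of the elite-episode selection used by CEM: keep episodes whose discounted
# return reaches the configured percentile, then train on their (state, action) pairs.
import torch

returns = torch.tensor([3.0, 7.5, 1.2, 9.0, 4.4])   # one discounted return per finished episode
percentile = 0.70                                    # cfg["percentile"] default above

return_threshold = torch.quantile(returns, percentile)
elite_mask = returns >= return_threshold
print(return_threshold)  # tensor(6.8800)
print(elite_mask)        # tensor([False,  True, False,  True, False])
# the agent then concatenates the states/actions of the selected episodes and
# minimizes F.cross_entropy(policy_scores, elite_actions)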
Toni-SM/skrl/skrl/agents/torch/cem/__init__.py
from skrl.agents.torch.cem.cem import CEM, CEM_DEFAULT_CONFIG
62
Python
30.499985
61
0.806452
Toni-SM/skrl/skrl/agents/torch/sac/__init__.py
from skrl.agents.torch.sac.sac import SAC, SAC_DEFAULT_CONFIG
from skrl.agents.torch.sac.sac_rnn import SAC_RNN
112
Python
36.666654
61
0.803571
Toni-SM/skrl/skrl/agents/torch/sac/sac.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] SAC_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "learn_entropy": True, # learn entropy "entropy_learning_rate": 1e-3, # entropy learning rate "initial_entropy_value": 0.2, # initial entropy value "target_entropy": None, # target entropy "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "base_directory": "", # base directory for the experiment "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class SAC(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Soft Actor-Critic (SAC) https://arxiv.org/abs/1801.01290 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(SAC_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 if self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_learning_rate = self.cfg["entropy_learning_rate"] self._learn_entropy = self.cfg["learn_entropy"] self._entropy_coefficient = self.cfg["initial_entropy_value"] self._rewards_shaper = self.cfg["rewards_shaper"] # entropy if self._learn_entropy: self._target_entropy = self.cfg["target_entropy"] if self._target_entropy is None: if issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box): self._target_entropy = -np.prod(self.action_space.shape).astype(np.float32) elif issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete): self._target_entropy = -self.action_space.n else: self._target_entropy = 0 self.log_entropy_coefficient = torch.log(torch.ones(1, device=self.device) * self._entropy_coefficient).requires_grad_(True) self.entropy_optimizer = torch.optim.Adam([self.log_entropy_coefficient], lr=self._entropy_learning_rate) self.checkpoint_modules["entropy_optimizer"] = self.entropy_optimizer # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = 
self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, 
terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_actions, next_log_prob, _ = self.policy.act({"states": sampled_next_states}, role="policy") target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_2") target_q_values = torch.min(target_q1_values, target_q2_values) - self._entropy_coefficient * next_log_prob target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic_2") critic_loss = (F.mse_loss(critic_1_values, target_values) + F.mse_loss(critic_2_values, target_values)) / 2 # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), self._grad_norm_clip) self.critic_optimizer.step() # compute policy (actor) loss actions, log_prob, _ = self.policy.act({"states": sampled_states}, role="policy") critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": actions}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": actions}, role="critic_2") policy_loss = (self._entropy_coefficient * log_prob - torch.min(critic_1_values, critic_2_values)).mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # entropy learning if self._learn_entropy: # compute entropy loss entropy_loss = 
-(self.log_entropy_coefficient * (log_prob + self._target_entropy).detach()).mean() # optimization step (entropy) self.entropy_optimizer.zero_grad() entropy_loss.backward() self.entropy_optimizer.step() # compute entropy coefficient self._entropy_coefficient = torch.exp(self.log_entropy_coefficient.detach()) # update target networks self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data if self.write_interval > 0: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_1_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_1_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_1_values).item()) self.track_data("Q-network / Q2 (max)", torch.max(critic_2_values).item()) self.track_data("Q-network / Q2 (min)", torch.min(critic_2_values).item()) self.track_data("Q-network / Q2 (mean)", torch.mean(critic_2_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learn_entropy: self.track_data("Loss / Entropy loss", entropy_loss.item()) self.track_data("Coefficient / Entropy coefficient", self._entropy_coefficient.item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
19,278
Python
48.181122
153
0.615002
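After each gradient step sac.py moves the target critics toward the online critics with a soft update, target <- polyak * online + (1 - polyak) * target (update_parameters with polyak=1 at construction is the corresponding hard copy). Below is a minimal sketch of that rule on throwaway nn.Linear modules, assuming the same weighting convention:

# Sketch of the soft target-network update used by SAC:
# theta_target <- polyak * theta + (1 - polyak) * theta_target
import torch
import torch.nn as nn

critic = nn.Linear(4, 1)                             # stand-in for the online critic
target_critic = nn.Linear(4, 1)                      # stand-in for the target critic
target_critic.load_state_dict(critic.state_dict())  # hard update at initialization

polyak = 0.005  # cfg["polyak"] default above

with torch.no_grad():
    for param, target_param in zip(critic.parameters(), target_critic.parameters()):
        target_param.mul_(1 - polyak)
        target_param.add_(polyak * param)

With the default polyak of 0.005 the targets trail the critics slowly, which stabilizes the bootstrapped values used in the critic loss.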
Toni-SM/skrl/skrl/agents/torch/sac/sac_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] SAC_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "learn_entropy": True, # learn entropy "entropy_learning_rate": 1e-3, # entropy learning rate "initial_entropy_value": 0.2, # initial entropy value "target_entropy": None, # target entropy "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "base_directory": "", # base directory for the experiment "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class SAC_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Soft Actor-Critic (SAC) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1801.01290 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(SAC_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 if self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_learning_rate = self.cfg["entropy_learning_rate"] self._learn_entropy = self.cfg["learn_entropy"] self._entropy_coefficient = self.cfg["initial_entropy_value"] self._rewards_shaper = self.cfg["rewards_shaper"] # entropy if self._learn_entropy: self._target_entropy = self.cfg["target_entropy"] if self._target_entropy is None: if issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box): self._target_entropy = -np.prod(self.action_space.shape).astype(np.float32) elif issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete): self._target_entropy = -self.action_space.n else: self._target_entropy = 0 self.log_entropy_coefficient = torch.log(torch.ones(1, device=self.device) * self._entropy_coefficient).requires_grad_(True) self.entropy_optimizer = torch.optim.Adam([self.log_entropy_coefficient], lr=self._entropy_learning_rate) self.checkpoint_modules["entropy_optimizer"] = self.entropy_optimizer # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = 
self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": []} self._rnn_initial_states = {"policy": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample stochastic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type 
rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) # update RNN states if self._rnn: # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size, sequence_length=self._rnn_sequence_length)[0] rnn_policy = {} if self._rnn: sampled_rnn = self.memory.sample_by_index(names=self._rnn_tensors_names, indexes=self.memory.get_sampling_indexes())[0] rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn], "terminated": sampled_dones} # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_actions, next_log_prob, _ = self.policy.act({"states": sampled_next_states, **rnn_policy}, role="policy") target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, 
role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, role="target_critic_2") target_q_values = torch.min(target_q1_values, target_q2_values) - self._entropy_coefficient * next_log_prob target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic_2") critic_loss = (F.mse_loss(critic_1_values, target_values) + F.mse_loss(critic_2_values, target_values)) / 2 # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), self._grad_norm_clip) self.critic_optimizer.step() # compute policy (actor) loss actions, log_prob, _ = self.policy.act({"states": sampled_states, **rnn_policy}, role="policy") critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": actions, **rnn_policy}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": actions, **rnn_policy}, role="critic_2") policy_loss = (self._entropy_coefficient * log_prob - torch.min(critic_1_values, critic_2_values)).mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # entropy learning if self._learn_entropy: # compute entropy loss entropy_loss = -(self.log_entropy_coefficient * (log_prob + self._target_entropy).detach()).mean() # optimization step (entropy) self.entropy_optimizer.zero_grad() entropy_loss.backward() self.entropy_optimizer.step() # compute entropy coefficient self._entropy_coefficient = torch.exp(self.log_entropy_coefficient.detach()) # update target networks self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data if self.write_interval > 0: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_1_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_1_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_1_values).item()) self.track_data("Q-network / Q2 (max)", torch.max(critic_2_values).item()) self.track_data("Q-network / Q2 (min)", torch.min(critic_2_values).item()) self.track_data("Q-network / Q2 (mean)", torch.mean(critic_2_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learn_entropy: self.track_data("Loss / Entropy loss", entropy_loss.item()) self.track_data("Coefficient / Entropy coefficient", self._entropy_coefficient.item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", 
self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
21,582
Python
48.730415
167
0.612177
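The entropy-coefficient update buried in the SAC_RNN._update step above is easy to miss in the flattened listing. A minimal standalone sketch of that automatic entropy tuning follows, written in plain PyTorch rather than through skrl's API; the batch size, action dimension and learning rate are illustrative assumptions, not values taken from the agent.

import numpy as np
import torch

# Minimal sketch (plain PyTorch, not skrl's API) of the automatic entropy tuning
# performed in SAC_RNN._update: a learnable log-coefficient is optimized so that
# the policy entropy is pushed toward the target entropy -|A|.
action_dim = 6                                              # assumed action dimension
initial_entropy_value = 0.2                                 # same default as the config above
target_entropy = -float(np.prod((action_dim,)))
log_entropy_coefficient = torch.log(torch.ones(1) * initial_entropy_value).requires_grad_(True)
entropy_optimizer = torch.optim.Adam([log_entropy_coefficient], lr=1e-3)

def update_entropy_coefficient(log_prob: torch.Tensor) -> torch.Tensor:
    # entropy loss and one optimizer step, mirroring the agent's update
    entropy_loss = -(log_entropy_coefficient * (log_prob + target_entropy).detach()).mean()
    entropy_optimizer.zero_grad()
    entropy_loss.backward()
    entropy_optimizer.step()
    return torch.exp(log_entropy_coefficient.detach())      # new entropy coefficient (alpha)

alpha = update_entropy_coefficient(torch.randn(64, 1))      # dummy batch of log-probabilities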
Toni-SM/skrl/skrl/agents/torch/td3/__init__.py
from skrl.agents.torch.td3.td3 import TD3, TD3_DEFAULT_CONFIG
from skrl.agents.torch.td3.td3_rnn import TD3_RNN
112
Python
36.666654
61
0.803571
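Because the package __init__ above re-exports TD3, TD3_DEFAULT_CONFIG and TD3_RNN, downstream code can import them from the package root. A hedged usage sketch follows; the overridden keys are taken from TD3_DEFAULT_CONFIG as listed in td3.py below, while the models, memory and spaces are placeholders that a real setup would have to provide.

import copy

from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG

# override a few defaults; these keys appear in TD3_DEFAULT_CONFIG below
cfg = copy.deepcopy(TD3_DEFAULT_CONFIG)
cfg["batch_size"] = 256
cfg["discount_factor"] = 0.98
cfg["policy_delay"] = 2
# A real setup supplies skrl Model instances and a Memory; the names here are placeholders.
# agent = TD3(models=models, memory=memory, observation_space=obs_space,
#             action_space=act_space, device="cuda", cfg=cfg)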
Toni-SM/skrl/skrl/agents/torch/td3/td3.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl import logger from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] TD3_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "policy_delay": 2, # policy delay update with respect to critic update "smooth_regularization_noise": None, # smooth noise for regularization "smooth_regularization_clip": 0.5, # clip for smooth regularization "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class TD3(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Twin Delayed DDPG (TD3) https://arxiv.org/abs/1802.09477 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(TD3_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 if self.target_policy is not None and self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._policy_delay = self.cfg["policy_delay"] self._critic_update_counter = 0 self._smooth_regularization_noise = self.cfg["smooth_regularization_noise"] self._smooth_regularization_clip = self.cfg["smooth_regularization_clip"] if self._smooth_regularization_noise is None: logger.warning("agents:TD3: No smooth regularization noise specified to reduce variance during training") self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, 
**self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # clip noise bounds if self.action_space is not None: self.clip_actions_min = torch.tensor(self.action_space.low, device=self.device) self.clip_actions_max = torch.tensor(self.action_space.high, device=self.device) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample deterministic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") # add exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * (self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale noises.mul_(scale) # modify actions actions.add_(noises) actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", torch.max(noises).item()) self.track_data("Exploration / Exploration noise (min)", torch.min(noises).item()) self.track_data("Exploration / Exploration noise (mean)", torch.mean(noises).item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: 
"""Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) with torch.no_grad(): # target policy smoothing next_actions, _, _ = self.target_policy.act({"states": sampled_next_states}, role="target_policy") if self._smooth_regularization_noise is not None: noises = torch.clamp(self._smooth_regularization_noise.sample(next_actions.shape), min=-self._smooth_regularization_clip, max=self._smooth_regularization_clip) next_actions.add_(noises) next_actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # compute target values target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_2") target_q_values = 
torch.min(target_q1_values, target_q2_values) target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic_2") critic_loss = F.mse_loss(critic_1_values, target_values) + F.mse_loss(critic_2_values, target_values) # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), self._grad_norm_clip) self.critic_optimizer.step() # delayed update self._critic_update_counter += 1 if not self._critic_update_counter % self._policy_delay: # compute policy (actor) loss actions, _, _ = self.policy.act({"states": sampled_states}, role="policy") critic_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": actions}, role="critic_1") policy_loss = -critic_values.mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # update target networks self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) self.target_policy.update_parameters(self.policy, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data if not self._critic_update_counter % self._policy_delay: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_1_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_1_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_1_values).item()) self.track_data("Q-network / Q2 (max)", torch.max(critic_2_values).item()) self.track_data("Q-network / Q2 (min)", torch.min(critic_2_values).item()) self.track_data("Q-network / Q2 (mean)", torch.mean(critic_2_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
20,883
Python
48.138823
153
0.612316
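The heart of the TD3 update above is the target computation: target policy smoothing with clipped noise followed by the clipped double-Q backup. The sketch below restates it as a standalone function in plain PyTorch; the Gaussian noise and the callable signatures are assumptions for illustration (the agent itself draws smoothing noise from cfg["smooth_regularization_noise"]).

import torch

def td3_target(rewards, dones, next_states, target_policy, target_q1, target_q2,
               noise_std=0.2, noise_clip=0.5, gamma=0.99, act_low=-1.0, act_high=1.0):
    # Standalone sketch of the TD3 target: smooth the target policy's actions with
    # clipped noise, then back up the minimum of the two target critics.
    # `dones` is expected to be a boolean tensor, as stored in the agent's memory.
    with torch.no_grad():
        next_actions = target_policy(next_states)
        noise = (torch.randn_like(next_actions) * noise_std).clamp(-noise_clip, noise_clip)
        next_actions = (next_actions + noise).clamp(act_low, act_high)
        q_min = torch.min(target_q1(next_states, next_actions),
                          target_q2(next_states, next_actions))
        return rewards + gamma * dones.logical_not() * q_min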
Toni-SM/skrl/skrl/agents/torch/td3/td3_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl import logger from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] TD3_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "policy_delay": 2, # policy delay update with respect to critic update "smooth_regularization_noise": None, # smooth noise for regularization "smooth_regularization_clip": 0.5, # clip for smooth regularization "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class TD3_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Twin Delayed DDPG (TD3) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1802.09477 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(TD3_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 if self.target_policy is not None and self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._policy_delay = self.cfg["policy_delay"] self._critic_update_counter = 0 self._smooth_regularization_noise = self.cfg["smooth_regularization_noise"] self._smooth_regularization_clip = self.cfg["smooth_regularization_clip"] if self._smooth_regularization_noise is None: logger.warning("agents:TD3: No smooth regularization noise specified to reduce variance during training") self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, 
**self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": []} self._rnn_initial_states = {"policy": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # clip noise bounds if self.action_space is not None: self.clip_actions_min = torch.tensor(self.action_space.low, device=self.device) self.clip_actions_max = torch.tensor(self.action_space.high, device=self.device) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample deterministic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) # add exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * 
(self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale noises.mul_(scale) # modify actions actions.add_(noises) actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", torch.max(noises).item()) self.track_data("Exploration / Exploration noise (min)", torch.min(noises).item()) self.track_data("Exploration / Exploration noise (mean)", torch.mean(noises).item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) # update RNN states if self._rnn: # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints 
super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size, sequence_length=self._rnn_sequence_length)[0] rnn_policy = {} if self._rnn: sampled_rnn = self.memory.sample_by_index(names=self._rnn_tensors_names, indexes=self.memory.get_sampling_indexes())[0] rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn], "terminated": sampled_dones} # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) with torch.no_grad(): # target policy smoothing next_actions, _, _ = self.target_policy.act({"states": sampled_next_states, **rnn_policy}, role="target_policy") if self._smooth_regularization_noise is not None: noises = torch.clamp(self._smooth_regularization_noise.sample(next_actions.shape), min=-self._smooth_regularization_clip, max=self._smooth_regularization_clip) next_actions.add_(noises) next_actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # compute target values target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, role="target_critic_2") target_q_values = torch.min(target_q1_values, target_q2_values) target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic_2") critic_loss = F.mse_loss(critic_1_values, target_values) + F.mse_loss(critic_2_values, target_values) # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), self._grad_norm_clip) self.critic_optimizer.step() # delayed update self._critic_update_counter += 1 if not self._critic_update_counter % self._policy_delay: # compute policy (actor) loss actions, _, _ = self.policy.act({"states": sampled_states, **rnn_policy}, role="policy") critic_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": actions, **rnn_policy}, role="critic_1") policy_loss = -critic_values.mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # update target networks self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) self.target_policy.update_parameters(self.policy, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data 
if not self._critic_update_counter % self._policy_delay: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_1_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_1_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_1_values).item()) self.track_data("Q-network / Q2 (max)", torch.max(critic_2_values).item()) self.track_data("Q-network / Q2 (min)", torch.min(critic_2_values).item()) self.track_data("Q-network / Q2 (mean)", torch.mean(critic_2_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
23,172
Python
48.620985
167
0.60996
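What distinguishes TD3_RNN above from plain TD3 is largely bookkeeping of the recurrent states: one tensor per RNN layer, zeroed for the environments whose episode has just terminated. The small sketch below isolates that reset logic; the layer count, environment count and hidden size are illustrative assumptions.

import torch

# hidden states shaped (num_layers, num_envs, hidden_size), one tensor per RNN layer
num_layers, num_envs, hidden_size = 1, 4, 32
rnn_states = [torch.zeros(num_layers, num_envs, hidden_size)]

def reset_finished(rnn_states, terminated):
    # terminated: (num_envs, 1) boolean tensor, as stored in memory
    finished = terminated.nonzero(as_tuple=False)
    if finished.numel():
        for state in rnn_states:
            state[:, finished[:, 0]] = 0          # zero the states of finished environments
    return rnn_states

terminated = torch.tensor([[False], [True], [False], [True]])
rnn_states = reset_finished(rnn_states, terminated)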
Toni-SM/skrl/skrl/agents/torch/ddpg/ddpg.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] DDPG_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class DDPG(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Deep Deterministic Policy Gradient (DDPG) https://arxiv.org/abs/1509.02971 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(DDPG_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic = self.models.get("critic", None) self.target_critic = self.models.get("target_critic", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic"] = self.critic self.checkpoint_modules["target_critic"] = self.target_critic if self.target_policy is not None and self.target_critic is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic.update_parameters(self.critic, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if self.policy is not None and self.critic is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, 
dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # clip noise bounds if self.action_space is not None: self.clip_actions_min = torch.tensor(self.action_space.low, device=self.device) self.clip_actions_max = torch.tensor(self.action_space.high, device=self.device) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample deterministic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") # add exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * (self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale noises.mul_(scale) # modify actions actions.add_(noises) actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", torch.max(noises).item()) self.track_data("Exploration / Exploration noise (min)", torch.min(noises).item()) self.track_data("Exploration / Exploration noise (mean)", torch.mean(noises).item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, 
actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_actions, _, _ = self.target_policy.act({"states": sampled_next_states}, role="target_policy") target_q_values, _, _ = self.target_critic.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic") target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_values, _, _ = self.critic.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic") critic_loss = F.mse_loss(critic_values, target_values) # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.critic.parameters(), self._grad_norm_clip) self.critic_optimizer.step() # compute policy (actor) loss actions, _, _ = self.policy.act({"states": sampled_states}, role="policy") critic_values, _, _ = self.critic.act({"states": sampled_states, "taken_actions": actions}, role="critic") policy_loss = -critic_values.mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # update target networks self.target_policy.update_parameters(self.policy, polyak=self._polyak) self.target_critic.update_parameters(self.critic, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", 
critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
17,961
Python
45.776042
148
0.613941
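The `act` method in the DDPG source above scales the exploration noise linearly from `initial_scale` down to `final_scale` over `exploration_timesteps` and then clamps the noisy actions to the action-space bounds. A minimal standalone sketch of that schedule (the function name `exploration_scale` and the Gaussian noise stand-in are illustrative, not part of skrl):

```python
import torch

# linear decay of the exploration-noise scale, as in DDPG.act above:
# it reaches final_scale exactly at exploration_timesteps and stays there
def exploration_scale(timestep: int,
                      exploration_timesteps: int,
                      initial_scale: float = 1.0,
                      final_scale: float = 1e-3) -> float:
    if timestep > exploration_timesteps:
        return final_scale
    return (1 - timestep / exploration_timesteps) * (initial_scale - final_scale) + final_scale

# apply the scaled noise and clip to the action-space bounds, as the agent does
actions = torch.tensor([[0.4, -0.9]])
noises = torch.randn_like(actions) * exploration_scale(timestep=100, exploration_timesteps=1000)
clipped = (actions + noises).clamp(min=-1.0, max=1.0)
print(clipped)
```

With the config defaults shown above (initial scale 1.0, final scale 1e-3), the noise floor is reached at `exploration_timesteps` and kept for the rest of training.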
Toni-SM/skrl/skrl/agents/torch/ddpg/__init__.py
from skrl.agents.torch.ddpg.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.agents.torch.ddpg.ddpg_rnn import DDPG_RNN
119
Python
38.999987
65
0.815126
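The `__init__.py` above re-exports the agent classes, and the DDPG constructor shown earlier expects a `models` dictionary with `policy`, `target_policy`, `critic` and `target_critic` entries, plus an optional replay memory and a config override. A rough usage sketch following skrl's documented `Model`/`DeterministicMixin` pattern (the network sizes, spaces and `RandomMemory` settings are illustrative assumptions, not taken from this file):

```python
import gymnasium as gym
import torch
import torch.nn as nn

from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, Model


# deterministic actor: maps states to actions in [-1, 1]
class Policy(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(),
                                 nn.Linear(64, self.num_actions), nn.Tanh())

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


# Q-function critic: maps (state, action) pairs to a scalar value
class Critic(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self)
        self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 64), nn.ReLU(),
                                 nn.Linear(64, 1))

    def compute(self, inputs, role):
        return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {}


device = "cpu"
observation_space = gym.spaces.Box(low=-1, high=1, shape=(4,))
action_space = gym.spaces.Box(low=-1, high=1, shape=(2,))

models = {"policy": Policy(observation_space, action_space, device),
          "target_policy": Policy(observation_space, action_space, device),
          "critic": Critic(observation_space, action_space, device),
          "target_critic": Critic(observation_space, action_space, device)}

cfg = DDPG_DEFAULT_CONFIG.copy()
cfg["batch_size"] = 128  # override any of the keys shown in the config dict above

agent = DDPG(models=models,
             memory=RandomMemory(memory_size=10000, num_envs=1, device=device),
             observation_space=observation_space,
             action_space=action_space,
             device=device,
             cfg=cfg)
```

In practice the agent is handed to one of skrl's trainers, which calls `init`, `act`, `record_transition` and the interaction callbacks in the order shown in the sources above.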
Toni-SM/skrl/skrl/agents/torch/ddpg/ddpg_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] DDPG_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class DDPG_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Deep Deterministic Policy Gradient (DDPG) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1509.02971 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(DDPG_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic = self.models.get("critic", None) self.target_critic = self.models.get("target_critic", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic"] = self.critic self.checkpoint_modules["target_critic"] = self.target_critic if self.target_policy is not None and self.target_critic is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic.update_parameters(self.critic, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if self.policy is not None and self.critic is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, 
dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": []} self._rnn_initial_states = {"policy": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # clip noise bounds if self.action_space is not None: self.clip_actions_min = torch.tensor(self.action_space.low, device=self.device) self.clip_actions_max = torch.tensor(self.action_space.high, device=self.device) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample deterministic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) # add exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * (self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale noises.mul_(scale) # modify actions actions.add_(noises) actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", torch.max(noises).item()) self.track_data("Exploration / Exploration noise (min)", torch.min(noises).item()) self.track_data("Exploration / Exploration noise (mean)", torch.mean(noises).item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, 
truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) # update RNN states if self._rnn: # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size, sequence_length=self._rnn_sequence_length)[0] rnn_policy = {} if self._rnn: sampled_rnn = self.memory.sample_by_index(names=self._rnn_tensors_names, indexes=self.memory.get_sampling_indexes())[0] rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn], "terminated": sampled_dones} # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) 
sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_actions, _, _ = self.target_policy.act({"states": sampled_next_states, **rnn_policy}, role="target_policy") target_q_values, _, _ = self.target_critic.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, role="target_critic") target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_values, _, _ = self.critic.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic") critic_loss = F.mse_loss(critic_values, target_values) # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.critic.parameters(), self._grad_norm_clip) self.critic_optimizer.step() # compute policy (actor) loss actions, _, _ = self.policy.act({"states": sampled_states, **rnn_policy}, role="policy") critic_values, _, _ = self.critic.act({"states": sampled_states, "taken_actions": actions, **rnn_policy}, role="critic") policy_loss = -critic_values.mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # update target networks self.target_policy.update_parameters(self.policy, polyak=self._polyak) self.target_critic.update_parameters(self.critic, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
20,222
Python
46.471831
162
0.611018
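`DDPG_RNN.record_transition` above carries the policy's hidden states across steps and zeroes the entries of environments whose episodes just terminated before reusing them as the next initial states. A small standalone sketch of that bookkeeping (the tensor shapes are illustrative):

```python
import torch

# hidden states are kept per environment (layers x num_envs x hidden_size);
# the columns of terminated environments are zeroed before the final states
# become the initial states of the next step, as in record_transition above
num_layers, num_envs, hidden_size = 1, 4, 8
rnn_final_state = torch.randn(num_layers, num_envs, hidden_size)
terminated = torch.tensor([[0], [1], [0], [1]], dtype=torch.bool)

finished_episodes = terminated.nonzero(as_tuple=False)
if finished_episodes.numel():
    rnn_final_state[:, finished_episodes[:, 0]] = 0

rnn_initial_state = rnn_final_state  # carried over to the next act() call
print(rnn_initial_state[0, :, 0])    # environments 1 and 3 start from zeroed states
```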
Toni-SM/skrl/skrl/agents/torch/dqn/dqn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import math import gym import gymnasium import torch import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] DQN_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "update_interval": 1, # agent update interval "target_update_interval": 10, # target network update interval "exploration": { "initial_epsilon": 1.0, # initial epsilon for epsilon-greedy exploration "final_epsilon": 0.05, # final epsilon for epsilon-greedy exploration "timesteps": 1000, # timesteps for epsilon-greedy decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class DQN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Deep Q-Network (DQN) https://arxiv.org/abs/1312.5602 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(DQN_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.q_network = self.models.get("q_network", None) self.target_q_network = self.models.get("target_q_network", None) # checkpoint models self.checkpoint_modules["q_network"] = self.q_network self.checkpoint_modules["target_q_network"] = self.target_q_network if self.target_q_network is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_q_network.freeze_parameters(True) # update target networks (hard update) self.target_q_network.update_parameters(self.q_network, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._update_interval = self.cfg["update_interval"] self._target_update_interval = self.cfg["target_update_interval"] self._exploration_initial_epsilon = self.cfg["exploration"]["initial_epsilon"] self._exploration_final_epsilon = self.cfg["exploration"]["final_epsilon"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizer and learning rate scheduler if self.q_network is not None: self.optimizer = torch.optim.Adam(self.q_network.parameters(), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.int64) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: 
int :return: Actions :rtype: torch.Tensor """ states = self._state_preprocessor(states) if not self._exploration_timesteps: return torch.argmax(self.q_network.act({"states": states}, role="q_network")[0], dim=1, keepdim=True), None, None # sample random actions actions = self.q_network.random_act({"states": states}, role="q_network")[0] if timestep < self._random_timesteps: return actions, None, None # sample actions with epsilon-greedy policy epsilon = self._exploration_final_epsilon + (self._exploration_initial_epsilon - self._exploration_final_epsilon) \ * math.exp(-1.0 * timestep / self._exploration_timesteps) indexes = (torch.rand(states.shape[0], device=self.device) >= epsilon).nonzero().view(-1) if indexes.numel(): actions[indexes] = torch.argmax(self.q_network.act({"states": states[indexes]}, role="q_network")[0], dim=1, keepdim=True) # record epsilon self.track_data("Exploration / Exploration epsilon", epsilon) return actions, None, None def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts and not timestep % self._update_interval: self._update(timestep, timesteps) # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, 
sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self.tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_q_values, _, _ = self.target_q_network.act({"states": sampled_next_states}, role="target_q_network") target_q_values = torch.max(next_q_values, dim=-1, keepdim=True)[0] target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute Q-network loss q_values = torch.gather(self.q_network.act({"states": sampled_states}, role="q_network")[0], dim=1, index=sampled_actions.long()) q_network_loss = F.mse_loss(q_values, target_values) # optimize Q-network self.optimizer.zero_grad() q_network_loss.backward() self.optimizer.step() # update target network if not timestep % self._target_update_interval: self.target_q_network.update_parameters(self.q_network, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # record data self.track_data("Loss / Q-network loss", q_network_loss.item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
14,654
Python
44.092308
134
0.617101
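`DQN.act` above mixes random and greedy actions with an epsilon that decays exponentially from `initial_epsilon` toward `final_epsilon` over the configured `timesteps`. A standalone sketch of that schedule using the config defaults shown earlier (1.0, 0.05 and 1000):

```python
import math

# exponential epsilon decay, as in DQN.act above
def epsilon(timestep: int, initial: float = 1.0, final: float = 0.05,
            decay_timesteps: int = 1000) -> float:
    return final + (initial - final) * math.exp(-1.0 * timestep / decay_timesteps)

for t in (0, 500, 1000, 3000):
    print(t, round(epsilon(t), 4))  # 1.0, ~0.6262, ~0.3995, ~0.0973
```

Note that, unlike the linear noise schedule in DDPG, this decay only approaches `final_epsilon` asymptotically rather than reaching it at `timesteps`.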
Toni-SM/skrl/skrl/agents/torch/dqn/__init__.py
from skrl.agents.torch.dqn.ddqn import DDQN, DDQN_DEFAULT_CONFIG
from skrl.agents.torch.dqn.dqn import DQN, DQN_DEFAULT_CONFIG
127
Python
41.666653
64
0.811024
Toni-SM/skrl/skrl/agents/torch/dqn/ddqn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import math import gym import gymnasium import torch import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] DDQN_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "update_interval": 1, # agent update interval "target_update_interval": 10, # target network update interval "exploration": { "initial_epsilon": 1.0, # initial epsilon for epsilon-greedy exploration "final_epsilon": 0.05, # final epsilon for epsilon-greedy exploration "timesteps": 1000, # timesteps for epsilon-greedy decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class DDQN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Double Deep Q-Network (DDQN) https://ojs.aaai.org/index.php/AAAI/article/view/10295 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(DDQN_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.q_network = self.models.get("q_network", None) self.target_q_network = self.models.get("target_q_network", None) # checkpoint models self.checkpoint_modules["q_network"] = self.q_network self.checkpoint_modules["target_q_network"] = self.target_q_network if self.target_q_network is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_q_network.freeze_parameters(True) # update target networks (hard update) self.target_q_network.update_parameters(self.q_network, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._update_interval = self.cfg["update_interval"] self._target_update_interval = self.cfg["target_update_interval"] self._exploration_initial_epsilon = self.cfg["exploration"]["initial_epsilon"] self._exploration_final_epsilon = self.cfg["exploration"]["final_epsilon"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizer and learning rate scheduler if self.q_network is not None: self.optimizer = torch.optim.Adam(self.q_network.parameters(), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.int64) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: 
int :return: Actions :rtype: torch.Tensor """ states = self._state_preprocessor(states) if not self._exploration_timesteps: return torch.argmax(self.q_network.act({"states": states}, role="q_network")[0], dim=1, keepdim=True), None, None # sample random actions actions = self.q_network.random_act({"states": states}, role="q_network")[0] if timestep < self._random_timesteps: return actions, None, None # sample actions with epsilon-greedy policy epsilon = self._exploration_final_epsilon + (self._exploration_initial_epsilon - self._exploration_final_epsilon) \ * math.exp(-1.0 * timestep / self._exploration_timesteps) indexes = (torch.rand(states.shape[0], device=self.device) >= epsilon).nonzero().view(-1) if indexes.numel(): actions[indexes] = torch.argmax(self.q_network.act({"states": states[indexes]}, role="q_network")[0], dim=1, keepdim=True) # record epsilon self.track_data("Exploration / Exploration epsilon", epsilon) return actions, None, None def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts and not timestep % self._update_interval: self._update(timestep, timesteps) # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, 
sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self.tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_q_values, _, _ = self.target_q_network.act({"states": sampled_next_states}, role="target_q_network") target_q_values = torch.gather(next_q_values, dim=1, index=torch.argmax(self.q_network.act({"states": sampled_next_states}, \ role="q_network")[0], dim=1, keepdim=True)) target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute Q-network loss q_values = torch.gather(self.q_network.act({"states": sampled_states}, role="q_network")[0], dim=1, index=sampled_actions.long()) q_network_loss = F.mse_loss(q_values, target_values) # optimize Q-network self.optimizer.zero_grad() q_network_loss.backward() self.optimizer.step() # update target network if not timestep % self._target_update_interval: self.target_q_network.update_parameters(self.q_network, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # record data self.track_data("Loss / Q-network loss", q_network_loss.item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
14,774
Python
44.461538
141
0.618993
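The only substantive difference between `DQN._update` and `DDQN._update` above is the target term: DQN takes the maximum of the target network's Q-values, whereas DDQN selects the argmax action with the online network and evaluates it with the target network, which reduces overestimation bias. A side-by-side sketch with random tensors standing in for the two networks' outputs:

```python
import torch

torch.manual_seed(0)

batch, num_actions = 5, 3
online_q = torch.randn(batch, num_actions)   # stands in for q_network(next_states)
target_q = torch.randn(batch, num_actions)   # stands in for target_q_network(next_states)

# DQN target: max over the target network's Q-values
dqn_target = torch.max(target_q, dim=-1, keepdim=True)[0]

# DDQN target: action chosen by the online network, evaluated by the target network
ddqn_target = torch.gather(target_q, dim=1, index=torch.argmax(online_q, dim=1, keepdim=True))

print(dqn_target.squeeze(1))
print(ddqn_target.squeeze(1))  # element-wise never larger than the DQN target
```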
Toni-SM/skrl/skrl/agents/torch/sarsa/__init__.py
from skrl.agents.torch.sarsa.sarsa import SARSA, SARSA_DEFAULT_CONFIG
70
Python
34.499983
69
0.828571
Toni-SM/skrl/skrl/agents/torch/sarsa/sarsa.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] SARSA_DEFAULT_CONFIG = { "discount_factor": 0.99, # discount factor (gamma) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "learning_rate": 0.5, # learning rate (alpha) "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class SARSA(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """State Action Reward State Action (SARSA) https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.17.2539 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(SARSA_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy # configuration self._discount_factor = self.cfg["discount_factor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._learning_rate = self.cfg["learning_rate"] self._rewards_shaper = self.cfg["rewards_shaper"] # create temporary variables needed for storage and computation self._current_states = None self._current_actions = None self._current_rewards = None self._current_next_states = None self._current_dones = None def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": states}, role="policy") # sample actions from policy return self.policy.act({"states": states}, role="policy") def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self._current_states = states self._current_actions = actions self._current_rewards = rewards self._current_next_states = next_states self._current_dones = terminated + truncated if self.memory is not None: self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, 
rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self._update(timestep, timesteps) # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ q_table = self.policy.table() env_ids = torch.arange(self._current_rewards.shape[0]).view(-1, 1) # compute next actions next_actions = self.policy.act({"states": self._current_next_states}, role="policy")[0] # update Q-table q_table[env_ids, self._current_states, self._current_actions] += self._learning_rate \ * (self._current_rewards + self._discount_factor * self._current_dones.logical_not() \ * q_table[env_ids, self._current_next_states, next_actions] \ - q_table[env_ids, self._current_states, self._current_actions])
9,185
Python
40.754545
123
0.609145
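`SARSA._update` above performs the classic on-policy tabular update Q(s, a) ← Q(s, a) + α [r + γ (1 − done) Q(s', a') − Q(s, a)], indexing the Q-table per environment. A single-environment sketch with an illustrative table and transition:

```python
import torch

# tabular SARSA update for one environment; table sizes and the transition are illustrative
num_envs, num_states, num_actions = 1, 6, 3
q_table = torch.zeros(num_envs, num_states, num_actions)
alpha, gamma = 0.5, 0.99

env_ids = torch.arange(num_envs).view(-1, 1)
s = torch.tensor([[2]]); a = torch.tensor([[1]])            # current state/action
r = torch.tensor([[1.0]]); done = torch.tensor([[False]])
s_next = torch.tensor([[3]]); a_next = torch.tensor([[0]])  # action the policy picks in s'

q_table[env_ids, s, a] += alpha * (r + gamma * done.logical_not() * q_table[env_ids, s_next, a_next]
                                   - q_table[env_ids, s, a])
print(q_table[0, 2, 1])  # 0.5 after the first update
```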
Toni-SM/skrl/skrl/agents/torch/a2c/a2c.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] A2C_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "mini_batches": 1, # number of mini batches to use for updating "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "entropy_loss_scale": 0.0, # entropy loss scaling factor "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class A2C(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Advantage Actor Critic (A2C) https://arxiv.org/abs/1602.01783 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(A2C_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: if self.policy is self.value: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) else: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) self._tensors_names = ["states", "actions", "log_prob", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None 
def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write 
tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # compute returns and advantages with torch.no_grad(): self.value.train(False) last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float())}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 kl_divergences = [] # mini-batches loop for sampled_states, sampled_actions, sampled_log_prob, sampled_returns, sampled_advantages in sampled_batches: sampled_states = self._state_preprocessor(sampled_states, train=True) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy") # compute approximate KL divergence for KLAdaptive learning rate scheduler if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss policy_loss = -(sampled_advantages * next_log_prob).mean() # 
compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states}, role="value") value_loss = F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip > 0: if self.policy is self.value: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) else: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(torch.tensor(kl_divergences).mean()) else: self.scheduler.step() # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / len(sampled_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / len(sampled_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / len(sampled_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
19,522
Python
44.93647
134
0.604702
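For reference, a minimal usage sketch for the A2C agent defined in the file above, assuming a single-environment Gymnasium task and skrl's documented model mixins. The `Policy` and `Value` classes, the environment id, and all hyperparameter values are illustrative choices made here for the sketch, not something prescribed by the file itself.

# Illustrative sketch (not part of the dataset record above): training A2C on
# a single Gymnasium environment with skrl's standard building blocks.
import gymnasium as gym
import torch
import torch.nn as nn

from skrl.agents.torch.a2c import A2C, A2C_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.trainers.torch import SequentialTrainer


class Policy(GaussianMixin, Model):
    """Small Gaussian policy used only for this sketch."""
    def __init__(self, observation_space, action_space, device):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions=False)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ELU(),
                                 nn.Linear(64, self.num_actions))
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), self.log_std_parameter, {}


class Value(DeterministicMixin, Model):
    """Small state-value model used only for this sketch."""
    def __init__(self, observation_space, action_space, device):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions=False)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ELU(),
                                 nn.Linear(64, 1))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


env = wrap_env(gym.make("Pendulum-v1"))
device = env.device

# rollout memory: must hold at least cfg["rollouts"] transitions per environment
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)

models = {"policy": Policy(env.observation_space, env.action_space, device),
          "value": Value(env.observation_space, env.action_space, device)}

cfg = A2C_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 16        # matches memory_size above
cfg["mini_batches"] = 1

agent = A2C(models=models, memory=memory, cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space, device=device)

SequentialTrainer(cfg={"timesteps": 10000}, env=env, agents=agent).train()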
Toni-SM/skrl/skrl/agents/torch/a2c/a2c_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] A2C_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "mini_batches": 1, # number of mini batches to use for updating "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "entropy_loss_scale": 0.0, # entropy loss scaling factor "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class A2C_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Advantage Actor Critic (A2C) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1602.01783 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(A2C_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: if self.policy is self.value: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) else: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) self._tensors_names = ["states", "actions", "terminated", "log_prob", "returns", "advantages"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] 
# used for sampling during training self._rnn_final_states = {"policy": [], "value": []} self._rnn_initial_states = {"policy": [], "value": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # value if self.value is not None: if self.policy is self.value: self._rnn_initial_states["value"] = self._rnn_initial_states["policy"] else: for i, size in enumerate(self.value.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_value_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_value_{i}") # default RNN states self._rnn_initial_states["value"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions # TODO: fix for stochasticity, rnn and log_prob if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") self._current_log_prob = log_prob if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, 
timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} values, _, outputs = self.value.act({"states": self._state_preprocessor(states), **rnn}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) if self.policy is not self.value: rnn_states.update({f"rnn_value_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["value"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) # update RNN states if self._rnn: self._rnn_final_states["value"] = self._rnn_final_states["policy"] if self.policy is self.value else outputs.get("rnn", []) # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 if self.policy is not self.value: for rnn_state in self._rnn_final_states["value"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type 
lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # compute returns and advantages with torch.no_grad(): self.value.train(False) rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), **rnn}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) rnn_policy, rnn_value = {}, {} if self._rnn: sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 kl_divergences = [] # mini-batches loop for i, (sampled_states, sampled_actions, sampled_dones, sampled_log_prob, sampled_returns, sampled_advantages) in enumerate(sampled_batches): if self._rnn: if self.policy is self.value: rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches[i]], "terminated": sampled_dones} rnn_value = rnn_policy else: rnn_policy = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "policy" in n], "terminated": sampled_dones} rnn_value = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "value" in n], "terminated": sampled_dones} sampled_states = self._state_preprocessor(sampled_states, train=True) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="policy") # compute approximate KL divergence for KLAdaptive learning rate scheduler if isinstance(self.scheduler, KLAdaptiveLR): with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss policy_loss = -(sampled_advantages * next_log_prob).mean() # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states, **rnn_value}, role="value") value_loss = F.mse_loss(sampled_returns, 
predicted_values) # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip > 0: if self.policy is self.value: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) else: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(torch.tensor(kl_divergences).mean()) else: self.scheduler.step() # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / len(sampled_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / len(sampled_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / len(sampled_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
23,813
Python
47.012097
172
0.598119
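The RNN-aware agent above depends on each model reporting its recurrent state shapes through `get_specification()`. The following is a rough, self-contained sketch of that model-side contract, under the assumption that skrl's documented recurrent-model pattern is followed (state sizes are `(D * num_layers, N, Hout)` tuples as in `torch.nn.GRU`); the class name and numeric values are placeholders.

# Illustrative model-side contract assumed by A2C_RNN (not part of the file).
# A GRU exposes one entry per state tensor in "sizes"; an LSTM would expose
# two (hidden and cell states).
class GRUPolicySpec:
    """Minimal stand-in showing only the specification part of an RNN policy."""

    def __init__(self, num_envs=4, num_layers=1, hidden_size=64, sequence_length=16):
        self.num_envs = num_envs
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.sequence_length = sequence_length

    def get_specification(self):
        # (D * num_layers, N, Hout), matching torch.nn.GRU conventions
        return {"rnn": {"sequence_length": self.sequence_length,
                        "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}}


spec = GRUPolicySpec().get_specification()["rnn"]
# A2C_RNN.init() allocates one memory tensor per entry in spec["sizes"],
# dropping the environment dimension: size=(num_layers, hidden_size)
print(spec["sequence_length"], [(s[0], s[2]) for s in spec["sizes"]])

Sampling during training then uses `sequence_length` so that whole sub-sequences, together with their stored initial hidden states, end up in the same mini-batch.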
Toni-SM/skrl/skrl/agents/torch/a2c/__init__.py
from skrl.agents.torch.a2c.a2c import A2C, A2C_DEFAULT_CONFIG
from skrl.agents.torch.a2c.a2c_rnn import A2C_RNN
112
Python
36.666654
61
0.803571
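The package initializer above only re-exports the two agent classes, so both import paths resolve to the same objects; a tiny illustrative check:

# Both spellings refer to the same classes thanks to the re-exports above.
from skrl.agents.torch.a2c import A2C, A2C_RNN, A2C_DEFAULT_CONFIG
from skrl.agents.torch.a2c.a2c import A2C as A2C_direct

assert A2C is A2C_direct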
Toni-SM/skrl/skrl/agents/torch/ppo/ppo_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] PPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class PPO_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Proximal Policy Optimization (PPO) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1707.06347 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(PPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._ratio_clip = self.cfg["ratio_clip"] self._value_clip = self.cfg["value_clip"] self._clip_predicted_values = self.cfg["clip_predicted_values"] self._value_loss_scale = self.cfg["value_loss_scale"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._kl_threshold = self.cfg["kl_threshold"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: if self.policy is self.value: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) else: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ 
super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "terminated", "log_prob", "values", "returns", "advantages"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": [], "value": []} self._rnn_initial_states = {"policy": [], "value": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # value if self.value is not None: if self.policy is self.value: self._rnn_initial_states["value"] = self._rnn_initial_states["policy"] else: for i, size in enumerate(self.value.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_value_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_value_{i}") # default RNN states self._rnn_initial_states["value"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions # TODO: fix for stochasticity, rnn and log_prob if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") self._current_log_prob = log_prob if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, 
timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} values, _, outputs = self.value.act({"states": self._state_preprocessor(states), **rnn}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) if self.policy is not self.value: rnn_states.update({f"rnn_value_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["value"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) # update RNN states if self._rnn: self._rnn_final_states["value"] = self._rnn_final_states["policy"] if self.policy is self.value else outputs.get("rnn", []) # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 if self.policy is not self.value: for rnn_state in self._rnn_final_states["value"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # 
write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # compute returns and advantages with torch.no_grad(): self.value.train(False) rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), **rnn}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) rnn_policy, rnn_value = {}, {} if self._rnn: sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): kl_divergences = [] # mini-batches loop for i, (sampled_states, sampled_actions, sampled_dones, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages) in enumerate(sampled_batches): if self._rnn: if self.policy is self.value: rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches[i]], "terminated": sampled_dones} rnn_value = rnn_policy else: rnn_policy = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "policy" in n], 
"terminated": sampled_dones} rnn_value = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "value" in n], "terminated": sampled_dones} sampled_states = self._state_preprocessor(sampled_states, train=not epoch) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="policy") # compute approximate KL divergence with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # early stopping with KL divergence if self._kl_threshold and kl_divergence > self._kl_threshold: break # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss ratio = torch.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip) policy_loss = -torch.min(surrogate, surrogate_clipped).mean() # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states, **rnn_value}, role="value") if self._clip_predicted_values: predicted_values = sampled_values + torch.clip(predicted_values - sampled_values, min=-self._value_clip, max=self._value_clip) value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip > 0: if self.policy is self.value: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) else: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(torch.tensor(kl_divergences).mean()) else: self.scheduler.step() # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
25,800
Python
48.332696
176
0.592287
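PPO_RNN is normally tuned through its configuration dictionary rather than by subclassing. Below is a hedged sketch of overriding the defaults defined above, reusing `env`, `models`, `memory`, and `device` objects created as in the earlier A2C sketch (they are placeholders here); `KLAdaptiveLR` and `RunningStandardScaler` are existing skrl utilities, while all numeric values are arbitrary examples.

# Illustrative configuration override for PPO_RNN (values are arbitrary).
from skrl.agents.torch.ppo import PPO_RNN, PPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.resources.schedulers.torch import KLAdaptiveLR

cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 1024                    # should match the memory size per environment
cfg["learning_epochs"] = 8
cfg["mini_batches"] = 8
cfg["ratio_clip"] = 0.2                   # clipped surrogate objective coefficient
cfg["learning_rate_scheduler"] = KLAdaptiveLR
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}

agent = PPO_RNN(models=models, memory=memory, cfg=cfg,
                observation_space=env.observation_space,
                action_space=env.action_space, device=device)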