StanfordVL/OmniGibson/docs/gen_ref_pages.py
"""Generate the code reference pages.""" from pathlib import Path import mkdocs_gen_files nav = mkdocs_gen_files.Nav() src = "omnigibson" for path in sorted(Path(src).rglob("*.py")): # module_path = path.relative_to(src).with_suffix("") # doc_path = path.relative_to(src).with_suffix(".md") # full_doc_path = Path("reference", doc_path) # parts = tuple(module_path.parts) if parts[-1] == "__init__": parts = parts[:-1] doc_path = doc_path.with_name("index.md") full_doc_path = full_doc_path.with_name("index.md") elif parts[-1] == "__main__": continue # print(f"parts: {parts}") if parts == (): continue # parts = (src,) # input(f"parts: {parts}") nav[parts] = doc_path.as_posix() with mkdocs_gen_files.open(full_doc_path, "w") as fd: ident = ".".join(parts) fd.write(f"# {parts[-1]}\n\n::: {ident}") mkdocs_gen_files.set_edit_path(full_doc_path, Path("../../") / path) with mkdocs_gen_files.open("reference/SUMMARY.md", "w") as nav_file: nav_file.writelines(nav.build_literate_nav()) # parts = list(module_path.parts) # if parts[-1] == "__init__": # # parts = parts[:-1] # elif parts[-1] == "__main__": # continue # with mkdocs_gen_files.open(full_doc_path, "w") as fd: # # identifier = ".".join(parts) # # print("::: " + identifier, file=fd) # # mkdocs_gen_files.set_edit_path(full_doc_path, Path("../") / path) #
StanfordVL/OmniGibson/docs/index.md
--- title: Title template: index.html ---
StanfordVL/OmniGibson/docs/modules/scene.md
---
icon: material/home-outline
---

# 🏠 **Scene**

Scenes are one level higher than objects: a scene consists of multiple objects that interact with each other. OmniGibson currently supports two types of scenes:

- `EmptyScene`: This is an empty scene that can be used to build custom scenes. It does not contain any pre-defined objects.
- `InteractiveTraversableScene`: This type of scene is interactive and traversable. It comes with traversable maps that enable robots to perform navigation tasks. Users can choose from the 50 predefined scenes in the OmniGibson dataset.

Here's a list of all the `InteractiveTraversableScene` scenes available in OmniGibson:

<table markdown="span">
<tr> <td valign="top" width="30%"> **`Beechwood_0_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Beechwood_0_garden.png" alt="Beechwood_0_garden"> </td> <td> <img src="../assets/scenes/scene-views/Beechwood_0_garden.png" alt="Beechwood_0_garden"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Beechwood_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Beechwood_0_int.png" alt="Beechwood_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Beechwood_0_int.png" alt="Beechwood_0_int"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Beechwood_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Beechwood_1_int.png" alt="Beechwood_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Beechwood_1_int.png" alt="Beechwood_1_int"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Benevolence_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Benevolence_0_int.png" alt="Benevolence_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Benevolence_0_int.png" alt="Benevolence_0_int"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Benevolence_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Benevolence_1_int.png" alt="Benevolence_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Benevolence_1_int.png" alt="Benevolence_1_int"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Benevolence_2_int`** </td> <td> <img src="../assets/scenes/birds-eye-views/Benevolence_2_int.png" alt="Benevolence_2_int"> </td> <td> <img src="../assets/scenes/scene-views/Benevolence_2_int.png" alt="Benevolence_2_int"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Ihlen_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Ihlen_0_int.png" alt="Ihlen_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Ihlen_0_int.png" alt="Ihlen_0_int"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Ihlen_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Ihlen_1_int.png" alt="Ihlen_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Ihlen_1_int.png" alt="Ihlen_1_int"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Merom_0_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Merom_0_garden.png" alt="Merom_0_garden"> </td> <td> <img src="../assets/scenes/scene-views/Merom_0_garden.png" alt="Merom_0_garden"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Merom_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Merom_0_int.png" alt="Merom_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Merom_0_int.png" alt="Merom_0_int"> </td> </tr>
<tr> <td valign="top" width="30%"> **`Merom_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Merom_1_int.png" alt="Merom_1_int"> </td> <td> <img 
src="../assets/scenes/scene-views/Merom_1_int.png" alt="Merom_1_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Pomaria_0_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Pomaria_0_garden.png" alt="Pomaria_0_garden"> </td> <td> <img src="../assets/scenes/scene-views/Pomaria_0_garden.png" alt="Pomaria_0_garden"> </td> </tr> <tr> <td valign="top" width="30%"> **`Pomaria_0_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Pomaria_0_int.png" alt="Pomaria_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Pomaria_0_int.png" alt="Pomaria_0_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Pomaria_1_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Pomaria_1_int.png" alt="Pomaria_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Pomaria_1_int.png" alt="Pomaria_1_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Pomaria_2_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Pomaria_2_int.png" alt="Pomaria_2_int"> </td> <td> <img src="../assets/scenes/scene-views/Pomaria_2_int.png" alt="Pomaria_2_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Rs_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Rs_garden.png" alt="Rs_garden"> </td> <td> <img src="../assets/scenes/scene-views/Rs_garden.png" alt="Rs_garden"> </td> </tr> <tr> <td valign="top" width="30%"> **`Rs_int`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Rs_int.png" alt="Rs_int"> </td> <td> <img src="../assets/scenes/scene-views/Rs_int.png" alt="Rs_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`Wainscott_0_garden`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Wainscott_0_garden.png" alt="Wainscott_0_garden"> </td> <td> <img src="../assets/scenes/scene-views/Wainscott_0_garden.png" alt="Wainscott_0_garden"> </td> </tr> <tr> <td valign="top" width="30%"> [**`Wainscott_0_int`**](../reference/scene/Wainscott_0_int.html)<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Wainscott_0_int.png" alt="Wainscott_0_int"> </td> <td> <img src="../assets/scenes/scene-views/Wainscott_0_int.png" alt="Wainscott_0_int"> </td> </tr> <tr> <td valign="top" width="30%"> [**`Wainscott_1_int`**](../reference/scene/Wainscott_1_int.html)<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/Wainscott_1_int.png" alt="Wainscott_1_int"> </td> <td> <img src="../assets/scenes/scene-views/Wainscott_1_int.png" alt="Wainscott_1_int"> </td> </tr> <tr> <td valign="top" width="30%"> **`grocery_store_asian`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/grocery_store_asian.png" alt="grocery_store_asian"> </td> <td> <img src="../assets/scenes/scene-views/grocery_store_asian.png" alt="grocery_store_asian"> </td> </tr> <tr> <td valign="top" width="30%"> **`grocery_store_cafe`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/grocery_store_cafe.png" alt="grocery_store_cafe"> </td> <td> <img src="../assets/scenes/scene-views/grocery_store_cafe.png" alt="grocery_store_cafe"> </td> </tr> <tr> <td valign="top" width="30%"> **`grocery_store_convenience`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/grocery_store_convenience.png" alt="grocery_store_convenience"> </td> <td> <img src="../assets/scenes/scene-views/grocery_store_convenience.png" alt="grocery_store_convenience"> </td> </tr> <tr> <td valign="top" width="30%"> **`grocery_store_half_stocked`**<br><br> </td> <td> <img 
src="../assets/scenes/birds-eye-views/grocery_store_half_stocked.png" alt="grocery_store_half_stocked"> </td> <td> <img src="../assets/scenes/scene-views/grocery_store_half_stocked.png" alt="grocery_store_half_stocked"> </td> </tr> <tr> <td valign="top" width="30%"> **`hall_arch_wood`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hall_arch_wood.png" alt="hall_arch_wood"> </td> <td> <img src="../assets/scenes/scene-views/hall_arch_wood.png" alt="hall_arch_wood"> </td> </tr> <tr> <td valign="top" width="30%"> **`hall_conference_large`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hall_conference_large.png" alt="hall_conference_large"> </td> <td> <img src="../assets/scenes/scene-views/hall_conference_large.png" alt="hall_conference_large"> </td> </tr> <tr> <td valign="top" width="30%"> **`hall_glass_ceiling`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hall_glass_ceiling.png " alt="hall_glass_ceiling"> </td> <td> <img src="../assets/scenes/scene-views/hall_glass_ceiling.png" alt="hall_glass_ceiling"> </td> </tr> <tr> <td valign="top" width="30%"> **`hall_train_station`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hall_train_station.png" alt="hall_train_station"> </td> <td> <img src="../assets/scenes/scene-views/hall_train_station.png" alt="hall_train_station"> </td> </tr> <tr> <td valign="top" width="30%"> **`hotel_gym_spa`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hotel_gym_spa.png" alt="hotel_gym_spa"> </td> <td> <img src="../assets/scenes/scene-views/hotel_gym_spa.png" alt="hotel_gym_spa"> </td> </tr> <tr> <td valign="top" width="30%"> **`hotel_suite_large`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hotel_suite_large.png" alt="hotel_suite_large"> </td> <td> <img src="../assets/scenes/scene-views/hotel_suite_large.png" alt="hotel_suite_large"> </td> </tr> <tr> <td valign="top" width="30%"> **`hotel_suite_small`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/hotel_suite_small.png" alt="hotel_suite_small"> </td> <td> <img src="../assets/scenes/scene-views/hotel_suite_small.png" alt="hotel_suite_small"> </td> </tr> <tr> <td valign="top" width="30%"> **`house_double_floor_lower`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/house_double_floor_lower.png" alt="house_double_floor_lower"> </td> <td> <img src="../assets/scenes/scene-views/house_double_floor_lower.png" alt="house_double_floor_lower"> </td> </tr> <tr> <td valign="top" width="30%"> **`house_double_floor_upper`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/house_double_floor_upper.png" alt="house_double_floor_upper"> </td> <td> <img src="../assets/scenes/scene-views/house_double_floor_upper.png" alt="house_double_floor_upper"> </td> </tr> <tr> <td valign="top" width="30%"> **`house_single_floor`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/house_single_floor.png" alt="house_single_floor"> </td> <td> <img src="../assets/scenes/scene-views/house_single_floor.png" alt="house_single_floor"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_bike`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_bike.png" alt="office_bike"> </td> <td> <img src="../assets/scenes/scene-views/office_bike.png" alt="office_bike"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_cubicles_left`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_cubicles_left.png" alt="office_cubicles_left"> </td> <td> <img 
src="../assets/scenes/scene-views/office_cubicles_left.png" alt="office_cubicles_left"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_cubicles_right`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_cubicles_right.png" alt="office_cubicles_right"> </td> <td> <img src="../assets/scenes/scene-views/office_cubicles_right.png" alt="office_cubicles_right"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_large`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_large.png" alt="office_large"> </td> <td> <img src="../assets/scenes/scene-views/office_large.png" alt="office_large"> </td> </tr> <tr> <td valign="top" width="30%"> **`office_vendor_machine`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/office_vendor_machine.png" alt="office_vendor_machine"> </td> <td> <img src="../assets/scenes/scene-views/office_vendor_machine.png" alt="office_vendor_machine"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_asian`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_asian.png" alt="restaurant_asian"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_asian.png" alt="restaurant_asian"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_brunch`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_brunch.png" alt="restaurant_brunch"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_brunch.png" alt="restaurant_brunch"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_cafeteria`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_cafeteria.png" alt="restaurant_cafeteria"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_cafeteria.png" alt="restaurant_cafeteria"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_diner`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_diner.png" alt="restaurant_diner"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_diner.png" alt="restaurant_diner"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_hotel`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_hotel.png" alt="restaurant_hotel"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_hotel.png" alt="restaurant_hotel"> </td> </tr> <tr> <td valign="top" width="30%"> **`restaurant_urban`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/restaurant_urban.png" alt="restaurant_urban"> </td> <td> <img src="../assets/scenes/scene-views/restaurant_urban.png" alt="restaurant_urban"> </td> </tr> <tr> <td valign="top" width="30%"> **`school_biology`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_biology.png" alt="school_biology"> </td> <td> <img src="../assets/scenes/scene-views/school_biology.png" alt="school_biology"> </td> </tr> <tr> <td valign="top" width="30%"> **`school_chemistry`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_chemistry.png" alt="school_chemistry"> </td> <td> <img src="../assets/scenes/scene-views/school_chemistry.png" alt="school_chemistry"> </td> </tr> <tr> <td valign="top" width="30%"> **`school_computer_lab_and_infirmary`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_computer_lab_and_infirmary.png" alt="school_computer_lab_and_infirmary"> </td> <td> <img src="../assets/scenes/scene-views/school_computer_lab_and_infirmary.png" alt="school_computer_lab_and_infirmary"> </td> </tr> <tr> <td valign="top" width="30%"> 
**`school_geography`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_geography.png" alt="school_geography"> </td> <td> <img src="../assets/scenes/scene-views/school_geography.png" alt="school_geography"> </td> </tr> <tr> <td valign="top" width="30%"> **`school_gym`**<br><br> </td> <td> <img src="../assets/scenes/birds-eye-views/school_gym.png" alt="school_gym"> </td> <td> <img src="../assets/scenes/scene-views/school_gym.png" alt="school_gym"> </td> </tr> </table>
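
To load one of the scenes above, specify it in the config passed to the `Environment` constructor. Below is a minimal, illustrative sketch; the exact config keys (e.g. `scene_model`) and constructor arguments should be verified against the `InteractiveTraversableScene` and `Environment` classes in your installed version.

```python3
import omnigibson as og

# Illustrative config sketch: pick any scene name from the table above.
# The "scene" block layout and "scene_model" key are assumed from the standard
# OmniGibson environment config.
cfg = {
    "scene": {
        "type": "InteractiveTraversableScene",
        "scene_model": "Rs_int",
    },
}

env = og.Environment(configs=cfg)
env.reset()
```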
StanfordVL/OmniGibson/docs/modules/controllers.md
---
icon: material/knob
---

# 🎛️ **Controllers**

## Description

In **`OmniGibson`**, `Controller`s convert high-level actions into low-level joint motor (position, velocity, or effort) controls for a subset of an individual [`Robot`](./robots.md)'s joints.

In an [`Environment`](./environment.md) instance, actions are passed to controllers via the `env.step(action)` call, resulting in the following behavior:

<div class="annotate" markdown>
- When `env.step(action)` is called, actions are parsed and passed to the respective robot owned by the environment (`env.robots`) via `robot.apply_action(action)`
- For a given robot, its `action` is parsed and passed to the respective controllers owned by the robot (`robot.controllers`) via `controller.update_goal(command)`
- For a given controller, the inputted `command` is preprocessed (re-scaled and shifted) and then converted into an internally tracked `goal`
- Each time a physics step occurs (1), all controllers compute and deploy their desired joint controls via `controller.compute_control()` towards reaching their respective `goal`s
</div>

1. Note that because environments operate at `action_frequency <= physics_frequency`, a controller may take _multiple_ control steps per single `env.step(action)` call!

**`OmniGibson`** supports multiple types of controllers, which are intended to control a specific subset of a robot's set of joints. Some are more general (such as the `JointController`, which can broadly be applied to any part of a robot), while others are more specific to a robot's morphology (such as the `InverseKinematicsController`, which is intended to control a manipulation robot's end-effector pose).

It is important to note that a single robot can potentially own multiple controllers. For example, `Turtlebot` only owns a single controller (to control its two-wheeled base), whereas the mobile-manipulator `Fetch` robot owns four (one each to control its base, head, trunk + arm, and gripper). This allows for modular action space composition, where fine-grained modification of the action space can be achieved by modifying / swapping out individual controllers. For more information about the specific number of controllers each robot has, please see our [list of robots](./robots.md#models).

## Usage

### Definition

Controllers can be specified in the config that is passed to the `Environment` constructor via the `['robots'][i]['controller_config']` key. This is expected to be a nested dictionary, mapping controller name (1) to the desired controller configuration. For each individual controller dict, the `name` key is required and specifies the desired controller class. Additional keys can be specified and will be passed directly to the specific controller class constructor. An example of a robot controller configuration is shown below in `.yaml` form:
{ .annotate }

1. See `robot.controller_order` for the full list of expected controller names for a given robot
code "single_fetch_controller_config_example.yaml" ``` yaml linenums="1" robots: - type: Fetch controller_config: base: name: DifferentialDriveController arm_0: name: InverseKinematicsController kv: 2.0 gripper_0: name: MultiFingerGripperController mode: binary camera: name: JointController use_delta_commands: False ``` ### Runtime Usually, actions are passed to robots, parsed, and passed to individual controllers via `env.step(action)` --> `robot.apply_action(action)` --> `controller.update_goal(command)`. However, specific controller commands can be directly deployed with this API outside of the `env.step()` loop. A controller's internal state can be cleared by calling `controller.reset()`, and no-op actions can computed via `compute_no_op_goal`. Relevant properties, such as `control_type`, `control_dim`, `command_dim`, etc. are all queryable at runtime as well. ## Types **`OmniGibson`** currently supports 6 controllers, consisting of 2 general joint controllers, 1 locomotion-specific controller, 2 arm manipulation-specific controllers, and 1 gripper-specific controller. Below, we provide a brief overview of each controller type: ### General Controllers These are general-purpose controllers that are agnostic to a robot's morphology, and therefore can be used on any robot. <table markdown="span"> <tr> <td valign="top"> [**`JointController`**](../reference/controllers/joint_controller.html)<br><br> Directly controls individual joints. Either outputs low-level joint position or velocity controls if `use_impedance=False`, otherwise will internally compensate the desired gains with the robot's mass matrix and output joint effort controls.<br><br> <ul> <li>_Command Dim_: n_joints</li> <li>_Command Description_: desired per-joint `[q_0, q_1, ...q_n]` position / velocity / effort setpoints, which are assumed to be absolute joint values unless `use_delta` is set</li> <li>_Control Dim_: n_joints</li> <li>_Control Type_: position / velocity / effort</li> </ul> </td> </tr> <tr> <td valign="top"> [**`NullJointController`**](../reference/controllers/null_joint_controller.html)<br><br> Directly controls individual joints via an internally stored `default_command`. Inputted commands will be ignored unless `default_command` is updated.<br><br> <ul> <li>_Command Dim_: n_joints</li> <li>_Command Description_: `[q_0, ..., q_n]` N/A </li> <li>_Control Dim_: n_joints</li> <li>_Control Type_: position / velocity / effort</li> </ul> </td> </tr> </table> ### Locomotion Controllers These are controllers specifically meant for robots with navigation capabilities. 
<table markdown="span" width="100%"> <tr> <td valign="top" width="100%"> [**`DifferentialDriveController`**](../reference/controllers/dd_controller.html)<br><br> Commands 2-wheeled robots by setting linear / angular velocity setpoints and converting them into per-joint velocity control.<br><br> <ul> <li>_Command Dim_: n_joints</li> <li>_Command Description_: desired `[lin_vel, ang_vel]` setpoints </li> <li>_Control Dim_: 2</li> <li>_Control Type_: velocity</li> </ul> </td> </tr> </table> ### Manipulation Arm Controllers These are controllers specifically meant for robots with manipulation capabilities, and are intended to control a robot's end-effector pose <table markdown="span"> <tr> <td valign="top"> [**`InverseKinematicsController`**](../reference/controllers/ik_controller.html)<br><br> Controls a robot's end-effector by iteratively solving inverse kinematics to output a desired joint configuration to reach the desired end effector pose, and then runs an underlying `JointController` to reach the target joint configuration. Multiple modes are available, and dictate both the command dimension and behavior of the controller. `condition_on_current_position` can be set to seed the IK solver with the robot's current joint state, and `use_impedance` can be set if the robot's per-joint inertia should be taken into account when attempting to reach the target joint configuration.<br><br> Note: Orientation convention is axis-angle `[ax,ay,az]` representation, and commands are expressed in the robot base frame unless otherwise noted.<br><br> <ul> <li>_Command Dim_: 3 / 6</li> <li>_Command Description_: desired pose command, depending on `mode`: <ul> <li>`absolute_pose`: 6DOF `[x,y,z,ax,ay,az]` absolute position, absolute orientation</li> <li>`pose_absolute_ori`: 6DOF `[dx,dy,dz,ax,ay,az]` delta position, absolute orientation</li> <li>`pose_delta_ori`: 6DOF `[dx,dy,dz,dax,day,daz]` delta position, delta orientation</li> <li>`position_fixed_ori`: 3DOF `[dx,dy,dz]` delta position, orientation setpoint is kept as fixed initial absolute orientation</li> <li>`position_compliant_ori`: 3DOF `[dx,dy,dz]` delta position, delta orientation setpoint always kept as 0s (so can drift over time)</li> </ul></li> <li>_Control Dim_: n_arm_joints</li> <li>_Control Type_: position / effort</li> </ul> </td> </tr> <tr> <td valign="top"> [**`OperationalSpaceController`**](../reference/controllers/osc_controller.html)<br><br> Controls a robot's end-effector by applying the [operational space control](https://khatib.stanford.edu/publications/pdfs/Khatib_1987_RA.pdf) algorithm to apply per-joint efforts to perturb the robot's end effector with impedances ("force") along all six (x,y,z,ax,ay,az) axes. Unlike `InverseKinematicsController`, this controller is inherently compliant and especially useful for contact-rich tasks or settings where fine-grained forces are required. 
For robots with >6 arm joints, an additional null command is used as a secondary objective and is defined as joint state `reset_joint_pos`.<br><br> Note: Orientation convention is axis-angle `[ax,ay,az]` representation, and commands are expressed in the robot base frame unless otherwise noted.<br><br> <ul> <li>_Command Dim_: 3 / 6</li> <li>_Command Description_: desired pose command, depending on `mode`: <ul> <li>`absolute_pose`: 6DOF `[x,y,z,ax,ay,az]` absolute position, absolute orientation</li> <li>`pose_absolute_ori`: 6DOF `[dx,dy,dz,ax,ay,az]` delta position, absolute orientation</li> <li>`pose_delta_ori`: 6DOF `[dx,dy,dz,dax,day,daz]` delta position, delta orientation</li> <li>`position_fixed_ori`: 3DOF `[dx,dy,dz]` delta position, orientation setpoint is kept as fixed initial absolute orientation</li> <li>`position_compliant_ori`: 3DOF `[dx,dy,dz]` delta position, delta orientation setpoint always kept as 0s (so can drift over time)</li> </ul></li> <li>_Control Dim_: n_arm_joints</li> <li>_Control Type_: effort</li> </ul> </td> </tr> </table> ### Manipulation Gripper Controllers These are controllers specifically meant for robots with manipulation capabilities, and are intended to control a robot's end-effector gripper <table markdown="span" width="100%"> <tr> <td valign="top" width="100%"> [**`MultiFingerGripperController`**](../reference/controllers/multi_finger_gripper_controller.html)<br><br> Commands a robot's gripper joints, with behavior defined via `mode`. By default, &lt;closed, open&gt; is assumed to correspond to &lt;q_lower_limit, q_upper_limit&gt; for each joint, though this can be manually set via the `closed_qpos` and `open_qpos` arguments.<br><br> <ul> <li>_Command Dim_: 1 / n_gripper_joints</li> <li>_Command Description_: desired gripper command, depending on `mode`: <ul> <li>`binary`: 1DOF `[open / close]` binary command, where &gt;0 corresponds to open unless `inverted` is set, in which case &lt;0 corresponds to open</li> <li>`smooth`: 1DOF `[q]` command, which gets broadcasted across all finger joints</li> <li>`independent`: NDOF `[q_0, ..., q_n]` per-finger joint commands</li> </ul></li> <li>_Control Dim_: n_gripper_joints</li> <li>_Control Type_: position / velocity / effort</li> </ul> </td> </tr> </table>
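
As a complement to the tables above, here is a minimal, hedged sketch of inspecting controllers at runtime, outside of the `env.step()` loop. The `EmptyScene` / `Fetch` config is illustrative only; the queried properties (`control_type`, `command_dim`, `control_dim`) and methods (`reset()`, `update_goal()`) are the ones described in this page and should be verified against your installed version.

```python3
import omnigibson as og

# Illustrative setup: a single Fetch robot in an empty scene (config keys are
# assumptions based on the examples in this documentation).
cfg = {
    "scene": {"type": "EmptyScene"},
    "robots": [{"type": "Fetch", "obs_modalities": ["rgb"]}],
}
env = og.Environment(configs=cfg)
robot = env.robots[0]

# Query each controller's runtime properties described above
for name, controller in robot.controllers.items():
    print(name, controller.control_type, controller.command_dim, controller.control_dim)

# Clear a single controller's internal state
robot.controllers["base"].reset()

# Per this page, a command can also be sent directly to a controller via
# controller.update_goal(command), where the command size must match its
# command_dim (exact signature may differ across versions).
```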
StanfordVL/OmniGibson/docs/modules/object.md
---
icon: material/food-apple-outline
---

# 🍎 **Object**

Objects, such as furniture, are essential to building manipulation environments. **`OmniGibson`** represents objects on top of USD (Universal Scene Description): object models can be loaded from assets on disk (e.g., a custom USD file or the OmniGibson dataset) or created programmatically at runtime (e.g., primitive shapes and lights).

## Usage

### Importing Objects

Objects can be added to a given `Environment` instance by specifying them in the config that is passed to the environment constructor via the `objects` key. This is expected to be a list of dictionaries, each of which specifies the desired configuration for a single object to be created. For each dict, the `type` key is required and specifies the desired object class, and global `position` and `orientation` (in (x,y,z,w) quaternion form) can also be specified. Additional keys can be specified and will be passed directly to the specific object class constructor. An example of an object configuration is shown below in `.yaml` form:

??? code "single_object_config_example.yaml"

    ``` yaml linenums="1"
    objects:
      - type: USDObject
        name: some_usd_object
        usd_path: your_path_to_model.usd
        visual_only: False
        position: [0, 0, 0]
        orientation: [0, 0, 0, 1]
        scale: [0.5, 0.6, 0.7]
    ```

`OmniGibson` supports 6 types of objects, shown as follows:

- `ControllableObject`: This class represents objects that can be controlled through joint controllers. It is used as the parent class of the robot classes and provides functionality to apply control actions to the object. In general, users should not create objects of this class directly, but rather spawn the desired robot type in the `robots` section of the config.

- `StatefulObject`: This class represents objects that come with object states. For more information regarding object states, please take a look at `object_states`. This is also meant to be a parent class, and should generally not be instantiated directly.

- `PrimitiveObject`: This class represents primitive shape objects (cubes, spheres, cones, etc.). These are usually used as visual objects in the scene. For example, users can instantiate a sphere object to visualize the target location of a robot reaching task, and set its `visual_only` property to `True` to disable its kinematics and collision with other objects.

- `LightObject`: This class specifically represents lights in the scene, and provides functionality to modify the properties of lights. Users can instantiate several types of lights in OmniGibson (cylinder, disk, distant, dome, geometry, rectangle, and sphere lights), choose whichever type works best, and set the `intensity` property to control the brightness.

- `USDObject`: This class represents objects loaded through a USD file. This is useful when users want to load a custom USD asset into the simulator. Users should specify the `usd_path` parameter of the `USDObject` in order to load the desired file of their choice.

- `DatasetObject`: This class inherits from `USDObject` and represents objects from the OmniGibson dataset. Users should specify the category of the object they want to load, as well as the model id, which is a 6-character string unique to each dataset object.
  For the possible categories and models, please refer to our [Knowledgebase Dashboard](https://behavior.stanford.edu/knowledgebase/).

### Runtime

Usually, objects are instantiated upon startup, but certain properties of an object can be modified while the simulator is running. For example, to teleport an object from one place to another, simply call `object.set_position_orientation(new_pos, new_orn)`. Another example is highlighting an object by setting `object.highlighted = True`; the object will then be highlighted in the scene.

To access objects from the environment, one can call `env.scene.object_registry`. Here are a couple of examples:

- `env.scene.object_registry("name", OBJECT_NAME)`: get the object by its name
- `env.scene.object_registry("category", CATEGORY)`: get the object by its category
- `env.scene.object_registry("prim_path", PRIM_PATH)`: get the object by its prim path
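
Putting the runtime APIs above together, here is a short, illustrative sketch; the object name and USD path reuse the placeholder values from the config example earlier on this page, and the registry return types should be verified against `env.scene.object_registry`.

```python3
import omnigibson as og

# Illustrative, self-contained sketch: load the USD object from the example
# config above, then exercise the runtime APIs described in this section.
cfg = {
    "scene": {"type": "EmptyScene"},
    "objects": [
        {
            "type": "USDObject",
            "name": "some_usd_object",
            "usd_path": "your_path_to_model.usd",  # placeholder path from the example above
            "position": [0, 0, 0],
            "orientation": [0, 0, 0, 1],
        }
    ],
}
env = og.Environment(configs=cfg)

obj = env.scene.object_registry("name", "some_usd_object")
obj.set_position_orientation([1.0, 0.0, 0.5], [0, 0, 0, 1])  # teleport to a new pose
obj.highlighted = True  # highlight the object in the scene
```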
StanfordVL/OmniGibson/docs/modules/robots.md
---
icon: material/robot-outline
---

# 🤖 **Robots**

## Description

In **`OmniGibson`**, `Robot`s define agents that can interact with other objects in a given environment. Each robot can _interact_ by deploying joint commands via its set of [`Controller`](./controllers.md)s, and can _perceive_ its surroundings via its set of [`Sensor`](./sensor.md)s.

**`OmniGibson`** supports both navigation and manipulation robots, and allows for modular specification of individual controllers for controlling the different components of a given robot. For example, the `Fetch` robot is a mobile manipulator composed of a mobile (two-wheeled) base, two head joints, a trunk, seven arm joints, and two gripper finger joints. `Fetch` owns 4 controllers, one for controlling the base, the head, the trunk + arm, and the gripper. There are multiple options for each controller depending on the desired action space. For more information, check out our [robot examples](../getting_started/examples.md#robots).

It is important to note that robots are full-fledged `StatefulObject`s, and thus leverage the same APIs as normal scene objects and can be treated as such. Robots can be thought of as `StatefulObject`s that additionally own controllers (`robot.controllers`) and sensors (`robot.sensors`).

## Usage

### Importing

Robots can be added to a given `Environment` instance by specifying them in the config that is passed to the environment constructor via the `robots` key. This is expected to be a list of dictionaries, where each dictionary specifies the desired configuration for a single robot to be created. For each dict, the `type` key is required and specifies the desired robot class, and global `position` and `orientation` (in (x,y,z,w) quaternion form) can also be specified. Additional keys can be specified and will be passed directly to the specific robot class constructor. An example of a robot configuration is shown below in `.yaml` form:

??? code "single_fetch_config_example.yaml"

    ``` yaml linenums="1"
    robots:
      - type: Fetch
        position: [0, 0, 0]
        orientation: [0, 0, 0, 1]
        obs_modalities: [scan, rgb, depth]
        scale: 1.0
        self_collision: false
        action_normalize: true
        action_type: continuous
        grasping_mode: physical
        rigid_trunk: false
        default_trunk_offset: 0.365
        default_arm_pose: diagonal30
        reset_joint_pos: tuck
        sensor_config:
          VisionSensor:
            sensor_kwargs:
              image_height: 128
              image_width: 128
          ScanSensor:
            sensor_kwargs:
              min_range: 0.05
              max_range: 10.0
        controller_config:
          base:
            name: DifferentialDriveController
          arm_0:
            name: InverseKinematicsController
            kv: 2.0
          gripper_0:
            name: MultiFingerGripperController
            mode: binary
          camera:
            name: JointController
            use_delta_commands: False
    ```

### Runtime

Usually, actions are passed to robots and observations retrieved via the `obs, reward, done, info = env.step(action)` call. However, actions can be directly deployed and observations retrieved from the robot using the following APIs:

<div class="annotate" markdown>
- **Applying actions**: `robot.apply_action(action)` (1)
- **Retrieving observations**: `obs, info = robot.get_obs()` (2)
</div>

1. `action` is a 1D-numpy array. For more information, please see the [Controller](./controllers.md) section!
2. `obs` is a dict mapping observation name to observation data, and `info` is a dict of relevant metadata about the observations. For more information, please see the [Sensor](./sensor.md) section!

Controllers and sensors can be accessed directly via the `controllers` and `sensors` properties, respectively.
And, like all objects in **`OmniGibson`**, common information such as joint data and object states can also be directly accessed from the `robot` class.

## Models

**`OmniGibson`** currently supports 9 robots, consisting of 4 mobile robots, 2 manipulation robots, 2 mobile manipulation robots, and 1 anthropomorphic "robot" (a bimanual agent proxy used for VR teleoperation). Below, we provide a brief overview of each model:

### Mobile Robots

These are navigation-only robots (an instance of [`LocomotionRobot`](../reference/robots/locomotion_robot.html)) that solely consist of a base that can move.

<table markdown="span">
<tr> <td valign="top" width="60%"> [**`Turtlebot`**](../reference/robots/turtlebot.html)<br><br> The two-wheeled <a href="https://www.turtlebot.com/turtlebot2/">Turtlebot 2</a> model with the Kobuki base.<br><br> <ul> <li>_Controllers_: Base</li> <li>_Sensors_: Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Turtlebot.png" alt="rgb"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`Locobot`**](../reference/robots/locobot.html)<br><br> The two-wheeled, open-source <a href="http://www.locobot.org/">LoCoBot</a> model.<br><br> Note that in our model the arm is disabled and is fixed to the base.<br><br> <ul> <li>_Controllers_: Base</li> <li>_Sensors_: Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Locobot.png" alt="rgb"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`Husky`**](../reference/robots/husky.html)<br><br> The four-wheeled <a href="https://clearpathrobotics.com/husky-unmanned-ground-vehicle-robot/">Husky UGV</a> model from Clearpath Robotics.<br><br> <ul> <li>_Controllers_: Base</li> <li>_Sensors_: Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Husky.png" alt="rgb"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`Freight`**](../reference/robots/freight.html)<br><br> The two-wheeled <a href="https://docs.fetchrobotics.com/">Freight</a> model which serves as the base for the Fetch robot.<br><br> <ul> <li>_Controllers_: Base</li> <li>_Sensors_: Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Freight.png" alt="rgb"> </td> </tr>
</table>

### Manipulation Robots

These are manipulation-only robots (an instance of [`ManipulationRobot`](../reference/robots/manipulation_robot.html)) that cannot move and solely consist of an actuated arm with a gripper attached to its end effector.

<table markdown="span">
<tr> <td valign="top" width="60%"> [**`Franka`**](../reference/robots/franka.html)<br><br> The popular 7-DOF <a href="https://franka.de/">Franka Research 3</a> model equipped with a parallel jaw gripper.
Note that OmniGibson also includes two alternative versions of Franka: FrankaAllegro (equipped with an Allegro hand) and FrankaLeap (equipped with a Leap hand).<br><br> <ul> <li>_Controllers_: Arm, Gripper</li> <li>_Sensors_: Wrist Camera</li> </ul> </td> <td> <img src="../assets/robots/FrankaPanda.png" alt="rgb"> </td> </tr> <tr> <td valign="top" width="60%"> [**`VX300S`**](../reference/robots/vx300s.html)<br><br> The 6-DOF <a href="https://www.trossenrobotics.com/viperx-300-robot-arm-6dof.aspx">ViperX 300 6DOF</a> model from Trossen Robotics equipped with a parallel jaw gripper.<br><br> <ul> <li>_Controllers_: Arm, Gripper</li> <li>_Sensors_: Wrist Camera</li> </ul> </td> <td> <img src="../assets/robots/VX300S.png" alt="rgb"> </td> </tr> </table> ### Mobile Manipulation Robots These are robots that can both navigate and manipulate (and inherit from both [`LocomotionRobot`](../reference/robots/locomotion_robot.html) and [`ManipulationRobot`](../reference/robots/manipulation_robot.html)), and are equipped with both a base that can move as well as one or more gripper-equipped arms that can actuate. <table markdown="span"> <tr> <td valign="top" width="60%"> [**`Fetch`**](../reference/robots/fetch.html)<br><br> The <a href="https://docs.fetchrobotics.com/">Fetch</a> model, composed of a two-wheeled base, linear trunk, 2-DOF head, 7-DOF arm, and 2-DOF parallel jaw gripper.<br><br> <ul> <li>_Controllers_: Base, Head, Arm, Gripper</li> <li>_Sensors_: Head Camera, LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Fetch.png" alt="rgb"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Tiago`**](../reference/robots/tiago.html)<br><br> The bimanual <a href="https://pal-robotics.com/robots/tiago/">Tiago</a> model from PAL robotics, composed of a holonomic base (which we model as a 3-DOF (x,y,rz) set of joints), linear trunk, 2-DOF head, x2 7-DOF arm, and x2 2-DOF parallel jaw grippers.<br><br> <ul> <li>_Controllers_: Base, Head, Left Arm, Right Arm, Left Gripper, Right Gripper</li> <li>_Sensors_: Head Camera, Rear LIDAR, Front LIDAR</li> </ul> </td> <td> <img src="../assets/robots/Tiago.png" alt="rgb"> </td> </tr> </table> ### Additional Robots <table markdown="span"> <tr> <td valign="top" width="60%"> [**`BehaviorRobot`**](../reference/robots/behavior_robot.html#robots.behavior_robot.BehaviorRobot)<br><br> A hand-designed model intended to be used exclusively for VR teleoperation.<br><br> <ul> <li>_Controllers_: Base, Head, Left Arm, Right Arm, Left Gripper, Right Gripper</li> <li>_Sensors_: Head Camera</li> </ul> </td> <td> <img src="../assets/robots/BehaviorRobot.png" alt="rgb"> </td> </tr> </table>
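
The direct robot APIs described in the Runtime section can be exercised as in the following sketch; the trimmed config is illustrative, and `robot.action_dim` is assumed here to be a convenience property giving the size of the robot's action space (verify against the robot class in your installed version).

```python3
import numpy as np
import omnigibson as og

# Illustrative sketch: a Fetch robot in an empty scene (a trimmed version of the
# config example shown earlier on this page).
cfg = {
    "scene": {"type": "EmptyScene"},
    "robots": [{"type": "Fetch", "obs_modalities": ["rgb", "depth"]}],
}
env = og.Environment(configs=cfg)
robot = env.robots[0]

# Apply a zero (no-op) action directly; `action_dim` is an assumed property
robot.apply_action(np.zeros(robot.action_dim))

# Retrieve observations and metadata directly from the robot
obs, info = robot.get_obs()

# Controllers and sensors are accessible as dictionaries
print(list(robot.controllers.keys()), list(robot.sensors.keys()))
```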
StanfordVL/OmniGibson/docs/modules/object_states.md
---
icon: material/thermometer
---

# 🌡️ **Object States**

## Description

In **`OmniGibson`**, `ObjectState`s define kinematic (such as `OnTop`, or `Inside`) or semantic (such as `Temperature` or `Saturated`) states for a given `StatefulObject`. These states enable a finer-grained description of the scene at hand that is not captured by the raw simulation state (such as object and joint poses).

Every `StatefulObject` owns its own dictionary of states `obj.states`, which maps the object state _class type_ to the object state _instance_ owned by the object.

Object states have a unified API interface: a getter `state.get_value(...)`, and a setter `state.set_value(...)`. Note that not all object states implement these functions:

- Some states such as `Temperature` implement both `get_value()` and `set_value()` as a simple R/W operation, as this is merely an internal variable that is tracked over time.
- Other states implement more complex behavior, such as `OnTop`, which infers spatial relationships between different objects during `get_value()` and additionally samples poses in `set_value()` such that the spatial relationship holds.
- Some states such as `NextTo` only implement `get_value()`, since setting these states is non-trivial and unclear to sample.
- Finally, `IntrinsicObjectState`s such as `ParticleApplier` (which describes an object that can generate particles, such as a spray bottle) describe an intrinsic semantic property of the object, and therefore implement neither `get_value` nor `set_value`.

**`OmniGibson`** supports a wide range of object state types, and provides an extensive example suite showcasing individual object states. For more information, check out our [object state examples](../getting_started/examples.md#object-states).

!!! info annotate "Object States must be enabled before usage!"

    To enable usage of object states, `gm.ENABLE_OBJECT_STATES` (1) must be set!

1. Access global macros via `from omnigibson.macros import gm`

## Usage

### Adding Object States

Object states are intended to be added when an object is instantiated, during its constructor call, via the `abilities` kwarg. This is expected to be a dictionary mapping ability name to a dictionary of keyword arguments that dictate the instantiated object state's behavior. Normally, these are simply the keyword arguments to pass to the specific `ObjectState` constructor, but they can differ: concretely, the raw values in the `abilities` value dictionary are postprocessed via the specific object state's `postprocess_ability_params` classmethod. This is to allow `abilities` to be fully exportable in .json format, without requiring complex datatypes (which may be required as part of an object state's actual constructor) to be stored.

By default, `abilities=None` results in an object's abilities directly being inferred from its `category` kwarg. **`OmniGibson`** leverages a crowdsourced [knowledgebase](https://behavior.stanford.edu/knowledgebase/categories/index.html) to determine what abilities (or "properties" in the knowledgebase) a given entity (called a "synset" in the knowledgebase) can have. Every category in **`OmniGibson`**'s asset dataset directly corresponds to a specific synset. By going to the knowledgebase and clicking on the corresponding synset, one can see the annotated abilities (properties) for that given synset, which will be applied to the object being created.
Alternatively, you can programmatically observe which abilities, with the exact default kwargs, correspond to a given category via:

```python3
from omnigibson.utils.bddl_utils import OBJECT_TAXONOMY

category = "apple"  # or any other category
synset = OBJECT_TAXONOMY.get_synset_from_category(category)
abilities = OBJECT_TAXONOMY.get_abilities(synset)
```

??? warning annotate "Not all object states are guaranteed to be created!"

    Some object states (such as `ParticleApplier` or `ToggledOn`) potentially require specific metadata to be defined for a given object model before the object state can be created. For example, `ToggledOn` represents a pressable virtual button, and requires this button to be defined a-priori in the raw object asset before it is imported. When parsing the `abilities` dictionary, each object state runs a compatibility check via `state.is_compatible(obj, **kwargs)` before it is created, where `**kwargs` define any relevant keyword arguments that would be passed to the object state constructor. If the check fails, then the object state is **_not_** created!

### Runtime

As mentioned earlier, object states can potentially be read via `get_value(...)` or written via `set_value(...)`. Whether reading / writing is possible, as well as the expected arguments and return values, depends on the specific object state class type. For example, object states that inherit the `BooleanStateMixin` class expect `get_value(...)` to return and `set_value(...)` to receive a boolean. `AbsoluteObjectState`s are agnostic to any other object in the scene, and so `get_value()` takes no arguments. In contrast, `RelativeObjectState`s are computed with respect to another object, and so require `other_obj` to be passed into the getter and setter, e.g., `get_value(other_obj)` and `set_value(other_obj, ...)`. A `ValueError` will be raised if `get_value(...)` or `set_value(...)` is called on an object that does not support that functionality. If `set_value()` is called and is successful, it will return `True`; otherwise, it will return `False`. For more information on specific object state types' behaviors, please see [Object State Types](#object-state-types).

It is important to note that object states are usually queried / computed _on demand_ and immediately cached until their value becomes stale (usually the immediately following simulation step). This is done for efficiency reasons, and also means that object states are usually not automatically updated per-step unless absolutely necessary (1). Calling `state.clear_cache()` forces a clearing of an object state's internal cache.
{ .annotate }

## Object State Types

**`OmniGibson`** currently supports 34 object states, consisting of 19 `AbsoluteObjectState`s, 11 `RelativeObjectState`s, and 4 `IntrinsicObjectState`s. Below, we provide a brief overview of each type:

### `AbsoluteObjectState`

These are object states that are agnostic to other objects in a given scene.
<table markdown="span"> <tr> <td valign="top" width="60%"> [**`AABB`**](../reference/object_states/aabb.html)<br><br> The axis-aligned bounding box (AABB) of the object in the world frame.<br><br> <ul> <li>`get_value()`: returns `aabb_min`, `aabb_max`</li> <li>`set_value()`: Not supported.</li> </ul> </td> <td> <img src="../assets/object_states/AABB.png" alt="AABB"> </td> </tr> <tr> <td valign="top" width="60%"> [**`VerticalAdjacency`** / **`HorizontalAdjacency`**](../reference/object_states/adjacency.html)<br><br> The nearby objects that are considered adjacent to the object, either in the +/- global Z axis or +/- global XY plane.<br><br> <ul> <li>`get_value()`: returns `AxisAdjacencyList`, a namedtuple with `positive_neighbors` and `negative_neighbors` each of which are lists of nearby objects</li> <li>`set_value()`: Not supported.</li> </ul> </td> <td> <img src="../assets/object_states/Adjacency.png" alt="Adjacency"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Burnt`**](../reference/object_states/burnt.html)<br><br> Whether the object is considered burnt or not. Note that if `True`, this object's visual appearance will also change accordingly. This corresponds to an object hitting some `MaxTemperature` threshold over the course of its lifetime.<br><br> <ul> <li>`get_value()`: returns `True / False`</li> <li>`set_value(new_value)`: expects `True / False`</li> </ul> </td> <td> <img src="../assets/object_states/Burnt.png" alt="burnt"> </td> </tr> <tr> <td valign="top" width="60%"> [**`ContactBodies`**](../reference/object_states/contact_bodies.html)<br><br> The nearby rigid bodies that this object is currently in contact with.<br><br> <ul> <li>`get_value(ignore_objs=None)`: returns `rigid_prims`, a set of `RigidPrim`s the object is in contact with, optionally with `ignore_objs` filtered from the set</li> <li>`set_value(new_value)`: Not supported.</li> </ul> </td> <td> <img src="../assets/object_states/ContactBodies.png" alt="contact_bodies"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Cooked`**](../reference/object_states/cooked.html)<br><br> Whether the object is considered cooked or not. Note that if `True`, this object's visual appearance will also change accordingly. This corresponds to an object hitting some `MaxTemperature` threshold over the course of its lifetime.<br><br> <ul> <li>`get_value()`: returns `True / False`</li> <li>`set_value(new_value)`: expects `True / False`</li> </ul> </td> <td> <img src="../assets/object_states/Cooked.png" alt="cooked"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Folded`** / **`Unfolded`**](../reference/object_states/folded.html)<br><br> A cloth-specific state. Determines whether a cloth object is sufficiently un / folded or not. This is inferred as a function of its overall smoothness, total area to current area ratio, and total diagonal to current diagonal ratio.<br><br> <ul> <li>`get_value()`: returns `True / False`</li> <li>`set_value(new_value)`: Can only set `unfolded.set_value(True)`. All others are not supported.</li> </ul> </td> <td> <img src="../assets/object_states/Folded.png" alt="folded"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Frozen`**](../reference/object_states/frozen.html)<br><br> Whether the object is considered frozen or not. Note that if `True`, this object's visual appearance will also change accordingly. 
This corresponds to an object's `Temperature` value being under some threshold at the current timestep.<br><br> <ul> <li>`get_value()`: returns `True / False`</li> <li>`set_value(new_value)`: expects `True / False`</li> </ul> </td> <td> <img src="../assets/object_states/Frozen.png" alt="frozen"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`HeatSourceOrSink`**](../reference/object_states/heat_source_or_sink.html)<br><br> Defines a heat source or sink which raises / lowers the temperature of nearby objects, if enabled. Use `state.affects_obj(obj)` to check whether the given heat source / sink is currently impacting `obj`'s temperature.<br><br> <ul> <li>`get_value()`: returns `True / False` (whether the source / sink is enabled or not)</li> <li>`set_value(new_value)`: Not supported.</li> </ul> </td> <td> <img src="../assets/object_states/HeatSourceOrSink.png" alt="heat_source_or_sink"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`Heated`**](../reference/object_states/heated.html)<br><br> Whether the object is considered heated or not. Note that if `True`, this object's visual appearance will also change accordingly with steam actively coming off of the object. This corresponds to an object's `Temperature` value being above some threshold at the current timestep.<br><br> <ul> <li>`get_value()`: returns `True / False`</li> <li>`set_value(new_value)`: expects `True / False`</li> </ul> </td> <td> <img src="../assets/object_states/Heated.png" alt="heated"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`MaxTemperature`**](../reference/object_states/max_temperature.html)<br><br> The object's max temperature over the course of its lifetime. This value gets automatically updated every simulation step and can be affected by nearby `HeatSourceOrSink`-enabled objects.<br><br> <ul> <li>`get_value()`: returns `float`</li> <li>`set_value(new_value)`: expects `float`</li> </ul> </td> <td> <img src="../assets/object_states/MaxTemperature.png" alt="max_temperature"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`OnFire`**](../reference/object_states/on_fire.html)<br><br> Whether the object is lit on fire or not. Note that if `True`, this object's visual appearance will also change accordingly with fire actively coming off of the object. This corresponds to an object's `Temperature` value being above some threshold at the current timestep. Note that if `True`, this object becomes an active `HeatSourceOrSink`-enabled object that will raise the temperature of nearby objects.<br><br> <ul> <li>`get_value()`: returns `True / False`</li> <li>`set_value(new_value)`: expects `True / False`</li> </ul> </td> <td> <img src="../assets/object_states/OnFire.png" alt="on_fire"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`ObjectsInFOVOfRobot`**](../reference/object_states/objects_in_fov_of_robot.html)<br><br> A robot-specific state. Computes the list of objects that are currently in the robot's field of view.<br><br> <ul> <li>`get_value()`: returns `obj_list`, the list of `BaseObject`s</li> <li>`set_value(new_value)`: Not supported</li> </ul> </td> <td> <img src="../assets/object_states/ObjectsInFOVOfRobot.png" alt="objects_in_fov_of_robot"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`Open`**](../reference/object_states/open.html)<br><br> Whether the object's joint is considered open or not.
This corresponds to at least one joint being above some threshold from its pre-defined annotated closed state.<br><br> <ul> <li>`get_value()`: returns `True / False`</li> <li>`set_value(new_value)`: expects `True / False`, randomly sampling a valid open / not open configuration unless `fully` is set</li> </ul> </td> <td> <img src="../assets/object_states/Open.png" alt="open"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`Pose`**](../reference/object_states/pose.html)<br><br> The object's current (position, orientation) expressed in (cartesian, quaternion) form in the global frame.<br><br> <ul> <li>`get_value()`: returns (`pos`, `quat`), with quat in (x,y,z,w) form</li> <li>`set_value(new_value)`: Not supported. Use `obj.set_position_orientation()` to directly modify an object's pose.</li> </ul> </td> <td> <img src="../assets/object_states/Pose.png" alt="pose"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`Temperature`**](../reference/object_states/temperature.html)<br><br> The object's current temperature. This value gets automatically updated every simulation step and can be affected by nearby `HeatSourceOrSink`-enabled objects.<br><br> <ul> <li>`get_value()`: returns `float`</li> <li>`set_value(new_value)`: expects `float`</li> </ul> </td> <td> <img src="../assets/object_states/Temperature.png" alt="temperature"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`ToggledOn`**](../reference/object_states/toggled_on.html)<br><br> A virtual button that can be "pressed" by a robot's end-effector. Doing so will result in the state being toggled between `True` and `False`, and also corresponds to a visual change in the virtual button's appearance.<br><br> <ul> <li>`get_value()`: returns `True / False`</li> <li>`set_value(new_value)`: expects `True / False`</li> </ul> </td> <td> <img src="../assets/object_states/ToggledOn.png" alt="toggled_on"> </td> </tr>
</table>

### `RelativeObjectState`

These are object states that are computed with respect to other entities in the given scene, and therefore, both the `get_value(...)` and `set_value(...)` take in additional arguments.

<table markdown=span>
<tr> <td valign="top" width="60%"> [**`AttachedTo`**](../reference/object_states/attached_to.html)<br><br> Defines a rigid or flexible connection between this object and another object (parent). At any given moment, this object can only be attached to at most one parent, but the reverse is not true. That is, a parent can have multiple children, but a child can only have one parent. An attachment is triggered and created when this object makes contact with a compatible parent and is aligned correctly.<br><br> <ul> <li>`get_value(other)`: returns `True / False`, whether this object is attached to `other`</li> <li>`set_value(other, new_value, bypass_alignment_checking=False)`: expects `True / False`, and optionally bypasses checking for object alignment with `other` if `bypass_alignment_checking` is set</li> </ul> </td> <td> <img src="../assets/object_states/AttachedTo.png" alt="attached_to"> </td> </tr>
<tr> <td valign="top" width="60%"> [**`Contains`**](../reference/object_states/contains.html)<br><br> Defines whether this object currently contains any quantity of a specific particle system. Note that this state requires that a container virtual volume be pre-annotated in the underlying object asset for it to be created.
Particles are considered contained if their position lies within the annotated volume.<br><br> <ul> <li>`get_value(system)`: returns `True / False`</li> <li>`set_value(system, new_value)`: Only supported for `new_value=False`, which will remove all contained particles</li> </ul> </td> <td> <img src="../assets/object_states/Contains.png" alt="contains"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Covered`**](../reference/object_states/covered.html)<br><br> Defines whether this object is currently covered by a specific particle system. This corresponds to checking whether the number of particles either touching or attached to this object surpasses some minimum threshold.<br><br> <ul> <li>`get_value(system)`: returns `True / False`</li> <li>`set_value(system, new_value)`: If `True`, will sample particles from `system` on this object, otherwise, will remove all particles from `system` covering this object</li> </ul> </td> <td> <img src="../assets/object_states/Covered.png" alt="covered"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Draped`**](../reference/object_states/draped.html)<br><br> A cloth-specific state. Defines whether this cloth object is fully covering `other`, e.g., a tablecloth draped over a table. This object is considered draped if it is touching `other` and its center of mass is below the average position of the contact points.<br><br> <ul> <li>`get_value(other)`: returns `True / False`</li> <li>`set_value(other, new_value)`: Only supports `True`, which will try to sample this cloth object on top of `other` such that `draped.get_value(other)=True`</li> </ul> </td> <td> <img src="../assets/object_states/Draped.png" alt="draped"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Filled`**](../reference/object_states/filled.html)<br><br> Defines whether this object is currently filled with a specific particle system. Note that this state requires that a container virtual volume be pre-annotated in the underlying object asset for it to be created. This state corresponds to checking whether the total volume of contained particles surpasses some minimum relative ratio with respect to its total annotated container volume.<br><br> <ul> <li>`get_value(system)`: returns `True / False`</li> <li>`set_value(system, new_value)`: If `True`, will sample particles from `system` to fill the container volume, otherwise, will remove all particles from `system` contained within this object</li> </ul> </td> <td> <img src="../assets/object_states/Filled.png" alt="filled"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Inside`**](../reference/object_states/inside.html)<br><br> Defines whether this object is considered inside of `other`. This does raycasting in all axes (x,y,z), and checks to make sure that rays shot in at least two of these axes hit `other`.<br><br> <ul> <li>`get_value(other)`: returns `True / False`</li> <li>`set_value(other, new_value)`: Only supported for `True`, which will sample poses for this object such that `get_value(other)=True`</li> </ul> </td> <td> <img src="../assets/object_states/Inside.png" alt="inside"> </td> </tr> <tr> <td valign="top" width="60%"> [**`IsGrasping`**](../reference/object_states/robot_related_states.html)<br><br> A robot-specific state. 
Determines whether this robot is currently grasping `other`.<br><br> <ul> <li>`get_value(other)`: returns `True / False`</li> <li>`set_value(other, new_value)`: Not supported.</li> </ul> </td> <td> <img src="../assets/object_states/IsGrasping.png" alt="is_grasping"> </td> </tr> <tr> <td valign="top" width="60%"> [**`NextTo`**](../reference/object_states/next_to.html)<br><br> Defines whether this object is considered next to `other`. This checks to make sure this object is relatively close to `other` and that `other` is in either of this object's `HorizontalAdjacency` neighbor lists.<br><br> <ul> <li>`get_value(other)`: returns `True / False`</li> <li>`set_value(other, new_value)`: Not supported.</li> </ul> </td> <td> <img src="../assets/object_states/NextTo.png" alt="next_to"> </td> </tr> <tr> <td valign="top" width="60%"> [**`OnTop`**](../reference/object_states/on_top.html)<br><br> Defines whether this object is considered on top of `other`. This checks to make sure that this object is touching `other` and that `other` is in this object's `VerticalAdjacency` `negative_neighbors` list.<br><br> <ul> <li>`get_value(other)`: returns `True / False`</li> <li>`set_value(other, new_value)`: Only supported for `True`, which will sample poses for this object such that `get_value(other)=True`</li> </ul> </td> <td> <img src="../assets/object_states/OnTop.png" alt="on_top"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Overlaid`**](../reference/object_states/overlaid.html)<br><br> A cloth-specific state. Defines whether this object is overlaid over `other`, e.g., a t-shirt overlaid over a table. This checks to make sure that the ratio of the XY-projected area of this cloth object's convex hull to the XY-area of `other`'s bounding box surpasses some threshold.<br><br> <ul> <li>`get_value(other)`: returns `True / False`</li> <li>`set_value(other, new_value)`: Only supports `True`, which will try to sample this cloth object on top of `other` such that `overlaid.get_value(other)=True`</li> </ul> </td> <td> <img src="../assets/object_states/Overlaid.png" alt="overlaid"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Saturated`**](../reference/object_states/saturated.html)<br><br> Defines whether this object has reached its saturation limit with respect to a specific particle system, e.g., a sponge fully saturated with water, or a spray bottle fully emptied of cleaner fluid. This keeps a reference to this object's modified particle count for `system`, and checks whether the current value surpasses a desired limit. Specific limits can be queried via `get_limit(system)` and set via `set_limit(system, limit)`. Note that if `True`, this object's visual appearance will also change accordingly. <br><br> <ul> <li>`get_value(system)`: returns `True / False`</li> <li>`set_value(system, new_value)`: If `True`, will set the internal modified particle count exactly to the limit, otherwise, will set it to 0.</li> </ul> </td> <td> <img src="../assets/object_states/Saturated.png" alt="saturated"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Touching`**](../reference/object_states/touching.html)<br><br> Defines whether this object is in contact with `other`.<br><br> <ul> <li>`get_value(other)`: returns `True / False`</li> <li>`set_value(other, new_value)`: Not supported.</li> </ul> </td> <td> <img src="../assets/object_states/Touching.png" alt="touching"> </td> </tr> <tr> <td valign="top" width="60%"> [**`Under`**](../reference/object_states/under.html)<br><br> Defines whether this object is considered under `other`.
This checks to make sure that this object is touching `other` and that `other` is in this object's `VerticalAdjacency` `positive_neighbors` list.<br><br> <ul> <li>`get_value(other)`: returns `True / False`</li> <li>`set_value(other, new_value)`: Only supported for `True`, which will sample poses for this object such that `get_value(other)=True`</li> </ul> </td> <td> <img src="../assets/object_states/Under.png" alt="under"> </td> </tr> </table> ### `IntrinsicObjectState` These are object states that define intrinsic properties of the object and therefore do not implement `get_value(...)` or `set_value(...)`. <table markdown="span"> <tr> <td valign="top" width="60%"> [**`ParticleApplier` / `ParticleRemover`**](../reference/object_states/particle_modifier.html)<br><br> Defines an object that has the ability to apply (spawn) or remove (absorb) particles from specific particle systems. This state's `conditions` property defines the per-particle system requirements in order for the applier / remover to be active for that specific system. For example, a spray bottle that is a `ParticleApplier` may require `toggled_on.get_value()` to be `True` in order to allow `cleaning_fluid` particles to be sprayed, simulating a "press" of the nozzle trigger. The `method` flag in the constructor determines the applier / remover behavior, which is triggered **_only_** by direct contact with the object (`ParticleModifyMethod.ADJACENCY`) or overlap with a virtual volume (`ParticleModifyMethod.PROJECTION`). The former captures objects such as sponges, while the latter captures objects such as vacuum cleaners or spray bottles. This object state is updated at each simulation step such that particles are automatically added / removed as needed.<br><br> <ul> <li>`get_value()`: Not supported.</li> <li>`set_value()`: Not supported.</li> </ul> </td> <td> <img src="../assets/object_states/ParticleRemover.png" alt="particle_remover"> </td> </tr> <tr> <td valign="top" width="60%"> [**`ParticleSource` / `ParticleSink`**](../reference/object_states/particle_source_or_sink.html)<br><br> Defines an object that has the ability to apply (spawn) or remove (absorb) particles from specific particle systems. The behavior is nearly identical to **`ParticleApplier` / `ParticleRemover`**, with the exception that contact is not strictly necessary to add / remove particles. This captures the distinction between, e.g., a particle _source_ such as a sink, which always spawns water every timestep regardless of whether its faucet volume is in contact with a surface, vs. a particle _applier_ such as a spray bottle, which (for efficiency reasons) only spawns water if its virtual spray cone is overlapping with a surface.<br><br> <ul> <li>`get_value()`: Not supported.</li> <li>`set_value()`: Not supported.</li> </ul> </td> <td> <img src="../assets/object_states/ParticleSource.png" alt="particle_source"> </td> </tr> </table>
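All of the states above are accessed through an object's `states` dictionary, keyed by the state class. The snippet below is a minimal sketch of this pattern; the `cfg` config and object names are placeholders, and the `object_registry` lookup is an assumption based on the standard OmniGibson scene API.

```python
import omnigibson as og
from omnigibson import object_states

# Assumes `cfg` is a config dict that loads a scene containing these
# (hypothetical) objects named "apple" and "breakfast_table".
env = og.Environment(configs=cfg)
apple = env.scene.object_registry("name", "apple")
table = env.scene.object_registry("name", "breakfast_table")

# AbsoluteObjectState: query and modify the apple's temperature
print(apple.states[object_states.Temperature].get_value())
apple.states[object_states.Temperature].set_value(40.0)

# RelativeObjectState: check / sample a kinematic relationship with another object
print(apple.states[object_states.OnTop].get_value(table))
apple.states[object_states.OnTop].set_value(table, True)
```

A given state only appears in `obj.states` if the underlying asset supports it (e.g., `Open` requires annotated joints), so it is good practice to check membership first, e.g. `if object_states.Open in obj.states: ...`.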
StanfordVL/OmniGibson/docs/modules/prim.md
--- icon: material/cube-outline --- # ๐Ÿงฑ **Prim** A Prim, short for "primitive," is a fundamental building block of a scene, representing an individual object or entity within the scene's hierarchy. It is essentially a container that encapsulates data, attributes, and relationships, allowing it to represent various scene components like models, cameras, lights, or groups of prims. These prims are systematically organized into a hierarchical framework, creating a scene graph that depicts the relationships and transformations between objects. Every prim is uniquely identified by a path, which serves as a locator within the scene graph. This path includes the names of all parent prims leading up to it. For example, a prim's path might be `/World/robot0/gripper_link`, indicating that the `gripper_link` is a child of `robot0`. Additionally, prims carry a range of attributes, including position, rotation, scale, and material properties. These attributes define the properties and characteristics of the objects they represent.
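As a concrete illustration of the path-based lookup described above, the following minimal sketch uses the underlying USD API exposed by Isaac Sim to fetch a prim and inspect it. The prim path is illustrative, and the snippet assumes the simulator is already running so that `omni.usd.get_context().get_stage()` returns the active stage.

```python
import omni.usd

# Grab the currently-open USD stage (assumes OmniGibson / Isaac Sim is running)
stage = omni.usd.get_context().get_stage()

# Look up a prim by its scene-graph path (illustrative path)
prim = stage.GetPrimAtPath("/World/robot0/gripper_link")
if prim.IsValid():
    print(prim.GetName(), prim.GetTypeName())
    # Children of this prim in the scene graph
    print([child.GetPath() for child in prim.GetChildren()])
    # Attributes carried by the prim (transforms, material bindings, etc.)
    print([attr.GetName() for attr in prim.GetAttributes()])
```

In practice, OmniGibson wraps raw USD prims in its own prim classes (e.g., for transforms, rigid bodies, joints, and cloth), so most users interact with prims through those higher-level wrappers rather than the raw stage.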
StanfordVL/OmniGibson/docs/modules/sensor.md
--- icon: material/camera-outline --- # ๐Ÿ“ท **Sensor** ## Description Sensors play a crucial role in OmniGibson, as they facilitate the robots' observation of their environment. We offer two main classes of sensors: - `ScanSensor`: This includes a 2D LiDAR range sensor and an occupancy grid sensor. - `VisionSensor`: This sensor type features a camera equipped with various modalities, including RGB, depth, normals, three types of segmentation, optical flow, 2D and 3D bounding boxes. ## Usage To obtain sensor readings, the `get_obs()` function can be invoked at multiple levels within our hierarchy: - From `Environment`: Provides 1. All observations from all robots 2. All task-related observations 3. Observations from external sensors, if available - From `Robot`: Provides 1. Readings from all sensors associated with the robot 2. Proprioceptive observations for the robot (e.g., base pose, joint position, joint velocity) - From `Sensor`: Delivers all sensor readings based on the sensor's modalities. Additionally, our API allows for the simulation of real-world sensor behaviors by: 1. Adding noise 2. Dropping out sensor values to emulate missing data in sensor readings Besides the actual data, `get_obs()` also returns a secondary dictionary containing information about the data, such as segmentation labels for vision sensors. For instance, calling `get_obs()` on an environment with a single robot, which has all modalities enabled, might produce results similar to this: <details> <summary>Click to see code!</summary> <pre><code> data: { "robot0": { "robot0:laser_link:Lidar:0": { "scan": np.array(...), "occupancy_grid": np.array(...) }, "robot0:eyes:Camera:0": { "rgb": np.array(...), "depth": np.array(...), "depth_linear": np.array(...), "normal": np.array(...), "flow": np.array(...), "bbox_2d_tight": np.array(...), "bbox_2d_loose": np.array(...), "bbox_3d": np.array(...), "seg_semantic": np.array(...), "seg_instance": np.array(...), "seg_instance_id": np.array(...) }, "proprio": np.array(...) } "task": { "low_dim": np.array(...) 
} } info: { 'robot0': { 'robot0:laser_link:Lidar:0': {}, 'robot0:eyes:Camera:0': { 'seg_semantic': {'298104422': 'object', '764121901': 'background', '2814990211': 'agent'}, 'seg_instance': {...}, 'seg_instance_id': {...} }, 'proprio': {} } } </code></pre> </details> ## Observations ### Vision Sensor <table markdown="span"> <tr> <td valign="top" width="60%"> <strong>RGB</strong><br><br> RGB image of the scene from the camera perspective.<br><br> Size: (height, width, 4), numpy.uint8<br><br> </td> <td> <img src="../assets/sensor_asset/rgb.png" alt="rgb"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Depth</strong><br><br> Distance between the camera and everything else in the scene.<br><br> Size: (height, width), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/depth.png" alt="Depth Map"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Depth Linear</strong><br><br> Distance between the camera and everything else in the scene, where distance measurement is linearly proportional to the actual distance.<br><br> Size: (height, width), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/depth_linear.png" alt="Depth Map Linear"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Normal</strong><br><br> Surface normals - vectors perpendicular to the surface of objects in the scene.<br><br> Size: (height, width, 4), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/normal.png" alt="Normal"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Semantic Segmentation</strong><br><br> Each pixel is assigned a label, indicating the object category it belongs to (e.g., table, chair).<br><br> Size: (height, width), numpy.uint32<br><br> We also provide a dictionary containing the mapping of semantic IDs to object categories. 
You can get this here: <br><br> from omnigibson.utils.constants import semantic_class_id_to_name </td> <td> <img src="../assets/sensor_asset/seg_semantic.png" alt="Semantic Segmentation"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Instance Segmentation</strong><br><br> Each pixel is assigned a label, indicating the specific object instance it belongs to (e.g., table1, chair2).<br><br> Size: (height, width), numpy.uint32<br><br> </td> <td> <img src="../assets/sensor_asset/seg_instance.png" alt="Instance Segmentation"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Instance Segmentation ID</strong><br><br> Each pixel is assigned a label, indicating the specific object instance it belongs to (e.g., /World/table1/visuals, /World/chair2/visuals).<br><br> Size: (height, width), numpy.uint32<br><br> </td> <td> <img src="../assets/sensor_asset/seg_instance_id.png" alt="Instance Segmentation ID"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Optical Flow</strong><br><br> Optical flow - motion of pixels belonging to objects caused by the relative motion between the camera and the scene.<br><br> Size: (height, width, 4), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/optical_flow.png" alt="Optical Flow"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>2D Bounding Box Tight</strong><br><br> 2D bounding boxes wrapping individual objects, excluding any parts that are occluded.<br><br> Size: a list of <br> semanticID, numpy.uint32;<br> x_min, numpy.int32;<br> y_min, numpy.int32;<br> x_max, numpy.int32;<br> y_max, numpy.int32;<br> occlusion_ratio, numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/bbox_2d_tight.png" alt="2D Bounding Box Tight"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>2D Bounding Box Loose</strong><br><br> 2D bounding boxes wrapping individual objects, including occluded parts.<br><br> Size: a list of <br> semanticID, numpy.uint32;<br> x_min, numpy.int32;<br> y_min, numpy.int32;<br> x_max, numpy.int32;<br> y_max, numpy.int32;<br> occlusion_ratio, numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/bbox_2d_loose.png" alt="2D Bounding Box Loose"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>3D Bounding Box</strong><br><br> 3D bounding boxes wrapping individual objects.<br><br> Size: a list of <br> semanticID, numpy.uint32;<br> x_min, numpy.float32;<br> y_min, numpy.float32;<br> z_min, numpy.float32;<br> x_max, numpy.float32;<br> y_max, numpy.float32;<br> z_max, numpy.float32;<br> transform (4x4), numpy.float32;<br> occlusion_ratio, numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/bbox_3d.png" alt="3D Bounding Box"> </td> </tr> </table> ### Range Sensor <table markdown="span"> <tr> <td valign="top" width="60%"> <strong>2D LiDAR</strong><br><br> Distances to surrounding objects by emitting laser beams and detecting the reflected light.<br><br> Size: # of rays, numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/lidar.png" alt="2D LiDAR"> </td> </tr> <tr> <td valign="top" width="60%"> <strong>Occupancy Grid</strong><br><br> A representation of the environment as a 2D grid where each cell indicates the presence (or absence) of an obstacle.<br><br> Size: (grid resolution, grid resolution), numpy.float32<br><br> </td> <td> <img src="../assets/sensor_asset/occupancy_grid.png" alt="Occupancy Grid"> </td> </tr> </table> ### Proprioception <table markdown="span"> <tr> <td valign="top" width="100%"> <strong>Joint Positions</strong><br><br> Joint positions.<br><br> 
Size: # of joints, numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Joint Velocities</strong><br><br> Joint velocities.<br><br> Size: # of joints, numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Joint Efforts</strong><br><br> Torque measured at each joint.<br><br> Size: # of joints, numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Robot Position</strong><br><br> Robot position in the world frame.<br><br> Size: (x, y, z), numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Robot Orientation</strong><br><br> Robot global euler orientation.<br><br> Size: (roll, pitch, yaw), numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td> <strong>Robot 2D Orientation</strong><br><br> Robot orientation on the XY plane of the world frame.<br><br> Size: angle, numpy.float64<br><br> </td> <td> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Robot Linear Velocity</strong><br><br> Robot linear velocity.<br><br> Size: (x_vel, y_vel, z_vel), numpy.float64<br><br> </td> </tr> <tr> <td valign="top" width="100%"> <strong>Robot Angular Velocity</strong><br><br> Robot angular velocity.<br><br> Size: (x_vel, y_vel, z_vel), numpy.float64<br><br> </td> <td> </td> </tr> </table> ### Task Observation <table markdown="span" style="width: 100%;"> <tr> <td valign="top" width="100%"> <strong>Low-dim task observation</strong><br><br> Task-specific observation, e.g. navigation goal position.<br><br> Size: # of low-dim observation, numpy.float64<br><br> </td> <td> </td> </tr> </table>
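Putting the pieces above together, the following minimal sketch queries observations at the `Environment` level. The robot and sensor names mirror the example output shown earlier and depend on your config; the small `cfg` used here is illustrative only.

<pre><code>
import omnigibson as og

# Illustrative config: one Fetch robot with all observation modalities enabled
cfg = {
    "scene": {"type": "Scene"},
    "robots": [{"type": "Fetch", "obs_modalities": "all"}],
}
env = og.Environment(configs=cfg)
env.reset()

# get_obs() returns both the observation dict and a secondary info dict
obs, info = env.get_obs()

rgb = obs["robot0"]["robot0:eyes:Camera:0"]["rgb"]          # (H, W, 4), uint8
scan = obs["robot0"]["robot0:laser_link:Lidar:0"]["scan"]    # (# rays,), float32
proprio = obs["robot0"]["proprio"]

# Semantic ID -> category name mapping for this frame
print(info["robot0"]["robot0:eyes:Camera:0"]["seg_semantic"])
</code></pre>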
StanfordVL/OmniGibson/docs/modules/overview.md
--- icon: material/graph-outline --- # **Overview** <figure markdown="span"> ![OmniGibson architecture overview](../assets/architecture_overview.png){ width="100%" } </figure> **`OmniGibson`**'s framework provides **modular APIs** for (a) quickly interacting with different components of a created environment and (b) prototyping and developing custom environments. **`OmniGibson`** is built upon NVIDIA's [IsaacSim](https://docs.omniverse.nvidia.com/isaacsim/latest/index.html), a powerful simulation platform that uses [PhysX](https://nvidia-omniverse.github.io/PhysX/physx/5.3.1/index.html) as the physics backend. We build upon IsaacSim's `Simulator` interface to construct our `Environment` class, which is an [OpenAI gym-compatible](https://gymnasium.farama.org/content/gym_compatibility/) interface and the main entry point into **`OmniGibson`**. An `Environment` instance generally consists of the following: - A [`Scene`](./scene.md) instance, which by default is either a "dummy" (empty) scene or a fully-populated (`InteractiveTraversableScene`) instance, - A [`BaseTask`](./task.md) instance, which can range from a complex `BehaviorTask` to a navigation `PointNavigationTask` or a no-op `DummyTask`, - Optionally, one or more [`BaseRobot`](./robots.md)s, which define the action space for the given environment instance, - Optionally, one or more additional [`BaseObject`](./object.md)s, which are additional object models not explicitly defined in the environment's scene The above figure describes **`OmniGibson`**'s simulation loop: 1. **Action Execution:** An externally defined `action` is passed to `Robot` instances in the `Environment`, where it is processed by each robot's own set of `Controller`s and converted into low-level joint commands that are then deployed on the robot. 2. **Simulation Stepping:** The simulator takes at least one (and potentially multiple) physics steps, updating its internal state. 3. **Observation Retrieval:** Sensors on each `Robot` instance grab observations from the updated simulator state, and the loaded `Task` instance also computes its task-relevant observations and updates its internal state. The observations as well as task-relevant data are then returned from the `Environment` instance. Each of the modules in **`OmniGibson`** can be extended by the user, allowing custom subclass implementations to be used without needing to directly modify **`OmniGibson`** source code. This section provides high-level overviews of each of the modules, as well as general insight into the purpose and intended use-cases of each module.
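For orientation, the sketch below shows how the components listed above can be reached from a created `Environment` instance. It is a minimal sketch: the config is illustrative, and the attribute names (`env.scene`, `env.robots`, `env.task`) are assumed from the standard OmniGibson API.

```python
import omnigibson as og

# Illustrative minimal config (see the Environment documentation for details)
cfg = {
    "scene": {"type": "Scene"},
    "robots": [{"type": "Fetch", "obs_modalities": "all"}],
}
env = og.Environment(configs=cfg)

print(type(env.scene))                       # the Scene instance
print(type(env.task))                        # the BaseTask instance
print([robot.name for robot in env.robots])  # loaded BaseRobot instances
print(len(env.scene.objects))                # BaseObjects registered in the scene
```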
StanfordVL/OmniGibson/docs/modules/environment.md
--- icon: material/earth --- # ๐ŸŒŽ **Environment** The OpenAI Gym Environment serves as a top-level simulation object, offering a suite of common interfaces. These include methods such as `step`, `reset`, `render`, and properties like `observation_space` and `action_space`. The OmniGibson Environment builds upon this foundation by also supporting the loading of scenes, robots, and tasks. Following the OpenAI Gym interface, the OmniGibson environment further provides access to both the action space and observation space of the robots and external sensors. Creating a minimal environment requires the definition of a config dictionary. This dictionary should contain details about the scene, objects, robots, and specific characteristics of the environment: <details> <summary>Click to see code!</summary> <pre><code> import omnigibson as og cfg = { "env": { "action_frequency": 10, "physics_frequency": 120, }, "scene": { "type": "Scene", }, "objects": [], "robots": [ { "type": "Fetch", "obs_modalities": 'all', "controller_config": { "arm_0": { "name": "NullJointController", "motor_type": "position", }, }, } ] } env = og.Environment(configs=cfg) </code></pre> </details>
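Once created, the environment can be driven through the standard Gym-style loop. The following is a minimal sketch that continues from the `env` object above; note that the exact return signature of `env.step` (4-tuple vs. 5-tuple) depends on the Gym/Gymnasium version your OmniGibson install targets.

<pre><code>
# Continuing from the env created above
env.reset()

for _ in range(100):
    action = env.action_space.sample()  # random action, for illustration only
    obs, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:
        env.reset()
</code></pre>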
StanfordVL/OmniGibson/docs/stylesheets/extra.css
:root { --md-admonition-icon--code: url('data:image/svg+xml;charset=utf-8,<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 640 512"><!--! Font Awesome Free 6.2.1 by @fontawesome - https://fontawesome.com License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License) Copyright 2022 Fonticons, Inc.--><path d="M392.8 1.2c-17-4.9-34.7 5-39.6 22l-128 448c-4.9 17 5 34.7 22 39.6s34.7-5 39.6-22l128-448c4.9-17-5-34.7-22-39.6zm80.6 120.1c-12.5 12.5-12.5 32.8 0 45.3l89.3 89.4-89.4 89.4c-12.5 12.5-12.5 32.8 0 45.3s32.8 12.5 45.3 0l112-112c12.5-12.5 12.5-32.8 0-45.3l-112-112c-12.5-12.5-32.8-12.5-45.3 0zm-306.7 0c-12.5-12.5-32.8-12.5-45.3 0l-112 112c-12.5 12.5-12.5 32.8 0 45.3l112 112c12.5 12.5 32.8 12.5 45.3 0s12.5-32.8 0-45.3L77.3 256l89.4-89.4c12.5-12.5 12.5-32.8 0-45.3z"/></svg>') } .md-typeset .admonition.code, .md-typeset details.code { border-color: rgb(134, 130, 245); } .md-typeset .code > .admonition-title, .md-typeset .code > summary { background-color: rgba(134, 130, 245, 0.1); } .md-typeset .code > .admonition-title::before, .md-typeset .code > summary::before { background-color: rgb(134, 130, 245); -webkit-mask-image: var(--md-admonition-icon--code); mask-image: var(--md-admonition-icon--code); }
StanfordVL/OmniGibson/docs/tutorials/demo_collection.md
--- icon: octicons/rocket-16 --- # ๐Ÿ•น๏ธ **Collecting Demonstrations** ## Devices I/O Devices can be used to read user input and teleoperate simulated robots in real-time. OmniGibson leverages [TeleMoMa](https://robin-lab.cs.utexas.edu/telemoma-web/), a modular and versatile library for teleoperating mobile robots in the scene. This is achieved by using devices such as keyboards, SpaceMouse, cameras, VR devices, mobile phones, or any combination thereof. More generally, we support any interface that implements the `telemoma.human_interface.teleop_core.BaseTeleopInterface` class. In order to support your own custom device, simply subclass this base class and implement the required methods. For more information on this, check out the [TeleMoMa codebase](https://github.com/UT-Austin-RobIn/telemoma). ## Teleoperation The following section walks through `robot_teleoperation_example.py`, which lets users choose a robot to complete a simple pick-and-place task. Users are also encouraged to take a look at `vr_simple_demo.py`, which shows how to render to a VR headset and teleoperate the `BehaviorRobot` with VR controllers (HTC VIVE). We assume that we already have the scene and task set up. To initialize a teleoperation system, we first need to specify its configuration. After defining the config, simply instantiate the teleoperation system. ``` teleop_sys = TeleopSystem(config=teleop_config, robot=robot, show_control_marker=True) ``` `TeleopSystem` takes in the config dictionary, which we just created. It also takes in the robot instance we want to teleoperate, as well as `show_control_marker`, which, if set to `True`, will also create a green visual marker indicating the desired end-effector pose that the user wants the robot to reach. After the `TeleopSystem` is created, start it by calling ``` teleop_sys.start() ``` Then, within the simulation loop, simply call ``` action = teleop_sys.get_action(teleop_sys.get_obs()) ``` to get the action based on the user teleoperation input, and pass the action to the `env.step` function. ## (Optional) Saving and Loading Simulation State You can save the current state of the simulator to a json file by calling `save`: ``` og.sim.save(JSON_PATH) ``` To restore any saved state, simply call `restore`: ``` og.sim.restore(JSON_PATH) ``` Alternatively, if you just want to save all the scene and object info at the current timeframe, you can also call `self.scene.dump_state(serialized=True)`, which will return a numpy array containing all the relevant information. You can then stack these arrays together to get the full trajectory of states.
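Putting the pieces above together, a teleoperation loop that also records per-step scene states might look like the following minimal sketch. The `teleop_config`, `robot`, and `env` objects are assumed to have been created as described earlier, and the `TeleopSystem` import path is an assumption (see `robot_teleoperation_example.py` for the exact location).

```
import numpy as np
import omnigibson as og
# Import path assumed; consult robot_teleoperation_example.py
from omnigibson.utils.teleop_utils import TeleopSystem

teleop_sys = TeleopSystem(config=teleop_config, robot=robot, show_control_marker=True)
teleop_sys.start()

states = []
for _ in range(1000):
    # Convert the latest device readings into a robot action and step the env
    action = teleop_sys.get_action(teleop_sys.get_obs())
    env.step(action)
    # Record the serialized scene state for this timestep
    states.append(env.scene.dump_state(serialized=True))

trajectory = np.stack(states)          # full trajectory of recorded states
og.sim.save("recorded_scene.json")     # snapshot of the final simulator state
```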
StanfordVL/OmniGibson/docs/dist/css/style.css
html{ line-height:1.15; -ms-text-size-adjust:100%; -webkit-text-size-adjust:100% } body{ margin:0 } article,aside,footer,header,nav,section{ display:block } h1{ font-size:2em; margin:0.67em 0 } figcaption,figure,main{ display:block } figure{ margin:1em 40px } hr{ box-sizing:content-box; height:0; overflow:visible } pre{ font-family:monospace, monospace; font-size:1em } a{ background-color:transparent; -webkit-text-decoration-skip:objects } abbr[title]{ border-bottom:none; text-decoration:underline; -webkit-text-decoration:underline dotted; text-decoration:underline dotted } b,strong{ font-weight:inherit } b,strong{ font-weight:bolder } code,kbd,samp{ font-family:monospace, monospace; font-size:1em } dfn{ font-style:italic } mark{ background-color:#ff0; color:#000 } small{ font-size:80% } sub,sup{ font-size:75%; line-height:0; position:relative; vertical-align:baseline } sub{ bottom:-0.25em } sup{ top:-0.5em } audio,video{ display:inline-block } audio:not([controls]){ display:none; height:0 } img{ border-style:none } svg:not(:root){ overflow:hidden } button,input,optgroup,select,textarea{ font-family:sans-serif; font-size:100%; line-height:1.15; margin:0 } button,input{ overflow:visible } button,select{ text-transform:none } button,html [type="button"],[type="reset"],[type="submit"]{ -webkit-appearance:button } button::-moz-focus-inner,[type="button"]::-moz-focus-inner,[type="reset"]::-moz-focus-inner,[type="submit"]::-moz-focus-inner{ border-style:none; padding:0 } button:-moz-focusring,[type="button"]:-moz-focusring,[type="reset"]:-moz-focusring,[type="submit"]:-moz-focusring{ outline:1px dotted ButtonText } fieldset{ padding:0.35em 0.75em 0.625em } legend{ box-sizing:border-box; color:inherit; display:table; max-width:100%; padding:0; white-space:normal } progress{ display:inline-block; vertical-align:baseline } textarea{ overflow:auto } [type="checkbox"],[type="radio"]{ box-sizing:border-box; padding:0 } [type="number"]::-webkit-inner-spin-button,[type="number"]::-webkit-outer-spin-button{ height:auto } [type="search"]{ -webkit-appearance:textfield; outline-offset:-2px } [type="search"]::-webkit-search-cancel-button,[type="search"]::-webkit-search-decoration{ -webkit-appearance:none } ::-webkit-file-upload-button{ -webkit-appearance:button; font:inherit } details,menu{ display:block } summary{ display:list-item } canvas{ display:inline-block } template{ display:none } [hidden]{ display:none } html{ box-sizing:border-box } *,*:before,*:after{ box-sizing:inherit } body{ /*background:#1D2026;*/ /*-moz-osx-font-smoothing:grayscale;*/ /*-webkit-font-smoothing:antialiased*/ } hr{ border:0; display:block; height:1px; background:#242830; margin-top:24px; margin-bottom:24px } ul,ol{ margin-top:0; margin-bottom:24px; padding-left:24px } ul{ list-style:disc } ol{ list-style:decimal } li>ul,li>ol{ margin-bottom:0 } dl{ margin-top:0; margin-bottom:24px } dt{ font-weight:600 } dd{ margin-left:24px; margin-bottom:24px } img{ height:auto; max-width:100%; vertical-align:middle } figure{ margin:24px 0 } figcaption{ font-size:16px; line-height:24px; padding:8px 0 } img,svg{ display:block } table{ border-collapse:collapse; margin-bottom:24px; width:100% } tr{ border-bottom:1px solid #242830 } th{ text-align:left } th,td{ padding:10px 16px } th:first-child,td:first-child{ padding-left:0 } th:last-child,td:last-child{ padding-right:0 } html{ font-size:20px; line-height:30px } body{ color:#8A94A7; font-size:1rem } body,button,input,select,textarea{ font-family:"IBM Plex Sans", sans-serif } a{ color:#8A94A7; 
text-decoration:underline } a:hover,a:active{ outline:0; text-decoration:none } h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{ clear:both; color:#fff; font-weight:600 } h1,.h1{ font-size:38px; line-height:48px; letter-spacing:0px } @media (min-width: 641px){ h1,.h1{ font-size:44px; line-height:54px; letter-spacing:0px } } h2,.h2{ font-size:32px; line-height:42px; letter-spacing:0px } @media (min-width: 641px){ h2,.h2{ font-size:38px; line-height:48px; letter-spacing:0px } } h3,.h3,blockquote{ font-size:24px; line-height:34px; letter-spacing:0px } @media (min-width: 641px){ h3,.h3,blockquote{ font-size:32px; line-height:42px; letter-spacing:0px } } h4,h5,h6,.h4,.h5,.h6{ font-size:20px; line-height:30px; letter-spacing:-0.1px } @media (min-width: 641px){ h4,h5,h6,.h4,.h5,.h6{ font-size:24px; line-height:34px; letter-spacing:0px } } @media (max-width: 640px){ .h1-mobile{ font-size:38px; line-height:48px; letter-spacing:0px } .h2-mobile{ font-size:32px; line-height:42px; letter-spacing:0px } .h3-mobile{ font-size:24px; line-height:34px; letter-spacing:0px } .h4-mobile,.h5-mobile,.h6-mobile{ font-size:20px; line-height:30px; letter-spacing:-0.1px } } .text-sm{ font-size:18px; line-height:28px; letter-spacing:-0.1px } .text-xs{ font-size:16px; line-height:24px; letter-spacing:-0.1px } h1,h2,.h1,.h2{ margin-top:48px; margin-bottom:16px } h3,.h3{ margin-top:36px; margin-bottom:12px } h4,h5,h6,.h4,.h5,.h6{ margin-top:24px; margin-bottom:4px } p{ margin-top:0; margin-bottom:24px } dfn,cite,em,i{ font-style:italic } blockquote{ color:#3B404C; font-style:italic; margin-top:24px; margin-bottom:24px; margin-left:24px } blockquote::before{ content:"\201C" } blockquote::after{ content:"\201D" } blockquote p{ display:inline } address{ color:#8A94A7; border-width:1px 0; border-style:solid; border-color:#242830; padding:24px 0; margin:0 0 24px } pre,pre h1,pre h2,pre h3,pre h4,pre h5,pre h6,pre .h1,pre .h2,pre .h3,pre .h4,pre .h5,pre .h6{ font-family:"Courier 10 Pitch", Courier, monospace } pre,code,kbd,tt,var{ background:#1D2026 } pre{ font-size:16px; line-height:24px; margin-bottom:1.6em; max-width:100%; overflow:auto; padding:24px; margin-top:24px; margin-bottom:24px } code,kbd,tt,var{ font-family:Monaco, Consolas, "Andale Mono", "DejaVu Sans Mono", monospace; font-size:16px; padding:2px 4px } abbr,acronym{ cursor:help } mark,ins{ text-decoration:none } small{ font-size:18px; line-height:28px; letter-spacing:-0.1px } b,strong{ font-weight:600 } button,input,select,textarea,label{ font-size:20px; line-height:30px } .container,.container-sm{ width:100%; margin:0 auto; padding-left:16px; padding-right:16px } @media (min-width: 481px){ .container,.container-sm{ padding-left:24px; padding-right:24px } } .container{ max-width:1128px } .container-sm{ max-width:848px } .container .container-sm{ max-width:800px; padding-left:0; padding-right:0 } .screen-reader-text{ clip:rect(1px, 1px, 1px, 1px); position:absolute !important; height:1px; width:1px; overflow:hidden; word-wrap:normal !important } .screen-reader-text:focus{ border-radius:2px; box-shadow:0 0 2px 2px rgba(0,0,0,0.6); clip:auto !important; display:block; font-size:14px; letter-spacing:0px; font-weight:600; line-height:16px; text-decoration:none; text-transform:uppercase; /*background-color:#1D2026;*/ /*color:#0270D7 !important;*/ border:none; height:auto; left:8px; padding:16px 32px; top:8px; width:auto; z-index:100000 } .list-reset{ list-style:none; padding:0 } .text-left{ text-align:left } .text-center{ text-align:center } .text-right{ text-align:right } 
.text-primary{ color:#0270D7 } .has-top-divider{ position:relative } .has-top-divider::before{ content:''; position:absolute; top:0; left:0; width:100%; display:block; height:1px; background:#242830 } .has-bottom-divider{ position:relative } .has-bottom-divider::after{ content:''; position:absolute; bottom:0; left:0; width:100%; display:block; height:1px; background:#242830 } .bottom{ position: absolute; bottom: 0; left: 0; } .top{ position: absolute; top: 0; left: 0; } .m-0{ margin:0 } .mt-0{ margin-top:0 } .mr-0{ margin-right:0 } .mb-0{ margin-bottom:0 } .ml-0{ margin-left:0 } .m-8{ margin:8px } .mt-8{ margin-top:8px } .mr-8{ margin-right:8px } .mb-8{ margin-bottom:8px } .ml-8{ margin-left:8px } .m-16{ margin:16px } .mt-16{ margin-top:16px } .mr-16{ margin-right:16px } .mb-16{ margin-bottom:16px } .ml-16{ margin-left:16px } .m-24{ margin:24px } .mt-24{ margin-top:24px } .mr-24{ margin-right:24px } .mb-24{ margin-bottom:24px } .ml-24{ margin-left:24px } .m-32{ margin:32px } .mt-32{ margin-top:32px } .mr-32{ margin-right:32px } .mb-32{ margin-bottom:32px } .ml-32{ margin-left:32px } .m-40{ margin:40px } .mt-40{ margin-top:40px } .mr-40{ margin-right:40px } .mb-40{ margin-bottom:40px } .ml-40{ margin-left:40px } .m-48{ margin:48px } .mt-48{ margin-top:48px } .mr-48{ margin-right:48px } .mb-48{ margin-bottom:48px } .ml-48{ margin-left:48px } .m-56{ margin:56px } .mt-56{ margin-top:56px } .mr-56{ margin-right:56px } .mb-56{ margin-bottom:56px } .ml-56{ margin-left:56px } .m-64{ margin:64px } .mt-64{ margin-top:64px } .mr-64{ margin-right:64px } .mb-64{ margin-bottom:64px } .ml-64{ margin-left:64px } .p-0{ padding:0 } .pt-0{ padding-top:0 } .pr-0{ padding-right:0 } .pb-0{ padding-bottom:0 } .pl-0{ padding-left:0 } .p-8{ padding:8px } .pt-8{ padding-top:8px } .pr-8{ padding-right:8px } .pb-8{ padding-bottom:8px } .pl-8{ padding-left:8px } .p-16{ padding:16px } .pt-16{ padding-top:16px } .pr-16{ padding-right:16px } .pb-16{ padding-bottom:16px } .pl-16{ padding-left:16px } .p-24{ padding:24px } .pt-24{ padding-top:24px } .pr-24{ padding-right:24px } .pb-24{ padding-bottom:24px } .pl-24{ padding-left:24px } .p-32{ padding:32px } .pt-32{ padding-top:32px } .pr-32{ padding-right:32px } .pb-32{ padding-bottom:32px } .pl-32{ padding-left:32px } .p-40{ padding:40px } .pt-40{ padding-top:40px } .pr-40{ padding-right:40px } .pb-40{ padding-bottom:40px } .pl-40{ padding-left:40px } .p-48{ padding:48px } .pt-48{ padding-top:48px } .pr-48{ padding-right:48px } .pb-48{ padding-bottom:48px } .pl-48{ padding-left:48px } .p-56{ padding:56px } .pt-56{ padding-top:56px } .pr-56{ padding-right:56px } .pb-56{ padding-bottom:56px } .pl-56{ padding-left:56px } .p-64{ padding:64px } .pt-64{ padding-top:64px } .pr-64{ padding-right:64px } .pb-64{ padding-bottom:64px } .pl-64{ padding-left:64px } .sr .has-animations .is-revealing{ visibility:hidden } .has-animations .anime-element{ visibility:hidden } .anime-ready .has-animations .anime-element{ visibility:visible } .input,.textarea{ background-color:#fff; border-width:1px; border-style:solid; border-color:#242830; border-radius:2px; color:#8A94A7; max-width:100%; width:100% } .input::-webkit-input-placeholder,.textarea::-webkit-input-placeholder{ color:#3B404C } .input:-ms-input-placeholder,.textarea:-ms-input-placeholder{ color:#3B404C } .input::-ms-input-placeholder,.textarea::-ms-input-placeholder{ color:#3B404C } .input::placeholder,.textarea::placeholder{ color:#3B404C } .input::-ms-input-placeholder,.textarea::-ms-input-placeholder{ color:#3B404C } 
.input:-ms-input-placeholder,.textarea:-ms-input-placeholder{ color:#3B404C } .input:hover,.textarea:hover{ border-color:#191c21 } .input:active,.input:focus,.textarea:active,.textarea:focus{ outline:none; border-color:#242830 } .input[disabled],.textarea[disabled]{ cursor:not-allowed; background-color:#1D2026; border-color:#1D2026 } .input{ -moz-appearance:none; -webkit-appearance:none; font-size:16px; letter-spacing:-0.1px; line-height:20px; padding:13px 16px; height:48px; box-shadow:none } .input .inline-input{ display:inline; width:auto } .textarea{ display:block; min-width:100%; resize:vertical } .textarea .inline-textarea{ display:inline; width:auto } .field-grouped>.control:not(:last-child){ margin-bottom:8px } @media (min-width: 641px){ .field-grouped{ display:flex } .field-grouped>.control{ flex-shrink:0 } .field-grouped>.control.control-expanded{ flex-grow:1; flex-shrink:1 } .field-grouped>.control:not(:last-child){ margin-bottom:0; margin-right:8px } } .button{ display:inline-flex; font-size:14px; letter-spacing:0px; font-weight:600; line-height:16px; text-decoration:none !important; text-transform:uppercase; background-color:#242830; color:#fff !important; border:none; border-radius:2px; cursor:pointer; justify-content:center; padding:16px 32px; height:48px; text-align:center; white-space:nowrap } .button:hover{ background:#262a33 } .button:active{ outline:0 } .button::before{ border-radius:2px } .button-sm{ padding:8px 24px; height:32px } .button-primary{ background:#8c1515; background:linear-gradient(65deg, #8a0f0f 0, #8a2727 100%) } .button-primary:hover{ background:#b32727; background:linear-gradient(65deg, #b51d1d 0, #b83b3b 100%) } .button-block{ display:flex } .button-block{ display:flex; width:100% } @media (max-width: 640px){ .button-wide-mobile{ width:100%; max-width:280px } } .site-header{ padding:24px 0 } .site-header-inner{ position:relative; display:flex; justify-content:space-between; align-items:center } .header-links{ display:inline-flex } .header-links li{ display:inline-flex } .header-links a:not(.button){ font-size:16px; line-height:24px; letter-spacing:-0.1px; font-weight:600; color:#8A94A7; text-transform:uppercase; text-decoration:none; line-height:16px; padding:8px 24px } @media (min-width: 641px) { .site-header { position: relative } .site-header::before { content: ''; position: absolute; top: 0; left: 0; width: 100%; height: 100vh; background-image: url("../../assets/splash_no_logo.png"), linear-gradient(rgba(0, 0, 0, 0.5), rgba(0, 0, 0, 1)); background-size: 100%; background-repeat: no-repeat; } .hero { text-align: left; padding-top: 48px; padding-bottom: 88px } .hero-copy { position: relative; z-index: 1 } @media (min-width: 641px) { .hero { text-align: left; padding-top: 64px; padding-bottom: 88px } .hero-inner { display: flex; justify-content: space-between; align-items: center } .hero-copy { padding-right: 64px; min-width: 552px; width: 552px } .hero-cta { margin: 0 } .hero-cta .button { min-width: 170px } .hero-cta .button:first-child { margin-right: 16px } .hero-figure svg { width: auto } } }
StanfordVL/OmniGibson/docs/miscellaneous/known_issues.md
# **Known Issues & Troubleshooting** ## ๐Ÿค” **Known Issues** ??? question "How can I parallelize running multiple scenes in OmniGibson?" Currently, to run multiple scenes in parallel, you will need to launch separate instances of the OmniGibson environment. While this introduces some overhead due to running multiple instances of IsaacSim, we are actively working on implementing parallelization capabilities. Our goal is to enable running multiple scenes within a single instance, streamlining the process and reducing the associated overhead. ## ๐Ÿงฏ **Troubleshooting** ??? question "I cannot open Omniverse Launcher AppImage on Linux" You probably need to [install FUSE](https://github.com/AppImage/AppImageKit/wiki/FUSE) to run the Omniverse Launcher AppImage. ??? question "OmniGibson is stuck at `HydraEngine rtx failed creating scene renderer.`" `OmniGibson` is likely using an unsupported GPU (default is id 0). Run `nvidia-smi` to see the active list of GPUs, and select an NVIDIA-supported GPU and set its corresponding ID when running `OmniGibson` with `export OMNIGIBSON_GPU_ID=<ID NUMBER>`.
StanfordVL/OmniGibson/docs/miscellaneous/faq.md
# **Frequently Asked Questions** ## **What is the relationship between BEHAVIOR-1K and OmniGibson?** [BEHAVIOR-1K](https://behavior.stanford.edu/behavior-1k) is the high-level project that proposes an embodied AI benchmark of 1,000 tasks. Measuring agents' performance on these tasks accurately and reproducibly requires a simulation platform that can support all the semantics involved in the BEHAVIOR-1K tasks. OmniGibson meets this need as a feature-complete simulation platform, allowing these tasks to be fully instantiated and evaluated in simulation. ## **How is OmniGibson connected to Nvidia's Omniverse?** OmniGibson is built upon NVIDIA's Isaac Sim/Omniverse physics backend, leveraging its robust physics simulation capabilities. On top of this powerful engine, OmniGibson provides modular and user-friendly APIs, along with additional features such as controllers, robots, object states, and more. These added functionalities enable OmniGibson to facilitate the physical interactions and simulations required by the diverse range of tasks included in the BEHAVIOR-1K task suite. ## **Why should I use OmniGibson?** The core strengths of OmniGibson lie in its exceptional physical and visual realism, two critical factors in the development of embodied AI agents. - `Physical Realism`: To our knowledge, OmniGibson is the only simulation platform that supports large-scale scene interaction with - cloth - fluids - semantic object states (e.g. temperature, particle interactions) - complex physical interactions (transition rules). - `Visual Realism`: OmniGibson is built on NVIDIA's Isaac Sim/Omniverse physics backend, which provides industry-leading real-time ray tracing capabilities, resulting in highly realistic visual simulations. While OmniGibson may not currently be the fastest simulation platform available, we are actively working to enhance its performance. Our efforts include optimizing speeds and leveraging NVIDIA's cloning features to enable large-scale parallelization. ## **What is the relationship between Gibson, iGibson, and OmniGibson?** [Gibson](http://gibsonenv.stanford.edu/) is a collection of high-fidelity, large-scale scene scans, primarily designed for navigation tasks within static environments. [iGibson](https://svl.stanford.edu/igibson/), building upon this foundation, introduced interactivity by creating 15 fully interactive scenes. However, iGibson's implementation in PyBullet limited its capabilities, lacking support for high-fidelity rendering, cloth simulation, and fluid dynamics. [OmniGibson](https://behavior.stanford.edu/omnigibson/) aims to provide a comprehensive and feature-complete simulation platform. It includes 50 much larger-scale, fully interactive scenes with thousands of curated objects. Leveraging real-time ray tracing and NVIDIA's Omniverse backend, OmniGibson offers exceptional visual realism while supporting advanced simulations involving cloth, fluids, and various other complex physical interactions.
StanfordVL/OmniGibson/docs/miscellaneous/contact.md
# **Contact** If you have any questions, comments, or concerns, please feel free to reach out to us by joining our Discord server: <a href="https://discord.gg/bccR5vGFEx"><img src="https://discordapp.com/api/guilds/1166422812160966707/widget.png?style=banner3"></a>
StanfordVL/OmniGibson/docs/miscellaneous/contributing.md
# **Contribution Guidelines** We sincerely welcome contributions of any form to OmniGibson, as our aim is to make it a more robust and useful resource for the community. We have always held the belief that a collective effort from the community is essential to tailor BEHAVIOR/OmniGibson to meet diverse needs and unlock its full potential. ## **Bug Reports & Feature Requests** If you encounter any bugs or have feature requests that could enhance the platform, please feel free to open an issue on our GitHub repository. Before creating a new issue, we recommend checking the existing issues to see if your problem or request has already been reported or discussed. When reporting a bug, please kindly provide detailed information about the issue, including steps to reproduce it, any error messages, and relevant system details. For feature requests, we appreciate a clear description of the desired functionality and its potential benefits for the OmniGibson community. ## **Pull Requests** We are always open to pull requests that address bugs, add new features, or improve the platform in any way. If you are considering submitting a pull request, we recommend opening an issue first to discuss the changes you would like to make. This will help us ensure that your proposed changes align with the goals of the project and that we can provide guidance on the best way to implement them. When submitting a pull request, please ensure that your code adheres to the following guidelines: - **Code Style**: We follow the [PEP 8](https://www.python.org/dev/peps/pep-0008/) style guide for Python code. Please ensure that your code is formatted according to these guidelines. - **Documentation**: If your changes affect the public API or introduce new features, please update the relevant documentation to reflect these changes. - **Testing**: If your changes affect the behavior of the platform, please include tests to ensure that the new functionality works as expected and that existing functionality remains unaffected.
StanfordVL/OmniGibson/docs/getting_started/examples.md
--- icon: material/laptop --- # ๐Ÿ’ป **Examples** **`OmniGibson`** ships with many demo scripts highlighting its modularity and diverse feature set intended as a set of building blocks enabling your research. Let's try them out! *** ## โš™๏ธ **A quick word about macros** ??? question annotate "Why macros?" Macros enforce global behavior that is consistent within an individual python process but can differ between processes. This is useful because globally enabling all of **`OmniGibson`**'s features can cause unnecessary slowdowns, and so configuring the macros for your specific use case can optimize performance. For example, Omniverse provides a so-called `flatcache` feature which provides significant performance boosts, but cannot be used when fluids or soft bodies are present. So, we ideally should always have `gm.USE_FLATCACHE=True` unless we have fluids or soft bodies in our environment. `macros` define a globally available set of magic numbers or flags set throughout **`OmniGibson`**. These can either be directly set in `omnigibson.macros.py`, or can be programmatically modified at runtime via: ```{.python .annotate} from omnigibson.macros import gm, macros gm.<GLOBAL_MACRO> = <VALUE> # (1)! macros.<OG_DIRECTORY>.<OG_MODULE>.<MODULE_MACRO> = <VALUE> # (2)! ``` 1. `gm` refers to the "global" macros -- i.e.: settings that generally impact the entire **`OmniGibson`** stack. These are usually the only settings you may need to modify. 2. `macros` captures all remaining macros defined throughout **`OmniGibson`**'s codebase -- these are often hardcoded default settings or magic numbers defined in a specific module. These can also be overridden, but we recommend inspecting the module first to understand how it is used. Many of our examples set various `macros` settings at the beginning of the script, and is a good way to understand use cases for modifying them! *** ## ๐ŸŒŽ **Environments** These examples showcase the full **`OmniGibson`** stack in use, and the types of environments immediately supported. ### **BEHAVIOR Task Demo** !!! abstract "This demo is useful for..." * Understanding how to instantiate a BEHAVIOR task * Understanding how a pre-defined configuration file is used ```{.python .annotate} python -m omnigibson.examples.environments.behavior_env_demo ``` This demo instantiates one of our BEHAVIOR tasks (and optionally sampling object locations online) in a fully-populated scene and loads a `Fetch` robot. The robot executes random actions and the environment is reset periodically. ??? code "behavior_env_demo.py" ``` py linenums="1" --8<-- "examples/environments/behavior_env_demo.py" ``` ### **Navigation Task Demo** !!! abstract "This demo is useful for..." * Understanding how to instantiate a navigation task * Understanding how a pre-defined configuration file is used ```{.python .annotate} python -m omnigibson.examples.environments.navigation_env_demo ``` This demo instantiates one of our navigation tasks in a fully-populated scene and loads a `Turtlebot` robot. The robot executes random actions and the environment is reset periodically. ??? code "navigation_env_demo.py" ``` py linenums="1" --8<-- "examples/environments/navigation_env_demo.py" ``` ## ๐Ÿง‘โ€๐Ÿซ **Learning** These examples showcase how **`OmniGibson`** can be used to train embodied AI agents. ### **Reinforcement Learning Demo** !!! abstract "This demo is useful for..." 
* Understanding how to hook up **`OmniGibson`** to an external algorithm * Understanding how to train and evaluate a policy ```{.python .annotate} python -m omnigibson.examples.learning.navigation_policy_demo ``` This demo loads a BEHAVIOR task with a `Fetch` robot, and trains / evaluates the agent using [Stable Baseline3](https://stable-baselines3.readthedocs.io/en/master/)'s PPO algorithm. ??? code "navigation_policy_demo.py" ``` py linenums="1" --8<-- "examples/learning/navigation_policy_demo.py" ``` ## ๐Ÿ”๏ธ **Scenes** These examples showcase how to leverage **`OmniGibson`**'s large-scale, diverse scenes shipped with the BEHAVIOR dataset. ### **Scene Selector Demo** !!! abstract "This demo is useful for..." * Understanding how to load a scene into **`OmniGibson`** * Accessing all BEHAVIOR dataset scenes ```{.python .annotate} python -m omnigibson.examples.scenes.scene_selector ``` This demo lets you choose a scene from the BEHAVIOR dataset, loads it along with a `Turtlebot` robot, and cycles the resulting environment periodically. ??? code "scene_selector.py" ``` py linenums="1" --8<-- "examples/scenes/scene_selector.py" ``` ### **Scene Tour Demo** !!! abstract "This demo is useful for..." * Understanding how to load a scene into **`OmniGibson`** * Understanding how to generate a trajectory from a set of waypoints ```{.python .annotate} python -m omnigibson.examples.scenes.scene_tour_demo ``` This demo lets you choose a scene from the BEHAVIOR dataset. It allows you to move the camera using the keyboard, select waypoints, and then programmatically generates a video trajectory from the selected waypoints ??? code "scene_tour_demo.py" ``` py linenums="1" --8<-- "examples/scenes/scene_tour_demo.py" ``` ### **Traversability Map Demo** !!! abstract "This demo is useful for..." * Understanding how to leverage traversability map information from BEHAVIOR dataset scenes ```{.python .annotate} python -m omnigibson.examples.scenes.traversability_map_example ``` This demo lets you choose a scene from the BEHAVIOR dataset, and generates its corresponding traversability map. ??? code "traversability_map_example.py" ``` py linenums="1" --8<-- "examples/scenes/traversability_map_example.py" ``` ## ๐ŸŽ **Objects** These examples showcase how to leverage objects in **`OmniGibson`**. ### **Load Object Demo** !!! abstract "This demo is useful for..." * Understanding how to load an object into **`OmniGibson`** * Accessing all BEHAVIOR dataset asset categories and models ```{.python .annotate} python -m omnigibson.examples.objects.load_object_selector ``` This demo lets you choose a specific object from the BEHAVIOR dataset, and loads the requested object into an environment. ??? code "load_object_selector.py" ``` py linenums="1" --8<-- "examples/objects/load_object_selector.py" ``` ### **Object Visualizer Demo** !!! abstract "This demo is useful for..." * Viewing objects' textures as rendered in **`OmniGibson`** * Viewing articulated objects' range of motion * Understanding how to reference object instances from the environment * Understanding how to set object poses and joint states ```{.python .annotate} python -m omnigibson.examples.objects.visualize_object ``` This demo lets you choose a specific object from the BEHAVIOR dataset, and rotates the object in-place. If the object is articulated, it additionally moves its joints through its full range of motion. ??? code "visualize_object.py" ``` py linenums="1" --8<-- "examples/objects/visualize_object.py" ``` ### **Highlight Object** !!! 
abstract "This demo is useful for..." * Understanding how to highlight individual objects within a cluttered scene * Understanding how to access groups of objects from the environment ```{.python .annotate} python -m omnigibson.examples.objects.highlight_objects ``` This demo loads the Rs_int scene and highlights windows on/off repeatedly. ??? code "highlight_objects.py" ``` py linenums="1" --8<-- "examples/objects/highlight_objects.py" ``` ### **Draw Object Bounding Box Demo** !!! abstract annotate "This demo is useful for..." * Understanding how to access observations from a `GymObservable` object * Understanding how to access objects' bounding box information * Understanding how to dynamically modify vision modalities *[GymObservable]: [`Environment`](../reference/envs/env_base.md), all sensors extending from [`BaseSensor`](../reference/sensors/sensor_base.md), and all objects extending from [`BaseObject`](../reference/objects/object_base.md) (which includes all robots extending from [`BaseRobot`](../reference/robots/robot_base.md)!) are [`GymObservable`](../reference/utils/gym_utils.md#utils.gym_utils.GymObservable) objects! ```{.python .annotate} python -m omnigibson.examples.objects.draw_bounding_box ``` This demo loads a door object and banana object, and partially obscures the banana with the door. It generates both "loose" and "tight" bounding boxes (where the latter respects occlusions) for both objects, and dumps them to an image on disk. ??? code "draw_bounding_box.py" ``` py linenums="1" --8<-- "examples/objects/draw_bounding_box.py" ``` ## ๐ŸŒก๏ธ **Object States** These examples showcase **`OmniGibson`**'s powerful object states functionality, which captures both individual and relational kinematic and non-kinematic states. ### **Slicing Demo** !!! abstract "This demo is useful for..." * Understanding how slicing works in **`OmniGibson`** * Understanding how to access individual objects once the environment is created ```{.python .annotate} python -m omnigibson.examples.object_states.slicing_demo ``` This demo spawns an apple on a table with a knife above it, and lets the knife fall to "cut" the apple in half. ??? code "slicing_demo.py" ``` py linenums="1" --8<-- "examples/object_states/slicing_demo.py" ``` ### **Dicing Demo** !!! abstract "This demo is useful for..." * Understanding how to leverage the `Dicing` state * Understanding how to enable objects to be `diceable` ```{.python .annotate} python -m omnigibson.examples.object_states.dicing_demo ``` This demo loads an apple and a knife, and showcases how apple can be diced into smaller chunks with the knife. ??? code "dicing_demo.py" ``` py linenums="1" --8<-- "examples/object_states/dicing_demo.py" ``` ### **Folded and Unfolded Demo** !!! abstract "This demo is useful for..." * Understanding how to load a softbody (cloth) version of a BEHAVIOR dataset object * Understanding how to enable cloth objects to be `foldable` * Understanding the current heuristics used for gauging a cloth's "foldness" ```{.python .annotate} python -m omnigibson.examples.object_states.folded_unfolded_state_demo ``` This demo loads in three different cloth objects, and allows you to manipulate them while printing out their `Folded` state status in real-time. Try manipulating the object by holding down **`Shift`** and then **`Left-click + Drag`**! ??? code "folded_unfolded_state_demo.py" ``` py linenums="1" --8<-- "examples/object_states/folded_unfolded_state_demo.py" ``` ### **Overlaid Demo** !!! abstract "This demo is useful for..." 
* Understanding how cloth objects can be overlaid on rigid objects * Understanding current heuristics used for gauging a cloth's "overlaid" status ```{.python .annotate} python -m omnigibson.examples.object_states.overlaid_demo ``` This demo loads in a carpet on top of a table. The demo allows you to manipulate the carpet while printing out their `Overlaid` state status in real-time. Try manipulating the object by holding down **`Shift`** and then **`Left-click + Drag`**! ??? code "overlaid_demo.py" ``` py linenums="1" --8<-- "examples/object_states/overlaid_demo.py" ``` ### **Heat Source or Sink Demo** !!! abstract "This demo is useful for..." * Understanding how a heat source (or sink) is visualized in **`OmniGibson`** * Understanding how dynamic fire visuals are generated in real-time ```{.python .annotate} python -m omnigibson.examples.object_states.heat_source_or_sink_demo ``` This demo loads in a stove and toggles its `HeatSource` on and off, showcasing the dynamic fire visuals available in **`OmniGibson`**. ??? code "heat_source_or_sink_demo.py" ``` py linenums="1" --8<-- "examples/object_states/heat_source_or_sink_demo.py" ``` ### **Temperature Demo** !!! abstract "This demo is useful for..." * Understanding how to dynamically sample kinematic states for BEHAVIOR dataset objects * Understanding how temperature changes are propagated to individual objects from individual heat sources or sinks ```{.python .annotate} python -m omnigibson.examples.object_states.temperature_demo ``` This demo loads in various heat sources and sinks, and places an apple within close proximity to each of them. As the environment steps, each apple's temperature is printed in real-time, showcasing **`OmniGibson`**'s rudimentary temperature dynamics. ??? code "temperature_demo.py" ``` py linenums="1" --8<-- "examples/object_states/temperature_demo.py" ``` ### **Heated Demo** !!! abstract "This demo is useful for..." * Understanding how temperature modifications can cause objects' visual changes * Understanding how dynamic steam visuals are generated in real-time ```{.python .annotate} python -m omnigibson.examples.object_states.heated_state_demo ``` This demo loads in three bowls, and immediately sets their temperatures past their `Heated` threshold. Steam is generated in real-time from these objects, and then disappears once the temperature of the objects drops below their `Heated` threshold. ??? code "heated_state_demo.py" ``` py linenums="1" --8<-- "examples/object_states/heated_state_demo.py" ``` ### **Onfire Demo** !!! abstract "This demo is useful for..." * Understanding how changing onfire state can cause objects' visual changes * Understanding how onfire can be triggered by nearby onfire objects ```{.python .annotate} python -m omnigibson.examples.object_states.onfire_demo ``` This demo loads in a stove (toggled on) and two apples. The first apple will be ignited by the stove first, then the second apple will be ignited by the first apple. ??? code "onfire_demo.py" ``` py linenums="1" --8<-- "examples/object_states/onfire_demo.py" ``` ### **Particle Applier and Remover Demo** !!! abstract "This demo is useful for..." 
* Understanding how a `ParticleRemover` or `ParticleApplier` object can be generated * Understanding how particles can be dynamically generated on objects * Understanding different methods for applying and removing particles via the `ParticleRemover` or `ParticleApplier` object ```{.python .annotate} python -m omnigibson.examples.object_states.particle_applier_remover_demo ``` This demo loads in a washtowel and a table and lets you choose the ability configuration to enable the washtowel with. The washtowel will then proceed to either remove or generate particles dynamically on the table while moving. ??? code "particle_applier_remover_demo.py" ``` py linenums="1" --8<-- "examples/object_states/particle_applier_remover_demo.py" ``` ### **Particle Source and Sink Demo** !!! abstract "This demo is useful for..." * Understanding how a `ParticleSource` or `ParticleSink` object can be generated * Understanding how particles can be dynamically generated and destroyed via such objects ```{.python .annotate} python -m omnigibson.examples.object_states.particle_source_sink_demo ``` This demo loads in a sink, which is enabled with both the ParticleSource and ParticleSink states. The sink's particle source is located at the faucet spout and spawns a continuous stream of water particles, which is then destroyed ("sunk") by the sink's particle sink located at the drain. ??? note "Difference between `ParticleApplier/Removers` and `ParticleSource/Sinks`" The key difference between `ParticleApplier/Removers` and `ParticleSource/Sinks` is that `Applier/Removers` require contact (if using `ParticleProjectionMethod.ADJACENCY`) or overlap (if using `ParticleProjectionMethod.PROJECTION`) in order to spawn / remove particles, and generally only spawn particles at the contact points. `ParticleSource/Sinks` are special cases of `ParticleApplier/Removers` that always use `ParticleProjectionMethod.PROJECTION` and always spawn / remove particles within their projection volume, regardless of overlap with other objects. ??? code "particle_source_sink_demo.py" ``` py linenums="1" --8<-- "examples/object_states/particle_source_sink_demo.py" ``` ### **Kinematics Demo** !!! abstract "This demo is useful for..." * Understanding how to dynamically sample kinematic states for BEHAVIOR dataset objects * Understanding how to import additional objects after the environment is created ```{.python .annotate} python -m omnigibson.examples.object_states.sample_kinematics_demo ``` This demo procedurally generates a mini populated scene, spawning in a cabinet and placing boxes in its shelves, and then generating a microwave on a cabinet with a plate and apples sampled both inside and on top of it. ??? code "sample_kinematics_demo.py" ``` py linenums="1" --8<-- "examples/object_states/sample_kinematics_demo.py" ``` ### **Attachment Demo** !!! abstract "This demo is useful for..." * Understanding how to leverage the `Attached` state * Understanding how to enable objects to be `attachable` ```{.python .annotate} python -m omnigibson.examples.object_states.attachment_demo ``` This demo loads an assembled shelf, and showcases how it can be manipulated to attach and detach parts. ??? code "attachment_demo.py" ``` py linenums="1" --8<-- "examples/object_states/attachment_demo.py" ``` ### **Object Texture Demo** !!! abstract "This demo is useful for..." 
* Understanding how different object states can result in texture changes * Understanding how to enable objects with texture-changing states * Understanding how to dynamically modify object states ```{.python .annotate} python -m omnigibson.examples.object_states.object_state_texture_demo ``` This demo loads in a single object, and then dynamically modifies its state so that its texture changes with each modification. ??? code "object_state_texture_demo.py" ``` py linenums="1" --8<-- "examples/object_states/object_state_texture_demo.py" ``` ## 🤖 **Robots** These examples showcase how to interact with and leverage robot objects in **`OmniGibson`**. ### **Robot Visualizer Demo** !!! abstract "This demo is useful for..." * Understanding how to load a robot into **`OmniGibson`** after an environment is created * Accessing all **`OmniGibson`** robot models * Viewing robots' low-level joint motion ```{.python .annotate} python -m omnigibson.examples.robots.all_robots_visualizer ``` This demo iterates over all robots in **`OmniGibson`**, loading each one into an empty scene and randomly moving its joints for a brief amount of time. ??? code "all_robots_visualizer.py" ``` py linenums="1" --8<-- "examples/robots/all_robots_visualizer.py" ``` ### **Robot Control Demo** !!! abstract "This demo is useful for..." * Understanding how different controllers can be used to control robots * Understanding how to teleoperate a robot through external commands ```{.python .annotate} python -m omnigibson.examples.robots.robot_control_example ``` This demo lets you choose a robot and the set of controllers to control the robot, and then lets you teleoperate the robot using your keyboard. ??? code "robot_control_example.py" ``` py linenums="1" --8<-- "examples/robots/robot_control_example.py" ``` ### **Robot Grasping Demo** !!! abstract annotate "This demo is useful for..." * Understanding the difference between `physical` and `sticky` grasping * Understanding how to teleoperate a robot through external commands ```{.python .annotate} python -m omnigibson.examples.robots.grasping_mode_example ``` This demo lets you choose a grasping mode and then loads a `Fetch` robot and a cube on a table. You can then teleoperate the robot to grasp the cube, observing the difference in grasping behavior based on the grasping mode chosen. Here, `physical` means natural friction is required to hold objects, while `sticky` means that objects are constrained to the robot's gripper once contact is made. ??? code "grasping_mode_example.py" ``` py linenums="1" --8<-- "examples/robots/grasping_mode_example.py" ``` ### **Advanced: IK Demo** !!! abstract "This demo is useful for..." * Understanding how to construct your own IK functionality using Omniverse's native Lula library without explicitly utilizing all of OmniGibson's class abstractions * Understanding how to manipulate the simulator at a lower level than the main Environment entry point ```{.python .annotate} python -m omnigibson.examples.robots.advanced.ik_example ``` This demo loads in a `Fetch` robot and an IK solver to control the robot, and then lets you teleoperate the robot using your keyboard. ??? code "ik_example.py" ``` py linenums="1" --8<-- "examples/robots/advanced/ik_example.py" ``` ## 🧰 **Simulator** These examples showcase useful functionality from **`OmniGibson`**'s monolithic `Simulator` object. ??? question "What's the difference between `Environment` and `Simulator`?" 
The [`Simulator`](../../reference/simulator) class is a lower-level object that: * handles importing scenes and objects into the actual simulation * directly interfaces with the underlying physics engine The [`Environment`](../../reference/environemnts/base_env) class thinly wraps the `Simulator`'s core functionality, by: * providing convenience functions for automatically importing a predefined scene, object(s), and robot(s) (via the `cfg` argument), as well as a [`task`](../../reference/tasks/task_base) * providing an OpenAI Gym interface for stepping through the simulation While most of the core functionality in `Environment` (as well as more fine-grained physics control) can be replicated via direct calls to `Simulator` (`og.sim`), it requires a deeper understanding of **`OmniGibson`**'s infrastructure and is not recommended for new users. ### **State Saving and Loading Demo** !!! abstract "This demo is useful for..." * Understanding how to interact with objects using the mouse * Understanding how to save the active simulator state to a file * Understanding how to restore the simulator state from a given file ```{.python .annotate} python -m omnigibson.examples.simulator.sim_save_load_example ``` This demo loads a stripped-down scene with the `Turtlebot` robot, and lets you interact with objects to modify the scene. The state is then saved, written to a `.json` file, and then restored in the simulation. ??? code "sim_save_load_example.py" ``` py linenums="1" --8<-- "examples/simulator/sim_save_load_example.py" ``` ## 🖼️ **Rendering** These examples showcase how to change renderer settings in **`OmniGibson`**. ### **Renderer Settings Demo** !!! abstract "This demo is useful for..." * Understanding how to use the `RendererSettings` class ```{.python .annotate} python -m omnigibson.examples.renderer_settings.renderer_settings_example ``` This demo iterates over different renderer settings and shows how they can be programmatically set through the **`OmniGibson`** interface. ??? code "renderer_settings_example.py" ``` py linenums="1" --8<-- "examples/renderer_settings/renderer_settings_example.py" ```
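All of the demos above are launched as standalone modules via `python -m`. If you would rather start one from an interactive session or from your own script, the standard-library `runpy` module offers an equivalent entry point; the demo chosen below is just an illustrative pick from the list above.

``` python
import runpy

# Equivalent to `python -m omnigibson.examples.object_states.heated_state_demo`;
# any of the example modules listed above can be substituted here.
runpy.run_module(
    "omnigibson.examples.object_states.heated_state_demo",
    run_name="__main__",
)
```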
StanfordVL/OmniGibson/docs/getting_started/quickstart.md
--- icon: octicons/rocket-16 --- # ๐Ÿš€ **Quickstart** Let's quickly create an environment programmatically! **`OmniGibson`**'s workflow is straightforward: define the configuration of scene, object(s), robot(s), and task you'd like to load, and then instantiate our `Environment` class with that config. Let's start with the following: ```{.python .annotate} import omnigibson as og # (1)! from omnigibson.macros import gm # (2)! # Start with an empty configuration cfg = dict() ``` 1. All python scripts should start with this line! This allows access to key global variables through the top-level package. 2. Global macros (`gm`) can always be accessed directly and modified on the fly! ## ๐Ÿ”๏ธ **Defining a scene** Next, let's define a scene: ```{.python .annotate} cfg["scene"] = { "type": "Scene", # (1)! "floor_plane_visible": True, # (2)! } ``` 1. Our configuration gets parsed automatically and generates the appropriate class instance based on `type` (the string form of the class name). In this case, we're generating the most basic scene, which only consists of a floor plane. Check out [all of our available `Scene` classes](../reference/scenes/scene_base.md)! 2. In addition to specifying `type`, the remaining keyword-arguments get passed directly into the class constructor. So for the base [`Scene`](../reference/scenes/scene_base.md) class, you could optionally specify `"use_floor_plane"` and `"floor_plane_visible"`, whereas for the more powerful [`InteractiveTraversableScene`](../reference/scenes/interactive_traversable_scene.md) class (which loads a curated, preconfigured scene) you can additionally specify options for filtering objects, such as `"load_object_categories"` and `"load_room_types"`. You can see all available keyword-arguments by viewing the [individual `Scene` class](../reference/scenes/scene_base.md) you'd like to load! ## ๐ŸŽพ **Defining objects** We can optionally define some objects to load into our scene: ```{.python .annotate} cfg["objects"] = [ # (1)! { "type": "USDObject", # (2)! "name": "ghost_stain", # (3)! "usd_path": f"{gm.ASSET_PATH}/models/stain/stain.usd", "category": "stain", # (4)! "visual_only": True, # (5)! "scale": [1.0, 1.0, 1.0], # (6)! "position": [1.0, 2.0, 0.001], # (7)! "orientation": [0, 0, 0, 1.0], # (8)! }, { "type": "DatasetObject", # (9)! "name": "delicious_apple", "category": "apple", "model": "agveuv", # (10)! "position": [0, 0, 1.0], }, { "type": "PrimitiveObject", # (11)! "name": "incredible_box", "primitive_type": "Cube", # (12)! "rgba": [0, 1.0, 1.0, 1.0], # (13)! "scale": [0.5, 0.5, 0.1], "fixed_base": True, # (14)! "position": [-1.0, 0, 1.0], "orientation": [0, 0, 0.707, 0.707], }, { "type": "LightObject", # (15)! "name": "brilliant_light", "light_type": "Sphere", # (16)! "intensity": 50000, # (17)! "radius": 0.1, # (18)! "position": [3.0, 3.0, 4.0], }, ] ``` 1. Unlike the `"scene"` sub-config, we can define an arbitrary number of objects to load, so this is a `list` of `dict` istead of a single nested `dict`. 2. **`OmniGibson`** supports multiple object classes, and we showcase an instance of each core class here. A [`USDObject`](../reference/objects/usd_object.md) is our most generic object class, and generates an object sourced from the `usd_path` argument. 3. All objects **must** define the `name` argument! This is because **`OmniGibson`** enforces a global unique naming scheme, and so any created objects must have unique names assigned to them. 4. `category` is used by all object classes to assign semantic segmentation IDs. 5. 
`visual_only` is used by all object classes and defines whether the object is subject to both gravity and collisions. 6. `scale` is used by all object classes and defines the global (x,y,z) relative scale of the object. 7. `position` is used by all object classes and defines the initial (x,y,z) position of the object in the global frame. 8. `orientation` is used by all object classes and defines the initial (x,y,z,w) quaternion orientation of the object in the global frame. 9. A [`DatasetObject`](../reference/objects/dataset_object.md) is an object pulled directly from our **BEHAVIOR** dataset. It includes metadata and annotations not found on a generic `USDObject`. Note that these assets are encrypted, and thus cannot be created via the `USDObject` class. 10. Instead of explicitly defining the hardcoded path to the dataset USD model, `model` (in conjunction with `category`) is used to infer the exact dataset object to load. In this case this is the exact same underlying raw USD asset that was loaded above as a `USDObject`! 11. A [`PrimitiveObject`](../reference/objects/primitive_object.md) is a programmatically generated object defining a convex primitive shape. 12. `primitive_type` defines what primitive shape to load -- see [`PrimitiveObject`](../reference/objects/primitive_object.md) for available options! 13. Because this object is programmatically generated, we can also specify the color to assign to this primitive object. 14. `fixed_base` is used by all object classes and determines whether the generated object is fixed relative to the world frame. Useful for fixing in place large objects, such as furniture or structures. 15. A [`LightObject`](../reference/objects/light_object.md) is a programmatically generated light source. It is used to directly illuminate the given scene. 16. `light_type` defines what light shape to load -- see [`LightObject`](../reference/objects/light_object.md) for available options! 17. `intensity` defines how bright the generated light source should be. 18. `radius` is used by `Sphere` lights and determines their relative size. ## ๐Ÿค– **Defining robots** We can also optionally define robots to load into our scene: ```{.python .annotate} cfg["robots"] = [ # (1)! { "type": "Fetch", # (2)! "name": "baby_robot", "obs_modalities": ["scan", "rgb", "depth"], # (3)! }, ] ``` 1. Like the `"objects"` sub-config, we can define an arbitrary number of robots to load, so this is a `list` of `dict`. 2. **`OmniGibson`** supports multiple robot classes, where each class represents a specific robot model. Check out our [`robots`](../reference/robots/robot_base.md) to view all available robot classes! 3. Execute `print(og.ALL_SENSOR_MODALITIES)` for a list of all available observation modalities! ## ๐Ÿ“‹ **Defining a task** Lastly, we can optionally define a task to load into our scene. Since we're just getting started, let's load a "Dummy" task (which is the task that is loaded anyways even if we don't explicitly define a task in our config): ```{.python .annotate} cfg["task"] = { "type": "DummyTask", # (1)! "termination_config": dict(), # (2)! "reward_config": dict(), # (3)! } ``` 1. Check out all of **`OmniGibson`**'s [available tasks](../reference/tasks/task_base.md)! 2. `termination_config` configures the termination conditions for this task. It maps specific [`TerminationCondition`](../reference/termination_conditions/termination_condition_base.md) arguments to their corresponding values to set. 3. `reward_config` configures the reward functions for this task. 
It maps specific [`RewardFunction`](../reference/reward_functions/reward_function_base.md) arguments to their corresponding values to set. ## ๐ŸŒ€ **Creating the environment** We're all set! Let's load the config and create our environment: ```{.python .annotate} env = og.Environment(cfg) ``` Once the environment loads, we can interface with our environment similar to OpenAI's Gym interface: ```{.python .annotate} obs, rew, done, info = env.step(env.action_space.sample()) ``` ??? question "What happens if we have no robot loaded?" Even if we have no robot loaded, we still need to define an "action" to pass into the environment. In this case, our action space is 0, so you can simply pass `[]` or `np.array([])` into the `env.step()` call! ??? code "my_first_env.py" ``` py linenums="1" import omnigibson as og from omnigibson.macros import gm cfg = dict() # Define scene cfg["scene"] = { "type": "Scene", "floor_plane_visible": True, } # Define objects cfg["objects"] = [ { "type": "USDObject", "name": "ghost_stain", "usd_path": f"{gm.ASSET_PATH}/models/stain/stain.usd", "category": "stain", "visual_only": True, "scale": [1.0, 1.0, 1.0], "position": [1.0, 2.0, 0.001], "orientation": [0, 0, 0, 1.0], }, { "type": "DatasetObject", "name": "delicious_apple", "category": "apple", "model": "agveuv", "position": [0, 0, 1.0], }, { "type": "PrimitiveObject", "name": "incredible_box", "primitive_type": "Cube", "rgba": [0, 1.0, 1.0, 1.0], "scale": [0.5, 0.5, 0.1], "fixed_base": True, "position": [-1.0, 0, 1.0], "orientation": [0, 0, 0.707, 0.707], }, { "type": "LightObject", "name": "brilliant_light", "light_type": "Sphere", "intensity": 50000, "radius": 0.1, "position": [3.0, 3.0, 4.0], }, ] # Define robots cfg["robots"] = [ { "type": "Fetch", "name": "skynet_robot", "obs_modalities": ["scan", "rgb", "depth"], }, ] # Define task cfg["task"] = { "type": "DummyTask", "termination_config": dict(), "reward_config": dict(), } # Create the environment env = og.Environment(cfg) # Allow camera teleoperation og.sim.enable_viewer_camera_teleoperation() # Step! for _ in range(10000): obs, rew, done, info = env.step(env.action_space.sample()) og.shutdown() ``` ## ๐Ÿ‘€ **Looking around** Look around by: * `Left-CLICK + Drag`: Tilt * `Scroll-Wheel-CLICK + Drag`: Pan * `Scroll-Wheel UP / DOWN`: Zoom Interact with objects by: * `Shift + Left-CLICK + Drag`: Apply force on selected object Or, for more fine-grained control, run: ```{.python .annotate} og.sim.enable_viewer_camera_teleoperation() # (1)! ``` 1. This allows you to move the camera precisely with your keyboard, record camera poses, and dynamically modify lights! Or, for programmatic control, directly set the viewer camera's global pose: ```{.python .annotate} og.sim.viewer_camera.set_position_orientation(<POSITION>, <ORIENTATION>) ``` *** **Next:** Check out some of **`OmniGibson`**'s breadth of features from our [Building Block](./building_blocks.md) examples!
StanfordVL/OmniGibson/docs/getting_started/installation.md
--- icon: material/hammer-wrench --- # ๐Ÿ› ๏ธ **Installation** ## ๐Ÿ—’๏ธ **Requirements** Please make sure your system meets the following specs: - [x] **OS:** Ubuntu 20.04+ / Windows 10+ - [x] **RAM:** 32GB+ - [x] **GPU:** NVIDIA RTX 2070+ - [x] **VRAM:** 8GB+ ??? question "Why these specs?" **`OmniGibson`** is built upon NVIDIA's [Omniverse](https://www.nvidia.com/en-us/omniverse/) and [Isaac Sim](https://developer.nvidia.com/isaac-sim) platforms, so we inherit their dependencies. For more information, please see [Isaac Sim's Requirements](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/requirements.html). ## ๐Ÿ’ป **Setup** There are two ways to setup **`OmniGibson`**: - **๐Ÿณ Install with Docker (Linux only)**: You can quickly get **`OmniGibson`** immediately up and running from our pre-built docker image. - **๐Ÿงช Install from source (Linux / Windows)**: This method is recommended for deeper users looking to develop upon **`OmniGibson`** or use it extensively for research. !!! tip "" === "๐Ÿณ Install with Docker (Linux only)" Install **`OmniGibson`** with Docker is supported for **๐Ÿง Linux** only. ??? info "Need to install docker or NVIDIA docker?" ```{.shell .annotate} # Install docker curl https://get.docker.com | sh && sudo systemctl --now enable docker # Install nvidia-docker runtime distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \ && curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | \ sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ && curl -s -L https://nvidia.github.io/libnvidia-container/$distribution/libnvidia-container.list | \ sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list sudo apt-get update sudo apt-get install -y nvidia-docker2 # install sudo systemctl restart docker # restart docker engine ``` 1. Install our docker launching scripts: ```shell curl -LJO https://raw.githubusercontent.com/StanfordVL/OmniGibson/main/docker/run_docker.sh chmod a+x run_docker.sh ``` ??? question annotate "What is being installed?" Our docker image automatically ships with a pre-configured conda virtual environment named `omnigibson` with Isaac Sim and **`OmniGibson`** pre-installed. Upon running the first time, our scene and object assets will automatically be downloaded as well. 2. Then, simply launch the shell script: === "Headless" ```{.shell .annotate} sudo ./run_docker.sh -h <ABS_DATA_PATH> # (1)! ``` 1. `<ABS_DATA_PATH>` specifies the **absolute** path data will be stored on your machine (if no `<ABS_DATA_PATH>` is specified, it defaults to `./omnigibson_data`). This needs to be called each time the docker container is run! === "GUI" ```{.shell .annotate} sudo ./run_docker.sh <ABS_DATA_PATH> # (1)! ``` 1. `<ABS_DATA_PATH>` specifies the **absolute** path data will be stored on your machine (if no `<ABS_DATA_PATH>` is specified, it defaults to `./omnigibson_data`). This needs to be called each time the docker container is run! ??? warning annotate "Are you using NFS or AFS?" Docker containers are unable to access NFS or AFS drives, so if `run_docker.sh` are located on an NFS / AFS partition, please set `<DATA_PATH>` to an alternative data directory located on a non-NFS / AFS partition. === "๐Ÿงช Install from source (Linux / Windows)" Install **`OmniGibson`** from source is supported for both **๐Ÿง Linux (bash)** and **๐Ÿ“ Windows (powershell/cmd)**. !!! 
example "" === "๐Ÿง Linux (bash)" <div class="annotate" markdown> 1. Install [Conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) and NVIDIA's [Omniverse Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html) !!! warning "Please make sure you have the latest version of Isaac Sim (2023.1.1) installed." For Ubuntu 22.04, you need to [install FUSE](https://github.com/AppImage/AppImageKit/wiki/FUSE) to run the Omniverse Launcher AppImage. 2. Clone [**`OmniGibson`**](https://github.com/StanfordVL/OmniGibson) and move into the directory: ```shell git clone https://github.com/StanfordVL/OmniGibson.git cd OmniGibson ``` ??? note "Nightly build" The main branch contains the stable version of **`OmniGibson`**. For our latest developed (yet not fully tested) features and bug fixes, please clone from the `og-develop` branch. 3. Setup a virtual conda environment to run **`OmniGibson`**: ```{.shell .annotate} ./scripts/setup.sh # (1)! ``` 1. The script will ask you which Isaac Sim to use. If you installed it in the default location, it should be `~/.local/share/ov/pkg/isaac_sim-2023.1.1` This will create a conda env with `omnigibson` installed. Simply call `conda activate` to activate it. 4. Download **`OmniGibson`** dataset (within the conda env): ```shell python scripts/download_datasets.py ``` </div> === "๐Ÿ“ Windows (powershell/cmd)" <div class="annotate" markdown> 1. Install [Conda](https://conda.io/projects/conda/en/latest/user-guide/install/index.html) and NVIDIA's [Omniverse Isaac Sim](https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_workstation.html) !!! warning "Please make sure you have the latest version of Isaac Sim (2023.1.1) installed." 2. Clone [**`OmniGibson`**](https://github.com/StanfordVL/OmniGibson) and move into the directory: ```shell git clone https://github.com/StanfordVL/OmniGibson.git cd OmniGibson ``` ??? note "Nightly build" The main branch contains the stable version of **`OmniGibson`**. For our latest developed (yet not fully tested) features and bug fixes, please clone from the `og-develop` branch. 3. Setup a virtual conda environment to run **`OmniGibson`**: ```{.powershell .annotate} .\scripts\setup.bat # (1)! ``` 1. The script will ask you which Isaac Sim to use. If you installed it in the default location, it should be `C:\Users\<USER_NAME>\AppData\Local\ov\pkg\isaac_sim-2023.1.1` This will create a conda env with `omnigibson` installed. Simply call `conda activate` to activate it. 4. Download **`OmniGibson`** dataset (within the conda env): ```powershell python scripts\download_datasets.py ``` </div> ## ๐ŸŒŽ **Explore `OmniGibson`!** !!! warning annotate "Expect slowdown during first execution" Omniverse requires some one-time startup setup when **`OmniGibson`** is imported for the first time. The process could take up to 5 minutes. This is expected behavior, and should only occur once! **`OmniGibson`** is now successfully installed! Try exploring some of our new scenes interactively: ```{.shell .annotate} python -m omnigibson.examples.scenes.scene_selector # (1)! ``` 1. This demo lets you choose a scene and interactively move around using your keyboard and mouse. Hold down **`Shift`** and then **`Left-click + Drag`** an object to apply forces! You can also try teleoperating one of our robots: ```{.shell .annotate} python -m omnigibson.examples.robots.robot_control_example # (1)! ``` 1. 
This demo lets you choose a scene, robot, and set of controllers, and then teleoperate the robot using your keyboard. *** **Next:** Get quickly familiarized with **`OmniGibson`** from our [Quickstart Guide](./quickstart.md)! ## ๐Ÿงฏ **Troubleshooting** ??? question "I cannot open Omniverse Launcher AppImage on Linux" You probably need to [install FUSE](https://github.com/AppImage/AppImageKit/wiki/FUSE) to run the Omniverse Launcher AppImage. ??? question "OmniGibson is stuck at `HydraEngine rtx failed creating scene renderer.`" `OmniGibson` is likely using an unsupported GPU (default is id 0). Run `nvidia-smi` to see the active list of GPUs, and select an NVIDIA-supported GPU and set its corresponding ID when running `OmniGibson` with `export OMNIGIBSON_GPU_ID=<ID NUMBER>`.
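??? question "How can I check that the installation works from a Python script?"

    The snippet below is a minimal sketch based on the config format from the [Quickstart Guide](./quickstart.md): it loads an empty `Scene` with no robot, steps the environment a few times with an empty action, and shuts down.

    ``` python
    import omnigibson as og

    # Minimal config: just an empty scene -- no objects, robots, or explicit task
    cfg = {"scene": {"type": "Scene"}}
    env = og.Environment(cfg)

    # With no robot loaded, the action space is empty, so pass an empty action
    for _ in range(10):
        env.step([])

    og.shutdown()
    ```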
StanfordVL/OmniGibson/docs/getting_started/slurm.md
--- icon: material/server-network --- # ๐Ÿ”Œ **Running on a SLURM cluster** _This documentation is a work in progress._ OmniGibson can be run on a SLURM cluster using the _enroot_ container software, which is a replacement for Docker that allows containers to be run as the current user rather than as root. _enroot_ needs to be installed on your SLURM cluster by an administrator. With enroot installed, you can follow the below steps to run OmniGibson on SLURM: 1. Download the dataset to a location that is accessible by cluster nodes. To do this, you can use the download_dataset.py script inside OmniGibson's scripts directory, and move it to the right spot later. In the below example, /cvgl/ is a networked drive that is accessible by the cluster nodes. **For Stanford users, this step is already done for SVL and Viscam nodes** ```{.shell .annotate} OMNIGIBSON_NO_OMNIVERSE=1 python scripts/download_dataset.py mv omnigibson/data /cvgl/group/Gibson/og-data-0-2-1 ``` 2. (Optional) Distribute the dataset to the individual nodes. This will make load times much better than reading from a network drive. To do this, run the below command on your SLURM head node (replace `svl` with your partition name and `cvgl` with your account name, as well as the paths with the respective network and local paths). Confirm via `squeue -u $USER` that all jobs have finished. **This step is already done for SVL and Viscam nodes** ```{.shell .annotate} sinfo -p svl -o "%N,%n" -h | \ sed s/,.*//g | \ xargs -L1 -I{} \ sbatch \ --account=cvgl --partition=svl --nodelist={} --mem=8G --cpus-per-task=4 \ --wrap 'cp -R /cvgl/group/Gibson/og-data-0-2-1 /scr-ssd/og-data-0-2-1' ``` 3. Download your desired image to a location that is accessible by the cluster nodes. (Replace the path with your own path, and feel free to replace `latest` with your desired branch tag). You have the option to mount code (meaning you don't need the container to come with all the code you want to run, just the right dependencies / environment setup) ```{.shell .annotate} enroot import --output /cvgl2/u/cgokmen/omnigibson.sqsh docker://stanfordvl/omnigibson:action-primitives ``` 4. (Optional) If you intend to mount code onto the container, make it available at a location that is accessible by the cluster nodes. You can mount arbitrary code, and you can also mount a custom version of OmniGibson (for the latter, you need to make sure you mount your copy of OmniGibson at /omnigibson-src inside the container). For example: ```{.shell .annotate} git clone https://github.com/StanfordVL/OmniGibson.git /cvgl2/u/cgokmen/OmniGibson ``` 5. Create your launch script. You can start with a copy of the script below. If you want to launch multiple workers, increase the job array option. You should keep the setting at at least 1 GPU per node, but can feel free to edit other settings. You can mount any additional code as you'd like, and you can change the entrypoint such that the container runs your mounted code upon launch. See the mounts section for an example. 
A copy of this script can be found in docker/sbatch_example.sh ```{.shell .annotate} #!/usr/bin/env bash #SBATCH --account=cvgl #SBATCH --partition=svl --qos=normal #SBATCH --nodes=1 #SBATCH --cpus-per-task=8 #SBATCH --mem=30G #SBATCH --gres=gpu:2080ti:1 IMAGE_PATH="/cvgl2/u/cgokmen/omnigibson.sqsh" GPU_ID=$(nvidia-smi -L | grep -oP '(?<=GPU-)[a-fA-F0-9\-]+' | head -n 1) ISAAC_CACHE_PATH="/scr-ssd/${SLURM_JOB_USER}/isaac_cache_${GPU_ID}" # Define env kwargs to pass declare -A ENVS=( [NVIDIA_DRIVER_CAPABILITIES]=all [NVIDIA_VISIBLE_DEVICES]=0 [DISPLAY]="" [OMNIGIBSON_HEADLESS]=1 ) for env_var in "${!ENVS[@]}"; do # Add to env kwargs we'll pass to enroot command later ENV_KWARGS="${ENV_KWARGS} --env ${env_var}=${ENVS[${env_var}]}" done # Define mounts to create (maps local directory to container directory) declare -A MOUNTS=( [/scr-ssd/og-data-0-2-1]=/data [${ISAAC_CACHE_PATH}/isaac-sim/kit/cache/Kit]=/isaac-sim/kit/cache/Kit [${ISAAC_CACHE_PATH}/isaac-sim/cache/ov]=/root/.cache/ov [${ISAAC_CACHE_PATH}/isaac-sim/cache/pip]=/root/.cache/pip [${ISAAC_CACHE_PATH}/isaac-sim/cache/glcache]=/root/.cache/nvidia/GLCache [${ISAAC_CACHE_PATH}/isaac-sim/cache/computecache]=/root/.nv/ComputeCache [${ISAAC_CACHE_PATH}/isaac-sim/logs]=/root/.nvidia-omniverse/logs [${ISAAC_CACHE_PATH}/isaac-sim/config]=/root/.nvidia-omniverse/config [${ISAAC_CACHE_PATH}/isaac-sim/data]=/root/.local/share/ov/data [${ISAAC_CACHE_PATH}/isaac-sim/documents]=/root/Documents # Feel free to include lines like the below to mount a workspace or a custom OG version # [/cvgl2/u/cgokmen/OmniGibson]=/omnigibson-src # [/cvgl2/u/cgokmen/my-project]=/my-project ) MOUNT_KWARGS="" for mount in "${!MOUNTS[@]}"; do # Verify mount path in local directory exists, otherwise, create it if [ ! -e "$mount" ]; then mkdir -p ${mount} fi # Add to mount kwargs we'll pass to enroot command later MOUNT_KWARGS="${MOUNT_KWARGS} --mount ${mount}:${MOUNTS[${mount}]}" done # Create the image if it doesn't already exist CONTAINER_NAME=omnigibson_${GPU_ID} enroot create --force --name ${CONTAINER_NAME} ${IMAGE_PATH} # Remove leading space in string ENV_KWARGS="${ENV_KWARGS:1}" MOUNT_KWARGS="${MOUNT_KWARGS:1}" # The last line here is the command you want to run inside the container. # Here I'm running some unit tests. enroot start \ --root \ --rw \ ${ENV_KWARGS} \ ${MOUNT_KWARGS} \ ${CONTAINER_NAME} \ source /isaac-sim/setup_conda_env.sh && pytest tests/test_object_states.py # Clean up the image if possible. enroot remove -f ${CONTAINER_NAME} ``` 6. Launch your job using `sbatch your_script.sh` - and profit!
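For reference, here is a sketch of how the script above could be submitted and monitored from the head node. The array size, script name, and log file pattern are illustrative; adjust them to the number of workers and the SLURM configuration you are using.

```{.shell .annotate}
# Submit 4 copies of the job script as a job array (indices 0-3)
sbatch --array=0-3 sbatch_example.sh

# Check the state of your jobs
squeue -u $USER

# Follow the output of a running array task
tail -f slurm-<JOB_ID>_<ARRAY_INDEX>.out
```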
lucasapchagas/Omniverse/mvnw.cmd
@REM ---------------------------------------------------------------------------- @REM Licensed to the Apache Software Foundation (ASF) under one @REM or more contributor license agreements. See the NOTICE file @REM distributed with this work for additional information @REM regarding copyright ownership. The ASF licenses this file @REM to you under the Apache License, Version 2.0 (the @REM "License"); you may not use this file except in compliance @REM with the License. You may obtain a copy of the License at @REM @REM https://www.apache.org/licenses/LICENSE-2.0 @REM @REM Unless required by applicable law or agreed to in writing, @REM software distributed under the License is distributed on an @REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY @REM KIND, either express or implied. See the License for the @REM specific language governing permissions and limitations @REM under the License. @REM ---------------------------------------------------------------------------- @REM ---------------------------------------------------------------------------- @REM Apache Maven Wrapper startup batch script, version 3.2.0 @REM @REM Required ENV vars: @REM JAVA_HOME - location of a JDK home dir @REM @REM Optional ENV vars @REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands @REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending @REM MAVEN_OPTS - parameters passed to the Java VM when running Maven @REM e.g. to debug Maven itself, use @REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 @REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files @REM ---------------------------------------------------------------------------- @REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' @echo off @REM set title of command window title %0 @REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' @if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% @REM set %HOME% to equivalent of $HOME if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") @REM Execute a userRecord defined script before this one if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre @REM check for pre script, once with legacy .bat ending and once with .cmd ending if exist "%USERPROFILE%\mavenrc_pre.bat" call "%USERPROFILE%\mavenrc_pre.bat" %* if exist "%USERPROFILE%\mavenrc_pre.cmd" call "%USERPROFILE%\mavenrc_pre.cmd" %* :skipRcPre @setlocal set ERROR_CODE=0 @REM To isolate internal variables from possible post scripts, we use another setlocal @setlocal @REM ==== START VALIDATION ==== if not "%JAVA_HOME%" == "" goto OkJHome echo. echo Error: JAVA_HOME not found in your environment. >&2 echo Please set the JAVA_HOME variable in your environment to match the >&2 echo location of your Java installation. >&2 echo. goto error :OkJHome if exist "%JAVA_HOME%\bin\java.exe" goto init echo. echo Error: JAVA_HOME is set to an invalid directory. >&2 echo JAVA_HOME = "%JAVA_HOME%" >&2 echo Please set the JAVA_HOME variable in your environment to match the >&2 echo location of your Java installation. >&2 echo. goto error @REM ==== END VALIDATION ==== :init @REM Find the project base dir, i.e. the directory that contains the folder ".mvn". @REM Fallback to current working directory if not found. set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir set EXEC_DIR=%CD% set WDIR=%EXEC_DIR% :findBaseDir IF EXIST "%WDIR%"\.mvn goto baseDirFound cd .. 
IF "%WDIR%"=="%CD%" goto baseDirNotFound set WDIR=%CD% goto findBaseDir :baseDirFound set MAVEN_PROJECTBASEDIR=%WDIR% cd "%EXEC_DIR%" goto endDetectBaseDir :baseDirNotFound set MAVEN_PROJECTBASEDIR=%EXEC_DIR% cd "%EXEC_DIR%" :endDetectBaseDir IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig @setlocal EnableExtensions EnableDelayedExpansion for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a @endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% :endReadAdditionalConfig SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain set WRAPPER_URL="https://repo.maven.apache.org/maven2/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( IF "%%A"=="wrapperUrl" SET WRAPPER_URL=%%B ) @REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central @REM This allows using the maven wrapper in projects that prohibit checking in binary data. if exist %WRAPPER_JAR% ( if "%MVNW_VERBOSE%" == "true" ( echo Found %WRAPPER_JAR% ) ) else ( if not "%MVNW_REPOURL%" == "" ( SET WRAPPER_URL="%MVNW_REPOURL%/org/apache/maven/wrapper/maven-wrapper/3.2.0/maven-wrapper-3.2.0.jar" ) if "%MVNW_VERBOSE%" == "true" ( echo Couldn't find %WRAPPER_JAR%, downloading it ... echo Downloading from: %WRAPPER_URL% ) powershell -Command "&{"^ "$webclient = new-object System.Net.WebClient;"^ "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ "}"^ "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%WRAPPER_URL%', '%WRAPPER_JAR%')"^ "}" if "%MVNW_VERBOSE%" == "true" ( echo Finished downloading %WRAPPER_JAR% ) ) @REM End of extension @REM If specified, validate the SHA-256 sum of the Maven wrapper jar file SET WRAPPER_SHA_256_SUM="" FOR /F "usebackq tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( IF "%%A"=="wrapperSha256Sum" SET WRAPPER_SHA_256_SUM=%%B ) IF NOT %WRAPPER_SHA_256_SUM%=="" ( powershell -Command "&{"^ "$hash = (Get-FileHash \"%WRAPPER_JAR%\" -Algorithm SHA256).Hash.ToLower();"^ "If('%WRAPPER_SHA_256_SUM%' -ne $hash){"^ " Write-Output 'Error: Failed to validate Maven wrapper SHA-256, your Maven wrapper might be compromised.';"^ " Write-Output 'Investigate or delete %WRAPPER_JAR% to attempt a clean download.';"^ " Write-Output 'If you updated your Maven version, you need to update the specified wrapperSha256Sum property.';"^ " exit 1;"^ "}"^ "}" if ERRORLEVEL 1 goto error ) @REM Provide a "standardized" way to retrieve the CLI args that will @REM work with both Windows and non-Windows executions. 
set MAVEN_CMD_LINE_ARGS=%* %MAVEN_JAVA_EXE% ^ %JVM_CONFIG_MAVEN_PROPS% ^ %MAVEN_OPTS% ^ %MAVEN_DEBUG_OPTS% ^ -classpath %WRAPPER_JAR% ^ "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" ^ %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* if ERRORLEVEL 1 goto error goto end :error set ERROR_CODE=1 :end @endlocal & set ERROR_CODE=%ERROR_CODE% if not "%MAVEN_SKIP_RC%"=="" goto skipRcPost @REM check for post script, once with legacy .bat ending and once with .cmd ending if exist "%USERPROFILE%\mavenrc_post.bat" call "%USERPROFILE%\mavenrc_post.bat" if exist "%USERPROFILE%\mavenrc_post.cmd" call "%USERPROFILE%\mavenrc_post.cmd" :skipRcPost @REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' if "%MAVEN_BATCH_PAUSE%"=="on" pause if "%MAVEN_TERMINATE_CMD%"=="on" exit %ERROR_CODE% cmd /C exit /B %ERROR_CODE%
lucasapchagas/Omniverse/README.md
# OmniVerse API 🌌 OmniVerse API is a straightforward API that provides access only to the basic CRUD concept routes, enabling efficient and consistent data manipulation. Our API uses the ViaCEP API, a well-known API that returns the data of a specific address based on the provided postal code (CEP). ## Setup 🔧 OmniVerse API is an API built on top of the Java Spring Boot framework, designed to be easily installed and deployed. For an easy setup, you'll need a MySQL server, but the API itself is prepared to accept any DB you want. Follow the [MySQL Documentation](https://dev.mysql.com/doc/mysql-getting-started/en) link in order to set up a working server. 1. The first thing you'll need after your MySQL server is running is to set up the API to be able to connect to it. You'll need to modify the [**application.properties**](https://github.com/lucasapchagas/Omniverse/blob/main/src/main/resources/application.properties) file to your own needs. - `spring.datasource.url`, you must provide your MySQL server URL. - `spring.datasource.username`, you must provide your MySQL server username. - `spring.datasource.password`, you must provide your MySQL server password. ❗**If you provide a URL for a database which was not previously created, the API will not start. Use `CREATE database <db_name>;` in order to properly create it.** 2. Building it 🔨 To build the project, you need to have Java 17 installed, but you can easily change the version by modifying the application's [**pom.xml**](https://github.com/lucasapchagas/Omniverse/blob/main/pom.xml) file. The project uses Maven as the build platform, which brings all the conveniences of Maven. - You can build it just by running `./mvnw package` in the project root folder; the target file will be generated in the `/target/` folder. 3. Using it 😯 Utilizing the API is as simple as modifying, understanding, and building it. Given that Java runs on the JVM, deploying the API is effortless: simply run the compiled JAR on any cloud service. - You can just use a [RELEASE](https://github.com/lucasapchagas/Omniverse/releases/tag/RELEASE) instead of compiling it. Please, always use the latest one. - In order to run it you must use the following command `java -jar OmniVerse-0.0.1-SNAPSHOT.jar`. By default it will serve the API at [`http://localhost:8080/`](http://localhost:8080/). - Use the OmniverseCLI to test the API. https://github.com/lucasapchagas/OmniverseCLI ## Features 🪶 - Uses the **ViaCEP API** in order to register users' addresses. - Migrations with the Flyway library. - Data validation with Spring Boot data validation. - JPA design pattern. ## API Usage 🍪 The OmniVerse API is user-friendly and comprises only 5 possible routes that align with the CRUD standard. You can use popular API testing tools like Insomnia. We have created a configuration that can be accessed on pastebin by [clicking here](https://pastebin.com/f1rBDfZP). Import it into your Insomnia to streamline your testing process. ### What is a user? Example: ```json { "id": 8, "name": "Lucas", "email": "[email protected]", "address": { "cep": "69050500", "place": "Rua Peru", "complement": "", "neighborhood": "Parque 10 de Novembro", "locality": "Manaus", "uf": "AM" } } ``` #### Register a user ```http POST /user ``` | Parameter | Type | Description | | :---------- | :--------- | :---------------------------------- | | `name` | `string` | User name | | `email` | `string` | Valid email | | `cep` | `string` | Valid cep, just numbers. 
| #### Returns a user ```http GET /user/{id} ``` #### Returns all users ```http GET /user ``` #### Delete a user ```http DELETE /user/{id} ``` #### Update a user Just the field you want to modify is needed as a parameter. User id is a **must have**. ```http PUT /user ``` | Parameter | Type | Description | | :---------- | :--------- | :---------------------------------- | | `id` | `int` | User id| | `name` | `string` | User name | | `email` | `string` | Valid email | | `cep` | `string` | Valid cep, just numbers. | ## Roadmap - [x] Implement JPA pattern. - [x] Usage of **ViaCEP API** in order to generate users' addresses. - [x] Implement Flyway migrations to our database. - [x] Implement Spring boot data validation. - [ ] Implement Spring boot security module. - [ ] Implement JSON Web Token usage.
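## Trying it out 🧪

If you prefer a scripted check over Insomnia, the sketch below exercises the register and fetch routes using only the Python standard library. It assumes the API is running locally on the default port shown above; the e-mail value is just a placeholder.

```python
import json
import urllib.request

BASE_URL = "http://localhost:8080"  # default address the API binds to

# Register a user (POST /user)
payload = json.dumps({"name": "Lucas", "email": "lucas@example.com", "cep": "69050500"}).encode("utf-8")
request = urllib.request.Request(
    f"{BASE_URL}/user",
    data=payload,
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(request) as response:
    user = json.loads(response.read())
    print("Created user with id", user["id"])

# Fetch it back (GET /user/{id})
with urllib.request.urlopen(f"{BASE_URL}/user/{user['id']}") as response:
    print(json.loads(response.read()))
```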
lucasapchagas/Omniverse/pom.xml
<?xml version="1.0" encoding="UTF-8"?> <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd"> <modelVersion>4.0.0</modelVersion> <parent> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-parent</artifactId> <version>3.1.2</version> <relativePath/> <!-- lookup parent from repository --> </parent> <groupId>com.lucasapchagas.OmniVerse </groupId> <artifactId>OmniVerse</artifactId> <version>0.0.1-SNAPSHOT</version> <name>OmniVerse</name> <description>OmniVerse is a Spring Boot application that provides an omnichannel experience, allowing users to interact consistently and efficiently across multiple channels and platforms.</description> <properties> <java.version>17</java.version> </properties> <dependencies> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-web</artifactId> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-devtools</artifactId> <scope>runtime</scope> <optional>true</optional> </dependency> <dependency> <groupId>org.projectlombok</groupId> <artifactId>lombok</artifactId> <optional>true</optional> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-test</artifactId> <scope>test</scope> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-data-jpa</artifactId> </dependency> <dependency> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-starter-validation</artifactId> </dependency> <dependency> <groupId>org.flywaydb</groupId> <artifactId>flyway-core</artifactId> </dependency> <dependency> <groupId>org.flywaydb</groupId> <artifactId>flyway-mysql</artifactId> </dependency> <dependency> <groupId>com.mysql</groupId> <artifactId>mysql-connector-j</artifactId> <scope>runtime</scope> </dependency> </dependencies> <build> <plugins> <plugin> <groupId>org.springframework.boot</groupId> <artifactId>spring-boot-maven-plugin</artifactId> <configuration> <excludes> <exclude> <groupId>org.projectlombok</groupId> <artifactId>lombok</artifactId> </exclude> </excludes> </configuration> </plugin> </plugins> </build> </project>
lucasapchagas/Omniverse/src/test/java/com/lucasapchagas/OmniVerse/OmniVerseApplicationTests.java
package com.lucasapchagas.OmniVerse; import org.junit.jupiter.api.Test; import org.springframework.boot.test.context.SpringBootTest; @SpringBootTest class OmniVerseApplicationTests { @Test void contextLoads() { } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/OmniVerseApplication.java
package com.lucasapchagas.OmniVerse; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; @SpringBootApplication public class OmniVerseApplication { public static void main(String[] args) { SpringApplication.run(OmniVerseApplication.class, args); } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/Constant.java
package com.lucasapchagas.OmniVerse; public class Constant { public static boolean DEBUG = true; public static String viacepOrigin="https://viacep.com.br/ws/$/json/"; }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/external/RequestCep.java
package com.lucasapchagas.OmniVerse.external; import com.fasterxml.jackson.databind.ObjectMapper; import com.lucasapchagas.OmniVerse.Constant; import com.lucasapchagas.OmniVerse.entities.common.AddressRecord; import com.lucasapchagas.OmniVerse.utils.Formatter; import com.lucasapchagas.OmniVerse.utils.Log; import org.springframework.web.client.RestTemplate; import java.util.Objects; public class RequestCep extends RestTemplate { private final String TAG = RequestCep.class.getName(); private AddressRecord result; public RequestCep(String cep) { super(); try { this.result = new AddressRecord( Objects.requireNonNull(this.getForObject( Constant.viacepOrigin.replace("$", cep), ViacepRecord.class )) ); } catch (Exception e) { this.result = new AddressRecord(cep, "", "", "", "", ""); Log.d(TAG, "Exception while retrieving cep - " + e); } } public AddressRecord getResult() { return result; } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/external/ViacepRecord.java
package com.lucasapchagas.OmniVerse.external; public record ViacepRecord( String cep, String logradouro, String complemento, String bairro, String localidade, String uf) { }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/controller/UserController.java
package com.lucasapchagas.OmniVerse.controller; import com.lucasapchagas.OmniVerse.entities.user.User; import com.lucasapchagas.OmniVerse.entities.user.UserCreateRecord; import com.lucasapchagas.OmniVerse.entities.user.UserRepository; import com.lucasapchagas.OmniVerse.entities.user.UserUpdateRecord; import jakarta.transaction.Transactional; import jakarta.validation.Valid; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.ResponseEntity; import org.springframework.web.bind.annotation.*; import org.springframework.web.util.UriComponentsBuilder; import java.net.URI; import java.util.List; @RestController @RequestMapping("/user") public class UserController { @Autowired private UserRepository userRepository; @PostMapping @Transactional public ResponseEntity registerUser(@RequestBody @Valid UserCreateRecord data) { User user = new User(data); userRepository.save(user); URI uri = UriComponentsBuilder.fromPath("/user/{id}").buildAndExpand(user.getId()).toUri(); return ResponseEntity.created(uri).body(user); } @GetMapping public ResponseEntity<List<User>> listUsers() { return ResponseEntity.ok(userRepository.findAll()); } @PutMapping @Transactional public ResponseEntity updateUser(@RequestBody UserUpdateRecord data) { User user = userRepository.getReferenceById(data.id()); user.update(data); return ResponseEntity.ok(user); } @DeleteMapping("/{id}") @Transactional public ResponseEntity deleteUser(@PathVariable Long id) { userRepository.deleteById(id); return ResponseEntity.noContent().build(); } @GetMapping("/{id}") public ResponseEntity user(@PathVariable Long id) { return ResponseEntity.ok(userRepository.getReferenceById(id)); } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/utils/Log.java
package com.lucasapchagas.OmniVerse.utils; import com.lucasapchagas.OmniVerse.Constant; public class Log { public static void d (String TAG, String message) { if (Constant.DEBUG) System.out.println(TAG + ": " + message); } public static void i (String TAG, String message) { System.out.println(TAG + ": " + message); } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/utils/Formatter.java
package com.lucasapchagas.OmniVerse.utils; public class Formatter { public static String cep(String cep) { return cep.replace("-", ""); } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/entities/user/UserUpdateRecord.java
package com.lucasapchagas.OmniVerse.entities.user; import jakarta.validation.constraints.NotNull; public record UserUpdateRecord( @NotNull Long id, String name, String cep) { }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/entities/user/User.java
package com.lucasapchagas.OmniVerse.entities.user; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.lucasapchagas.OmniVerse.entities.common.Address; import jakarta.persistence.*; import jakarta.validation.Valid; import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; import lombok.Getter; import lombok.NoArgsConstructor; @Table(name = "users") @Entity(name = "User") @Getter @NoArgsConstructor @AllArgsConstructor @EqualsAndHashCode(of = "id") @JsonIgnoreProperties(value={"hibernateLazyInitializer", "handler"}) public class User { @Id @GeneratedValue(strategy = GenerationType.IDENTITY) private long id; private String name; private String email; @Embedded private Address address; public User(@Valid UserCreateRecord userRecord) { this.name = userRecord.name(); this.email = userRecord.email(); this.address = new Address(userRecord.cep()); } public void update(UserUpdateRecord data) { if (data.name() != null) this.name = data.name(); if (data.cep() != null) this.address = new Address(data.cep()); } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/entities/user/UserCreateRecord.java
package com.lucasapchagas.OmniVerse.entities.user; import jakarta.validation.constraints.Email; import jakarta.validation.constraints.NotBlank; import jakarta.validation.constraints.NotNull; import jakarta.validation.constraints.Pattern; public record UserCreateRecord( @NotBlank String name, @NotBlank @Email String email, @NotNull @Pattern(regexp = "\\d{8}") String cep) { }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/entities/user/UserRepository.java
package com.lucasapchagas.OmniVerse.entities.user; import org.springframework.data.jpa.repository.JpaRepository; public interface UserRepository extends JpaRepository<User, Long> { }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/entities/common/Address.java
package com.lucasapchagas.OmniVerse.entities.common; import com.lucasapchagas.OmniVerse.external.RequestCep; import jakarta.persistence.Embeddable; import lombok.AllArgsConstructor; import lombok.Getter; import lombok.NoArgsConstructor; @Embeddable @Getter @NoArgsConstructor @AllArgsConstructor public class Address { private String cep; private String place; private String complement; private String neighborhood; private String locality; private String uf; public Address(String cep) { AddressRecord addressRecord = new RequestCep(cep).getResult(); this.cep = addressRecord.cep(); this.place = addressRecord.place(); this.complement = addressRecord.complement(); this.neighborhood = addressRecord.neighborhood(); this.locality = addressRecord.locality(); this.uf = addressRecord.uf(); System.out.println(addressRecord); } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/entities/common/ErrorHandler.java
package com.lucasapchagas.OmniVerse.entities.common; import jakarta.persistence.EntityNotFoundException; import org.springframework.http.ResponseEntity; import org.springframework.validation.FieldError; import org.springframework.web.bind.MethodArgumentNotValidException; import org.springframework.web.bind.annotation.ExceptionHandler; import org.springframework.web.bind.annotation.RestControllerAdvice; import java.util.List; @RestControllerAdvice public class ErrorHandler { @ExceptionHandler(EntityNotFoundException.class) public ResponseEntity error404() { return ResponseEntity.notFound().build(); } @ExceptionHandler(MethodArgumentNotValidException.class) public ResponseEntity error400(MethodArgumentNotValidException e) { List<FieldError> errors = e.getFieldErrors(); return ResponseEntity.badRequest().body(errors.stream().map(DataValidation::new).toList()); } private record DataValidation(String field, String message) { public DataValidation(FieldError e) { this(e.getField(), e.getDefaultMessage()); } } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/entities/common/AddressRecord.java
package com.lucasapchagas.OmniVerse.entities.common; import com.lucasapchagas.OmniVerse.external.ViacepRecord; import com.lucasapchagas.OmniVerse.utils.Formatter; import jakarta.validation.constraints.NotNull; import jakarta.validation.constraints.Pattern; public record AddressRecord ( @NotNull @Pattern(regexp = "\\d{8}") String cep, String place, String complement, String neighborhood, String locality, String uf) { public AddressRecord(ViacepRecord viacepRecord) { this( Formatter.cep(viacepRecord.cep()), viacepRecord.logradouro(), viacepRecord.complemento(), viacepRecord.bairro(), viacepRecord.localidade(), viacepRecord.uf() ); } }
lucasapchagas/Omniverse/src/main/java/com/lucasapchagas/OmniVerse/configuration/CorsConfiguration.java
package com.lucasapchagas.OmniVerse.configuration; import com.lucasapchagas.OmniVerse.Constant; import org.springframework.context.annotation.Configuration; import org.springframework.web.servlet.config.annotation.CorsRegistry; import org.springframework.web.servlet.config.annotation.WebMvcConfigurer; @Configuration public class CorsConfiguration implements WebMvcConfigurer { @Override public void addCorsMappings(CorsRegistry registry) { registry.addMapping("/**") //.allowedOrigins(Constant.frontOrigin) .allowedMethods("GET", "POST", "PUT", "DELETE", "OPTIONS", "HEAD", "TRACE", "CONNECT"); } }
Toni-SM/skrl/pyproject.toml
[project]
name = "skrl"
version = "1.1.0"
description = "Modular and flexible library for reinforcement learning on PyTorch and JAX"
readme = "README.md"
requires-python = ">=3.6"
license = {text = "MIT License"}
authors = [
  {name = "Toni-SM"},
]
maintainers = [
  {name = "Toni-SM"},
]
keywords = ["reinforcement-learning", "machine-learning", "reinforcement", "machine", "learning", "rl"]
classifiers = [
  "License :: OSI Approved :: MIT License",
  "Intended Audience :: Science/Research",
  "Topic :: Scientific/Engineering",
  "Topic :: Scientific/Engineering :: Artificial Intelligence",
  "Programming Language :: Python :: 3",
  "Operating System :: OS Independent",
]

# dependencies / optional-dependencies
dependencies = [
  "gym",
  "gymnasium",
  "tqdm",
  "packaging",
  "tensorboard",
]

[project.optional-dependencies]
torch = [
  "torch>=1.9",
]
jax = [
  "jax>=0.4.3",
  "jaxlib>=0.4.3",
  "flax",
  "optax",
]
all = [
  "torch>=1.9",
  "jax>=0.4.3",
  "jaxlib>=0.4.3",
  "flax",
  "optax",
]

# urls
[project.urls]
"Homepage" = "https://github.com/Toni-SM/skrl"
"Documentation" = "https://skrl.readthedocs.io"
"Discussions" = "https://github.com/Toni-SM/skrl/discussions"
"Bug Reports" = "https://github.com/Toni-SM/skrl/issues"
"Say Thanks!" = "https://github.com/Toni-SM"
"Source" = "https://github.com/Toni-SM/skrl"

[tool.yapf]
# run: yapf -p -m -i -r <folder>
based_on_style = "pep8"
blank_line_before_nested_class_or_def = false
blank_lines_between_top_level_imports_and_variables = 2
column_limit = 120
join_multiple_lines = false
space_between_ending_comma_and_closing_bracket = false
spaces_around_power_operator = true
split_all_top_level_comma_separated_values = true
split_before_arithmetic_operator = true
split_before_dict_set_generator = false
split_before_dot = true
split_complex_comprehension = true
coalesce_brackets = true

[tool.codespell]
# run: codespell <folder>
skip = "./docs/_build,./docs/source/_static"
quiet-level = 3
count = ""

[tool.isort]
use_parentheses = false
line_length = 120
multi_line_output = 3
lines_after_imports = 2
known_annotation = ["typing"]
known_framework = [
  "torch",
  "jax",
  "jaxlib",
  "flax",
  "optax",
  "numpy",
]
sections = [
  "FUTURE",
  "ANNOTATION",
  "STDLIB",
  "THIRDPARTY",
  "FRAMEWORK",
  "FIRSTPARTY",
  "LOCALFOLDER",
]
no_lines_before = "THIRDPARTY"
skip = ["docs"]
Toni-SM/skrl/CONTRIBUTING.md
First of all, **thank you**... For what? Because you are dedicating some time to reading these guidelines and possibly thinking about contributing!

<hr>

### I just want to ask a question!

If you have a question, please do not open an issue for this. Instead, use the following resources for it (you will get a faster response):

- [skrl's GitHub discussions](https://github.com/Toni-SM/skrl/discussions), a place to ask questions and discuss the project
- [Isaac Gym's forum](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/isaac-gym/322), a place to post your questions, find past answers, or just chat with other members of the community about Isaac Gym topics
- [Omniverse Isaac Sim's forum](https://forums.developer.nvidia.com/c/agx-autonomous-machines/isaac/simulation/69), a place to post your questions, find past answers, or just chat with other members of the community about Omniverse Isaac Sim/Gym topics

### I have found a (good) bug. What can I do?

Open an issue on [skrl's GitHub issues](https://github.com/Toni-SM/skrl/issues) and describe the bug. If possible, please provide some of the following items:

- Minimum code that reproduces the bug...
- or the exact steps to reproduce it
- The error log or a screenshot of it
- A link to the source code of the library that you are using (some problems may be due to the use of older versions. If possible, always use the latest version)
- Any other information that you think may be useful or help to reproduce/describe the problem

### I want to contribute, but I don't know how

There is a [board](https://github.com/users/Toni-SM/projects/2/views/8) containing relevant future implementations which can be a good starting place to identify contributions. Please consider the following points:

#### Notes about contributing

- Try to **communicate your change first** to [discuss](https://github.com/Toni-SM/skrl/discussions) the implementation if you want to add a new feature or change an existing one
- Modify only the minimum amount of code required and the files needed to make the change
- Use the provided [pre-commit](https://pre-commit.com/) hooks to format the code. Install them by running `pre-commit install` in the root of the repository; running them periodically with `pre-commit run --all-files` helps reduce commit errors
- Changes that are cosmetic in nature (code formatting, removing whitespace, etc.) or that correct grammatical, spelling or typo errors, and that do not add anything substantial to the functionality of the library will generally not be accepted as a pull request
  - The only exception is changes that result from the use of the pre-commit hooks

#### Coding conventions

**skrl** is designed with a focus on modularity, readability, simplicity and transparency of algorithm implementation. The file system structure groups components according to their functionality. Library components only inherit (and must inherit) from a single base class (no multilevel or multiple inheritance) that provides a uniform interface and implements common functionality that is not tied to the implementation details of the algorithms.

Read the code a little bit and you will understand it at first glance... Also:

- Use 4 indentation spaces
- Follow, as much as possible, the PEP8 Style Guide for Python code
- Document each module, class, function or method using the reStructuredText format
- Annotate all functions, both for the parameters and for the return value
- Follow the commit message style guide for Git described in https://commit.style
  - Capitalize (the first letter) and omit any trailing punctuation
  - Write it in the imperative tense
  - Aim for about 50 (or 72) characters
- Add import statements at the top of each module as follows (an illustrative sketch is shown after this document):

```ini
function annotation (e.g. typing)
# insert an empty line
python libraries and other libraries (e.g. gym, numpy, time, etc.)
# insert an empty line
machine learning framework modules (e.g. torch, torch.nn)
# insert an empty line
skrl components
```

<hr>

Thank you once again,

Toni
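To make the import-ordering convention above concrete, here is a minimal illustrative module header (the specific modules are only examples chosen for the sketch, not a required set):

```python
# function annotation (e.g. typing)
from typing import Optional, Union

# python libraries and other libraries (e.g. gym, numpy, time, etc.)
import sys

import numpy as np

# machine learning framework modules (e.g. torch, torch.nn)
import torch
import torch.nn as nn

# skrl components
from skrl import logger
```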
Toni-SM/skrl/CHANGELOG.md
# Changelog The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/). ## [1.1.0] - 2024-02-12 ### Added - MultiCategorical mixin to operate MultiDiscrete action spaces ### Changed (breaking changes) - Rename the `ManualTrainer` to `StepTrainer` - Output training/evaluation progress messages to system's stdout - Get single observation/action spaces for vectorized environments - Update Isaac Orbit environment wrapper ## [1.0.0] - 2023-08-16 Transition from pre-release versions (`1.0.0-rc.1` and`1.0.0-rc.2`) to a stable version. This release also announces the publication of the **skrl** paper in the Journal of Machine Learning Research (JMLR): https://www.jmlr.org/papers/v24/23-0112.html Summary of the most relevant features: - JAX support - New documentation theme and structure - Multi-agent Reinforcement Learning (MARL) ## [1.0.0-rc.2] - 2023-08-11 ### Added - Get truncation from `time_outs` info in Isaac Gym, Isaac Orbit and Omniverse Isaac Gym environments - Time-limit (truncation) boostrapping in on-policy actor-critic agents - Model instantiators `initial_log_std` parameter to set the log standard deviation's initial value ### Changed (breaking changes) - Structure environment loaders and wrappers file hierarchy coherently Import statements now follow the next convention: - Wrappers (e.g.): - `from skrl.envs.wrappers.torch import wrap_env` - `from skrl.envs.wrappers.jax import wrap_env` - Loaders (e.g.): - `from skrl.envs.loaders.torch import load_omniverse_isaacgym_env` - `from skrl.envs.loaders.jax import load_omniverse_isaacgym_env` ### Changed - Drop support for versions prior to PyTorch 1.9 (1.8.0 and 1.8.1) ## [1.0.0-rc.1] - 2023-07-25 ### Added - JAX support (with Flax and Optax) - RPO agent - IPPO and MAPPO multi-agent - Multi-agent base class - Bi-DexHands environment loader - Wrapper for PettingZoo and Bi-DexHands environments - Parameters `num_envs`, `headless` and `cli_args` for configuring Isaac Gym, Isaac Orbit and Omniverse Isaac Gym environments when they are loaded ### Changed - Migrate to `pyproject.toml` Python package development - Define ML framework dependencies as optional dependencies in the library installer - Move agent implementations with recurrent models to a separate file - Allow closing the environment at the end of execution instead of after training/evaluation - Documentation theme from *sphinx_rtd_theme* to *furo* - Update documentation structure and examples ### Fixed - Compatibility for Isaac Sim or OmniIsaacGymEnvs (2022.2.0 or earlier) - Disable PyTorch gradient computation during the environment stepping - Get categorical models' entropy - Typo in `KLAdaptiveLR` learning rate scheduler (keep the old name for compatibility with the examples of previous versions. 
The old name will be removed in future releases) ## [0.10.2] - 2023-03-23 ### Changed - Update loader and utils for OmniIsaacGymEnvs 2022.2.1.0 - Update Omniverse Isaac Gym real-world examples ## [0.10.1] - 2023-01-26 ### Fixed - Tensorboard writer instantiation when `write_interval` is zero ## [0.10.0] - 2023-01-22 ### Added - Isaac Orbit environment loader - Wrap an Isaac Orbit environment - Gaussian-Deterministic shared model instantiator ## [0.9.1] - 2023-01-17 ### Added - Utility for downloading models from Hugging Face Hub ### Fixed - Initialization of agent components if they have not been defined - Manual trainer `train`/`eval` method default arguments ## [0.9.0] - 2023-01-13 ### Added - Support for Farama Gymnasium interface - Wrapper for robosuite environments - Weights & Biases integration - Set the running mode (training or evaluation) of the agents - Allow clipping the gradient norm for DDPG, TD3 and SAC agents - Initialize model biases - Add RNN (RNN, LSTM, GRU and any other variant) support for A2C, DDPG, PPO, SAC, TD3 and TRPO agents - Allow disabling training/evaluation progressbar - Farama Shimmy and robosuite examples - KUKA LBR iiwa real-world example ### Changed (breaking changes) - Forward model inputs as a Python dictionary - Returns a Python dictionary with extra output values in model calls ### Changed - Adopt the implementation of `terminated` and `truncated` over `done` for all environments ### Fixed - Omniverse Isaac Gym simulation speed for the Franka Emika real-world example - Call agents' method `record_transition` instead of parent method to allow storing samples in memories during evaluation - Move TRPO policy optimization out of the value optimization loop - Access to the categorical model distribution - Call reset only once for Gym/Gymnasium vectorized environments ### Removed - Deprecated method `start` in trainers ## [0.8.0] - 2022-10-03 ### Added - AMP agent for physics-based character animation - Manual trainer - Gaussian model mixin - Support for creating shared models - Parameter `role` to model methods - Wrapper compatibility with the new OpenAI Gym environment API - Internal library colored logger - Migrate checkpoints/models from other RL libraries to skrl models/agents - Configuration parameter `store_separately` to agent configuration dict - Save/load agent modules (models, optimizers, preprocessors) - Set random seed and configure deterministic behavior for reproducibility - Benchmark results for Isaac Gym and Omniverse Isaac Gym on the GitHub discussion page - Franka Emika real-world example ### Changed (breaking changes) - Models implementation as Python mixin ### Changed - Multivariate Gaussian model (`GaussianModel` until 0.7.0) to `MultivariateGaussianMixin` - Trainer's `cfg` parameter position and default values - Show training/evaluation display progress using `tqdm` - Update Isaac Gym and Omniverse Isaac Gym examples ### Fixed - Missing recursive arguments during model weights initialization - Tensor dimension when computing preprocessor parallel variance - Models' clip tensors dtype to `float32` ### Removed - Parameter `inference` from model methods - Configuration parameter `checkpoint_policy_only` from agent configuration dict ## [0.7.0] - 2022-07-11 ### Added - A2C agent - Isaac Gym (preview 4) environment loader - Wrap an Isaac Gym (preview 4) environment - Support for OpenAI Gym vectorized environments - Running standard scaler for input preprocessing - Installation from PyPI (`pip install skrl`) ## [0.6.0] - 2022-06-09 ### Added 
- Omniverse Isaac Gym environment loader - Wrap an Omniverse Isaac Gym environment - Save best models during training ## [0.5.0] - 2022-05-18 ### Added - TRPO agent - DeepMind environment wrapper - KL Adaptive learning rate scheduler - Handle `gym.spaces.Dict` observation spaces (OpenAI Gym and DeepMind environments) - Forward environment info to agent `record_transition` method - Expose and document the random seeding mechanism - Define rewards shaping function in agents' config - Define learning rate scheduler in agents' config - Improve agent's algorithm description in documentation (PPO and TRPO at the moment) ### Changed - Compute the Generalized Advantage Estimation (GAE) in agent `_update` method - Move noises definition to `resources` folder - Update the Isaac Gym examples ### Removed - `compute_functions` for computing the GAE from memory base class ## [0.4.1] - 2022-03-22 ### Added - Examples of all Isaac Gym environments (preview 3) - Tensorboard file iterator for data post-processing ### Fixed - Init and evaluate agents in ParallelTrainer ## [0.4.0] - 2022-03-09 ### Added - CEM, SARSA and Q-learning agents - Tabular model - Parallel training using multiprocessing - Isaac Gym utilities ### Changed - Initialize agents in a separate method - Change the name of the `networks` argument to `models` ### Fixed - Reset environments after post-processing ## [0.3.0] - 2022-02-07 ### Added - DQN and DDQN agents - Export memory to files - Postprocessing utility to iterate over memory files - Model instantiator utility to allow fast development - More examples and contents in the documentation ### Fixed - Clip actions using the whole space's limits ## [0.2.0] - 2022-01-18 ### Added - First official release
Toni-SM/skrl/README.md
[![pypi](https://img.shields.io/pypi/v/skrl)](https://pypi.org/project/skrl)
[<img src="https://img.shields.io/badge/%F0%9F%A4%97%20models-hugging%20face-F8D521">](https://huggingface.co/skrl)
![discussions](https://img.shields.io/github/discussions/Toni-SM/skrl)
<br>
[![license](https://img.shields.io/github/license/Toni-SM/skrl)](https://github.com/Toni-SM/skrl)
<span>&nbsp;&nbsp;&nbsp;&nbsp;</span>
[![docs](https://readthedocs.org/projects/skrl/badge/?version=latest)](https://skrl.readthedocs.io/en/latest/?badge=latest)
[![pytest](https://github.com/Toni-SM/skrl/actions/workflows/python-test.yml/badge.svg)](https://github.com/Toni-SM/skrl/actions/workflows/python-test.yml)
[![pre-commit](https://github.com/Toni-SM/skrl/actions/workflows/pre-commit.yml/badge.svg)](https://github.com/Toni-SM/skrl/actions/workflows/pre-commit.yml)

<br>
<p align="center">
  <a href="https://skrl.readthedocs.io">
    <img width="300rem" src="https://raw.githubusercontent.com/Toni-SM/skrl/main/docs/source/_static/data/logo-light-mode.png">
  </a>
</p>
<h2 align="center" style="border-bottom: 0 !important;">SKRL - Reinforcement Learning library</h2>
<br>

**skrl** is an open-source modular library for Reinforcement Learning written in Python (on top of [PyTorch](https://pytorch.org/) and [JAX](https://jax.readthedocs.io)) and designed with a focus on modularity, readability, simplicity, and transparency of algorithm implementation. In addition to supporting the OpenAI [Gym](https://www.gymlibrary.dev) / Farama [Gymnasium](https://gymnasium.farama.org), [DeepMind](https://github.com/deepmind/dm_env) and other environment interfaces, it allows loading and configuring [NVIDIA Isaac Gym](https://developer.nvidia.com/isaac-gym/), [NVIDIA Isaac Orbit](https://isaac-orbit.github.io/orbit/index.html) and [NVIDIA Omniverse Isaac Gym](https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_gym_isaac_gym.html) environments, enabling agents' simultaneous training by scopes (subsets of environments among all available environments), which may or may not share resources, in the same run.

<br>

### Please, visit the documentation for usage details and examples

<strong>https://skrl.readthedocs.io</strong>

<br>

> **Note:** This project is under **active continuous development**. Please make sure you always have the latest version. Visit the [develop](https://github.com/Toni-SM/skrl/tree/develop) branch or its [documentation](https://skrl.readthedocs.io/en/develop) to access the latest updates to be released.

<br>

### Citing this library

To cite this library in publications, please use the following reference:

```bibtex
@article{serrano2023skrl,
  author  = {Antonio Serrano-Muñoz and Dimitrios Chrysostomou and Simon Bøgh and Nestor Arana-Arexolaleiba},
  title   = {skrl: Modular and Flexible Library for Reinforcement Learning},
  journal = {Journal of Machine Learning Research},
  year    = {2023},
  volume  = {24},
  number  = {254},
  pages   = {1--9},
  url     = {http://jmlr.org/papers/v24/23-0112.html}
}
```
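As a minimal illustration of the library's entry point, the following sketch wraps a Gymnasium environment with `wrap_env` (the wrapper import path appears in the sources below; the environment name is just an example, and this is not an official quick-start):

```python
import gymnasium as gym

from skrl.envs.wrappers.torch import wrap_env

# wrap_env inspects the environment and returns the matching skrl wrapper,
# so agents and trainers can interact with it through a uniform interface
env = gym.make("Pendulum-v1")
env = wrap_env(env)
```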
Toni-SM/skrl/skrl/__init__.py
from typing import Union import logging import sys import numpy as np __all__ = ["__version__", "logger", "config"] # read library version from metadata try: import importlib.metadata __version__ = importlib.metadata.version("skrl") except ImportError: __version__ = "unknown" # logger with format class _Formatter(logging.Formatter): _format = "[%(name)s:%(levelname)s] %(message)s" _formats = {logging.DEBUG: f"\x1b[38;20m{_format}\x1b[0m", logging.INFO: f"\x1b[38;20m{_format}\x1b[0m", logging.WARNING: f"\x1b[33;20m{_format}\x1b[0m", logging.ERROR: f"\x1b[31;20m{_format}\x1b[0m", logging.CRITICAL: f"\x1b[31;1m{_format}\x1b[0m"} def format(self, record): return logging.Formatter(self._formats.get(record.levelno)).format(record) _handler = logging.StreamHandler() _handler.setLevel(logging.DEBUG) _handler.setFormatter(_Formatter()) logger = logging.getLogger("skrl") logger.setLevel(logging.DEBUG) logger.addHandler(_handler) # machine learning framework configuration class _Config(object): def __init__(self) -> None: """Machine learning framework specific configuration """ class JAX(object): def __init__(self) -> None: """JAX configuration """ self._backend = "numpy" self._key = np.array([0, 0], dtype=np.uint32) @property def backend(self) -> str: """Backend used by the different components to operate and generate arrays This configuration excludes models and optimizers. Supported backend are: ``"numpy"`` and ``"jax"`` """ return self._backend @backend.setter def backend(self, value: str) -> None: if value not in ["numpy", "jax"]: raise ValueError("Invalid jax backend. Supported values are: numpy, jax") self._backend = value @property def key(self) -> "jax.Array": """Pseudo-random number generator (PRNG) key """ if isinstance(self._key, np.ndarray): try: import jax self._key = jax.random.PRNGKey(self._key[1]) except ImportError: pass return self._key @key.setter def key(self, value: Union[int, "jax.Array"]) -> None: if type(value) is int: # don't import JAX if it has not been imported before if "jax" in sys.modules: import jax value = jax.random.PRNGKey(value) else: value = np.array([0, value], dtype=np.uint32) self._key = value self.jax = JAX() config = _Config()
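A short usage sketch of the module above: the package-level `logger` and `config` objects it defines can be used directly (the values below are illustrative only):

```python
from skrl import config, logger

# colored, leveled log messages through the library-wide logger
logger.info("starting experiment")
logger.warning("this is only an illustrative message")

# select the backend used by skrl's JAX components and seed the PRNG key
config.jax.backend = "numpy"   # supported values: "numpy" or "jax"
config.jax.key = 42            # stored as an array; converted to a jax.random.PRNGKey once JAX is imported
```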
Toni-SM/skrl/skrl/envs/__init__.py
Toni-SM/skrl/skrl/envs/jax.py
# TODO: Delete this file in future releases

from skrl import logger  # isort: skip

logger.warning("Using `from skrl.envs.jax import ...` is deprecated and will be removed in future versions.")
logger.warning(" - Import loaders using `from skrl.envs.loaders.jax import ...`")
logger.warning(" - Import wrappers using `from skrl.envs.wrappers.jax import ...`")

from skrl.envs.loaders.jax import (
    load_bidexhands_env,
    load_isaac_orbit_env,
    load_isaacgym_env_preview2,
    load_isaacgym_env_preview3,
    load_isaacgym_env_preview4,
    load_omniverse_isaacgym_env
)
from skrl.envs.wrappers.jax import MultiAgentEnvWrapper, Wrapper, wrap_env
Toni-SM/skrl/skrl/envs/torch.py
# TODO: Delete this file in future releases

from skrl import logger  # isort: skip

logger.warning("Using `from skrl.envs.torch import ...` is deprecated and will be removed in future versions.")
logger.warning(" - Import loaders using `from skrl.envs.loaders.torch import ...`")
logger.warning(" - Import wrappers using `from skrl.envs.wrappers.torch import ...`")

from skrl.envs.loaders.torch import (
    load_bidexhands_env,
    load_isaac_orbit_env,
    load_isaacgym_env_preview2,
    load_isaacgym_env_preview3,
    load_isaacgym_env_preview4,
    load_omniverse_isaacgym_env
)
from skrl.envs.wrappers.torch import MultiAgentEnvWrapper, Wrapper, wrap_env
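The two shim modules above only re-export the new locations while emitting deprecation warnings, so migrating user code is a one-line change (sketch; both import paths are taken from the files above):

```python
# deprecated (still works, but logs a deprecation warning)
# from skrl.envs.torch import wrap_env, load_omniverse_isaacgym_env

# current import locations
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
from skrl.envs.wrappers.torch import wrap_env
```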
Toni-SM/skrl/skrl/envs/loaders/__init__.py
Toni-SM/skrl/skrl/envs/loaders/torch/bidexhands_envs.py
from typing import Optional, Sequence import os import sys from contextlib import contextmanager from skrl import logger __all__ = ["load_bidexhands_env"] @contextmanager def cwd(new_path: str) -> None: """Context manager to change the current working directory This function restores the current working directory after the context manager exits :param new_path: The new path to change to :type new_path: str """ current_path = os.getcwd() os.chdir(new_path) try: yield finally: os.chdir(current_path) def _print_cfg(d, indent=0) -> None: """Print the environment configuration :param d: The dictionary to print :type d: dict :param indent: The indentation level (default: ``0``) :type indent: int, optional """ for key, value in d.items(): if isinstance(value, dict): _print_cfg(value, indent + 1) else: print(" | " * indent + f" |-- {key}: {value}") def load_bidexhands_env(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], bidexhands_path: str = "", show_cfg: bool = True): """Load a Bi-DexHands environment :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``--task TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: Isaac Gym environment configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param bidexhands_path: The path to the ``bidexhands`` directory (default: ``""``). If empty, the path will obtained from bidexhands package metadata :type bidexhands_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The bidexhands package is not installed or the path is wrong :return: Bi-DexHands environment (preview 4) :rtype: isaacgymenvs.tasks.base.vec_task.VecTask """ import isaacgym # isort:skip import bidexhands # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("--task"): defined = True break # get task name from command line arguments if defined: arg_index = sys.argv.index("--task") + 1 if arg_index >= len(sys.argv): raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument") if task_name and task_name != sys.argv[arg_index]: logger.warning(f"Overriding task ({task_name}) with command line argument ({sys.argv[arg_index]})") # get task name from function arguments else: if task_name: sys.argv.append("--task") sys.argv.append(task_name) else: raise ValueError("No task name defined. 
Set the task_name parameter or use --task <task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("--num_envs"): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None: logger.warning("Overriding num_envs with command line argument --num_envs") # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append("--num_envs") sys.argv.append(str(num_envs)) # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("--headless"): defined = True break # get headless from command line arguments if defined: if headless is not None: logger.warning("Overriding headless with command line argument --headless") # get headless from function arguments elif headless is not None: sys.argv.append("--headless") # others command line arguments sys.argv += cli_args # get bidexhands path from bidexhands package metadata if not bidexhands_path: if not hasattr(bidexhands, "__path__"): raise RuntimeError("bidexhands package is not installed") path = list(bidexhands.__path__)[0] else: path = bidexhands_path sys.path.append(path) status = True try: from utils.config import get_args, load_cfg, parse_sim_params # type: ignore from utils.parse_task import parse_task # type: ignore from utils.process_marl import get_AgentIndex # type: ignore except Exception as e: status = False logger.error(f"Failed to import required packages: {e}") if not status: raise RuntimeError(f"The path ({path}) is not valid") args = get_args() # print config if show_cfg: print(f"\nBi-DexHands environment ({args.task})") _print_cfg(vars(args)) # update task arguments args.task_type = "MultiAgent" # TODO: get from parameters args.cfg_train = os.path.join(path, args.cfg_train) args.cfg_env = os.path.join(path, args.cfg_env) # load environment with cwd(path): cfg, cfg_train, _ = load_cfg(args) agent_index = get_AgentIndex(cfg) sim_params = parse_sim_params(args, cfg, cfg_train) task, env = parse_task(args, cfg, cfg_train, sim_params, agent_index) return env
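A hedged usage sketch for the loader above (the task name is only an assumed Bi-DexHands example, and the explicit `wrapper="bidexhands"` argument is an assumption about `wrap_env`, not something shown in this file):

```python
from skrl.envs.loaders.torch import load_bidexhands_env
from skrl.envs.wrappers.torch import wrap_env

# load the multi-agent environment; command line arguments (e.g. --num_envs) take priority
env = load_bidexhands_env(task_name="ShadowHandOver", num_envs=64)  # assumed example task
env = wrap_env(env, wrapper="bidexhands")  # assumed wrapper selector
```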
Toni-SM/skrl/skrl/envs/loaders/torch/__init__.py
from skrl.envs.loaders.torch.bidexhands_envs import load_bidexhands_env
from skrl.envs.loaders.torch.isaac_orbit_envs import load_isaac_orbit_env
from skrl.envs.loaders.torch.isaacgym_envs import (
    load_isaacgym_env_preview2,
    load_isaacgym_env_preview3,
    load_isaacgym_env_preview4
)
from skrl.envs.loaders.torch.omniverse_isaacgym_envs import load_omniverse_isaacgym_env
Toni-SM/skrl/skrl/envs/loaders/torch/isaacgym_envs.py
from typing import Optional, Sequence import os import sys from contextlib import contextmanager from skrl import logger __all__ = ["load_isaacgym_env_preview2", "load_isaacgym_env_preview3", "load_isaacgym_env_preview4"] @contextmanager def cwd(new_path: str) -> None: """Context manager to change the current working directory This function restores the current working directory after the context manager exits :param new_path: The new path to change to :type new_path: str """ current_path = os.getcwd() os.chdir(new_path) try: yield finally: os.chdir(current_path) def _omegaconf_to_dict(config) -> dict: """Convert OmegaConf config to dict :param config: The OmegaConf config :type config: OmegaConf.Config :return: The config as dict :rtype: dict """ # return config.to_container(dict) from omegaconf import DictConfig d = {} for k, v in config.items(): d[k] = _omegaconf_to_dict(v) if isinstance(v, DictConfig) else v return d def _print_cfg(d, indent=0) -> None: """Print the environment configuration :param d: The dictionary to print :type d: dict :param indent: The indentation level (default: ``0``) :type indent: int, optional """ for key, value in d.items(): if isinstance(value, dict): _print_cfg(value, indent + 1) else: print(" | " * indent + f" |-- {key}: {value}") def load_isaacgym_env_preview2(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], isaacgymenvs_path: str = "", show_cfg: bool = True): """Load an Isaac Gym environment (preview 2) :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``--task TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: Isaac Gym environment configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param isaacgymenvs_path: The path to the ``rlgpu`` directory (default: ``""``). If empty, the path will obtained from isaacgym package metadata :type isaacgymenvs_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The isaacgym package is not installed or the path is wrong :return: Isaac Gym environment (preview 2) :rtype: tasks.base.vec_task.VecTask """ import isaacgym # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("--task"): defined = True break # get task name from command line arguments if defined: arg_index = sys.argv.index("--task") + 1 if arg_index >= len(sys.argv): raise ValueError("No task name defined. 
Set the task_name parameter or use --task <task_name> as command line argument") if task_name and task_name != sys.argv[arg_index]: logger.warning(f"Overriding task ({task_name}) with command line argument ({sys.argv[arg_index]})") # get task name from function arguments else: if task_name: sys.argv.append("--task") sys.argv.append(task_name) else: raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("--num_envs"): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None: logger.warning("Overriding num_envs with command line argument --num_envs") # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append("--num_envs") sys.argv.append(str(num_envs)) # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("--headless"): defined = True break # get headless from command line arguments if defined: if headless is not None: logger.warning("Overriding headless with command line argument --headless") # get headless from function arguments elif headless is not None: sys.argv.append("--headless") # others command line arguments sys.argv += cli_args # get isaacgym envs path from isaacgym package metadata if not isaacgymenvs_path: if not hasattr(isaacgym, "__path__"): raise RuntimeError("isaacgym package is not installed or could not be accessed by the current Python environment") path = isaacgym.__path__ path = os.path.join(path[0], "..", "rlgpu") else: path = isaacgymenvs_path # import required packages sys.path.append(path) status = True try: from utils.config import get_args, load_cfg, parse_sim_params # type: ignore from utils.parse_task import parse_task # type: ignore except Exception as e: status = False logger.error(f"Failed to import required packages: {e}") if not status: raise RuntimeError(f"Path ({path}) is not valid or the isaacgym package is not installed in editable mode (pip install -e .)") args = get_args() # print config if show_cfg: print(f"\nIsaac Gym environment ({args.task})") _print_cfg(vars(args)) # update task arguments args.cfg_train = os.path.join(path, args.cfg_train) args.cfg_env = os.path.join(path, args.cfg_env) # load environment with cwd(path): cfg, cfg_train, _ = load_cfg(args) sim_params = parse_sim_params(args, cfg, cfg_train) task, env = parse_task(args, cfg, cfg_train, sim_params) return env def load_isaacgym_env_preview3(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], isaacgymenvs_path: str = "", show_cfg: bool = True): """Load an Isaac Gym environment (preview 3) Isaac Gym benchmark environments: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``task=TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). 
If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: IsaacGymEnvs configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param isaacgymenvs_path: The path to the ``isaacgymenvs`` directory (default: ``""``). If empty, the path will obtained from isaacgymenvs package metadata :type isaacgymenvs_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The isaacgymenvs package is not installed or the path is wrong :return: Isaac Gym environment (preview 3) :rtype: isaacgymenvs.tasks.base.vec_task.VecTask """ import isaacgym import isaacgymenvs from hydra._internal.hydra import Hydra from hydra._internal.utils import create_automatic_config_search_path, get_args_parser from hydra.types import RunMode from omegaconf import OmegaConf # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("task="): defined = True break # get task name from command line arguments if defined: if task_name and task_name != arg.split("task=")[1].split(" ")[0]: logger.warning("Overriding task name ({}) with command line argument ({})" \ .format(task_name, arg.split("task=")[1].split(" ")[0])) # get task name from function arguments else: if task_name: sys.argv.append(f"task={task_name}") else: raise ValueError("No task name defined. Set task_name parameter or use task=<task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("num_envs="): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None and num_envs != int(arg.split("num_envs=")[1].split(" ")[0]): logger.warning("Overriding num_envs ({}) with command line argument (num_envs={})" \ .format(num_envs, arg.split("num_envs=")[1].split(" ")[0])) # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append(f"num_envs={num_envs}") # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("headless="): defined = True break # get headless from command line arguments if defined: if headless is not None and str(headless).lower() != arg.split("headless=")[1].split(" ")[0].lower(): logger.warning("Overriding headless ({}) with command line argument (headless={})" \ .format(headless, arg.split("headless=")[1].split(" ")[0])) # get headless from function arguments elif headless is not None: sys.argv.append(f"headless={headless}") # others command line arguments sys.argv += cli_args # get isaacgymenvs path from isaacgymenvs package metadata if isaacgymenvs_path == "": if not hasattr(isaacgymenvs, "__path__"): raise RuntimeError("isaacgymenvs package is not installed") isaacgymenvs_path = list(isaacgymenvs.__path__)[0] config_path = os.path.join(isaacgymenvs_path, "cfg") # set omegaconf resolvers try: OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower()) except Exception as e: pass try: OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower()) except Exception as e: pass try: OmegaConf.register_new_resolver('if', lambda condition, a, b: a if condition else b) except Exception as e: pass try: 
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg) except Exception as e: pass # get hydra config without use @hydra.main config_file = "config" args = get_args_parser().parse_args() search_path = create_automatic_config_search_path(config_file, None, config_path) hydra_object = Hydra.create_main_hydra2(task_name='load_isaacgymenv', config_search_path=search_path) config = hydra_object.compose_config(config_file, args.overrides, run_mode=RunMode.RUN) cfg = _omegaconf_to_dict(config.task) # print config if show_cfg: print(f"\nIsaac Gym environment ({config.task.name})") _print_cfg(cfg) # load environment sys.path.append(isaacgymenvs_path) from tasks import isaacgym_task_map # type: ignore try: env = isaacgym_task_map[config.task.name](cfg=cfg, sim_device=config.sim_device, graphics_device_id=config.graphics_device_id, headless=config.headless) except TypeError as e: env = isaacgym_task_map[config.task.name](cfg=cfg, rl_device=config.rl_device, sim_device=config.sim_device, graphics_device_id=config.graphics_device_id, headless=config.headless, virtual_screen_capture=config.capture_video, # TODO: check force_render=config.force_render) return env def load_isaacgym_env_preview4(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], isaacgymenvs_path: str = "", show_cfg: bool = True): """Load an Isaac Gym environment (preview 4) Isaac Gym benchmark environments: https://github.com/NVIDIA-Omniverse/IsaacGymEnvs :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``task=TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: IsaacGymEnvs configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param isaacgymenvs_path: The path to the ``isaacgymenvs`` directory (default: ``""``). If empty, the path will obtained from isaacgymenvs package metadata :type isaacgymenvs_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The isaacgymenvs package is not installed or the path is wrong :return: Isaac Gym environment (preview 4) :rtype: isaacgymenvs.tasks.base.vec_task.VecTask """ return load_isaacgym_env_preview3(task_name, num_envs, headless, cli_args, isaacgymenvs_path, show_cfg)
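A usage sketch for the preview 3/4 loaders defined above (the keyword arguments mirror the function signature shown; the task name is an assumed IsaacGymEnvs example, not something mandated by this file):

```python
from skrl.envs.loaders.torch import load_isaacgym_env_preview4

# hydra-style overrides can also be passed through cli_args (e.g. ["headless=True"]);
# command line arguments have priority over the function parameters
env = load_isaacgym_env_preview4(task_name="Cartpole",   # assumed example task
                                 num_envs=512,
                                 headless=True,
                                 show_cfg=False)
```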
Toni-SM/skrl/skrl/envs/loaders/torch/isaac_orbit_envs.py
from typing import Optional, Sequence import os import sys from skrl import logger __all__ = ["load_isaac_orbit_env"] def _print_cfg(d, indent=0) -> None: """Print the environment configuration :param d: The dictionary to print :type d: dict :param indent: The indentation level (default: ``0``) :type indent: int, optional """ for key, value in d.items(): if isinstance(value, dict): _print_cfg(value, indent + 1) else: print(" | " * indent + f" |-- {key}: {value}") def load_isaac_orbit_env(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], show_cfg: bool = True): """Load an Isaac Orbit environment Isaac Orbit: https://isaac-orbit.github.io/orbit/index.html This function includes the definition and parsing of command line arguments used by Isaac Orbit: - ``--headless``: Force display off at all times - ``--cpu``: Use CPU pipeline - ``--num_envs``: Number of environments to simulate - ``--task``: Name of the task - ``--num_envs``: Seed used for the environment :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``--task TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: Isaac Orbit configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :return: Isaac Orbit environment :rtype: gym.Env """ import argparse import atexit import gym # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("--task"): defined = True break # get task name from command line arguments if defined: arg_index = sys.argv.index("--task") + 1 if arg_index >= len(sys.argv): raise ValueError("No task name defined. Set the task_name parameter or use --task <task_name> as command line argument") if task_name and task_name != sys.argv[arg_index]: logger.warning(f"Overriding task ({task_name}) with command line argument ({sys.argv[arg_index]})") # get task name from function arguments else: if task_name: sys.argv.append("--task") sys.argv.append(task_name) else: raise ValueError("No task name defined. 
Set the task_name parameter or use --task <task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("--num_envs"): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None: logger.warning("Overriding num_envs with command line argument (--num_envs)") # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append("--num_envs") sys.argv.append(str(num_envs)) # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("--headless"): defined = True break # get headless from command line arguments if defined: if headless is not None: logger.warning("Overriding headless with command line argument (--headless)") # get headless from function arguments elif headless is not None: sys.argv.append("--headless") # others command line arguments sys.argv += cli_args # parse arguments parser = argparse.ArgumentParser("Welcome to Orbit: Omniverse Robotics Environments!") parser.add_argument("--headless", action="store_true", default=False, help="Force display off at all times.") parser.add_argument("--cpu", action="store_true", default=False, help="Use CPU pipeline.") parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.") parser.add_argument("--task", type=str, default=None, help="Name of the task.") parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment") args = parser.parse_args() # load the most efficient kit configuration in headless mode if args.headless: app_experience = f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.gym.headless.kit" else: app_experience = f"{os.environ['EXP_PATH']}/omni.isaac.sim.python.kit" # launch the simulator from omni.isaac.kit import SimulationApp # type: ignore config = {"headless": args.headless} simulation_app = SimulationApp(config, experience=app_experience) @atexit.register def close_the_simulator(): simulation_app.close() # import orbit extensions import omni.isaac.contrib_envs # type: ignore import omni.isaac.orbit_envs # type: ignore from omni.isaac.orbit_envs.utils import parse_env_cfg # type: ignore cfg = parse_env_cfg(args.task, use_gpu=not args.cpu, num_envs=args.num_envs) # print config if show_cfg: print(f"\nIsaac Orbit environment ({args.task})") try: _print_cfg(cfg) except AttributeError as e: pass # load environment env = gym.make(args.task, cfg=cfg, headless=args.headless) return env
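A usage sketch for `load_isaac_orbit_env` (the task identifier is an assumed Isaac Orbit example):

```python
from skrl.envs.loaders.torch import load_isaac_orbit_env

# the loader parses --task/--num_envs/--headless from sys.argv, so cli_args can be used
# instead of (or in addition to) the function parameters
env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0",  # assumed example task id
                           num_envs=1024,
                           headless=True)
```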
Toni-SM/skrl/skrl/envs/loaders/torch/omniverse_isaacgym_envs.py
from typing import Optional, Sequence, Union import os import queue import sys from skrl import logger __all__ = ["load_omniverse_isaacgym_env"] def _omegaconf_to_dict(config) -> dict: """Convert OmegaConf config to dict :param config: The OmegaConf config :type config: OmegaConf.Config :return: The config as dict :rtype: dict """ # return config.to_container(dict) from omegaconf import DictConfig d = {} for k, v in config.items(): d[k] = _omegaconf_to_dict(v) if isinstance(v, DictConfig) else v return d def _print_cfg(d, indent=0) -> None: """Print the environment configuration :param d: The dictionary to print :type d: dict :param indent: The indentation level (default: ``0``) :type indent: int, optional """ for key, value in d.items(): if isinstance(value, dict): _print_cfg(value, indent + 1) else: print(" | " * indent + f" |-- {key}: {value}") def load_omniverse_isaacgym_env(task_name: str = "", num_envs: Optional[int] = None, headless: Optional[bool] = None, cli_args: Sequence[str] = [], omniisaacgymenvs_path: str = "", show_cfg: bool = True, multi_threaded: bool = False, timeout: int = 30) -> Union["VecEnvBase", "VecEnvMT"]: """Load an Omniverse Isaac Gym environment (OIGE) Omniverse Isaac Gym benchmark environments: https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs :param task_name: The name of the task (default: ``""``). If not specified, the task name is taken from the command line argument (``task=TASK_NAME``). Command line argument has priority over function parameter if both are specified :type task_name: str, optional :param num_envs: Number of parallel environments to create (default: ``None``). If not specified, the default number of environments defined in the task configuration is used. Command line argument has priority over function parameter if both are specified :type num_envs: int, optional :param headless: Whether to use headless mode (no rendering) (default: ``None``). If not specified, the default task configuration is used. Command line argument has priority over function parameter if both are specified :type headless: bool, optional :param cli_args: OIGE configuration and command line arguments (default: ``[]``) :type cli_args: list of str, optional :param omniisaacgymenvs_path: The path to the ``omniisaacgymenvs`` directory (default: ``""``). 
If empty, the path will obtained from omniisaacgymenvs package metadata :type omniisaacgymenvs_path: str, optional :param show_cfg: Whether to print the configuration (default: ``True``) :type show_cfg: bool, optional :param multi_threaded: Whether to use multi-threaded environment (default: ``False``) :type multi_threaded: bool, optional :param timeout: Seconds to wait for data when queue is empty in multi-threaded environment (default: ``30``) :type timeout: int, optional :raises ValueError: The task name has not been defined, neither by the function parameter nor by the command line arguments :raises RuntimeError: The omniisaacgymenvs package is not installed or the path is wrong :return: Omniverse Isaac Gym environment :rtype: omni.isaac.gym.vec_env.vec_env_base.VecEnvBase or omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT """ import omegaconf import omniisaacgymenvs # type: ignore from hydra._internal.hydra import Hydra from hydra._internal.utils import create_automatic_config_search_path, get_args_parser from hydra.types import RunMode from omegaconf import OmegaConf from omni.isaac.gym.vec_env import TaskStopException, VecEnvBase, VecEnvMT # type: ignore from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT # type: ignore import torch # check task from command line arguments defined = False for arg in sys.argv: if arg.startswith("task="): defined = True break # get task name from command line arguments if defined: if task_name and task_name != arg.split("task=")[1].split(" ")[0]: logger.warning("Overriding task name ({}) with command line argument (task={})" \ .format(task_name, arg.split("task=")[1].split(" ")[0])) # get task name from function arguments else: if task_name: sys.argv.append(f"task={task_name}") else: raise ValueError("No task name defined. 
Set task_name parameter or use task=<task_name> as command line argument") # check num_envs from command line arguments defined = False for arg in sys.argv: if arg.startswith("num_envs="): defined = True break # get num_envs from command line arguments if defined: if num_envs is not None and num_envs != int(arg.split("num_envs=")[1].split(" ")[0]): logger.warning("Overriding num_envs ({}) with command line argument (num_envs={})" \ .format(num_envs, arg.split("num_envs=")[1].split(" ")[0])) # get num_envs from function arguments elif num_envs is not None and num_envs > 0: sys.argv.append(f"num_envs={num_envs}") # check headless from command line arguments defined = False for arg in sys.argv: if arg.startswith("headless="): defined = True break # get headless from command line arguments if defined: if headless is not None and str(headless).lower() != arg.split("headless=")[1].split(" ")[0].lower(): logger.warning("Overriding headless ({}) with command line argument (headless={})" \ .format(headless, arg.split("headless=")[1].split(" ")[0])) # get headless from function arguments elif headless is not None: sys.argv.append(f"headless={headless}") # others command line arguments sys.argv += cli_args # get omniisaacgymenvs path from omniisaacgymenvs package metadata if omniisaacgymenvs_path == "": if not hasattr(omniisaacgymenvs, "__path__"): raise RuntimeError("omniisaacgymenvs package is not installed") omniisaacgymenvs_path = list(omniisaacgymenvs.__path__)[0] config_path = os.path.join(omniisaacgymenvs_path, "cfg") # set omegaconf resolvers OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower()) OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower()) OmegaConf.register_new_resolver('if', lambda condition, a, b: a if condition else b) OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg) # get hydra config without use @hydra.main config_file = "config" args = get_args_parser().parse_args() search_path = create_automatic_config_search_path(config_file, None, config_path) hydra_object = Hydra.create_main_hydra2(task_name='load_omniisaacgymenv', config_search_path=search_path) config = hydra_object.compose_config(config_file, args.overrides, run_mode=RunMode.RUN) del config.hydra cfg = _omegaconf_to_dict(config) cfg["train"] = {} # print config if show_cfg: print(f"\nOmniverse Isaac Gym environment ({config.task.name})") _print_cfg(cfg) # internal classes class _OmniIsaacGymVecEnv(VecEnvBase): def step(self, actions): actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone() self._task.pre_physics_step(actions) for _ in range(self._task.control_frequency_inv): self._world.step(render=self._render) self.sim_frame_count += 1 observations, rewards, dones, info = self._task.post_physics_step() return {"obs": torch.clamp(observations, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()}, \ rewards.to(self._task.rl_device).clone(), dones.to(self._task.rl_device).clone(), info.copy() def reset(self): self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device) return self.step(actions)[0] class _OmniIsaacGymTrainerMT(TrainerMT): def run(self): pass def stop(self): pass class _OmniIsaacGymVecEnvMT(VecEnvMT): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.action_queue = queue.Queue(1) self.data_queue = queue.Queue(1) def run(self, trainer=None): 
super().run(_OmniIsaacGymTrainerMT() if trainer is None else trainer) def _parse_data(self, data): self._observations = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone() self._rewards = data["rew"].to(self._task.rl_device).clone() self._dones = data["reset"].to(self._task.rl_device).clone() self._info = data["extras"].copy() def step(self, actions): if self._stop: raise TaskStopException() actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).clone() self.send_actions(actions) data = self.get_data() return {"obs": self._observations}, self._rewards, self._dones, self._info def reset(self): self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device) return self.step(actions)[0] def close(self): # end stop signal to main thread self.send_actions(None) self.stop = True # load environment sys.path.append(omniisaacgymenvs_path) from utils.task_util import initialize_task # type: ignore try: if config.multi_gpu: rank = int(os.getenv("LOCAL_RANK", "0")) config.device_id = rank config.rl_device = f"cuda:{rank}" except omegaconf.errors.ConfigAttributeError: logger.warning("Using an older version of OmniIsaacGymEnvs (2022.2.0 or earlier)") enable_viewport = "enable_cameras" in config.task.sim and config.task.sim.enable_cameras if multi_threaded: try: env = _OmniIsaacGymVecEnvMT(headless=config.headless, sim_device=config.device_id, enable_livestream=config.enable_livestream, enable_viewport=enable_viewport) except (TypeError, omegaconf.errors.ConfigAttributeError): logger.warning("Using an older version of Isaac Sim or OmniIsaacGymEnvs (2022.2.0 or earlier)") env = _OmniIsaacGymVecEnvMT(headless=config.headless) # Isaac Sim 2022.2.0 and earlier task = initialize_task(cfg, env, init_sim=False) env.initialize(env.action_queue, env.data_queue, timeout=timeout) else: try: env = _OmniIsaacGymVecEnv(headless=config.headless, sim_device=config.device_id, enable_livestream=config.enable_livestream, enable_viewport=enable_viewport) except (TypeError, omegaconf.errors.ConfigAttributeError): logger.warning("Using an older version of Isaac Sim or OmniIsaacGymEnvs (2022.2.0 or earlier)") env = _OmniIsaacGymVecEnv(headless=config.headless) # Isaac Sim 2022.2.0 and earlier task = initialize_task(cfg, env, init_sim=True) return env
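A usage sketch for `load_omniverse_isaacgym_env`, including the multi-threaded variant implemented above (the task name is an assumed OmniIsaacGymEnvs example):

```python
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env

# single-threaded, VecEnvBase-style environment
env = load_omniverse_isaacgym_env(task_name="Cartpole", headless=True)  # assumed example task

# multi-threaded, VecEnvMT-style environment (training runs in a separate thread)
# env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30)
```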
Toni-SM/skrl/skrl/envs/loaders/jax/bidexhands_envs.py
# since Bi-DexHands environments are implemented on top of PyTorch, the loader is the same
from skrl.envs.loaders.torch import load_bidexhands_env
Toni-SM/skrl/skrl/envs/loaders/jax/__init__.py
from skrl.envs.loaders.jax.bidexhands_envs import load_bidexhands_env
from skrl.envs.loaders.jax.isaac_orbit_envs import load_isaac_orbit_env
from skrl.envs.loaders.jax.isaacgym_envs import (
    load_isaacgym_env_preview2,
    load_isaacgym_env_preview3,
    load_isaacgym_env_preview4
)
from skrl.envs.loaders.jax.omniverse_isaacgym_envs import load_omniverse_isaacgym_env
Toni-SM/skrl/skrl/envs/loaders/jax/isaacgym_envs.py
# since Isaac Gym (preview) environments are implemented on top of PyTorch, the loaders are the same
from skrl.envs.loaders.torch import (  # isort:skip
    load_isaacgym_env_preview2,
    load_isaacgym_env_preview3,
    load_isaacgym_env_preview4,
)
Toni-SM/skrl/skrl/envs/loaders/jax/isaac_orbit_envs.py
# since Isaac Orbit environments are implemented on top of PyTorch, the loader is the same
from skrl.envs.loaders.torch import load_isaac_orbit_env
Toni-SM/skrl/skrl/envs/loaders/jax/omniverse_isaacgym_envs.py
# since Omniverse Isaac Gym environments are implemented on top of PyTorch, the loader is the same
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
Toni-SM/skrl/skrl/envs/wrappers/__init__.py
Toni-SM/skrl/skrl/envs/wrappers/torch/gym_envs.py
from typing import Any, Optional, Tuple import gym from packaging import version import numpy as np import torch from skrl import logger from skrl.envs.wrappers.torch.base import Wrapper class GymWrapper(Wrapper): def __init__(self, env: Any) -> None: """OpenAI Gym environment wrapper :param env: The environment to wrap :type env: Any supported OpenAI Gym environment """ super().__init__(env) self._vectorized = False try: if isinstance(env, gym.vector.SyncVectorEnv) or isinstance(env, gym.vector.AsyncVectorEnv): self._vectorized = True self._reset_once = True self._obs_tensor = None self._info_dict = None except Exception as e: logger.warning(f"Failed to check for a vectorized environment: {e}") self._deprecated_api = version.parse(gym.__version__) < version.parse("0.25.0") if self._deprecated_api: logger.warning(f"Using a deprecated version of OpenAI Gym's API: {gym.__version__}") @property def state_space(self) -> gym.Space: """State space An alias for the ``observation_space`` property """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def observation_space(self) -> gym.Space: """Observation space """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def action_space(self) -> gym.Space: """Action space """ if self._vectorized: return self._env.single_action_space return self._env.action_space def _observation_to_tensor(self, observation: Any, space: Optional[gym.Space] = None) -> torch.Tensor: """Convert the OpenAI Gym observation to a flat tensor :param observation: The OpenAI Gym observation to convert to a tensor :type observation: Any supported OpenAI Gym observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ observation_space = self._env.observation_space if self._vectorized else self.observation_space space = space if space is not None else observation_space if self._vectorized and isinstance(space, gym.spaces.MultiDiscrete): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, int): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, np.ndarray): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gym.spaces.Discrete): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gym.spaces.Box): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gym.spaces.Dict): tmp = torch.cat([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], dim=-1).view(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: torch.Tensor) -> Any: """Convert the action to the OpenAI Gym expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the OpenAI Gym format :rtype: Any supported OpenAI Gym action space """ space = self._env.action_space if self._vectorized else self.action_space if self._vectorized: if isinstance(space, gym.spaces.MultiDiscrete): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) elif isinstance(space, gym.spaces.Tuple): if isinstance(space[0], gym.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(space.shape) elif isinstance(space[0], gym.spaces.Discrete): return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(-1) elif isinstance(space, gym.spaces.Discrete): return actions.item() elif isinstance(space, gym.spaces.MultiDiscrete): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) elif isinstance(space, gym.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ if self._deprecated_api: observation, reward, terminated, info = self._env.step(self._tensor_to_action(actions)) # truncated: https://gymnasium.farama.org/tutorials/handling_time_limits if type(info) is list: truncated = np.array([d.get("TimeLimit.truncated", False) for d in info], dtype=terminated.dtype) terminated *= np.logical_not(truncated) else: truncated = info.get("TimeLimit.truncated", False) if truncated: terminated = False else: observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions)) # convert response to torch observation = self._observation_to_tensor(observation) reward = torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1) terminated = torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) truncated = torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) # save observation and info for vectorized envs if self._vectorized: self._obs_tensor = observation self._info_dict = info return observation, reward, terminated, truncated, info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: Observation, info :rtype: torch.Tensor and any other info """ # handle vectorized envs if self._vectorized: if not self._reset_once: return self._obs_tensor, self._info_dict self._reset_once = False # reset the env/envs if self._deprecated_api: observation = self._env.reset() info = {} else: observation, info = self._env.reset() return self._observation_to_tensor(observation), info def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/torch/bidexhands_envs.py
from typing import Any, Mapping, Sequence, Tuple import gym import torch from skrl.envs.wrappers.torch.base import MultiAgentEnvWrapper class BiDexHandsWrapper(MultiAgentEnvWrapper): def __init__(self, env: Any) -> None: """Bi-DexHands wrapper :param env: The environment to wrap :type env: Any supported Bi-DexHands environment """ super().__init__(env) self._reset_once = True self._obs_buf = None self._shared_obs_buf = None self.possible_agents = [f"agent_{i}" for i in range(self.num_agents)] @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ return self.possible_agents @property def observation_spaces(self) -> Mapping[str, gym.Space]: """Observation spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.observation_space)} @property def action_spaces(self) -> Mapping[str, gym.Space]: """Action spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.action_space)} @property def shared_observation_spaces(self) -> Mapping[str, gym.Space]: """Shared observation spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.share_observation_space)} def step(self, actions: Mapping[str, torch.Tensor]) -> \ Tuple[Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dictionary of torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of dictionaries torch.Tensor and any other info """ actions = [actions[uid] for uid in self.possible_agents] obs_buf, shared_obs_buf, reward_buf, terminated_buf, info, _ = self._env.step(actions) self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} reward = {uid: reward_buf[:,i].view(-1, 1) for i, uid in enumerate(self.possible_agents)} terminated = {uid: terminated_buf[:,i].view(-1, 1) for i, uid in enumerate(self.possible_agents)} truncated = {uid: torch.zeros_like(value) for uid, value in terminated.items()} info = {"shared_states": self._shared_obs_buf} return self._obs_buf, reward, terminated, truncated, info def reset(self) -> Tuple[Mapping[str, torch.Tensor], Mapping[str, Any]]: """Reset the environment :return: Observation, info :rtype: tuple of dictionaries of torch.Tensor and any other info """ if self._reset_once: obs_buf, shared_obs_buf, _ = self._env.reset() self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._reset_once = False return self._obs_buf, {"shared_states": self._shared_obs_buf}
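A hedged usage sketch for the Bi-DexHands wrapper above (the loader keyword and task name are assumptions; Bi-DexHands must be installed):

# illustrative sketch only
from skrl.envs.loaders.torch import load_bidexhands_env
from skrl.envs.wrappers.torch import wrap_env

env = wrap_env(load_bidexhands_env(task_name="ShadowHandOver"), wrapper="bidexhands")
observations, infos = env.reset()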
Toni-SM/skrl/skrl/envs/wrappers/torch/robosuite_envs.py
from typing import Any, Optional, Tuple import collections import gym import numpy as np import torch from skrl.envs.wrappers.torch.base import Wrapper class RobosuiteWrapper(Wrapper): def __init__(self, env: Any) -> None: """Robosuite environment wrapper :param env: The environment to wrap :type env: Any supported robosuite environment """ super().__init__(env) # observation and action spaces self._observation_space = self._spec_to_space(self._env.observation_spec()) self._action_space = self._spec_to_space(self._env.action_spec) @property def state_space(self) -> gym.Space: """State space An alias for the ``observation_space`` property """ return self._observation_space @property def observation_space(self) -> gym.Space: """Observation space """ return self._observation_space @property def action_space(self) -> gym.Space: """Action space """ return self._action_space def _spec_to_space(self, spec: Any) -> gym.Space: """Convert the robosuite spec to a Gym space :param spec: The robosuite spec to convert :type spec: Any supported robosuite spec :raises: ValueError if the spec type is not supported :return: The Gym space :rtype: gym.Space """ if type(spec) is tuple: return gym.spaces.Box(shape=spec[0].shape, dtype=np.float32, low=spec[0], high=spec[1]) elif isinstance(spec, np.ndarray): return gym.spaces.Box(shape=spec.shape, dtype=np.float32, low=np.full(spec.shape, float("-inf")), high=np.full(spec.shape, float("inf"))) elif isinstance(spec, collections.OrderedDict): return gym.spaces.Dict({k: self._spec_to_space(v) for k, v in spec.items()}) else: raise ValueError(f"Spec type {type(spec)} not supported. Please report this issue") def _observation_to_tensor(self, observation: Any, spec: Optional[Any] = None) -> torch.Tensor: """Convert the observation to a flat tensor :param observation: The observation to convert to a tensor :type observation: Any supported observation :raises: ValueError if the observation spec type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ spec = spec if spec is not None else self._env.observation_spec() if isinstance(spec, np.ndarray): return torch.tensor(observation, device=self.device, dtype=torch.float32).reshape(self.num_envs, -1) elif isinstance(spec, collections.OrderedDict): return torch.cat([self._observation_to_tensor(observation[k], spec[k]) \ for k in sorted(spec.keys())], dim=-1).reshape(self.num_envs, -1) else: raise ValueError(f"Observation spec type {type(spec)} not supported. Please report this issue") def _tensor_to_action(self, actions: torch.Tensor) -> Any: """Convert the action to the robosuite expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the robosuite expected format :rtype: Any supported robosuite action """ spec = self._env.action_spec if type(spec) is tuple: return np.array(actions.cpu().numpy(), dtype=np.float32).reshape(spec[0].shape) else: raise ValueError(f"Action spec type {type(spec)} not supported. 
Please report this issue") def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ observation, reward, terminated, info = self._env.step(self._tensor_to_action(actions)) truncated = False info = {} # convert response to torch return self._observation_to_tensor(observation), \ torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1), \ torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \ torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \ info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: The state of the environment :rtype: torch.Tensor """ observation = self._env.reset() return self._observation_to_tensor(observation), {} def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/torch/base.py
from typing import Any, Mapping, Sequence, Tuple import gym import torch class Wrapper(object): def __init__(self, env: Any) -> None: """Base wrapper class for RL environments :param env: The environment to wrap :type env: Any supported RL environment """ self._env = env # device (faster than @property) if hasattr(self._env, "device"): self.device = torch.device(self._env.device) else: self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # spaces try: self._action_space = self._env.single_action_space self._observation_space = self._env.single_observation_space except AttributeError: self._action_space = self._env.action_space self._observation_space = self._env.observation_space self._state_space = self._env.state_space if hasattr(self._env, "state_space") else self._observation_space def __getattr__(self, key: str) -> Any: """Get an attribute from the wrapped environment :param key: The attribute name :type key: str :raises AttributeError: If the attribute does not exist :return: The attribute value :rtype: Any """ if hasattr(self._env, key): return getattr(self._env, key) raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'") def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :raises NotImplementedError: Not implemented :return: Observation, info :rtype: torch.Tensor and any other info """ raise NotImplementedError def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :raises NotImplementedError: Not implemented :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ raise NotImplementedError def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass @property def num_envs(self) -> int: """Number of environments If the wrapped environment does not have the ``num_envs`` property, it will be set to 1 """ return self._env.num_envs if hasattr(self._env, "num_envs") else 1 @property def num_agents(self) -> int: """Number of agents If the wrapped environment does not have the ``num_agents`` property, it will be set to 1 """ return self._env.num_agents if hasattr(self._env, "num_agents") else 1 @property def state_space(self) -> gym.Space: """State space If the wrapped environment does not have the ``state_space`` property, the value of the ``observation_space`` property will be used """ return self._state_space @property def observation_space(self) -> gym.Space: """Observation space """ return self._observation_space @property def action_space(self) -> gym.Space: """Action space """ return self._action_space class MultiAgentEnvWrapper(object): def __init__(self, env: Any) -> None: """Base wrapper class for multi-agent environments :param env: The multi-agent environment to wrap :type env: Any supported multi-agent environment """ self._env = env # device (faster than @property) if hasattr(self._env, "device"): self.device = torch.device(self._env.device) else: self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") self.possible_agents = [] def __getattr__(self, key: str) -> Any: """Get an attribute from the wrapped environment :param key: The attribute name :type key: str :raises AttributeError: If the attribute does not exist :return: The attribute value :rtype: Any """ if 
hasattr(self._env, key): return getattr(self._env, key) raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'") def reset(self) -> Tuple[Mapping[str, torch.Tensor], Mapping[str, Any]]: """Reset the environment :raises NotImplementedError: Not implemented :return: Observation, info :rtype: tuple of dictionaries of torch.Tensor and any other info """ raise NotImplementedError def step(self, actions: Mapping[str, torch.Tensor]) -> \ Tuple[Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dictionary of torch.Tensor :raises NotImplementedError: Not implemented :return: Observation, reward, terminated, truncated, info :rtype: tuple of dictionaries of torch.Tensor and any other info """ raise NotImplementedError def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass @property def num_envs(self) -> int: """Number of environments If the wrapped environment does not have the ``num_envs`` property, it will be set to 1 """ return self._env.num_envs if hasattr(self._env, "num_envs") else 1 @property def num_agents(self) -> int: """Number of agents If the wrapped environment does not have the ``num_agents`` property, it will be set to 1 """ return self._env.num_agents if hasattr(self._env, "num_agents") else 1 @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ raise NotImplementedError @property def state_spaces(self) -> Mapping[str, gym.Space]: """State spaces An alias for the ``observation_spaces`` property """ return self.observation_spaces @property def observation_spaces(self) -> Mapping[str, gym.Space]: """Observation spaces """ raise NotImplementedError @property def action_spaces(self) -> Mapping[str, gym.Space]: """Action spaces """ raise NotImplementedError @property def shared_state_spaces(self) -> Mapping[str, gym.Space]: """Shared state spaces An alias for the ``shared_observation_spaces`` property """ return self.shared_observation_spaces @property def shared_observation_spaces(self) -> Mapping[str, gym.Space]: """Shared observation spaces """ raise NotImplementedError def state_space(self, agent: str) -> gym.Space: """State space :param agent: Name of the agent :type agent: str :return: The state space for the specified agent :rtype: gym.Space """ return self.state_spaces[agent] def observation_space(self, agent: str) -> gym.Space: """Observation space :param agent: Name of the agent :type agent: str :return: The observation space for the specified agent :rtype: gym.Space """ return self.observation_spaces[agent] def action_space(self, agent: str) -> gym.Space: """Action space :param agent: Name of the agent :type agent: str :return: The action space for the specified agent :rtype: gym.Space """ return self.action_spaces[agent] def shared_state_space(self, agent: str) -> gym.Space: """Shared state space :param agent: Name of the agent :type agent: str :return: The shared state space for the specified agent :rtype: gym.Space """ return self.shared_state_spaces[agent] def shared_observation_space(self, agent: str) -> gym.Space: """Shared observation space :param agent: Name of the agent :type agent: str :return: The shared observation space for the specified agent :rtype: 
gym.Space """ return self.shared_observation_spaces[agent]
Toni-SM/skrl/skrl/envs/wrappers/torch/__init__.py
from typing import Any, Union import gym import gymnasium from skrl import logger from skrl.envs.wrappers.torch.base import MultiAgentEnvWrapper, Wrapper from skrl.envs.wrappers.torch.bidexhands_envs import BiDexHandsWrapper from skrl.envs.wrappers.torch.deepmind_envs import DeepMindWrapper from skrl.envs.wrappers.torch.gym_envs import GymWrapper from skrl.envs.wrappers.torch.gymnasium_envs import GymnasiumWrapper from skrl.envs.wrappers.torch.isaac_orbit_envs import IsaacOrbitWrapper from skrl.envs.wrappers.torch.isaacgym_envs import IsaacGymPreview2Wrapper, IsaacGymPreview3Wrapper from skrl.envs.wrappers.torch.omniverse_isaacgym_envs import OmniverseIsaacGymWrapper from skrl.envs.wrappers.torch.pettingzoo_envs import PettingZooWrapper from skrl.envs.wrappers.torch.robosuite_envs import RobosuiteWrapper __all__ = ["wrap_env", "Wrapper", "MultiAgentEnvWrapper"] def wrap_env(env: Any, wrapper: str = "auto", verbose: bool = True) -> Union[Wrapper, MultiAgentEnvWrapper]: """Wrap an environment to use a common interface Example:: >>> from skrl.envs.wrappers.torch import wrap_env >>> >>> # assuming that there is an environment called "env" >>> env = wrap_env(env) :param env: The environment to be wrapped :type env: gym.Env, gymnasium.Env, dm_env.Environment or VecTask :param wrapper: The type of wrapper to use (default: ``"auto"``). If ``"auto"``, the wrapper will be automatically selected based on the environment class. The supported wrappers are described in the following table: +--------------------+-------------------------+ |Environment |Wrapper tag | +====================+=========================+ |OpenAI Gym |``"gym"`` | +--------------------+-------------------------+ |Gymnasium |``"gymnasium"`` | +--------------------+-------------------------+ |Petting Zoo |``"pettingzoo"`` | +--------------------+-------------------------+ |DeepMind |``"dm"`` | +--------------------+-------------------------+ |Robosuite |``"robosuite"`` | +--------------------+-------------------------+ |Bi-DexHands |``"bidexhands"`` | +--------------------+-------------------------+ |Isaac Gym preview 2 |``"isaacgym-preview2"`` | +--------------------+-------------------------+ |Isaac Gym preview 3 |``"isaacgym-preview3"`` | +--------------------+-------------------------+ |Isaac Gym preview 4 |``"isaacgym-preview4"`` | +--------------------+-------------------------+ |Omniverse Isaac Gym |``"omniverse-isaacgym"`` | +--------------------+-------------------------+ |Isaac Sim (orbit) |``"isaac-orbit"`` | +--------------------+-------------------------+ :type wrapper: str, optional :param verbose: Whether to print the wrapper type (default: ``True``) :type verbose: bool, optional :raises ValueError: Unknown wrapper type :return: Wrapped environment :rtype: Wrapper or MultiAgentEnvWrapper """ if verbose: logger.info("Environment class: {}".format(", ".join([str(base).replace("<class '", "").replace("'>", "") \ for base in env.__class__.__bases__]))) if wrapper == "auto": base_classes = [str(base) for base in env.__class__.__bases__] if "<class 'omni.isaac.gym.vec_env.vec_env_base.VecEnvBase'>" in base_classes or \ "<class 'omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT'>" in base_classes: if verbose: logger.info("Environment wrapper: Omniverse Isaac Gym") return OmniverseIsaacGymWrapper(env) elif isinstance(env, gym.core.Env) or isinstance(env, gym.core.Wrapper): # isaac-orbit if hasattr(env, "sim") and hasattr(env, "env_ns"): if verbose: logger.info("Environment wrapper: Isaac Orbit") return IsaacOrbitWrapper(env) # gym 
if verbose: logger.info("Environment wrapper: Gym") return GymWrapper(env) elif isinstance(env, gymnasium.core.Env) or isinstance(env, gymnasium.core.Wrapper): if verbose: logger.info("Environment wrapper: Gymnasium") return GymnasiumWrapper(env) elif "<class 'pettingzoo.utils.env" in base_classes[0] or "<class 'pettingzoo.utils.wrappers" in base_classes[0]: if verbose: logger.info("Environment wrapper: Petting Zoo") return PettingZooWrapper(env) elif "<class 'dm_env._environment.Environment'>" in base_classes: if verbose: logger.info("Environment wrapper: DeepMind") return DeepMindWrapper(env) elif "<class 'robosuite.environments." in base_classes[0]: if verbose: logger.info("Environment wrapper: Robosuite") return RobosuiteWrapper(env) elif "<class 'rlgpu.tasks.base.vec_task.VecTask'>" in base_classes: if verbose: logger.info("Environment wrapper: Isaac Gym (preview 2)") return IsaacGymPreview2Wrapper(env) if verbose: logger.info("Environment wrapper: Isaac Gym (preview 3/4)") return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3 elif wrapper == "gym": if verbose: logger.info("Environment wrapper: Gym") return GymWrapper(env) elif wrapper == "gymnasium": if verbose: logger.info("Environment wrapper: gymnasium") return GymnasiumWrapper(env) elif wrapper == "pettingzoo": if verbose: logger.info("Environment wrapper: Petting Zoo") return PettingZooWrapper(env) elif wrapper == "dm": if verbose: logger.info("Environment wrapper: DeepMind") return DeepMindWrapper(env) elif wrapper == "robosuite": if verbose: logger.info("Environment wrapper: Robosuite") return RobosuiteWrapper(env) elif wrapper == "bidexhands": if verbose: logger.info("Environment wrapper: Bi-DexHands") return BiDexHandsWrapper(env) elif wrapper == "isaacgym-preview2": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 2)") return IsaacGymPreview2Wrapper(env) elif wrapper == "isaacgym-preview3": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 3)") return IsaacGymPreview3Wrapper(env) elif wrapper == "isaacgym-preview4": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 4)") return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3 elif wrapper == "omniverse-isaacgym": if verbose: logger.info("Environment wrapper: Omniverse Isaac Gym") return OmniverseIsaacGymWrapper(env) elif wrapper == "isaac-orbit": if verbose: logger.info("Environment wrapper: Isaac Orbit") return IsaacOrbitWrapper(env) else: raise ValueError(f"Unknown wrapper type: {wrapper}")
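A short sketch of automatic wrapper selection, mirroring the docstring example above (assumes gymnasium is installed):

# illustrative sketch only
import gymnasium
from skrl.envs.wrappers.torch import wrap_env

env = wrap_env(gymnasium.make("Pendulum-v1"))  # wrapper="auto" resolves to GymnasiumWrapper here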
Toni-SM/skrl/skrl/envs/wrappers/torch/isaacgym_envs.py
from typing import Any, Tuple import torch from skrl.envs.wrappers.torch.base import Wrapper class IsaacGymPreview2Wrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Gym environment (preview 2) wrapper :param env: The environment to wrap :type env: Any supported Isaac Gym environment (preview 2) environment """ super().__init__(env) self._reset_once = True self._obs_buf = None def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ self._obs_buf, reward, terminated, info = self._env.step(actions) truncated = info["time_outs"] if "time_outs" in info else torch.zeros_like(terminated) return self._obs_buf, reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: Observation, info :rtype: torch.Tensor and any other info """ if self._reset_once: self._obs_buf = self._env.reset() self._reset_once = False return self._obs_buf, {} def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass class IsaacGymPreview3Wrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Gym environment (preview 3) wrapper :param env: The environment to wrap :type env: Any supported Isaac Gym environment (preview 3) environment """ super().__init__(env) self._reset_once = True self._obs_dict = None def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ self._obs_dict, reward, terminated, info = self._env.step(actions) truncated = info["time_outs"] if "time_outs" in info else torch.zeros_like(terminated) return self._obs_dict["obs"], reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: Observation, info :rtype: torch.Tensor and any other info """ if self._reset_once: self._obs_dict = self._env.reset() self._reset_once = False return self._obs_dict["obs"], {} def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass
Toni-SM/skrl/skrl/envs/wrappers/torch/isaac_orbit_envs.py
from typing import Any, Tuple

import torch

from skrl.envs.wrappers.torch.base import Wrapper


class IsaacOrbitWrapper(Wrapper):
    def __init__(self, env: Any) -> None:
        """Isaac Orbit environment wrapper

        :param env: The environment to wrap
        :type env: Any supported Isaac Orbit environment
        """
        super().__init__(env)

        self._reset_once = True
        self._obs_dict = None

    def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
        """Perform a step in the environment

        :param actions: The actions to perform
        :type actions: torch.Tensor

        :return: Observation, reward, terminated, truncated, info
        :rtype: tuple of torch.Tensor and any other info
        """
        self._obs_dict, reward, terminated, truncated, info = self._env.step(actions)
        return self._obs_dict["policy"], reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info

    def reset(self) -> Tuple[torch.Tensor, Any]:
        """Reset the environment

        :return: Observation, info
        :rtype: torch.Tensor and any other info
        """
        if self._reset_once:
            self._obs_dict, info = self._env.reset()
            self._reset_once = False
        return self._obs_dict["policy"], info

    def render(self, *args, **kwargs) -> None:
        """Render the environment
        """
        pass

    def close(self) -> None:
        """Close the environment
        """
        self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/torch/gymnasium_envs.py
from typing import Any, Optional, Tuple import gymnasium import numpy as np import torch from skrl import logger from skrl.envs.wrappers.torch.base import Wrapper class GymnasiumWrapper(Wrapper): def __init__(self, env: Any) -> None: """Gymnasium environment wrapper :param env: The environment to wrap :type env: Any supported Gymnasium environment """ super().__init__(env) self._vectorized = False try: if isinstance(env, gymnasium.vector.SyncVectorEnv) or isinstance(env, gymnasium.vector.AsyncVectorEnv): self._vectorized = True self._reset_once = True self._obs_tensor = None self._info_dict = None except Exception as e: logger.warning(f"Failed to check for a vectorized environment: {e}") @property def state_space(self) -> gymnasium.Space: """State space An alias for the ``observation_space`` property """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def observation_space(self) -> gymnasium.Space: """Observation space """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def action_space(self) -> gymnasium.Space: """Action space """ if self._vectorized: return self._env.single_action_space return self._env.action_space def _observation_to_tensor(self, observation: Any, space: Optional[gymnasium.Space] = None) -> torch.Tensor: """Convert the Gymnasium observation to a flat tensor :param observation: The Gymnasium observation to convert to a tensor :type observation: Any supported Gymnasium observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ observation_space = self._env.observation_space if self._vectorized else self.observation_space space = space if space is not None else observation_space if self._vectorized and isinstance(space, gymnasium.spaces.MultiDiscrete): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, int): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, np.ndarray): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Discrete): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Box): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Dict): tmp = torch.cat([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], dim=-1).view(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: torch.Tensor) -> Any: """Convert the action to the Gymnasium expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the Gymnasium format :rtype: Any supported Gymnasium action space """ space = self._env.action_space if self._vectorized else self.action_space if self._vectorized: if isinstance(space, gymnasium.spaces.MultiDiscrete): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) elif isinstance(space, gymnasium.spaces.Tuple): if isinstance(space[0], gymnasium.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(space.shape) elif isinstance(space[0], gymnasium.spaces.Discrete): return np.array(actions.cpu().numpy(), dtype=space[0].dtype).reshape(-1) if isinstance(space, gymnasium.spaces.Discrete): return actions.item() elif isinstance(space, gymnasium.spaces.MultiDiscrete): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) elif isinstance(space, gymnasium.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions)) # convert response to torch observation = self._observation_to_tensor(observation) reward = torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1) terminated = torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) truncated = torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) # save observation and info for vectorized envs if self._vectorized: self._obs_tensor = observation self._info_dict = info return observation, reward, terminated, truncated, info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: Observation, info :rtype: torch.Tensor and any other info """ # handle vectorized envs if self._vectorized: if not self._reset_once: return self._obs_tensor, self._info_dict self._reset_once = False # reset the env/envs observation, info = self._env.reset() return self._observation_to_tensor(observation), info def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/torch/pettingzoo_envs.py
from typing import Any, Mapping, Sequence, Tuple import collections import gymnasium import numpy as np import torch from skrl.envs.wrappers.torch.base import MultiAgentEnvWrapper class PettingZooWrapper(MultiAgentEnvWrapper): def __init__(self, env: Any) -> None: """PettingZoo (parallel) environment wrapper :param env: The environment to wrap :type env: Any supported PettingZoo (parallel) environment """ super().__init__(env) self.possible_agents = self._env.possible_agents self._shared_observation_space = self._compute_shared_observation_space(self._env.observation_spaces) def _compute_shared_observation_space(self, observation_spaces): space = next(iter(observation_spaces.values())) shape = (len(self.possible_agents),) + space.shape return gymnasium.spaces.Box(low=np.stack([space.low for _ in self.possible_agents], axis=0), high=np.stack([space.high for _ in self.possible_agents], axis=0), dtype=space.dtype, shape=shape) @property def num_agents(self) -> int: """Number of agents """ return len(self.possible_agents) @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ return self._env.agents @property def observation_spaces(self) -> Mapping[str, gymnasium.Space]: """Observation spaces """ return {uid: self._env.observation_space(uid) for uid in self.possible_agents} @property def action_spaces(self) -> Mapping[str, gymnasium.Space]: """Action spaces """ return {uid: self._env.action_space(uid) for uid in self.possible_agents} @property def shared_observation_spaces(self) -> Mapping[str, gymnasium.Space]: """Shared observation spaces """ return {uid: self._shared_observation_space for uid in self.possible_agents} def _observation_to_tensor(self, observation: Any, space: gymnasium.Space) -> torch.Tensor: """Convert the Gymnasium observation to a flat tensor :param observation: The Gymnasium observation to convert to a tensor :type observation: Any supported Gymnasium observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ if isinstance(observation, int): return torch.tensor(observation, device=self.device, dtype=torch.int64).view(self.num_envs, -1) elif isinstance(observation, np.ndarray): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Discrete): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Box): return torch.tensor(observation, device=self.device, dtype=torch.float32).view(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Dict): tmp = torch.cat([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], dim=-1).view(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: torch.Tensor, space: gymnasium.Space) -> Any: """Convert the action to the Gymnasium expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the Gymnasium format :rtype: Any supported Gymnasium action space """ if isinstance(space, gymnasium.spaces.Discrete): return actions.item() elif isinstance(space, gymnasium.spaces.Box): return np.array(actions.cpu().numpy(), dtype=space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: Mapping[str, torch.Tensor]) -> \ Tuple[Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, torch.Tensor], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dictionary of torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of dictionaries torch.Tensor and any other info """ actions = {uid: self._tensor_to_action(action, self._env.action_space(uid)) for uid, action in actions.items()} observations, rewards, terminated, truncated, infos = self._env.step(actions) # build shared observation shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0) shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space) infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents} # convert response to torch observations = {uid: self._observation_to_tensor(value, self._env.observation_space(uid)) for uid, value in observations.items()} rewards = {uid: torch.tensor(value, device=self.device, dtype=torch.float32).view(self.num_envs, -1) for uid, value in rewards.items()} terminated = {uid: torch.tensor(value, device=self.device, dtype=torch.bool).view(self.num_envs, -1) for uid, value in terminated.items()} truncated = {uid: torch.tensor(value, device=self.device, dtype=torch.bool).view(self.num_envs, -1) for uid, value in truncated.items()} return observations, rewards, terminated, truncated, infos def reset(self) -> Tuple[Mapping[str, torch.Tensor], Mapping[str, Any]]: """Reset the environment :return: Observation, info :rtype: tuple of dictionaries of torch.Tensor and any other info """ outputs = self._env.reset() if isinstance(outputs, collections.abc.Mapping): observations = outputs infos = {uid: {} for uid in self.possible_agents} else: observations, infos = outputs # build shared observation shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0) shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space) infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents} # convert response to torch observations = {uid: self._observation_to_tensor(observation, self._env.observation_space(uid)) for uid, observation in observations.items()} return observations, infos def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/torch/omniverse_isaacgym_envs.py
from typing import Any, Optional, Tuple

import torch

from skrl.envs.wrappers.torch.base import Wrapper


class OmniverseIsaacGymWrapper(Wrapper):
    def __init__(self, env: Any) -> None:
        """Omniverse Isaac Gym environment wrapper

        :param env: The environment to wrap
        :type env: Any supported Omniverse Isaac Gym environment
        """
        super().__init__(env)

        self._reset_once = True
        self._obs_dict = None

    def run(self, trainer: Optional["omni.isaac.gym.vec_env.vec_env_mt.TrainerMT"] = None) -> None:
        """Run the simulation in the main thread

        This method is valid only for the Omniverse Isaac Gym multi-threaded environments

        :param trainer: Trainer which should implement a ``run`` method that initiates the RL loop on a new thread
        :type trainer: omni.isaac.gym.vec_env.vec_env_mt.TrainerMT, optional
        """
        self._env.run(trainer)

    def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
        """Perform a step in the environment

        :param actions: The actions to perform
        :type actions: torch.Tensor

        :return: Observation, reward, terminated, truncated, info
        :rtype: tuple of torch.Tensor and any other info
        """
        self._obs_dict, reward, terminated, info = self._env.step(actions)
        truncated = info["time_outs"] if "time_outs" in info else torch.zeros_like(terminated)
        return self._obs_dict["obs"], reward.view(-1, 1), terminated.view(-1, 1), truncated.view(-1, 1), info

    def reset(self) -> Tuple[torch.Tensor, Any]:
        """Reset the environment

        :return: Observation, info
        :rtype: torch.Tensor and any other info
        """
        if self._reset_once:
            self._obs_dict = self._env.reset()
            self._reset_once = False
        return self._obs_dict["obs"], {}

    def render(self, *args, **kwargs) -> None:
        """Render the environment
        """
        pass

    def close(self) -> None:
        """Close the environment
        """
        self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/torch/deepmind_envs.py
from typing import Any, Optional, Tuple import collections import gym import numpy as np import torch from skrl.envs.wrappers.torch.base import Wrapper class DeepMindWrapper(Wrapper): def __init__(self, env: Any) -> None: """DeepMind environment wrapper :param env: The environment to wrap :type env: Any supported DeepMind environment """ super().__init__(env) from dm_env import specs self._specs = specs # observation and action spaces self._observation_space = self._spec_to_space(self._env.observation_spec()) self._action_space = self._spec_to_space(self._env.action_spec()) @property def state_space(self) -> gym.Space: """State space An alias for the ``observation_space`` property """ return self._observation_space @property def observation_space(self) -> gym.Space: """Observation space """ return self._observation_space @property def action_space(self) -> gym.Space: """Action space """ return self._action_space def _spec_to_space(self, spec: Any) -> gym.Space: """Convert the DeepMind spec to a Gym space :param spec: The DeepMind spec to convert :type spec: Any supported DeepMind spec :raises: ValueError if the spec type is not supported :return: The Gym space :rtype: gym.Space """ if isinstance(spec, self._specs.DiscreteArray): return gym.spaces.Discrete(spec.num_values) elif isinstance(spec, self._specs.BoundedArray): return gym.spaces.Box(shape=spec.shape, dtype=spec.dtype, low=spec.minimum if spec.minimum.ndim else np.full(spec.shape, spec.minimum), high=spec.maximum if spec.maximum.ndim else np.full(spec.shape, spec.maximum)) elif isinstance(spec, self._specs.Array): return gym.spaces.Box(shape=spec.shape, dtype=spec.dtype, low=np.full(spec.shape, float("-inf")), high=np.full(spec.shape, float("inf"))) elif isinstance(spec, collections.OrderedDict): return gym.spaces.Dict({k: self._spec_to_space(v) for k, v in spec.items()}) else: raise ValueError(f"Spec type {type(spec)} not supported. Please report this issue") def _observation_to_tensor(self, observation: Any, spec: Optional[Any] = None) -> torch.Tensor: """Convert the DeepMind observation to a flat tensor :param observation: The DeepMind observation to convert to a tensor :type observation: Any supported DeepMind observation :raises: ValueError if the observation spec type is not supported :return: The observation as a flat tensor :rtype: torch.Tensor """ spec = spec if spec is not None else self._env.observation_spec() if isinstance(spec, self._specs.DiscreteArray): return torch.tensor(observation, device=self.device, dtype=torch.float32).reshape(self.num_envs, -1) elif isinstance(spec, self._specs.Array): # includes BoundedArray return torch.tensor(observation, device=self.device, dtype=torch.float32).reshape(self.num_envs, -1) elif isinstance(spec, collections.OrderedDict): return torch.cat([self._observation_to_tensor(observation[k], spec[k]) \ for k in sorted(spec.keys())], dim=-1).reshape(self.num_envs, -1) else: raise ValueError(f"Observation spec type {type(spec)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: torch.Tensor) -> Any: """Convert the action to the DeepMind expected format :param actions: The actions to perform :type actions: torch.Tensor :raise ValueError: If the action space type is not supported :return: The action in the DeepMind expected format :rtype: Any supported DeepMind action """ spec = self._env.action_spec() if isinstance(spec, self._specs.DiscreteArray): return np.array(actions.item(), dtype=spec.dtype) elif isinstance(spec, self._specs.Array): # includes BoundedArray return np.array(actions.cpu().numpy(), dtype=spec.dtype).reshape(spec.shape) else: raise ValueError(f"Action spec type {type(spec)} not supported. Please report this issue") def step(self, actions: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: torch.Tensor :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ timestep = self._env.step(self._tensor_to_action(actions)) observation = timestep.observation reward = timestep.reward if timestep.reward is not None else 0 terminated = timestep.last() truncated = False info = {} # convert response to torch return self._observation_to_tensor(observation), \ torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1), \ torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \ torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1), \ info def reset(self) -> Tuple[torch.Tensor, Any]: """Reset the environment :return: The state of the environment :rtype: torch.Tensor """ timestep = self._env.reset() return self._observation_to_tensor(timestep.observation), {} def render(self, *args, **kwargs) -> None: """Render the environment OpenCV is used to render the environment. Install OpenCV with ``pip install opencv-python`` """ frame = self._env.physics.render(480, 640, camera_id=0) # render the frame using OpenCV import cv2 cv2.imshow("env", cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) cv2.waitKey(1) def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/jax/gym_envs.py
from typing import Any, Optional, Tuple, Union import gym from packaging import version import jax import numpy as np from skrl import logger from skrl.envs.wrappers.jax.base import Wrapper class GymWrapper(Wrapper): def __init__(self, env: Any) -> None: """OpenAI Gym environment wrapper :param env: The environment to wrap :type env: Any supported OpenAI Gym environment """ super().__init__(env) self._vectorized = False try: if isinstance(env, gym.vector.SyncVectorEnv) or isinstance(env, gym.vector.AsyncVectorEnv): self._vectorized = True self._reset_once = True self._obs_tensor = None self._info_dict = None except Exception as e: logger.warning(f"Failed to check for a vectorized environment: {e}") self._deprecated_api = version.parse(gym.__version__) < version.parse("0.25.0") if self._deprecated_api: logger.warning(f"Using a deprecated version of OpenAI Gym's API: {gym.__version__}") @property def state_space(self) -> gym.Space: """State space An alias for the ``observation_space`` property """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def observation_space(self) -> gym.Space: """Observation space """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def action_space(self) -> gym.Space: """Action space """ if self._vectorized: return self._env.single_action_space return self._env.action_space def _observation_to_tensor(self, observation: Any, space: Optional[gym.Space] = None) -> np.ndarray: """Convert the OpenAI Gym observation to a flat tensor :param observation: The OpenAI Gym observation to convert to a tensor :type observation: Any supported OpenAI Gym observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: np.ndarray """ observation_space = self._env.observation_space if self._vectorized else self.observation_space space = space if space is not None else observation_space if self._vectorized and isinstance(space, gym.spaces.MultiDiscrete): return observation.reshape(self.num_envs, -1).astype(np.int32) elif isinstance(observation, int): return np.array(observation, dtype=np.int32).reshape(self.num_envs, -1) elif isinstance(observation, np.ndarray): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gym.spaces.Discrete): return np.array(observation, dtype=np.float32).reshape(self.num_envs, -1) elif isinstance(space, gym.spaces.Box): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gym.spaces.Dict): tmp = np.concatenate([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], axis=-1).reshape(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: np.ndarray) -> Any: """Convert the action to the OpenAI Gym expected format :param actions: The actions to perform :type actions: np.ndarray :raise ValueError: If the action space type is not supported :return: The action in the OpenAI Gym format :rtype: Any supported OpenAI Gym action space """ space = self._env.action_space if self._vectorized else self.action_space if self._vectorized: if isinstance(space, gym.spaces.MultiDiscrete): return actions.astype(space.dtype).reshape(space.shape) elif isinstance(space, gym.spaces.Tuple): if isinstance(space[0], gym.spaces.Box): return actions.astype(space[0].dtype).reshape(space.shape) elif isinstance(space[0], gym.spaces.Discrete): return actions.astype(space[0].dtype).reshape(-1) elif isinstance(space, gym.spaces.Discrete): return actions.item() elif isinstance(space, gym.spaces.MultiDiscrete): return actions.astype(space.dtype).reshape(space.shape) elif isinstance(space, gym.spaces.Box): return actions.astype(space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ if self._jax: actions = jax.device_get(actions) if self._deprecated_api: observation, reward, terminated, info = self._env.step(self._tensor_to_action(actions)) # truncated: https://gymnasium.farama.org/tutorials/handling_time_limits if type(info) is list: truncated = np.array([d.get("TimeLimit.truncated", False) for d in info], dtype=terminated.dtype) terminated *= np.logical_not(truncated) else: truncated = info.get("TimeLimit.truncated", False) if truncated: terminated = False else: observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions)) # convert response to numpy or jax observation = self._observation_to_tensor(observation) reward = np.array(reward, dtype=np.float32).reshape(self.num_envs, -1) terminated = np.array(terminated, dtype=np.int8).reshape(self.num_envs, -1) truncated = np.array(truncated, dtype=np.int8).reshape(self.num_envs, -1) # save observation and info for vectorized envs if self._vectorized: self._obs_tensor = observation self._info_dict = info return observation, reward, terminated, truncated, info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ # handle vectorized envs if self._vectorized: if not self._reset_once: return self._obs_tensor, self._info_dict self._reset_once = False # reset the env/envs if self._deprecated_api: observation = self._env.reset() info = {} else: observation, info = self._env.reset() return self._observation_to_tensor(observation), info def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/jax/bidexhands_envs.py
from typing import Any, Mapping, Sequence, Tuple, Union import gym import jax import jax.dlpack import numpy as np try: import torch import torch.utils.dlpack except: pass # TODO: show warning message from skrl.envs.wrappers.jax.base import MultiAgentEnvWrapper def _jax2torch(array, device, from_jax=True): return torch.utils.dlpack.from_dlpack(jax.dlpack.to_dlpack(array)) if from_jax else torch.tensor(array, device=device) def _torch2jax(tensor, to_jax=True): return jax.dlpack.from_dlpack(torch.utils.dlpack.to_dlpack(tensor.contiguous())) if to_jax else tensor.cpu().numpy() class BiDexHandsWrapper(MultiAgentEnvWrapper): def __init__(self, env: Any) -> None: """Bi-DexHands wrapper :param env: The environment to wrap :type env: Any supported Bi-DexHands environment """ super().__init__(env) self._reset_once = True self._obs_buf = None self._shared_obs_buf = None self.possible_agents = [f"agent_{i}" for i in range(self.num_agents)] @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ return self.possible_agents @property def observation_spaces(self) -> Mapping[str, gym.Space]: """Observation spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.observation_space)} @property def action_spaces(self) -> Mapping[str, gym.Space]: """Action spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.action_space)} @property def shared_observation_spaces(self) -> Mapping[str, gym.Space]: """Shared observation spaces """ return {uid: space for uid, space in zip(self.possible_agents, self._env.share_observation_space)} def step(self, actions: Mapping[str, Union[np.ndarray, jax.Array]]) -> \ Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dict of nd.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of dict of nd.ndarray or jax.Array and any other info """ actions = [_jax2torch(actions[uid], self.device, self._jax) for uid in self.possible_agents] with torch.no_grad(): obs_buf, shared_obs_buf, reward_buf, terminated_buf, info, _ = self._env.step(actions) obs_buf = _torch2jax(obs_buf, self._jax) shared_obs_buf = _torch2jax(shared_obs_buf, self._jax) reward_buf = _torch2jax(reward_buf, self._jax) terminated_buf = _torch2jax(terminated_buf.to(dtype=torch.int8), self._jax) self._obs_buf = {uid: obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} reward = {uid: reward_buf[:,i].reshape(-1, 1) for i, uid in enumerate(self.possible_agents)} terminated = {uid: terminated_buf[:,i].reshape(-1, 1) for i, uid in enumerate(self.possible_agents)} truncated = terminated info = {"shared_states": self._shared_obs_buf} return self._obs_buf, reward, terminated, truncated, info def reset(self) -> Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Reset the environment :return: Observation, info :rtype: tuple of dict of np.ndarray of jax.Array and any other info """ if self._reset_once: obs_buf, shared_obs_buf, _ = self._env.reset() obs_buf = _torch2jax(obs_buf, self._jax) shared_obs_buf = _torch2jax(shared_obs_buf, self._jax) self._obs_buf = {uid: 
obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._shared_obs_buf = {uid: shared_obs_buf[:,i] for i, uid in enumerate(self.possible_agents)} self._reset_once = False return self._obs_buf, {"shared_states": self._shared_obs_buf}
Toni-SM/skrl/skrl/envs/wrappers/jax/base.py
from typing import Any, Mapping, Sequence, Tuple, Union import gym import jax import numpy as np from skrl import config class Wrapper(object): def __init__(self, env: Any) -> None: """Base wrapper class for RL environments :param env: The environment to wrap :type env: Any supported RL environment """ self._jax = config.jax.backend == "jax" self._env = env # device (faster than @property) self.device = jax.devices()[0] if hasattr(self._env, "device"): try: self.device = jax.devices(self._env.device.split(':')[0] if type(self._env.device) == str else self._env.device.type)[0] except RuntimeError: pass # spaces try: self._action_space = self._env.single_action_space self._observation_space = self._env.single_observation_space except AttributeError: self._action_space = self._env.action_space self._observation_space = self._env.observation_space self._state_space = self._env.state_space if hasattr(self._env, "state_space") else self._observation_space def __getattr__(self, key: str) -> Any: """Get an attribute from the wrapped environment :param key: The attribute name :type key: str :raises AttributeError: If the attribute does not exist :return: The attribute value :rtype: Any """ if hasattr(self._env, key): return getattr(self._env, key) raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'") def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :raises NotImplementedError: Not implemented :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ raise NotImplementedError def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :raises NotImplementedError: Not implemented :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ raise NotImplementedError def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass @property def num_envs(self) -> int: """Number of environments If the wrapped environment does not have the ``num_envs`` property, it will be set to 1 """ return self._env.num_envs if hasattr(self._env, "num_envs") else 1 @property def num_agents(self) -> int: """Number of agents If the wrapped environment does not have the ``num_agents`` property, it will be set to 1 """ return self._env.num_agents if hasattr(self._env, "num_agents") else 1 @property def state_space(self) -> gym.Space: """State space If the wrapped environment does not have the ``state_space`` property, the value of the ``observation_space`` property will be used """ return self._state_space @property def observation_space(self) -> gym.Space: """Observation space """ return self._observation_space @property def action_space(self) -> gym.Space: """Action space """ return self._action_space class MultiAgentEnvWrapper(object): def __init__(self, env: Any) -> None: """Base wrapper class for multi-agent environments :param env: The multi-agent environment to wrap :type env: Any supported multi-agent environment """ self._jax = config.jax.backend == "jax" self._env = env # device (faster than @property) self.device = jax.devices()[0] if hasattr(self._env, "device"): try: self.device = jax.devices(self._env.device.split(':')[0] 
if type(self._env.device) == str else self._env.device.type)[0] except RuntimeError: pass self.possible_agents = [] def __getattr__(self, key: str) -> Any: """Get an attribute from the wrapped environment :param key: The attribute name :type key: str :raises AttributeError: If the attribute does not exist :return: The attribute value :rtype: Any """ if hasattr(self._env, key): return getattr(self._env, key) raise AttributeError(f"Wrapped environment ({self._env.__class__.__name__}) does not have attribute '{key}'") def reset(self) -> Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Reset the environment :raises NotImplementedError: Not implemented :return: Observation, info :rtype: tuple of dict of np.ndarray or jax.Array and any other info """ raise NotImplementedError def step(self, actions: Mapping[str, Union[np.ndarray, jax.Array]]) -> \ Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dict of np.ndarray or jax.Array :raises NotImplementedError: Not implemented :return: Observation, reward, terminated, truncated, info :rtype: tuple of dict of np.ndarray or jax.Array and any other info """ raise NotImplementedError def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass @property def num_envs(self) -> int: """Number of environments If the wrapped environment does not have the ``num_envs`` property, it will be set to 1 """ return self._env.num_envs if hasattr(self._env, "num_envs") else 1 @property def num_agents(self) -> int: """Number of agents If the wrapped environment does not have the ``num_agents`` property, it will be set to 1 """ return self._env.num_agents if hasattr(self._env, "num_agents") else 1 @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. 
agents can be added or removed) """ raise NotImplementedError @property def state_spaces(self) -> Mapping[str, gym.Space]: """State spaces An alias for the ``observation_spaces`` property """ return self.observation_spaces @property def observation_spaces(self) -> Mapping[str, gym.Space]: """Observation spaces """ raise NotImplementedError @property def action_spaces(self) -> Mapping[str, gym.Space]: """Action spaces """ raise NotImplementedError @property def shared_state_spaces(self) -> Mapping[str, gym.Space]: """Shared state spaces An alias for the ``shared_observation_spaces`` property """ return self.shared_observation_spaces @property def shared_observation_spaces(self) -> Mapping[str, gym.Space]: """Shared observation spaces """ raise NotImplementedError def state_space(self, agent: str) -> gym.Space: """State space :param agent: Name of the agent :type agent: str :return: The state space for the specified agent :rtype: gym.Space """ return self.state_spaces[agent] def observation_space(self, agent: str) -> gym.Space: """Observation space :param agent: Name of the agent :type agent: str :return: The observation space for the specified agent :rtype: gym.Space """ return self.observation_spaces[agent] def action_space(self, agent: str) -> gym.Space: """Action space :param agent: Name of the agent :type agent: str :return: The action space for the specified agent :rtype: gym.Space """ return self.action_spaces[agent] def shared_state_space(self, agent: str) -> gym.Space: """Shared state space :param agent: Name of the agent :type agent: str :return: The shared state space for the specified agent :rtype: gym.Space """ return self.shared_state_spaces[agent] def shared_observation_space(self, agent: str) -> gym.Space: """Shared observation space :param agent: Name of the agent :type agent: str :return: The shared observation space for the specified agent :rtype: gym.Space """ return self.shared_observation_spaces[agent]
Toni-SM/skrl/skrl/envs/wrappers/jax/__init__.py
from typing import Any, Union import gym import gymnasium from skrl import logger from skrl.envs.wrappers.jax.base import MultiAgentEnvWrapper, Wrapper from skrl.envs.wrappers.jax.bidexhands_envs import BiDexHandsWrapper from skrl.envs.wrappers.jax.gym_envs import GymWrapper from skrl.envs.wrappers.jax.gymnasium_envs import GymnasiumWrapper from skrl.envs.wrappers.jax.isaac_orbit_envs import IsaacOrbitWrapper from skrl.envs.wrappers.jax.isaacgym_envs import IsaacGymPreview2Wrapper, IsaacGymPreview3Wrapper from skrl.envs.wrappers.jax.omniverse_isaacgym_envs import OmniverseIsaacGymWrapper from skrl.envs.wrappers.jax.pettingzoo_envs import PettingZooWrapper __all__ = ["wrap_env", "Wrapper", "MultiAgentEnvWrapper"] def wrap_env(env: Any, wrapper: str = "auto", verbose: bool = True) -> Union[Wrapper, MultiAgentEnvWrapper]: """Wrap an environment to use a common interface Example:: >>> from skrl.envs.wrappers.jax import wrap_env >>> >>> # assuming that there is an environment called "env" >>> env = wrap_env(env) :param env: The environment to be wrapped :type env: gym.Env, gymnasium.Env, dm_env.Environment or VecTask :param wrapper: The type of wrapper to use (default: ``"auto"``). If ``"auto"``, the wrapper will be automatically selected based on the environment class. The supported wrappers are described in the following table: +--------------------+-------------------------+ |Environment |Wrapper tag | +====================+=========================+ |OpenAI Gym |``"gym"`` | +--------------------+-------------------------+ |Gymnasium |``"gymnasium"`` | +--------------------+-------------------------+ |Petting Zoo |``"pettingzoo"`` | +--------------------+-------------------------+ |Bi-DexHands |``"bidexhands"`` | +--------------------+-------------------------+ |Isaac Gym preview 2 |``"isaacgym-preview2"`` | +--------------------+-------------------------+ |Isaac Gym preview 3 |``"isaacgym-preview3"`` | +--------------------+-------------------------+ |Isaac Gym preview 4 |``"isaacgym-preview4"`` | +--------------------+-------------------------+ |Omniverse Isaac Gym |``"omniverse-isaacgym"`` | +--------------------+-------------------------+ |Isaac Sim (orbit) |``"isaac-orbit"`` | +--------------------+-------------------------+ :type wrapper: str, optional :param verbose: Whether to print the wrapper type (default: ``True``) :type verbose: bool, optional :raises ValueError: Unknown wrapper type :return: Wrapped environment :rtype: Wrapper or MultiAgentEnvWrapper """ if verbose: logger.info("Environment class: {}".format(", ".join([str(base).replace("<class '", "").replace("'>", "") \ for base in env.__class__.__bases__]))) if wrapper == "auto": base_classes = [str(base) for base in env.__class__.__bases__] if "<class 'omni.isaac.gym.vec_env.vec_env_base.VecEnvBase'>" in base_classes or \ "<class 'omni.isaac.gym.vec_env.vec_env_mt.VecEnvMT'>" in base_classes: if verbose: logger.info("Environment wrapper: Omniverse Isaac Gym") return OmniverseIsaacGymWrapper(env) elif isinstance(env, gym.core.Env) or isinstance(env, gym.core.Wrapper): # isaac-orbit if hasattr(env, "sim") and hasattr(env, "env_ns"): if verbose: logger.info("Environment wrapper: Isaac Orbit") return IsaacOrbitWrapper(env) # gym if verbose: logger.info("Environment wrapper: Gym") return GymWrapper(env) elif isinstance(env, gymnasium.core.Env) or isinstance(env, gymnasium.core.Wrapper): if verbose: logger.info("Environment wrapper: Gymnasium") return GymnasiumWrapper(env) elif "<class 'pettingzoo.utils.env" in base_classes[0] or 
"<class 'pettingzoo.utils.wrappers" in base_classes[0]: if verbose: logger.info("Environment wrapper: Petting Zoo") return PettingZooWrapper(env) elif "<class 'dm_env._environment.Environment'>" in base_classes: if verbose: logger.info("Environment wrapper: DeepMind") return DeepMindWrapper(env) elif "<class 'robosuite.environments." in base_classes[0]: if verbose: logger.info("Environment wrapper: Robosuite") return RobosuiteWrapper(env) elif "<class 'rlgpu.tasks.base.vec_task.VecTask'>" in base_classes: if verbose: logger.info("Environment wrapper: Isaac Gym (preview 2)") return IsaacGymPreview2Wrapper(env) if verbose: logger.info("Environment wrapper: Isaac Gym (preview 3/4)") return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3 elif wrapper == "gym": if verbose: logger.info("Environment wrapper: Gym") return GymWrapper(env) elif wrapper == "gymnasium": if verbose: logger.info("Environment wrapper: gymnasium") return GymnasiumWrapper(env) elif wrapper == "pettingzoo": if verbose: logger.info("Environment wrapper: Petting Zoo") return PettingZooWrapper(env) elif wrapper == "dm": if verbose: logger.info("Environment wrapper: DeepMind") return DeepMindWrapper(env) elif wrapper == "robosuite": if verbose: logger.info("Environment wrapper: Robosuite") return RobosuiteWrapper(env) elif wrapper == "bidexhands": if verbose: logger.info("Environment wrapper: Bi-DexHands") return BiDexHandsWrapper(env) elif wrapper == "isaacgym-preview2": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 2)") return IsaacGymPreview2Wrapper(env) elif wrapper == "isaacgym-preview3": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 3)") return IsaacGymPreview3Wrapper(env) elif wrapper == "isaacgym-preview4": if verbose: logger.info("Environment wrapper: Isaac Gym (preview 4)") return IsaacGymPreview3Wrapper(env) # preview 4 is the same as 3 elif wrapper == "omniverse-isaacgym": if verbose: logger.info("Environment wrapper: Omniverse Isaac Gym") return OmniverseIsaacGymWrapper(env) elif wrapper == "isaac-orbit": if verbose: logger.info("Environment wrapper: Isaac Orbit") return IsaacOrbitWrapper(env) else: raise ValueError(f"Unknown wrapper type: {wrapper}")
Toni-SM/skrl/skrl/envs/wrappers/jax/isaacgym_envs.py
from typing import Any, Tuple, Union import jax import jax.dlpack as jax_dlpack import numpy as np try: import torch import torch.utils.dlpack as torch_dlpack except: pass # TODO: show warning message from skrl import logger from skrl.envs.wrappers.jax.base import Wrapper # ML frameworks conversion utilities # jaxlib.xla_extension.XlaRuntimeError: INVALID_ARGUMENT: DLPack tensor is on GPU, but no GPU backend was provided. _CPU = jax.devices()[0].device_kind.lower() == "cpu" if _CPU: logger.warning("IsaacGymEnvs runs on GPU, but there is no GPU backend for JAX. JAX operations will run on CPU.") def _jax2torch(array, device, from_jax=True): if from_jax: return torch_dlpack.from_dlpack(jax_dlpack.to_dlpack(array)).to(device=device) return torch.tensor(array, device=device) def _torch2jax(tensor, to_jax=True): if to_jax: return jax_dlpack.from_dlpack(torch_dlpack.to_dlpack(tensor.contiguous().cpu() if _CPU else tensor.contiguous())) return tensor.cpu().numpy() class IsaacGymPreview2Wrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Gym environment (preview 2) wrapper :param env: The environment to wrap :type env: Any supported Isaac Gym environment (preview 2) environment """ super().__init__(env) self._reset_once = True self._obs_buf = None def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ actions = _jax2torch(actions, self._env.device, self._jax) with torch.no_grad(): self._obs_buf, reward, terminated, info = self._env.step(actions) terminated = terminated.to(dtype=torch.int8) truncated = info["time_outs"].to(dtype=torch.int8) if "time_outs" in info else torch.zeros_like(terminated) return _torch2jax(self._obs_buf, self._jax), \ _torch2jax(reward.view(-1, 1), self._jax), \ _torch2jax(terminated.view(-1, 1), self._jax), \ _torch2jax(truncated.view(-1, 1), self._jax), \ info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ if self._reset_once: self._obs_buf = self._env.reset() self._reset_once = False return _torch2jax(self._obs_buf, self._jax), {} def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass class IsaacGymPreview3Wrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Gym environment (preview 3) wrapper :param env: The environment to wrap :type env: Any supported Isaac Gym environment (preview 3) environment """ super().__init__(env) self._reset_once = True self._obs_dict = None def step(self, actions: Union[np.ndarray, jax.Array]) ->\ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ actions = _jax2torch(actions, self._env.device, self._jax) with torch.no_grad(): self._obs_dict, reward, terminated, info = self._env.step(actions) terminated = terminated.to(dtype=torch.int8) 
truncated = info["time_outs"].to(dtype=torch.int8) if "time_outs" in info else torch.zeros_like(terminated) return _torch2jax(self._obs_dict["obs"], self._jax), \ _torch2jax(reward.view(-1, 1), self._jax), \ _torch2jax(terminated.view(-1, 1), self._jax), \ _torch2jax(truncated.view(-1, 1), self._jax), \ info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ if self._reset_once: self._obs_dict = self._env.reset() self._reset_once = False return _torch2jax(self._obs_dict["obs"], self._jax), {} def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ pass
Toni-SM/skrl/skrl/envs/wrappers/jax/isaac_orbit_envs.py
from typing import Any, Tuple, Union import jax import jax.dlpack as jax_dlpack import numpy as np try: import torch import torch.utils.dlpack as torch_dlpack except: pass # TODO: show warning message from skrl import logger from skrl.envs.wrappers.jax.base import Wrapper # ML frameworks conversion utilities # jaxlib.xla_extension.XlaRuntimeError: INVALID_ARGUMENT: DLPack tensor is on GPU, but no GPU backend was provided. _CPU = jax.devices()[0].device_kind.lower() == "cpu" if _CPU: logger.warning("Isaac Orbit runs on GPU, but there is no GPU backend for JAX. JAX operations will run on CPU.") def _jax2torch(array, device, from_jax=True): if from_jax: return torch_dlpack.from_dlpack(jax_dlpack.to_dlpack(array)).to(device=device) return torch.tensor(array, device=device) def _torch2jax(tensor, to_jax=True): if to_jax: return jax_dlpack.from_dlpack(torch_dlpack.to_dlpack(tensor.contiguous().cpu() if _CPU else tensor.contiguous())) return tensor.cpu().numpy() class IsaacOrbitWrapper(Wrapper): def __init__(self, env: Any) -> None: """Isaac Orbit environment wrapper :param env: The environment to wrap :type env: Any supported Isaac Orbit environment """ super().__init__(env) self._reset_once = True self._obs_dict = None def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ actions = _jax2torch(actions, self._env.device, self._jax) with torch.no_grad(): self._obs_dict, reward, terminated, truncated, info = self._env.step(actions) terminated = terminated.to(dtype=torch.int8) truncated = truncated.to(dtype=torch.int8) return _torch2jax(self._obs_dict["policy"], self._jax), \ _torch2jax(reward.view(-1, 1), self._jax), \ _torch2jax(terminated.view(-1, 1), self._jax), \ _torch2jax(truncated.view(-1, 1), self._jax), \ info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ if self._reset_once: self._obs_dict, info = self._env.reset() self._reset_once = False return _torch2jax(self._obs_dict["policy"], self._jax), info def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/jax/gymnasium_envs.py
from typing import Any, Optional, Tuple, Union import gymnasium import jax import numpy as np from skrl import logger from skrl.envs.wrappers.jax.base import Wrapper class GymnasiumWrapper(Wrapper): def __init__(self, env: Any) -> None: """Gymnasium environment wrapper :param env: The environment to wrap :type env: Any supported Gymnasium environment """ super().__init__(env) self._vectorized = False try: if isinstance(env, gymnasium.vector.SyncVectorEnv) or isinstance(env, gymnasium.vector.AsyncVectorEnv): self._vectorized = True self._reset_once = True self._obs_tensor = None self._info_dict = None except Exception as e: logger.warning(f"Failed to check for a vectorized environment: {e}") @property def state_space(self) -> gymnasium.Space: """State space An alias for the ``observation_space`` property """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def observation_space(self) -> gymnasium.Space: """Observation space """ if self._vectorized: return self._env.single_observation_space return self._env.observation_space @property def action_space(self) -> gymnasium.Space: """Action space """ if self._vectorized: return self._env.single_action_space return self._env.action_space def _observation_to_tensor(self, observation: Any, space: Optional[gymnasium.Space] = None) -> np.ndarray: """Convert the Gymnasium observation to a flat tensor :param observation: The Gymnasium observation to convert to a tensor :type observation: Any supported Gymnasium observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: np.ndarray """ observation_space = self._env.observation_space if self._vectorized else self.observation_space space = space if space is not None else observation_space if self._vectorized and isinstance(space, gymnasium.spaces.MultiDiscrete): return observation.reshape(self.num_envs, -1).astype(np.int32) elif isinstance(observation, int): return np.array(observation, dtype=np.int32).reshape(self.num_envs, -1) elif isinstance(observation, np.ndarray): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gymnasium.spaces.Discrete): return np.array(observation, dtype=np.float32).reshape(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Box): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gymnasium.spaces.Dict): tmp = np.concatenate([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], axis=-1).reshape(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. 
Please report this issue") def _tensor_to_action(self, actions: np.ndarray) -> Any: """Convert the action to the Gymnasium expected format :param actions: The actions to perform :type actions: np.ndarray :raise ValueError: If the action space type is not supported :return: The action in the Gymnasium format :rtype: Any supported Gymnasium action space """ space = self._env.action_space if self._vectorized else self.action_space if self._vectorized: if isinstance(space, gymnasium.spaces.MultiDiscrete): return actions.astype(space.dtype).reshape(space.shape) elif isinstance(space, gymnasium.spaces.Tuple): if isinstance(space[0], gymnasium.spaces.Box): return actions.astype(space[0].dtype).reshape(space.shape) elif isinstance(space[0], gymnasium.spaces.Discrete): return actions.astype(space[0].dtype).reshape(-1) if isinstance(space, gymnasium.spaces.Discrete): return actions.item() elif isinstance(space, gymnasium.spaces.MultiDiscrete): return actions.astype(space.dtype).reshape(space.shape) elif isinstance(space, gymnasium.spaces.Box): return actions.astype(space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. Please report this issue") def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ if self._jax: actions = jax.device_get(actions) observation, reward, terminated, truncated, info = self._env.step(self._tensor_to_action(actions)) # convert response to numpy or jax observation = self._observation_to_tensor(observation) reward = np.array(reward, dtype=np.float32).reshape(self.num_envs, -1) terminated = np.array(terminated, dtype=np.int8).reshape(self.num_envs, -1) truncated = np.array(truncated, dtype=np.int8).reshape(self.num_envs, -1) # if self._jax: # HACK: jax.device_put(...).block_until_ready() # observation = jax.device_put(observation) # reward = jax.device_put(reward) # terminated = jax.device_put(terminated) # truncated = jax.device_put(truncated) # save observation and info for vectorized envs if self._vectorized: self._obs_tensor = observation self._info_dict = info return observation, reward, terminated, truncated, info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ # handle vectorized envs if self._vectorized: if not self._reset_once: return self._obs_tensor, self._info_dict self._reset_once = False # reset the env/envs observation, info = self._env.reset() # convert response to numpy or jax observation = self._observation_to_tensor(observation) # if self._jax: # HACK: jax.device_put(...).block_until_ready() # observation = jax.device_put(observation) return observation, info def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/jax/pettingzoo_envs.py
from typing import Any, Mapping, Sequence, Tuple, Union import collections import gymnasium import jax import numpy as np from skrl.envs.wrappers.jax.base import MultiAgentEnvWrapper class PettingZooWrapper(MultiAgentEnvWrapper): def __init__(self, env: Any) -> None: """PettingZoo (parallel) environment wrapper :param env: The environment to wrap :type env: Any supported PettingZoo (parallel) environment """ super().__init__(env) self.possible_agents = self._env.possible_agents self._shared_observation_space = self._compute_shared_observation_space(self._env.observation_spaces) def _compute_shared_observation_space(self, observation_spaces): space = next(iter(observation_spaces.values())) shape = (len(self.possible_agents),) + space.shape return gymnasium.spaces.Box(low=np.stack([space.low for _ in self.possible_agents], axis=0), high=np.stack([space.high for _ in self.possible_agents], axis=0), dtype=space.dtype, shape=shape) @property def num_agents(self) -> int: """Number of agents """ return len(self.possible_agents) @property def agents(self) -> Sequence[str]: """Names of all current agents These may be changed as an environment progresses (i.e. agents can be added or removed) """ return self._env.agents @property def observation_spaces(self) -> Mapping[str, gymnasium.Space]: """Observation spaces """ return {uid: self._env.observation_space(uid) for uid in self.possible_agents} @property def action_spaces(self) -> Mapping[str, gymnasium.Space]: """Action spaces """ return {uid: self._env.action_space(uid) for uid in self.possible_agents} @property def shared_observation_spaces(self) -> Mapping[str, gymnasium.Space]: """Shared observation spaces """ return {uid: self._shared_observation_space for uid in self.possible_agents} def _observation_to_tensor(self, observation: Any, space: gymnasium.Space) -> np.ndarray: """Convert the Gymnasium observation to a flat tensor :param observation: The Gymnasium observation to convert to a tensor :type observation: Any supported Gymnasium observation space :raises: ValueError if the observation space type is not supported :return: The observation as a flat tensor :rtype: np.ndarray """ if isinstance(observation, int): return np.array(observation, dtype=np.int32).view(self.num_envs, -1) elif isinstance(observation, np.ndarray): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gymnasium.spaces.Discrete): return np.array(observation, dtype=np.float32).reshape(self.num_envs, -1) elif isinstance(space, gymnasium.spaces.Box): return observation.reshape(self.num_envs, -1).astype(np.float32) elif isinstance(space, gymnasium.spaces.Dict): tmp = np.concatenate([self._observation_to_tensor(observation[k], space[k]) \ for k in sorted(space.keys())], axis=-1).view(self.num_envs, -1) return tmp else: raise ValueError(f"Observation space type {type(space)} not supported. Please report this issue") def _tensor_to_action(self, actions: np.ndarray, space: gymnasium.Space) -> Any: """Convert the action to the Gymnasium expected format :param actions: The actions to perform :type actions: np.ndarray :raise ValueError: If the action space type is not supported :return: The action in the Gymnasium format :rtype: Any supported Gymnasium action space """ if isinstance(space, gymnasium.spaces.Discrete): return actions.item() elif isinstance(space, gymnasium.spaces.Box): return actions.astype(space.dtype).reshape(space.shape) raise ValueError(f"Action space type {type(space)} not supported. 
Please report this issue") def step(self, actions: Mapping[str, Union[np.ndarray, jax.Array]]) -> \ Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Perform a step in the environment :param actions: The actions to perform :type actions: dict of np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of dict of np.ndarray or jax.Array and any other info """ if self._jax: actions = jax.device_get(actions) actions = {uid: self._tensor_to_action(action, self._env.action_space(uid)) for uid, action in actions.items()} observations, rewards, terminated, truncated, infos = self._env.step(actions) # build shared observation shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0) shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space) infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents} # convert response to numpy or jax observations = {uid: self._observation_to_tensor(value, self._env.observation_space(uid)) for uid, value in observations.items()} rewards = {uid: np.array(value, dtype=np.float32).reshape(self.num_envs, -1) for uid, value in rewards.items()} terminated = {uid: np.array(value, dtype=np.int8).reshape(self.num_envs, -1) for uid, value in terminated.items()} truncated = {uid: np.array(value, dtype=np.int8).reshape(self.num_envs, -1) for uid, value in truncated.items()} return observations, rewards, terminated, truncated, infos def reset(self) -> Tuple[Mapping[str, Union[np.ndarray, jax.Array]], Mapping[str, Any]]: """Reset the environment :return: Observation, info :rtype: tuple of dict of np.ndarray or jax.Array and any other info """ outputs = self._env.reset() if isinstance(outputs, collections.abc.Mapping): observations = outputs infos = {uid: {} for uid in self.possible_agents} else: observations, infos = outputs # build shared observation shared_observations = np.stack([observations[uid] for uid in self.possible_agents], axis=0) shared_observations = self._observation_to_tensor(shared_observations, self._shared_observation_space) infos["shared_states"] = {uid: shared_observations for uid in self.possible_agents} # convert response to numpy or jax observations = {uid: self._observation_to_tensor(observation, self._env.observation_space(uid)) for uid, observation in observations.items()} return observations, infos def render(self, *args, **kwargs) -> None: """Render the environment """ self._env.render(*args, **kwargs) def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/envs/wrappers/jax/omniverse_isaacgym_envs.py
from typing import Any, Optional, Tuple, Union import jax import jax.dlpack as jax_dlpack import numpy as np try: import torch import torch.utils.dlpack as torch_dlpack except: pass # TODO: show warning message from skrl import logger from skrl.envs.wrappers.jax.base import Wrapper # ML frameworks conversion utilities # jaxlib.xla_extension.XlaRuntimeError: INVALID_ARGUMENT: DLPack tensor is on GPU, but no GPU backend was provided. _CPU = jax.devices()[0].device_kind.lower() == "cpu" if _CPU: logger.warning("OmniIsaacGymEnvs runs on GPU, but there is no GPU backend for JAX. JAX operations will run on CPU.") def _jax2torch(array, device, from_jax=True): if from_jax: return torch_dlpack.from_dlpack(jax_dlpack.to_dlpack(array)).to(device=device) return torch.tensor(array, device=device) def _torch2jax(tensor, to_jax=True): if to_jax: return jax_dlpack.from_dlpack(torch_dlpack.to_dlpack(tensor.contiguous().cpu() if _CPU else tensor.contiguous())) return tensor.cpu().numpy() class OmniverseIsaacGymWrapper(Wrapper): def __init__(self, env: Any) -> None: """Omniverse Isaac Gym environment wrapper :param env: The environment to wrap :type env: Any supported Omniverse Isaac Gym environment """ super().__init__(env) self._reset_once = True self._obs_dict = None def run(self, trainer: Optional["omni.isaac.gym.vec_env.vec_env_mt.TrainerMT"] = None) -> None: """Run the simulation in the main thread This method is valid only for the Omniverse Isaac Gym multi-threaded environments :param trainer: Trainer which should implement a ``run`` method that initiates the RL loop on a new thread :type trainer: omni.isaac.gym.vec_env.vec_env_mt.TrainerMT, optional """ self._env.run(trainer) def step(self, actions: Union[np.ndarray, jax.Array]) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Perform a step in the environment :param actions: The actions to perform :type actions: np.ndarray or jax.Array :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ actions = _jax2torch(actions, self._env._task.device, self._jax) with torch.no_grad(): self._obs_dict, reward, terminated, info = self._env.step(actions) terminated = terminated.to(dtype=torch.int8) truncated = info["time_outs"].to(dtype=torch.int8) if "time_outs" in info else torch.zeros_like(terminated) return _torch2jax(self._obs_dict["obs"], self._jax), \ _torch2jax(reward.view(-1, 1), self._jax), \ _torch2jax(terminated.view(-1, 1), self._jax), \ _torch2jax(truncated.view(-1, 1), self._jax), \ info def reset(self) -> Tuple[Union[np.ndarray, jax.Array], Any]: """Reset the environment :return: Observation, info :rtype: np.ndarray or jax.Array and any other info """ if self._reset_once: self._obs_dict = self._env.reset() self._reset_once = False return _torch2jax(self._obs_dict["obs"], self._jax), {} def render(self, *args, **kwargs) -> None: """Render the environment """ pass def close(self) -> None: """Close the environment """ self._env.close()
Toni-SM/skrl/skrl/agents/__init__.py
Toni-SM/skrl/skrl/agents/torch/base.py
from typing import Any, Mapping, Optional, Tuple, Union import collections import copy import datetime import os import gym import gymnasium import numpy as np import torch from torch.utils.tensorboard import SummaryWriter from skrl import logger from skrl.memories.torch import Memory from skrl.models.torch import Model class Agent: def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Base class that represent a RL agent :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict """ self.models = models self.observation_space = observation_space self.action_space = action_space self.cfg = cfg if cfg is not None else {} self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device) if type(memory) is list: self.memory = memory[0] self.secondary_memories = memory[1:] else: self.memory = memory self.secondary_memories = [] # convert the models to their respective device for model in self.models.values(): if model is not None: model.to(model.device) self.tracking_data = collections.defaultdict(list) self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000) self._track_rewards = collections.deque(maxlen=100) self._track_timesteps = collections.deque(maxlen=100) self._cumulative_rewards = None self._cumulative_timesteps = None self.training = True # checkpoint self.checkpoint_modules = {} self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000) self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False) self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": False, "modules": {}} # experiment directory directory = self.cfg.get("experiment", {}).get("directory", "") experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "") if not directory: directory = os.path.join(os.getcwd(), "runs") if not experiment_name: experiment_name = "{}_{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), self.__class__.__name__) self.experiment_dir = os.path.join(directory, experiment_name) def __str__(self) -> str: """Generate a representation of the agent as string :return: Representation of the agent as string :rtype: str """ string = f"Agent: {repr(self)}" for k, v in self.cfg.items(): if type(v) is dict: string += f"\n |-- {k}" for k1, v1 in 
v.items(): string += f"\n | |-- {k1}: {v1}" else: string += f"\n |-- {k}: {v}" return string def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any: """Empty preprocess method This method is defined because PyTorch multiprocessing can't pickle lambdas :param _input: Input to preprocess :type _input: Any :return: Preprocessed input :rtype: Any """ return _input def _get_internal_value(self, _module: Any) -> Any: """Get internal module/variable state/value :param _module: Module or variable :type _module: Any :return: Module/variable state/value :rtype: Any """ return _module.state_dict() if hasattr(_module, "state_dict") else _module def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent This method should be called before the agent is used. It will initialize the TensoBoard writer (and optionally Weights & Biases) and create the checkpoints directory :param trainer_cfg: Trainer configuration :type trainer_cfg: dict, optional """ # setup Weights & Biases if self.cfg.get("experiment", {}).get("wandb", False): # save experiment config trainer_cfg = trainer_cfg if trainer_cfg is not None else {} try: models_cfg = {k: v.net._modules for (k, v) in self.models.items()} except AttributeError: models_cfg = {k: v._modules for (k, v) in self.models.items()} config={**self.cfg, **trainer_cfg, **models_cfg} # set default values wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {})) wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1]) wandb_kwargs.setdefault("sync_tensorboard", True) wandb_kwargs.setdefault("config", {}) wandb_kwargs["config"].update(config) # init Weights & Biases import wandb wandb.init(**wandb_kwargs) # main entry to log data for consumption and visualization by TensorBoard if self.write_interval > 0: self.writer = SummaryWriter(log_dir=self.experiment_dir) if self.checkpoint_interval > 0: os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True) def track_data(self, tag: str, value: float) -> None: """Track data to TensorBoard Currently only scalar data are supported :param tag: Data identifier (e.g. 'Loss / policy loss') :type tag: str :param value: Value to track :type value: float """ self.tracking_data[tag].append(value) def write_tracking_data(self, timestep: int, timesteps: int) -> None: """Write tracking data to TensorBoard :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ for k, v in self.tracking_data.items(): if k.endswith("(min)"): self.writer.add_scalar(k, np.min(v), timestep) elif k.endswith("(max)"): self.writer.add_scalar(k, np.max(v), timestep) else: self.writer.add_scalar(k, np.mean(v), timestep) # reset data containers for next iteration self._track_rewards.clear() self._track_timesteps.clear() self.tracking_data.clear() def write_checkpoint(self, timestep: int, timesteps: int) -> None: """Write checkpoint (modules) to disk The checkpoints are saved in the directory 'checkpoints' in the experiment directory. The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time. 
:param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f")) # separated modules if self.checkpoint_store_separately: for name, module in self.checkpoint_modules.items(): torch.save(self._get_internal_value(module), os.path.join(self.experiment_dir, "checkpoints", f"{name}_{tag}.pt")) # whole agent else: modules = {} for name, module in self.checkpoint_modules.items(): modules[name] = self._get_internal_value(module) torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pt")) # best modules if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]: # separated modules if self.checkpoint_store_separately: for name, module in self.checkpoint_modules.items(): torch.save(self.checkpoint_best_modules["modules"][name], os.path.join(self.experiment_dir, "checkpoints", f"best_{name}.pt")) # whole agent else: modules = {} for name, module in self.checkpoint_modules.items(): modules[name] = self.checkpoint_best_modules["modules"][name] torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", "best_agent.pt")) self.checkpoint_best_modules["saved"] = True def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes :return: Actions :rtype: torch.Tensor """ raise NotImplementedError def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory (to be implemented by the inheriting classes) Inheriting classes must call this method to record episode information (rewards, timesteps, etc.). In addition to recording environment transition (such as states, rewards, etc.), agent information can be recorded. 
:param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if self.write_interval > 0: # compute the cumulative sum of the rewards and timesteps if self._cumulative_rewards is None: self._cumulative_rewards = torch.zeros_like(rewards, dtype=torch.float32) self._cumulative_timesteps = torch.zeros_like(rewards, dtype=torch.int32) self._cumulative_rewards.add_(rewards) self._cumulative_timesteps.add_(1) # check ended episodes finished_episodes = (terminated + truncated).nonzero(as_tuple=False) if finished_episodes.numel(): # storage cumulative rewards and timesteps self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist()) self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist()) # reset the cumulative rewards and timesteps self._cumulative_rewards[finished_episodes] = 0 self._cumulative_timesteps[finished_episodes] = 0 # record data self.tracking_data["Reward / Instantaneous reward (max)"].append(torch.max(rewards).item()) self.tracking_data["Reward / Instantaneous reward (min)"].append(torch.min(rewards).item()) self.tracking_data["Reward / Instantaneous reward (mean)"].append(torch.mean(rewards).item()) if len(self._track_rewards): track_rewards = np.array(self._track_rewards) track_timesteps = np.array(self._track_timesteps) self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards)) self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards)) self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards)) self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps)) self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps)) self.tracking_data["Episode / Total timesteps (mean)"].append(np.mean(track_timesteps)) def set_mode(self, mode: str) -> None: """Set the model mode (training or evaluation) :param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ for model in self.models.values(): if model is not None: model.set_mode(mode) def set_running_mode(self, mode: str) -> None: """Set the current running mode (training or evaluation) This method sets the value of the ``training`` property (boolean). This property can be used to know if the agent is running in training or evaluation mode. 
:param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ self.training = mode == "train" def save(self, path: str) -> None: """Save the agent to the specified path :param path: Path to save the model to :type path: str """ modules = {} for name, module in self.checkpoint_modules.items(): modules[name] = self._get_internal_value(module) torch.save(modules, path) def load(self, path: str) -> None: """Load the model from the specified path The final storage device is determined by the constructor of the model :param path: Path to load the model from :type path: str """ modules = torch.load(path, map_location=self.device) if type(modules) is dict: for name, data in modules.items(): module = self.checkpoint_modules.get(name, None) if module is not None: if hasattr(module, "load_state_dict"): module.load_state_dict(data) if hasattr(module, "eval"): module.eval() else: raise NotImplementedError else: logger.warning(f"Cannot load the {name} module. The agent doesn't have such an instance") def migrate(self, path: str, name_map: Mapping[str, Mapping[str, str]] = {}, auto_mapping: bool = True, verbose: bool = False) -> bool: """Migrate the specified extrernal checkpoint to the current agent The final storage device is determined by the constructor of the agent. Only files generated by the *rl_games* library are supported at the moment For ambiguous models (where 2 or more parameters, for source or current model, have equal shape) it is necessary to define the ``name_map``, at least for those parameters, to perform the migration successfully :param path: Path to the external checkpoint to migrate from :type path: str :param name_map: Name map to use for the migration (default: ``{}``). Keys are the current parameter names and values are the external parameter names :type name_map: Mapping[str, Mapping[str, str]], optional :param auto_mapping: Automatically map the external state dict to the current state dict (default: ``True``) :type auto_mapping: bool, optional :param verbose: Show model names and migration (default: ``False``) :type verbose: bool, optional :raises ValueError: If the correct file type cannot be identified from the ``path`` parameter :return: True if the migration was successful, False otherwise. 
Migration is successful if all parameters of the current model are found in the external model :rtype: bool Example:: # migrate a rl_games checkpoint with ambiguous state_dict >>> agent.migrate(path="./runs/Cartpole/nn/Cartpole.pth", verbose=False) [skrl:WARNING] Ambiguous match for net.0.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.2.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.4.weight <- [a2c_network.value.weight, a2c_network.mu.weight] [skrl:WARNING] Ambiguous match for net.4.bias <- [a2c_network.value.bias, a2c_network.mu.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.0.bias -> [net.0.bias, net.2.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.2.bias -> [net.0.bias, net.2.bias] [skrl:WARNING] Ambiguous match for net.0.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.2.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.4.weight <- [a2c_network.value.weight, a2c_network.mu.weight] [skrl:WARNING] Ambiguous match for net.4.bias <- [a2c_network.value.bias, a2c_network.mu.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.0.bias -> [net.0.bias, net.2.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.2.bias -> [net.0.bias, net.2.bias] False >>> name_map = {"policy": {"net.0.bias": "a2c_network.actor_mlp.0.bias", ... "net.2.bias": "a2c_network.actor_mlp.2.bias", ... "net.4.weight": "a2c_network.mu.weight", ... "net.4.bias": "a2c_network.mu.bias"}, ... "value": {"net.0.bias": "a2c_network.actor_mlp.0.bias", ... "net.2.bias": "a2c_network.actor_mlp.2.bias", ... "net.4.weight": "a2c_network.value.weight", ... 
"net.4.bias": "a2c_network.value.bias"}} >>> model.migrate(path="./runs/Cartpole/nn/Cartpole.pth", name_map=name_map, verbose=True) [skrl:INFO] Modules [skrl:INFO] |-- current [skrl:INFO] | |-- policy (Policy) [skrl:INFO] | | |-- log_std_parameter : [1] [skrl:INFO] | | |-- net.0.weight : [32, 4] [skrl:INFO] | | |-- net.0.bias : [32] [skrl:INFO] | | |-- net.2.weight : [32, 32] [skrl:INFO] | | |-- net.2.bias : [32] [skrl:INFO] | | |-- net.4.weight : [1, 32] [skrl:INFO] | | |-- net.4.bias : [1] [skrl:INFO] | |-- value (Value) [skrl:INFO] | | |-- net.0.weight : [32, 4] [skrl:INFO] | | |-- net.0.bias : [32] [skrl:INFO] | | |-- net.2.weight : [32, 32] [skrl:INFO] | | |-- net.2.bias : [32] [skrl:INFO] | | |-- net.4.weight : [1, 32] [skrl:INFO] | | |-- net.4.bias : [1] [skrl:INFO] | |-- optimizer (Adam) [skrl:INFO] | | |-- state (dict) [skrl:INFO] | | |-- param_groups (list) [skrl:INFO] | |-- state_preprocessor (RunningStandardScaler) [skrl:INFO] | | |-- running_mean : [4] [skrl:INFO] | | |-- running_variance : [4] [skrl:INFO] | | |-- current_count : [] [skrl:INFO] | |-- value_preprocessor (RunningStandardScaler) [skrl:INFO] | | |-- running_mean : [1] [skrl:INFO] | | |-- running_variance : [1] [skrl:INFO] | | |-- current_count : [] [skrl:INFO] |-- source [skrl:INFO] | |-- model (OrderedDict) [skrl:INFO] | | |-- value_mean_std.running_mean : [1] [skrl:INFO] | | |-- value_mean_std.running_var : [1] [skrl:INFO] | | |-- value_mean_std.count : [] [skrl:INFO] | | |-- running_mean_std.running_mean : [4] [skrl:INFO] | | |-- running_mean_std.running_var : [4] [skrl:INFO] | | |-- running_mean_std.count : [] [skrl:INFO] | | |-- a2c_network.sigma : [1] [skrl:INFO] | | |-- a2c_network.actor_mlp.0.weight : [32, 4] [skrl:INFO] | | |-- a2c_network.actor_mlp.0.bias : [32] [skrl:INFO] | | |-- a2c_network.actor_mlp.2.weight : [32, 32] [skrl:INFO] | | |-- a2c_network.actor_mlp.2.bias : [32] [skrl:INFO] | | |-- a2c_network.value.weight : [1, 32] [skrl:INFO] | | |-- a2c_network.value.bias : [1] [skrl:INFO] | | |-- a2c_network.mu.weight : [1, 32] [skrl:INFO] | | |-- a2c_network.mu.bias : [1] [skrl:INFO] | |-- epoch (int) [skrl:INFO] | |-- optimizer (dict) [skrl:INFO] | |-- frame (int) [skrl:INFO] | |-- last_mean_rewards (float32) [skrl:INFO] | |-- env_state (NoneType) [skrl:INFO] Migration [skrl:INFO] Model: policy (Policy) [skrl:INFO] Models [skrl:INFO] |-- current: 7 items [skrl:INFO] | |-- log_std_parameter : [1] [skrl:INFO] | |-- net.0.weight : [32, 4] [skrl:INFO] | |-- net.0.bias : [32] [skrl:INFO] | |-- net.2.weight : [32, 32] [skrl:INFO] | |-- net.2.bias : [32] [skrl:INFO] | |-- net.4.weight : [1, 32] [skrl:INFO] | |-- net.4.bias : [1] [skrl:INFO] |-- source: 9 items [skrl:INFO] | |-- a2c_network.sigma : [1] [skrl:INFO] | |-- a2c_network.actor_mlp.0.weight : [32, 4] [skrl:INFO] | |-- a2c_network.actor_mlp.0.bias : [32] [skrl:INFO] | |-- a2c_network.actor_mlp.2.weight : [32, 32] [skrl:INFO] | |-- a2c_network.actor_mlp.2.bias : [32] [skrl:INFO] | |-- a2c_network.value.weight : [1, 32] [skrl:INFO] | |-- a2c_network.value.bias : [1] [skrl:INFO] | |-- a2c_network.mu.weight : [1, 32] [skrl:INFO] | |-- a2c_network.mu.bias : [1] [skrl:INFO] Migration [skrl:INFO] |-- auto: log_std_parameter <- a2c_network.sigma [skrl:INFO] |-- auto: net.0.weight <- a2c_network.actor_mlp.0.weight [skrl:INFO] |-- map: net.0.bias <- a2c_network.actor_mlp.0.bias [skrl:INFO] |-- auto: net.2.weight <- a2c_network.actor_mlp.2.weight [skrl:INFO] |-- map: net.2.bias <- a2c_network.actor_mlp.2.bias [skrl:INFO] |-- map: net.4.weight <- 
a2c_network.mu.weight [skrl:INFO] |-- map: net.4.bias <- a2c_network.mu.bias [skrl:INFO] Model: value (Value) [skrl:INFO] Models [skrl:INFO] |-- current: 6 items [skrl:INFO] | |-- net.0.weight : [32, 4] [skrl:INFO] | |-- net.0.bias : [32] [skrl:INFO] | |-- net.2.weight : [32, 32] [skrl:INFO] | |-- net.2.bias : [32] [skrl:INFO] | |-- net.4.weight : [1, 32] [skrl:INFO] | |-- net.4.bias : [1] [skrl:INFO] |-- source: 9 items [skrl:INFO] | |-- a2c_network.sigma : [1] [skrl:INFO] | |-- a2c_network.actor_mlp.0.weight : [32, 4] [skrl:INFO] | |-- a2c_network.actor_mlp.0.bias : [32] [skrl:INFO] | |-- a2c_network.actor_mlp.2.weight : [32, 32] [skrl:INFO] | |-- a2c_network.actor_mlp.2.bias : [32] [skrl:INFO] | |-- a2c_network.value.weight : [1, 32] [skrl:INFO] | |-- a2c_network.value.bias : [1] [skrl:INFO] | |-- a2c_network.mu.weight : [1, 32] [skrl:INFO] | |-- a2c_network.mu.bias : [1] [skrl:INFO] Migration [skrl:INFO] |-- auto: net.0.weight <- a2c_network.actor_mlp.0.weight [skrl:INFO] |-- map: net.0.bias <- a2c_network.actor_mlp.0.bias [skrl:INFO] |-- auto: net.2.weight <- a2c_network.actor_mlp.2.weight [skrl:INFO] |-- map: net.2.bias <- a2c_network.actor_mlp.2.bias [skrl:INFO] |-- map: net.4.weight <- a2c_network.value.weight [skrl:INFO] |-- map: net.4.bias <- a2c_network.value.bias True """ # load state_dict from path if path is not None: # rl_games checkpoint if path.endswith(".pt") or path.endswith(".pth"): checkpoint = torch.load(path, map_location=self.device) else: raise ValueError("Cannot identify file type") # show modules if verbose: logger.info("Modules") logger.info(" |-- current") for name, module in self.checkpoint_modules.items(): logger.info(f" | |-- {name} ({type(module).__name__})") if hasattr(module, "state_dict"): for k, v in module.state_dict().items(): if hasattr(v, "shape"): logger.info(f" | | |-- {k} : {list(v.shape)}") else: logger.info(f" | | |-- {k} ({type(v).__name__})") logger.info(" |-- source") for name, module in checkpoint.items(): logger.info(f" | |-- {name} ({type(module).__name__})") if name == "model": for k, v in module.items(): logger.info(f" | | |-- {k} : {list(v.shape)}") else: if hasattr(module, "state_dict"): for k, v in module.state_dict().items(): if hasattr(v, "shape"): logger.info(f" | | |-- {k} : {list(v.shape)}") else: logger.info(f" | | |-- {k} ({type(v).__name__})") logger.info("Migration") if "optimizer" in self.checkpoint_modules: # loaded state dict contains a parameter group that doesn't match the size of optimizer's group # self.checkpoint_modules["optimizer"].load_state_dict(checkpoint["optimizer"]) pass # state_preprocessor if "state_preprocessor" in self.checkpoint_modules: if "running_mean_std.running_mean" in checkpoint["model"]: state_dict = copy.deepcopy(self.checkpoint_modules["state_preprocessor"].state_dict()) state_dict["running_mean"] = checkpoint["model"]["running_mean_std.running_mean"] state_dict["running_variance"] = checkpoint["model"]["running_mean_std.running_var"] state_dict["current_count"] = checkpoint["model"]["running_mean_std.count"] self.checkpoint_modules["state_preprocessor"].load_state_dict(state_dict) del checkpoint["model"]["running_mean_std.running_mean"] del checkpoint["model"]["running_mean_std.running_var"] del checkpoint["model"]["running_mean_std.count"] # value_preprocessor if "value_preprocessor" in self.checkpoint_modules: if "value_mean_std.running_mean" in checkpoint["model"]: state_dict = copy.deepcopy(self.checkpoint_modules["value_preprocessor"].state_dict()) state_dict["running_mean"] = 
checkpoint["model"]["value_mean_std.running_mean"] state_dict["running_variance"] = checkpoint["model"]["value_mean_std.running_var"] state_dict["current_count"] = checkpoint["model"]["value_mean_std.count"] self.checkpoint_modules["value_preprocessor"].load_state_dict(state_dict) del checkpoint["model"]["value_mean_std.running_mean"] del checkpoint["model"]["value_mean_std.running_var"] del checkpoint["model"]["value_mean_std.count"] # TODO: AMP state preprocessor # model status = True for name, module in self.checkpoint_modules.items(): if module not in ["state_preprocessor", "value_preprocessor", "optimizer"] and hasattr(module, "migrate"): if verbose: logger.info(f"Model: {name} ({type(module).__name__})") status *= module.migrate(state_dict=checkpoint["model"], name_map=name_map.get(name, {}), auto_mapping=auto_mapping, verbose=verbose) self.set_mode("eval") return bool(status) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ timestep += 1 # update best models and write checkpoints if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval: # update best models reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31)) if reward > self.checkpoint_best_modules["reward"]: self.checkpoint_best_modules["timestep"] = timestep self.checkpoint_best_modules["reward"] = reward self.checkpoint_best_modules["saved"] = False self.checkpoint_best_modules["modules"] = {k: copy.deepcopy(self._get_internal_value(v)) for k, v in self.checkpoint_modules.items()} # write checkpoints self.write_checkpoint(timestep, timesteps) # write to tensorboard if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval: self.write_tracking_data(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes """ raise NotImplementedError
Toni-SM/skrl/skrl/agents/torch/__init__.py
from skrl.agents.torch.base import Agent
Toni-SM/skrl/skrl/agents/torch/trpo/__init__.py
from skrl.agents.torch.trpo.trpo import TRPO, TRPO_DEFAULT_CONFIG
from skrl.agents.torch.trpo.trpo_rnn import TRPO_RNN
Toni-SM/skrl/skrl/agents/torch/trpo/trpo.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.convert_parameters import parameters_to_vector, vector_to_parameters from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] TRPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "value_learning_rate": 1e-3, # value learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "value_loss_scale": 1.0, # value loss scaling factor "damping": 0.1, # damping coefficient for computing the Hessian-vector product "max_kl_divergence": 0.01, # maximum KL divergence between old and new policy "conjugate_gradient_steps": 10, # maximum number of iterations for the conjugate gradient algorithm "max_backtrack_steps": 10, # maximum number of backtracking steps during line search "accept_ratio": 0.5, # accept ratio for the line search loss improvement "step_fraction": 1.0, # fraction of the step size for the line search "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class TRPO(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Trust Region Policy Optimization (TRPO) https://arxiv.org/abs/1502.05477 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(TRPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) self.backup_policy = copy.deepcopy(self.policy) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._value_loss_scale = self.cfg["value_loss_scale"] self._max_kl_divergence = self.cfg["max_kl_divergence"] self._damping = self.cfg["damping"] self._conjugate_gradient_steps = self.cfg["conjugate_gradient_steps"] self._max_backtrack_steps = self.cfg["max_backtrack_steps"] self._accept_ratio = self.cfg["accept_ratio"] self._step_fraction = self.cfg["step_fraction"] self._value_learning_rate = self.cfg["value_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: self.value_optimizer = torch.optim.Adam(self.value.parameters(), lr=self._value_learning_rate) if self._learning_rate_scheduler is not None: self.value_scheduler = self._learning_rate_scheduler(self.value_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["value_optimizer"] = self.value_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ 
super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) self._tensors_names_policy = ["states", "actions", "log_prob", "advantages"] self._tensors_names_value = ["states", "returns"] # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions # TODO: fix for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, 
truncated=truncated, log_prob=self._current_log_prob, values=values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages def surrogate_loss(policy: Model, states: torch.Tensor, actions: torch.Tensor, log_prob: torch.Tensor, advantages: torch.Tensor) -> torch.Tensor: """Compute the surrogate objective (policy loss) :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param actions: Actions :type actions: torch.Tensor :param log_prob: Log probability :type log_prob: torch.Tensor :param advantages: Advantages :type advantages: torch.Tensor :return: Surrogate loss :rtype: torch.Tensor """ _, new_log_prob, _ = policy.act({"states": states, "taken_actions": actions}, role="policy") return (advantages * torch.exp(new_log_prob - log_prob.detach())).mean() def conjugate_gradient(policy: Model, states: torch.Tensor, b: torch.Tensor, num_iterations: float = 10, residual_tolerance: float = 1e-10) -> torch.Tensor: """Conjugate gradient algorithm to solve Ax = b using the iterative method 
https://en.wikipedia.org/wiki/Conjugate_gradient_method#As_an_iterative_method :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param b: Vector b :type b: torch.Tensor :param num_iterations: Number of iterations (default: ``10``) :type num_iterations: float, optional :param residual_tolerance: Residual tolerance (default: ``1e-10``) :type residual_tolerance: float, optional :return: Conjugate vector :rtype: torch.Tensor """ x = torch.zeros_like(b) r = b.clone() p = b.clone() rr_old = torch.dot(r, r) for _ in range(num_iterations): hv = fisher_vector_product(policy, states, p, damping=self._damping) alpha = rr_old / torch.dot(p, hv) x += alpha * p r -= alpha * hv rr_new = torch.dot(r, r) if rr_new < residual_tolerance: break p = r + rr_new / rr_old * p rr_old = rr_new return x def fisher_vector_product(policy: Model, states: torch.Tensor, vector: torch.Tensor, damping: float = 0.1) -> torch.Tensor: """Compute the Fisher vector product (direct method) https://www.telesens.co/2018/06/09/efficiently-computing-the-fisher-vector-product-in-trpo/ :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param vector: Vector :type vector: torch.Tensor :param damping: Damping (default: ``0.1``) :type damping: float, optional :return: Hessian vector product :rtype: torch.Tensor """ kl = kl_divergence(policy, policy, states) kl_gradient = torch.autograd.grad(kl, policy.parameters(), create_graph=True) flat_kl_gradient = torch.cat([gradient.view(-1) for gradient in kl_gradient]) hessian_vector_gradient = torch.autograd.grad((flat_kl_gradient * vector).sum(), policy.parameters()) flat_hessian_vector_gradient = torch.cat([gradient.contiguous().view(-1) for gradient in hessian_vector_gradient]) return flat_hessian_vector_gradient + damping * vector def kl_divergence(policy_1: Model, policy_2: Model, states: torch.Tensor) -> torch.Tensor: """Compute the KL divergence between two distributions https://en.wikipedia.org/wiki/Normal_distribution#Other_properties :param policy_1: First policy :type policy_1: Model :param policy_2: Second policy :type policy_2: Model :param states: States :type states: torch.Tensor :return: KL divergence :rtype: torch.Tensor """ mu_1 = policy_1.act({"states": states}, role="policy")[2]["mean_actions"] logstd_1 = policy_1.get_log_std(role="policy") mu_1, logstd_1 = mu_1.detach(), logstd_1.detach() mu_2 = policy_2.act({"states": states}, role="policy")[2]["mean_actions"] logstd_2 = policy_2.get_log_std(role="policy") kl = logstd_1 - logstd_2 + 0.5 * (torch.square(logstd_1.exp()) + torch.square(mu_1 - mu_2)) \ / torch.square(logstd_2.exp()) - 0.5 return torch.sum(kl, dim=-1).mean() # compute returns and advantages with torch.no_grad(): self.value.train(False) last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float())}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample all from 
memory sampled_states, sampled_actions, sampled_log_prob, sampled_advantages \ = self.memory.sample_all(names=self._tensors_names_policy, mini_batches=1)[0] sampled_states = self._state_preprocessor(sampled_states, train=True) # compute policy loss gradient policy_loss = surrogate_loss(self.policy, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages) policy_loss_gradient = torch.autograd.grad(policy_loss, self.policy.parameters()) flat_policy_loss_gradient = torch.cat([gradient.view(-1) for gradient in policy_loss_gradient]) # compute the search direction using the conjugate gradient algorithm search_direction = conjugate_gradient(self.policy, sampled_states, flat_policy_loss_gradient.data, num_iterations=self._conjugate_gradient_steps) # compute step size and full step xHx = (search_direction * fisher_vector_product(self.policy, sampled_states, search_direction, self._damping)) \ .sum(0, keepdim=True) step_size = torch.sqrt(2 * self._max_kl_divergence / xHx)[0] full_step = step_size * search_direction # backtracking line search restore_policy_flag = True self.backup_policy.update_parameters(self.policy) params = parameters_to_vector(self.policy.parameters()) expected_improvement = (flat_policy_loss_gradient * full_step).sum(0, keepdim=True) for alpha in [self._step_fraction * 0.5 ** i for i in range(self._max_backtrack_steps)]: new_params = params + alpha * full_step vector_to_parameters(new_params, self.policy.parameters()) expected_improvement *= alpha kl = kl_divergence(self.backup_policy, self.policy, sampled_states) loss = surrogate_loss(self.policy, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages) if kl < self._max_kl_divergence and (loss - policy_loss) / expected_improvement > self._accept_ratio: restore_policy_flag = False break if restore_policy_flag: self.policy.update_parameters(self.backup_policy) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names_value, mini_batches=self._mini_batches) cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): # mini-batches loop for sampled_states, sampled_returns in sampled_batches: sampled_states = self._state_preprocessor(sampled_states, train=not epoch) # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states}, role="value") value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # optimization step (value) self.value_optimizer.zero_grad() value_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.value.parameters(), self._grad_norm_clip) self.value_optimizer.step() # update cumulative losses cumulative_value_loss += value_loss.item() # update learning rate if self._learning_rate_scheduler: self.value_scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Value learning rate", self.value_scheduler.get_last_lr()[0])
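# --- Illustrative sketch (not part of the library source) ---
# The conjugate_gradient() helper above solves H x = g where H is only available
# through fisher_vector_product(), i.e. as Hessian-vector products of the KL
# divergence. A minimal standalone version of the same iteration, with the
# matrix-free product replaced by an explicit symmetric positive-definite matrix
# (function name and numbers are illustrative, not skrl API):
import torch

def _cg_sketch(A: torch.Tensor, b: torch.Tensor,
               num_iterations: int = 10, residual_tolerance: float = 1e-10) -> torch.Tensor:
    x = torch.zeros_like(b)
    r = b.clone()                      # residual b - A x, with x initialized to zero
    p = b.clone()                      # search direction
    rr_old = torch.dot(r, r)
    for _ in range(num_iterations):
        Ap = A @ p                     # stands in for the Fisher-vector product
        alpha = rr_old / torch.dot(p, Ap)
        x += alpha * p
        r -= alpha * Ap
        rr_new = torch.dot(r, r)
        if rr_new < residual_tolerance:
            break
        p = r + (rr_new / rr_old) * p
        rr_old = rr_new
    return x

A = torch.tensor([[4.0, 1.0], [1.0, 3.0]])
b = torch.tensor([1.0, 2.0])
x = _cg_sketch(A, b)
assert torch.allclose(A @ x, b, atol=1e-5)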
Toni-SM/skrl/skrl/agents/torch/trpo/trpo_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.utils.convert_parameters import parameters_to_vector, vector_to_parameters from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] TRPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "value_learning_rate": 1e-3, # value learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "value_loss_scale": 1.0, # value loss scaling factor "damping": 0.1, # damping coefficient for computing the Hessian-vector product "max_kl_divergence": 0.01, # maximum KL divergence between old and new policy "conjugate_gradient_steps": 10, # maximum number of iterations for the conjugate gradient algorithm "max_backtrack_steps": 10, # maximum number of backtracking steps during line search "accept_ratio": 0.5, # accept ratio for the line search loss improvement "step_fraction": 1.0, # fraction of the step size for the line search "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class TRPO_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Trust Region Policy Optimization (TRPO) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1502.05477 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(TRPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) self.backup_policy = copy.deepcopy(self.policy) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._value_loss_scale = self.cfg["value_loss_scale"] self._max_kl_divergence = self.cfg["max_kl_divergence"] self._damping = self.cfg["damping"] self._conjugate_gradient_steps = self.cfg["conjugate_gradient_steps"] self._max_backtrack_steps = self.cfg["max_backtrack_steps"] self._accept_ratio = self.cfg["accept_ratio"] self._step_fraction = self.cfg["step_fraction"] self._value_learning_rate = self.cfg["value_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: self.value_optimizer = torch.optim.Adam(self.value.parameters(), lr=self._value_learning_rate) if self._learning_rate_scheduler is not None: self.value_scheduler = self._learning_rate_scheduler(self.value_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["value_optimizer"] = self.value_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ 
super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) self._tensors_names_policy = ["states", "actions", "terminated", "log_prob", "advantages"] self._tensors_names_value = ["states", "terminated", "returns"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": [], "value": []} self._rnn_initial_states = {"policy": [], "value": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # value if self.value is not None: if self.policy is self.value: self._rnn_initial_states["value"] = self._rnn_initial_states["policy"] else: for i, size in enumerate(self.value.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_value_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_value_{i}") # default RNN states self._rnn_initial_states["value"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions # TODO: fix for stochasticity, rnn and log_prob if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") self._current_log_prob = log_prob if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, 
timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} values, _, outputs = self.value.act({"states": self._state_preprocessor(states), **rnn}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) if self.policy is not self.value: rnn_states.update({f"rnn_value_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["value"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) # update RNN states if self._rnn: self._rnn_final_states["value"] = self._rnn_final_states["policy"] if self.policy is self.value else outputs.get("rnn", []) # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 if self.policy is not self.value: for rnn_state in self._rnn_final_states["value"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) 
self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages def surrogate_loss(policy: Model, states: torch.Tensor, actions: torch.Tensor, log_prob: torch.Tensor, advantages: torch.Tensor) -> torch.Tensor: """Compute the surrogate objective (policy loss) :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param actions: Actions :type actions: torch.Tensor :param log_prob: Log probability :type log_prob: torch.Tensor :param advantages: Advantages :type advantages: torch.Tensor :return: Surrogate loss :rtype: torch.Tensor """ _, new_log_prob, _ = policy.act({"states": states, "taken_actions": actions, **rnn_policy}, role="policy") return (advantages * torch.exp(new_log_prob - log_prob.detach())).mean() def conjugate_gradient(policy: Model, states: torch.Tensor, b: torch.Tensor, num_iterations: float = 10, residual_tolerance: float = 1e-10) -> torch.Tensor: """Conjugate gradient algorithm to solve Ax = b using the iterative method https://en.wikipedia.org/wiki/Conjugate_gradient_method#As_an_iterative_method :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param b: Vector b :type b: torch.Tensor :param num_iterations: Number of iterations (default: ``10``) :type num_iterations: float, optional :param residual_tolerance: Residual tolerance (default: ``1e-10``) :type residual_tolerance: float, optional :return: Conjugate vector :rtype: torch.Tensor """ x = torch.zeros_like(b) r = b.clone() p = b.clone() rr_old = torch.dot(r, r) for _ in range(num_iterations): hv = fisher_vector_product(policy, states, p, damping=self._damping) alpha = rr_old / torch.dot(p, hv) x += alpha * p r -= alpha * hv rr_new = torch.dot(r, r) if rr_new < residual_tolerance: break p = r + rr_new / rr_old * p rr_old = rr_new return x def fisher_vector_product(policy: Model, states: torch.Tensor, vector: torch.Tensor, damping: float = 0.1) -> torch.Tensor: """Compute the Fisher 
vector product (direct method) https://www.telesens.co/2018/06/09/efficiently-computing-the-fisher-vector-product-in-trpo/ :param policy: Policy :type policy: Model :param states: States :type states: torch.Tensor :param vector: Vector :type vector: torch.Tensor :param damping: Damping (default: ``0.1``) :type damping: float, optional :return: Hessian vector product :rtype: torch.Tensor """ kl = kl_divergence(policy, policy, states) kl_gradient = torch.autograd.grad(kl, policy.parameters(), create_graph=True) flat_kl_gradient = torch.cat([gradient.view(-1) for gradient in kl_gradient]) hessian_vector_gradient = torch.autograd.grad((flat_kl_gradient * vector).sum(), policy.parameters()) flat_hessian_vector_gradient = torch.cat([gradient.contiguous().view(-1) for gradient in hessian_vector_gradient]) return flat_hessian_vector_gradient + damping * vector def kl_divergence(policy_1: Model, policy_2: Model, states: torch.Tensor) -> torch.Tensor: """Compute the KL divergence between two distributions https://en.wikipedia.org/wiki/Normal_distribution#Other_properties :param policy_1: First policy :type policy_1: Model :param policy_2: Second policy :type policy_2: Model :param states: States :type states: torch.Tensor :return: KL divergence :rtype: torch.Tensor """ mu_1 = policy_1.act({"states": states, **rnn_policy}, role="policy")[2]["mean_actions"] logstd_1 = policy_1.get_log_std(role="policy") mu_1, logstd_1 = mu_1.detach(), logstd_1.detach() with torch.backends.cudnn.flags(enabled=not self._rnn): mu_2 = policy_2.act({"states": states, **rnn_policy}, role="policy")[2]["mean_actions"] logstd_2 = policy_2.get_log_std(role="policy") kl = logstd_1 - logstd_2 + 0.5 * (torch.square(logstd_1.exp()) + torch.square(mu_1 - mu_2)) \ / torch.square(logstd_2.exp()) - 0.5 return torch.sum(kl, dim=-1).mean() # compute returns and advantages with torch.no_grad(): self.value.train(False) rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), **rnn}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample all from memory sampled_states, sampled_actions, sampled_dones, sampled_log_prob, sampled_advantages \ = self.memory.sample_all(names=self._tensors_names_policy, mini_batches=1, sequence_length=self._rnn_sequence_length)[0] sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=1, sequence_length=self._rnn_sequence_length)[0] rnn_policy = {} if self._rnn: if self.policy is self.value: rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches], "terminated": sampled_dones} else: rnn_policy = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches, self._rnn_tensors_names) if "policy" in n], "terminated": sampled_dones} sampled_states = self._state_preprocessor(sampled_states, train=True) # compute policy loss gradient policy_loss = surrogate_loss(self.policy, 
sampled_states, sampled_actions, sampled_log_prob, sampled_advantages) policy_loss_gradient = torch.autograd.grad(policy_loss, self.policy.parameters()) flat_policy_loss_gradient = torch.cat([gradient.view(-1) for gradient in policy_loss_gradient]) # compute the search direction using the conjugate gradient algorithm search_direction = conjugate_gradient(self.policy, sampled_states, flat_policy_loss_gradient.data, num_iterations=self._conjugate_gradient_steps) # compute step size and full step xHx = (search_direction * fisher_vector_product(self.policy, sampled_states, search_direction, self._damping)) \ .sum(0, keepdim=True) step_size = torch.sqrt(2 * self._max_kl_divergence / xHx)[0] full_step = step_size * search_direction # backtracking line search restore_policy_flag = True self.backup_policy.update_parameters(self.policy) params = parameters_to_vector(self.policy.parameters()) expected_improvement = (flat_policy_loss_gradient * full_step).sum(0, keepdim=True) for alpha in [self._step_fraction * 0.5 ** i for i in range(self._max_backtrack_steps)]: new_params = params + alpha * full_step vector_to_parameters(new_params, self.policy.parameters()) expected_improvement *= alpha kl = kl_divergence(self.backup_policy, self.policy, sampled_states) loss = surrogate_loss(self.policy, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages) if kl < self._max_kl_divergence and (loss - policy_loss) / expected_improvement > self._accept_ratio: restore_policy_flag = False break if restore_policy_flag: self.policy.update_parameters(self.backup_policy) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names_value, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) rnn_value = {} if self._rnn: sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): # mini-batches loop for i, (sampled_states, sampled_dones, sampled_returns) in enumerate(sampled_batches): if self._rnn: if self.policy is self.value: rnn_value = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches[i]], "terminated": sampled_dones} else: rnn_value = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "value" in n], "terminated": sampled_dones} sampled_states = self._state_preprocessor(sampled_states, train=not epoch) # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states, **rnn_value}, role="value") value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # optimization step (value) self.value_optimizer.zero_grad() value_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.value.parameters(), self._grad_norm_clip) self.value_optimizer.step() # update cumulative losses cumulative_value_loss += value_loss.item() # update learning rate if self._learning_rate_scheduler: self.value_scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Value learning rate", self.value_scheduler.get_last_lr()[0])
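# --- Illustrative sketch (not part of the library source) ---
# compute_gae(), shared by the TRPO and TRPO_RNN agents above, folds the TD error
# and the recursive GAE term into a single expression:
#   A_i = r_i - V_i + gamma * (1 - done_i) * (V_{i+1} + lambda * A_{i+1})
# and then derives value targets as returns = advantages + values before normalizing
# the advantages. A standalone sketch on a toy single-environment rollout
# (all numbers arbitrary; names are illustrative, not skrl API):
import torch

def _gae_sketch(rewards, dones, values, last_value, gamma=0.99, lam=0.95):
    advantage = 0.0
    advantages = torch.zeros_like(rewards)
    not_dones = dones.logical_not()
    for i in reversed(range(rewards.shape[0])):
        next_value = values[i + 1] if i < rewards.shape[0] - 1 else last_value
        advantage = rewards[i] - values[i] + gamma * not_dones[i] * (next_value + lam * advantage)
        advantages[i] = advantage
    returns = advantages + values                                   # value-function targets
    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
    return returns, advantages

rewards = torch.tensor([1.0, 1.0, 1.0])
dones = torch.tensor([False, False, True])                          # episode ends at the last step
values = torch.tensor([0.5, 0.4, 0.3])
returns, advantages = _gae_sketch(rewards, dones, values, last_value=torch.tensor(0.0))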
Toni-SM/skrl/skrl/agents/torch/q_learning/__init__.py
from skrl.agents.torch.q_learning.q_learning import Q_LEARNING, Q_LEARNING_DEFAULT_CONFIG
Toni-SM/skrl/skrl/agents/torch/q_learning/q_learning.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] Q_LEARNING_DEFAULT_CONFIG = { "discount_factor": 0.99, # discount factor (gamma) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "learning_rate": 0.5, # learning rate (alpha) "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class Q_LEARNING(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Q-learning https://www.academia.edu/3294050/Learning_from_delayed_rewards :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(Q_LEARNING_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy # configuration self._discount_factor = self.cfg["discount_factor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._learning_rate = self.cfg["learning_rate"] self._rewards_shaper = self.cfg["rewards_shaper"] # create temporary variables needed for storage and computation self._current_states = None self._current_actions = None self._current_rewards = None self._current_next_states = None self._current_dones = None def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": states}, role="policy") # sample actions from policy return self.policy.act({"states": states}, role="policy") def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self._current_states = states self._current_actions = actions self._current_rewards = rewards self._current_next_states = next_states self._current_dones = terminated + truncated if self.memory is not None: self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, 
                                   rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated)

    def pre_interaction(self, timestep: int, timesteps: int) -> None:
        """Callback called before the interaction with the environment

        :param timestep: Current timestep
        :type timestep: int
        :param timesteps: Number of timesteps
        :type timesteps: int
        """
        pass

    def post_interaction(self, timestep: int, timesteps: int) -> None:
        """Callback called after the interaction with the environment

        :param timestep: Current timestep
        :type timestep: int
        :param timesteps: Number of timesteps
        :type timesteps: int
        """
        if timestep >= self._learning_starts:
            self._update(timestep, timesteps)

        # write tracking data and checkpoints
        super().post_interaction(timestep, timesteps)

    def _update(self, timestep: int, timesteps: int) -> None:
        """Algorithm's main update step

        :param timestep: Current timestep
        :type timestep: int
        :param timesteps: Number of timesteps
        :type timesteps: int
        """
        q_table = self.policy.table()
        env_ids = torch.arange(self._current_rewards.shape[0]).view(-1, 1)

        # compute next actions
        next_actions = torch.argmax(q_table[env_ids, self._current_next_states], dim=-1, keepdim=True).view(-1, 1)

        # update Q-table
        q_table[env_ids, self._current_states, self._current_actions] += self._learning_rate \
            * (self._current_rewards + self._discount_factor * self._current_dones.logical_not() \
            * q_table[env_ids, self._current_next_states, next_actions] \
            - q_table[env_ids, self._current_states, self._current_actions])
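The `_update` step above is the vectorized form of the standard tabular Q-learning rule, Q(s, a) <- Q(s, a) + lr * (r + gamma * max_a' Q(s', a') - Q(s, a)), applied once per stored transition and masked by the done flag. The following is a minimal standalone sketch of that update; the number of environments, state/action counts, and all tensors are hypothetical placeholders, not values taken from the agent.

import torch

# hypothetical sizes: 4 parallel environments, 16 discrete states, 3 discrete actions
num_envs, num_states, num_actions = 4, 16, 3
learning_rate, discount_factor = 0.5, 0.99

q_table = torch.zeros(num_envs, num_states, num_actions)

# one batch of transitions (integer state/action indices, shape (num_envs, 1))
states = torch.randint(0, num_states, (num_envs, 1))
actions = torch.randint(0, num_actions, (num_envs, 1))
rewards = torch.rand(num_envs, 1)
next_states = torch.randint(0, num_states, (num_envs, 1))
dones = torch.zeros(num_envs, 1, dtype=torch.bool)

env_ids = torch.arange(num_envs).view(-1, 1)

# greedy bootstrap action for each environment's next state
next_actions = torch.argmax(q_table[env_ids, next_states], dim=-1, keepdim=True).view(-1, 1)

# temporal-difference target, zeroing the bootstrap term for finished episodes
td_target = rewards + discount_factor * dones.logical_not() * q_table[env_ids, next_states, next_actions]
q_table[env_ids, states, actions] += learning_rate * (td_target - q_table[env_ids, states, actions])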
Toni-SM/skrl/skrl/agents/torch/cem/cem.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn.functional as F from skrl import logger from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] CEM_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "percentile": 0.70, # percentile to compute the reward bound [0, 1] "discount_factor": 0.99, # discount factor (gamma) "learning_rate": 1e-2, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class CEM(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Cross-Entropy Method (CEM) https://ieeexplore.ieee.org/abstract/document/6796865/ :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(CEM_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy # configuration: self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._percentile = self.cfg["percentile"] self._discount_factor = self.cfg["discount_factor"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._episode_tracking = [] # set up optimizer and learning rate scheduler if self.policy is not None: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.int64) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ states = self._state_preprocessor(states) # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": states}, role="policy") # sample stochastic actions return self.policy.act({"states": states}, role="policy") def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type 
actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) if self.memory is not None: self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) # track episodes internally if self._rollout: indexes = torch.nonzero(terminated + truncated) if indexes.numel(): for i in indexes[:, 0]: self._episode_tracking[i.item()].append(self._rollout + 1) else: self._episode_tracking = [[0] for _ in range(rewards.size(-1))] def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self._rollout = 0 self._update(timestep, timesteps) # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample all memory sampled_states, sampled_actions, sampled_rewards, _, _ = self.memory.sample_all(names=self.tensors_names)[0] sampled_states = self._state_preprocessor(sampled_states, train=True) with torch.no_grad(): # compute discounted return threshold limits = [] returns = [] for e in range(sampled_rewards.size(-1)): for i, j in zip(self._episode_tracking[e][:-1], self._episode_tracking[e][1:]): limits.append([e + i, e + j]) rewards = sampled_rewards[e + i: e + j] returns.append(torch.sum(rewards * self._discount_factor ** \ torch.arange(rewards.size(0), device=rewards.device).flip(-1).view(rewards.size()))) if not len(returns): logger.warning("No returns to update. 
Consider increasing the number of rollouts") return returns = torch.tensor(returns) return_threshold = torch.quantile(returns, self._percentile, dim=-1) # get elite states and actions indexes = torch.nonzero(returns >= return_threshold) elite_states = torch.cat([sampled_states[limits[i][0]:limits[i][1]] for i in indexes[:, 0]], dim=0) elite_actions = torch.cat([sampled_actions[limits[i][0]:limits[i][1]] for i in indexes[:, 0]], dim=0) # compute scores for the elite states _, _, outputs = self.policy.act({"states": elite_states}, role="policy") scores = outputs["net_output"] # compute policy loss policy_loss = F.cross_entropy(scores, elite_actions.view(-1)) # optimization step self.optimizer.zero_grad() policy_loss.backward() self.optimizer.step() # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Coefficient / Return threshold", return_threshold.item()) self.track_data("Coefficient / Mean discounted returns", torch.mean(returns).item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
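CEM, as implemented above, keeps only the "elite" episodes, those whose discounted return is at or above the configured percentile, and fits the policy to their actions with a cross-entropy (behavioral-cloning style) loss. The selection logic can be sketched in isolation as follows; the returns, episode data, network, and percentile are hypothetical placeholders, not values from a real run.

import torch
import torch.nn.functional as F

percentile = 0.70

# hypothetical discounted returns, one per finished episode
returns = torch.tensor([1.5, 0.2, 3.1, 2.4, 0.8])
return_threshold = torch.quantile(returns, percentile)

# indices of elite episodes (return >= threshold)
elite_episodes = torch.nonzero(returns >= return_threshold).squeeze(-1).tolist()

# hypothetical per-episode data: 10 steps, 4-dim observations, 3 discrete actions
episode_states = [torch.randn(10, 4) for _ in returns]
episode_actions = [torch.randint(0, 3, (10,)) for _ in returns]

elite_states = torch.cat([episode_states[i] for i in elite_episodes], dim=0)
elite_actions = torch.cat([episode_actions[i] for i in elite_episodes], dim=0)

# a stand-in policy network producing unnormalized action scores (logits)
policy = torch.nn.Linear(4, 3)
scores = policy(elite_states)

# fit the policy to the actions taken in the elite episodes
policy_loss = F.cross_entropy(scores, elite_actions)
policy_loss.backward()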
Toni-SM/skrl/skrl/agents/torch/cem/__init__.py
from skrl.agents.torch.cem.cem import CEM, CEM_DEFAULT_CONFIG

Toni-SM/skrl/skrl/agents/torch/sac/__init__.py
from skrl.agents.torch.sac.sac import SAC, SAC_DEFAULT_CONFIG
from skrl.agents.torch.sac.sac_rnn import SAC_RNN
Toni-SM/skrl/skrl/agents/torch/sac/sac.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] SAC_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "learn_entropy": True, # learn entropy "entropy_learning_rate": 1e-3, # entropy learning rate "initial_entropy_value": 0.2, # initial entropy value "target_entropy": None, # target entropy "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "base_directory": "", # base directory for the experiment "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class SAC(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Soft Actor-Critic (SAC) https://arxiv.org/abs/1801.01290 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(SAC_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 if self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_learning_rate = self.cfg["entropy_learning_rate"] self._learn_entropy = self.cfg["learn_entropy"] self._entropy_coefficient = self.cfg["initial_entropy_value"] self._rewards_shaper = self.cfg["rewards_shaper"] # entropy if self._learn_entropy: self._target_entropy = self.cfg["target_entropy"] if self._target_entropy is None: if issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box): self._target_entropy = -np.prod(self.action_space.shape).astype(np.float32) elif issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete): self._target_entropy = -self.action_space.n else: self._target_entropy = 0 self.log_entropy_coefficient = torch.log(torch.ones(1, device=self.device) * self._entropy_coefficient).requires_grad_(True) self.entropy_optimizer = torch.optim.Adam([self.log_entropy_coefficient], lr=self._entropy_learning_rate) self.checkpoint_modules["entropy_optimizer"] = self.entropy_optimizer # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = 
self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, 
terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_actions, next_log_prob, _ = self.policy.act({"states": sampled_next_states}, role="policy") target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_2") target_q_values = torch.min(target_q1_values, target_q2_values) - self._entropy_coefficient * next_log_prob target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic_2") critic_loss = (F.mse_loss(critic_1_values, target_values) + F.mse_loss(critic_2_values, target_values)) / 2 # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), self._grad_norm_clip) self.critic_optimizer.step() # compute policy (actor) loss actions, log_prob, _ = self.policy.act({"states": sampled_states}, role="policy") critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": actions}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": actions}, role="critic_2") policy_loss = (self._entropy_coefficient * log_prob - torch.min(critic_1_values, critic_2_values)).mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # entropy learning if self._learn_entropy: # compute entropy loss entropy_loss = 
-(self.log_entropy_coefficient * (log_prob + self._target_entropy).detach()).mean() # optimization step (entropy) self.entropy_optimizer.zero_grad() entropy_loss.backward() self.entropy_optimizer.step() # compute entropy coefficient self._entropy_coefficient = torch.exp(self.log_entropy_coefficient.detach()) # update target networks self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data if self.write_interval > 0: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_1_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_1_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_1_values).item()) self.track_data("Q-network / Q2 (max)", torch.max(critic_2_values).item()) self.track_data("Q-network / Q2 (min)", torch.min(critic_2_values).item()) self.track_data("Q-network / Q2 (mean)", torch.mean(critic_2_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learn_entropy: self.track_data("Loss / Entropy loss", entropy_loss.item()) self.track_data("Coefficient / Entropy coefficient", self._entropy_coefficient.item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
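The heart of the `_update` method above is the soft Bellman target: the twin target critics are evaluated at the policy's next actions, the minimum of the two estimates is taken, and the entropy coefficient times the next-action log-probability is subtracted before discounting. Here is a minimal sketch of that target computation under torch.no_grad(); every tensor is a hypothetical placeholder for what the agent would obtain from its memory and models.

import torch

batch_size = 64
discount_factor, entropy_coefficient = 0.99, 0.2

# hypothetical batch quantities (in the agent these come from the replay memory and the target critics)
rewards = torch.rand(batch_size, 1)
dones = torch.zeros(batch_size, 1, dtype=torch.bool)
target_q1_values = torch.randn(batch_size, 1)
target_q2_values = torch.randn(batch_size, 1)
next_log_prob = torch.randn(batch_size, 1)

with torch.no_grad():
    # clipped double-Q estimate with the entropy bonus folded in
    target_q_values = torch.min(target_q1_values, target_q2_values) - entropy_coefficient * next_log_prob
    # bootstrap only for transitions that did not terminate
    target_values = rewards + discount_factor * dones.logical_not() * target_q_values

When entropy learning is enabled, the coefficient itself is the exponential of a learned log-coefficient, updated by minimizing -(log alpha) * (log_prob + target_entropy).detach(), which pushes the policy's entropy toward the configured (or automatically derived) target.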
Toni-SM/skrl/skrl/agents/torch/sac/sac_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] SAC_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "learn_entropy": True, # learn entropy "entropy_learning_rate": 1e-3, # entropy learning rate "initial_entropy_value": 0.2, # initial entropy value "target_entropy": None, # target entropy "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "base_directory": "", # base directory for the experiment "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class SAC_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Soft Actor-Critic (SAC) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1801.01290 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(SAC_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 if self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_learning_rate = self.cfg["entropy_learning_rate"] self._learn_entropy = self.cfg["learn_entropy"] self._entropy_coefficient = self.cfg["initial_entropy_value"] self._rewards_shaper = self.cfg["rewards_shaper"] # entropy if self._learn_entropy: self._target_entropy = self.cfg["target_entropy"] if self._target_entropy is None: if issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box): self._target_entropy = -np.prod(self.action_space.shape).astype(np.float32) elif issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete): self._target_entropy = -self.action_space.n else: self._target_entropy = 0 self.log_entropy_coefficient = torch.log(torch.ones(1, device=self.device) * self._entropy_coefficient).requires_grad_(True) self.entropy_optimizer = torch.optim.Adam([self.log_entropy_coefficient], lr=self._entropy_learning_rate) self.checkpoint_modules["entropy_optimizer"] = self.entropy_optimizer # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = 
self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": []} self._rnn_initial_states = {"policy": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample stochastic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type 
rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) # update RNN states if self._rnn: # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size, sequence_length=self._rnn_sequence_length)[0] rnn_policy = {} if self._rnn: sampled_rnn = self.memory.sample_by_index(names=self._rnn_tensors_names, indexes=self.memory.get_sampling_indexes())[0] rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn], "terminated": sampled_dones} # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_actions, next_log_prob, _ = self.policy.act({"states": sampled_next_states, **rnn_policy}, role="policy") target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, 
role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, role="target_critic_2") target_q_values = torch.min(target_q1_values, target_q2_values) - self._entropy_coefficient * next_log_prob target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic_2") critic_loss = (F.mse_loss(critic_1_values, target_values) + F.mse_loss(critic_2_values, target_values)) / 2 # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), self._grad_norm_clip) self.critic_optimizer.step() # compute policy (actor) loss actions, log_prob, _ = self.policy.act({"states": sampled_states, **rnn_policy}, role="policy") critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": actions, **rnn_policy}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": actions, **rnn_policy}, role="critic_2") policy_loss = (self._entropy_coefficient * log_prob - torch.min(critic_1_values, critic_2_values)).mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # entropy learning if self._learn_entropy: # compute entropy loss entropy_loss = -(self.log_entropy_coefficient * (log_prob + self._target_entropy).detach()).mean() # optimization step (entropy) self.entropy_optimizer.zero_grad() entropy_loss.backward() self.entropy_optimizer.step() # compute entropy coefficient self._entropy_coefficient = torch.exp(self.log_entropy_coefficient.detach()) # update target networks self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data if self.write_interval > 0: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_1_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_1_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_1_values).item()) self.track_data("Q-network / Q2 (max)", torch.max(critic_2_values).item()) self.track_data("Q-network / Q2 (min)", torch.min(critic_2_values).item()) self.track_data("Q-network / Q2 (mean)", torch.mean(critic_2_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learn_entropy: self.track_data("Loss / Entropy loss", entropy_loss.item()) self.track_data("Coefficient / Entropy coefficient", self._entropy_coefficient.item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", 
self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
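The RNN variant differs from plain SAC mainly in bookkeeping: per-layer hidden states are allocated from the policy's RNN specification, stored alongside each transition (transposed so the batch dimension comes first), and zeroed for environments whose episodes just terminated before being carried into the next step. A small sketch of that reset logic follows; the shapes (1 layer, 4 environments, hidden size 8) are chosen purely for illustration.

import torch

# hypothetical hidden state with layout (num_layers, num_envs, hidden_size)
rnn_state = torch.randn(1, 4, 8)

# terminated flags for the 4 environments at the current step
terminated = torch.tensor([[False], [True], [False], [True]])

# indices of environments whose episodes just ended
finished_episodes = terminated.nonzero(as_tuple=False)
if finished_episodes.numel():
    # zero the hidden state for those environments so the next episode starts fresh
    rnn_state[:, finished_episodes[:, 0]] = 0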
Toni-SM/skrl/skrl/agents/torch/td3/__init__.py
from skrl.agents.torch.td3.td3 import TD3, TD3_DEFAULT_CONFIG
from skrl.agents.torch.td3.td3_rnn import TD3_RNN
Toni-SM/skrl/skrl/agents/torch/td3/td3.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl import logger from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] TD3_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "policy_delay": 2, # policy delay update with respect to critic update "smooth_regularization_noise": None, # smooth noise for regularization "smooth_regularization_clip": 0.5, # clip for smooth regularization "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class TD3(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Twin Delayed DDPG (TD3) https://arxiv.org/abs/1802.09477 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(TD3_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 if self.target_policy is not None and self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._policy_delay = self.cfg["policy_delay"] self._critic_update_counter = 0 self._smooth_regularization_noise = self.cfg["smooth_regularization_noise"] self._smooth_regularization_clip = self.cfg["smooth_regularization_clip"] if self._smooth_regularization_noise is None: logger.warning("agents:TD3: No smooth regularization noise specified to reduce variance during training") self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, 
**self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # clip noise bounds if self.action_space is not None: self.clip_actions_min = torch.tensor(self.action_space.low, device=self.device) self.clip_actions_max = torch.tensor(self.action_space.high, device=self.device) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample deterministic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") # add exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * (self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale noises.mul_(scale) # modify actions actions.add_(noises) actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", torch.max(noises).item()) self.track_data("Exploration / Exploration noise (min)", torch.min(noises).item()) self.track_data("Exploration / Exploration noise (mean)", torch.mean(noises).item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: 
"""Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) with torch.no_grad(): # target policy smoothing next_actions, _, _ = self.target_policy.act({"states": sampled_next_states}, role="target_policy") if self._smooth_regularization_noise is not None: noises = torch.clamp(self._smooth_regularization_noise.sample(next_actions.shape), min=-self._smooth_regularization_clip, max=self._smooth_regularization_clip) next_actions.add_(noises) next_actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # compute target values target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_2") target_q_values = 
torch.min(target_q1_values, target_q2_values) target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic_2") critic_loss = F.mse_loss(critic_1_values, target_values) + F.mse_loss(critic_2_values, target_values) # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), self._grad_norm_clip) self.critic_optimizer.step() # delayed update self._critic_update_counter += 1 if not self._critic_update_counter % self._policy_delay: # compute policy (actor) loss actions, _, _ = self.policy.act({"states": sampled_states}, role="policy") critic_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": actions}, role="critic_1") policy_loss = -critic_values.mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # update target networks self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) self.target_policy.update_parameters(self.policy, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data if not self._critic_update_counter % self._policy_delay: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_1_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_1_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_1_values).item()) self.track_data("Q-network / Q2 (max)", torch.max(critic_2_values).item()) self.track_data("Q-network / Q2 (min)", torch.min(critic_2_values).item()) self.track_data("Q-network / Q2 (mean)", torch.mean(critic_2_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
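To make the update above easier to follow in isolation, here is a minimal, illustrative sketch of the clipped double-Q target that `_update` computes. Every tensor and hyperparameter value in it is a stand-in chosen for the example, not something taken from the file.

import torch

# assumed batch of transitions (batch_size=4, action_dim=2); random stand-ins only
rewards = torch.tensor([[1.0], [0.5], [0.0], [2.0]])
terminated = torch.tensor([[False], [False], [True], [False]])
next_actions = torch.rand(4, 2) * 2 - 1          # stand-in for target_policy(next_states)

gamma, smooth_std, smooth_clip = 0.99, 0.2, 0.5  # assumed hyperparameters

# target policy smoothing: clipped noise added to the target actions, then re-clipped to the action bounds
noises = torch.clamp(smooth_std * torch.randn_like(next_actions), min=-smooth_clip, max=smooth_clip)
next_actions = torch.clamp(next_actions + noises, min=-1.0, max=1.0)

# the two target critics would be evaluated at (next_states, next_actions); random stand-ins here
target_q1_values = torch.rand(4, 1)
target_q2_values = torch.rand(4, 1)

# clipped double-Q learning: take the minimum and bootstrap only for non-terminal transitions
target_q_values = torch.min(target_q1_values, target_q2_values)
target_values = rewards + gamma * terminated.logical_not() * target_q_values
print(target_values.shape)  # torch.Size([4, 1])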
Toni-SM/skrl/skrl/agents/torch/td3/td3_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl import logger from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] TD3_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "policy_delay": 2, # policy delay update with respect to critic update "smooth_regularization_noise": None, # smooth noise for regularization "smooth_regularization_clip": 0.5, # clip for smooth regularization "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class TD3_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Twin Delayed DDPG (TD3) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1802.09477 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(TD3_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 if self.target_policy is not None and self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._policy_delay = self.cfg["policy_delay"] self._critic_update_counter = 0 self._smooth_regularization_noise = self.cfg["smooth_regularization_noise"] self._smooth_regularization_clip = self.cfg["smooth_regularization_clip"] if self._smooth_regularization_noise is None: logger.warning("agents:TD3: No smooth regularization noise specified to reduce variance during training") self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, 
**self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": []} self._rnn_initial_states = {"policy": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # clip noise bounds if self.action_space is not None: self.clip_actions_min = torch.tensor(self.action_space.low, device=self.device) self.clip_actions_max = torch.tensor(self.action_space.high, device=self.device) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample deterministic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) # add exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * 
(self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale noises.mul_(scale) # modify actions actions.add_(noises) actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", torch.max(noises).item()) self.track_data("Exploration / Exploration noise (min)", torch.min(noises).item()) self.track_data("Exploration / Exploration noise (mean)", torch.mean(noises).item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) # update RNN states if self._rnn: # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints 
super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size, sequence_length=self._rnn_sequence_length)[0] rnn_policy = {} if self._rnn: sampled_rnn = self.memory.sample_by_index(names=self._rnn_tensors_names, indexes=self.memory.get_sampling_indexes())[0] rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn], "terminated": sampled_dones} # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) with torch.no_grad(): # target policy smoothing next_actions, _, _ = self.target_policy.act({"states": sampled_next_states, **rnn_policy}, role="target_policy") if self._smooth_regularization_noise is not None: noises = torch.clamp(self._smooth_regularization_noise.sample(next_actions.shape), min=-self._smooth_regularization_clip, max=self._smooth_regularization_clip) next_actions.add_(noises) next_actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # compute target values target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, role="target_critic_2") target_q_values = torch.min(target_q1_values, target_q2_values) target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_1_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic_1") critic_2_values, _, _ = self.critic_2.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic_2") critic_loss = F.mse_loss(critic_1_values, target_values) + F.mse_loss(critic_2_values, target_values) # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(itertools.chain(self.critic_1.parameters(), self.critic_2.parameters()), self._grad_norm_clip) self.critic_optimizer.step() # delayed update self._critic_update_counter += 1 if not self._critic_update_counter % self._policy_delay: # compute policy (actor) loss actions, _, _ = self.policy.act({"states": sampled_states, **rnn_policy}, role="policy") critic_values, _, _ = self.critic_1.act({"states": sampled_states, "taken_actions": actions, **rnn_policy}, role="critic_1") policy_loss = -critic_values.mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # update target networks self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) self.target_policy.update_parameters(self.policy, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data 
if not self._critic_update_counter % self._policy_delay: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_1_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_1_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_1_values).item()) self.track_data("Q-network / Q2 (max)", torch.max(critic_2_values).item()) self.track_data("Q-network / Q2 (min)", torch.min(critic_2_values).item()) self.track_data("Q-network / Q2 (mean)", torch.mean(critic_2_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
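Compared to the plain TD3 agent, the RNN variant above mainly adds bookkeeping for the policy's hidden states. A small sketch of the tensor-layout convention it relies on follows; the layer count, environment count, and hidden size are assumptions for illustration, whereas in the agent they come from policy.get_specification()["rnn"]["sizes"].

import torch

num_layers, num_envs, hidden_size = 1, 8, 32

# default initial state created in init(): zeros in the RNN-native, layer-first layout
hidden = torch.zeros(num_layers, num_envs, hidden_size)

# record_transition() stores states per environment/sample, hence the transpose(0, 1)
stored = hidden.transpose(0, 1)        # (num_envs, num_layers, hidden_size)

# _update() transposes the sampled states back before feeding them to the models
restored = stored.transpose(0, 1)      # (num_layers, num_envs, hidden_size)

# environments whose episodes terminated get their hidden state zeroed (see record_transition)
finished = torch.tensor([2, 5])
hidden[:, finished] = 0

print(stored.shape, restored.shape)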
Toni-SM/skrl/skrl/agents/torch/ddpg/ddpg.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] DDPG_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class DDPG(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Deep Deterministic Policy Gradient (DDPG) https://arxiv.org/abs/1509.02971 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(DDPG_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic = self.models.get("critic", None) self.target_critic = self.models.get("target_critic", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic"] = self.critic self.checkpoint_modules["target_critic"] = self.target_critic if self.target_policy is not None and self.target_critic is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic.update_parameters(self.critic, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if self.policy is not None and self.critic is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, 
dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # clip noise bounds if self.action_space is not None: self.clip_actions_min = torch.tensor(self.action_space.low, device=self.device) self.clip_actions_max = torch.tensor(self.action_space.high, device=self.device) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample deterministic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") # add exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * (self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale noises.mul_(scale) # modify actions actions.add_(noises) actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", torch.max(noises).item()) self.track_data("Exploration / Exploration noise (min)", torch.min(noises).item()) self.track_data("Exploration / Exploration noise (mean)", torch.mean(noises).item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, 
actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_actions, _, _ = self.target_policy.act({"states": sampled_next_states}, role="target_policy") target_q_values, _, _ = self.target_critic.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic") target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_values, _, _ = self.critic.act({"states": sampled_states, "taken_actions": sampled_actions}, role="critic") critic_loss = F.mse_loss(critic_values, target_values) # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.critic.parameters(), self._grad_norm_clip) self.critic_optimizer.step() # compute policy (actor) loss actions, _, _ = self.policy.act({"states": sampled_states}, role="policy") critic_values, _, _ = self.critic.act({"states": sampled_states, "taken_actions": actions}, role="critic") policy_loss = -critic_values.mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # update target networks self.target_policy.update_parameters(self.policy, polyak=self._polyak) self.target_critic.update_parameters(self.critic, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", 
critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
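The exploration branch of `act` anneals the noise scale linearly from `initial_scale` to `final_scale` over `exploration.timesteps` steps. A minimal sketch of that schedule, with an assumed decay horizon and probe timesteps:

initial_scale, final_scale, decay_timesteps = 1.0, 1e-3, 10_000

def noise_scale(timestep: int) -> float:
    # formula used while timestep <= decay_timesteps; past that point the agent
    # stops adding exploration noise altogether (the else-branch only logs zeros)
    return (1 - timestep / decay_timesteps) * (initial_scale - final_scale) + final_scale

for t in (0, 2_500, 5_000, 10_000):
    print(t, noise_scale(t))   # decays from ~1.0 at t=0 down to 0.001 at t=10_000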
Toni-SM/skrl/skrl/agents/torch/ddpg/__init__.py
from skrl.agents.torch.ddpg.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.agents.torch.ddpg.ddpg_rnn import DDPG_RNN
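These two re-exports are the usual entry point for configuring the agents defined above. A minimal sketch of overriding a few `DDPG_DEFAULT_CONFIG` entries; the chosen values, and the `models`/`memory` objects referenced in the final comment, are illustrative assumptions.

import copy

from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG

# start from a deep copy so the nested "experiment" dict is not shared with the defaults
cfg = copy.deepcopy(DDPG_DEFAULT_CONFIG)
cfg["batch_size"] = 256
cfg["discount_factor"] = 0.98
cfg["random_timesteps"] = 1_000            # pure exploration before the policy is queried
cfg["learning_starts"] = 1_000             # no updates until enough transitions are stored
cfg["experiment"]["write_interval"] = 500

# the dict is merged over DDPG_DEFAULT_CONFIG inside the agent's __init__ via dict.update;
# hypothetical `models` and `memory` objects would be passed alongside it:
# agent = DDPG(models=models, memory=memory, cfg=cfg)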
Toni-SM/skrl/skrl/agents/torch/ddpg/ddpg_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] DDPG_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class DDPG_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Deep Deterministic Policy Gradient (DDPG) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1509.02971 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(DDPG_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic = self.models.get("critic", None) self.target_critic = self.models.get("target_critic", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic"] = self.critic self.checkpoint_modules["target_critic"] = self.target_critic if self.target_policy is not None and self.target_critic is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic.update_parameters(self.critic, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if self.policy is not None and self.critic is not None: self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._actor_learning_rate) self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self._critic_learning_rate) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, 
dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": []} self._rnn_initial_states = {"policy": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # clip noise bounds if self.action_space is not None: self.clip_actions_min = torch.tensor(self.action_space.low, device=self.device) self.clip_actions_max = torch.tensor(self.action_space.high, device=self.device) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample deterministic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) # add exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * (self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale noises.mul_(scale) # modify actions actions.add_(noises) actions.clamp_(min=self.clip_actions_min, max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", torch.max(noises).item()) self.track_data("Exploration / Exploration noise (min)", torch.min(noises).item()) self.track_data("Exploration / Exploration noise (mean)", torch.mean(noises).item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, 
truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, **rnn_states) # update RNN states if self._rnn: # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size, sequence_length=self._rnn_sequence_length)[0] rnn_policy = {} if self._rnn: sampled_rnn = self.memory.sample_by_index(names=self._rnn_tensors_names, indexes=self.memory.get_sampling_indexes())[0] rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn], "terminated": sampled_dones} # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) 
sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_actions, _, _ = self.target_policy.act({"states": sampled_next_states, **rnn_policy}, role="target_policy") target_q_values, _, _ = self.target_critic.act({"states": sampled_next_states, "taken_actions": next_actions, **rnn_policy}, role="target_critic") target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute critic loss critic_values, _, _ = self.critic.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="critic") critic_loss = F.mse_loss(critic_values, target_values) # optimization step (critic) self.critic_optimizer.zero_grad() critic_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.critic.parameters(), self._grad_norm_clip) self.critic_optimizer.step() # compute policy (actor) loss actions, _, _ = self.policy.act({"states": sampled_states, **rnn_policy}, role="policy") critic_values, _, _ = self.critic.act({"states": sampled_states, "taken_actions": actions, **rnn_policy}, role="critic") policy_loss = -critic_values.mean() # optimization step (policy) self.policy_optimizer.zero_grad() policy_loss.backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) self.policy_optimizer.step() # update target networks self.target_policy.update_parameters(self.policy, polyak=self._polyak) self.target_critic.update_parameters(self.critic, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", torch.max(critic_values).item()) self.track_data("Q-network / Q1 (min)", torch.min(critic_values).item()) self.track_data("Q-network / Q1 (mean)", torch.mean(critic_values).item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
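For reference, the target networks above are nudged toward the online networks at every gradient step through `update_parameters(..., polyak=...)`. Below is a standalone sketch of the standard Polyak (soft) update rule this corresponds to, written against plain `nn.Module`s; it is an assumption about what `update_parameters` does internally, not code lifted from skrl.

import torch
import torch.nn as nn

tau = 0.005                                     # the "polyak" entry in DDPG_DEFAULT_CONFIG

online = nn.Linear(4, 2)
target = nn.Linear(4, 2)
target.load_state_dict(online.state_dict())     # hard update, equivalent to polyak=1

@torch.no_grad()
def soft_update(target_net: nn.Module, online_net: nn.Module, tau: float) -> None:
    # theta_target <- tau * theta_online + (1 - tau) * theta_target
    for p_t, p_o in zip(target_net.parameters(), online_net.parameters()):
        p_t.mul_(1.0 - tau).add_(tau * p_o)

soft_update(target, online, tau)
print(next(target.parameters())[0, :2])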