Toni-SM/skrl/skrl/utils/isaacgym_utils.py
from typing import List, Optional import logging import math import threading import numpy as np import torch try: import flask except ImportError: flask = None try: import imageio import isaacgym import isaacgym.torch_utils as torch_utils from isaacgym import gymapi except ImportError: imageio = None isaacgym = None torch_utils = None gymapi = None class WebViewer: def __init__(self, host: str = "127.0.0.1", port: int = 5000) -> None: """ Web viewer for Isaac Gym :param host: Host address (default: "127.0.0.1") :type host: str :param port: Port number (default: 5000) :type port: int """ self._app = flask.Flask(__name__) self._app.add_url_rule("/", view_func=self._route_index) self._app.add_url_rule("/_route_stream", view_func=self._route_stream) self._app.add_url_rule("/_route_input_event", view_func=self._route_input_event, methods=["POST"]) self._log = logging.getLogger('werkzeug') self._log.disabled = True self._app.logger.disabled = True self._image = None self._camera_id = 0 self._camera_type = gymapi.IMAGE_COLOR self._notified = False self._wait_for_page = True self._pause_stream = False self._event_load = threading.Event() self._event_stream = threading.Event() # start server self._thread = threading.Thread(target=lambda: \ self._app.run(host=host, port=port, debug=False, use_reloader=False), daemon=True) self._thread.start() print(f"\nStarting web viewer on http://{host}:{port}/\n") def _route_index(self) -> 'flask.Response': """Render the web page :return: Flask response :rtype: flask.Response """ template = """<!doctype html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <style> html, body { width: 100%; height: 100%; margin: 0; overflow: hidden; display: block; background-color: #000; } </style> </head> <body> <div> <canvas id="canvas" tabindex='1'></canvas> </div> <script> var canvas, context, image; function sendInputRequest(data){ let xmlRequest = new XMLHttpRequest(); xmlRequest.open("POST", "{{ url_for('_route_input_event') }}", true); xmlRequest.setRequestHeader("Content-Type", "application/json"); xmlRequest.send(JSON.stringify(data)); } window.onload = function(){ canvas = document.getElementById("canvas"); context = canvas.getContext('2d'); image = new Image(); image.src = "{{ url_for('_route_stream') }}"; canvas.width = window.innerWidth; canvas.height = window.innerHeight; window.addEventListener('resize', function(){ canvas.width = window.innerWidth; canvas.height = window.innerHeight; }, false); window.setInterval(function(){ let ratio = image.naturalWidth / image.naturalHeight; context.drawImage(image, 0, 0, canvas.width, canvas.width / ratio); }, 50); canvas.addEventListener('keydown', function(event){ if(event.keyCode != 18) sendInputRequest({key: event.keyCode}); }, false); canvas.addEventListener('mousemove', function(event){ if(event.buttons){ let data = {dx: event.movementX, dy: event.movementY}; if(event.altKey && event.buttons == 1){ data.key = 18; data.mouse = "left"; } else if(event.buttons == 2) data.mouse = "right"; else if(event.buttons == 4) data.mouse = "middle"; else return; sendInputRequest(data); } }, false); canvas.addEventListener('wheel', function(event){ sendInputRequest({mouse: "wheel", dz: Math.sign(event.deltaY)}); }, false); } </script> </body> </html> """ self._event_load.set() return flask.render_template_string(template) def _route_stream(self) -> 'flask.Response': """Stream the image to the web page :return: Flask response :rtype: flask.Response 
""" return flask.Response(self._stream(), mimetype='multipart/x-mixed-replace; boundary=frame') def _route_input_event(self) -> 'flask.Response': """Handle keyboard and mouse input :return: Flask response :rtype: flask.Response """ def q_mult(q1, q2): return [q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2] - q1[3] * q2[3], q1[0] * q2[1] + q1[1] * q2[0] + q1[2] * q2[3] - q1[3] * q2[2], q1[0] * q2[2] + q1[2] * q2[0] + q1[3] * q2[1] - q1[1] * q2[3], q1[0] * q2[3] + q1[3] * q2[0] + q1[1] * q2[2] - q1[2] * q2[1]] def q_conj(q): return [q[0], -q[1], -q[2], -q[3]] def qv_mult(q, v): q2 = [0] + v return q_mult(q_mult(q, q2), q_conj(q))[1:] def q_from_angle_axis(angle, axis): s = math.sin(angle / 2.0) return [math.cos(angle / 2.0), axis[0] * s, axis[1] * s, axis[2] * s] def p_target(p, q, a=0, b=0, c=1, d=0): v = qv_mult(q, [1, 0, 0]) p1 = [c0 + c1 for c0, c1 in zip(p, v)] denominator = a * (p1[0] - p[0]) + b * (p1[1] - p[1]) + c * (p1[2] - p[2]) if denominator: t = -(a * p[0] + b * p[1] + c * p[2] + d) / denominator return [p[0] + t * (p1[0] - p[0]), p[1] + t * (p1[1] - p[1]), p[2] + t * (p1[2] - p[2])] return v # get keyboard and mouse inputs data = flask.request.get_json() key, mouse = data.get("key", None), data.get("mouse", None) dx, dy, dz = data.get("dx", None), data.get("dy", None), data.get("dz", None) transform = self._gym.get_camera_transform(self._sim, self._envs[self._camera_id], self._cameras[self._camera_id]) # zoom in/out if mouse == "wheel": # compute zoom vector vector = qv_mult([transform.r.w, transform.r.x, transform.r.y, transform.r.z], [-0.025 * dz, 0, 0]) # update transform transform.p.x += vector[0] transform.p.y += vector[1] transform.p.z += vector[2] # orbit camera elif mouse == "left": # convert mouse movement to angle dx *= 0.1 * math.pi / 180 dy *= 0.1 * math.pi / 180 # compute rotation (Z-up) q = q_from_angle_axis(dx, [0, 0, -1]) q = q_mult(q, q_from_angle_axis(dy, [1, 0, 0])) # apply rotation t = p_target([transform.p.x, transform.p.y, transform.p.z], [transform.r.w, transform.r.x, transform.r.y, transform.r.z]) p = qv_mult(q, [transform.p.x - t[0], transform.p.y - t[1], transform.p.z - t[2]]) q = q_mult(q, [transform.r.w, transform.r.x, transform.r.y, transform.r.z]) # update transform transform.p.x = p[0] + t[0] transform.p.y = p[1] + t[1] transform.p.z = p[2] + t[2] transform.r.w, transform.r.x, transform.r.y, transform.r.z = q # pan camera elif mouse == "right": # convert mouse movement to angle dx *= 0.1 * math.pi / 180 dy *= 0.1 * math.pi / 180 # compute rotation (Z-up) q = q_from_angle_axis(dx, [0, 0, -1]) q = q_mult(q, q_from_angle_axis(dy, [1, 0, 0])) # apply rotation q = q_mult(q, [transform.r.w, transform.r.x, transform.r.y, transform.r.z]) # update transform transform.r.w, transform.r.x, transform.r.y, transform.r.z = q # walk camera elif mouse == "middle": # compute displacement vector = qv_mult([transform.r.w, transform.r.x, transform.r.y, transform.r.z], [0, 0.001 * dx, 0.001 * dy]) # update transform transform.p.x += vector[0] transform.p.y += vector[1] transform.p.z += vector[2] # pause stream (V: 86) elif key == 86: self._pause_stream = not self._pause_stream return flask.Response(status=200) # change image type (T: 84) elif key == 84: if self._camera_type == gymapi.IMAGE_COLOR: self._camera_type = gymapi.IMAGE_DEPTH elif self._camera_type == gymapi.IMAGE_DEPTH: self._camera_type = gymapi.IMAGE_COLOR return flask.Response(status=200) else: return flask.Response(status=200) self._gym.set_camera_transform(self._cameras[self._camera_id], 
self._envs[self._camera_id], transform) return flask.Response(status=200) def _stream(self) -> bytes: """Format the image to be streamed :return: Image encoded as Content-Type :rtype: bytes """ while True: self._event_stream.wait() # prepare image image = imageio.imwrite("<bytes>", self._image, format="JPEG") # stream image yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n') self._event_stream.clear() self._notified = False def setup(self, gym: 'isaacgym.gymapi.Gym', sim: 'isaacgym.gymapi.Sim', envs: List[int], cameras: List[int]) -> None: """Setup the web viewer :param gym: The gym :type gym: isaacgym.gymapi.Gym :param sim: Simulation handle :type sim: isaacgym.gymapi.Sim :param envs: Environment handles :type envs: list of ints :param cameras: Camera handles :type cameras: list of ints """ self._gym = gym self._sim = sim self._envs = envs self._cameras = cameras def render(self, fetch_results: bool = True, step_graphics: bool = True, render_all_camera_sensors: bool = True, wait_for_page_load: bool = True) -> None: """Render and get the image from the current camera This function must be called after the simulation is stepped (post_physics_step). The following Isaac Gym functions are called before get the image. Their calling can be skipped by setting the corresponding argument to False - fetch_results - step_graphics - render_all_camera_sensors :param fetch_results: Call Gym.fetch_results method (default: True) :type fetch_results: bool :param step_graphics: Call Gym.step_graphics method (default: True) :type step_graphics: bool :param render_all_camera_sensors: Call Gym.render_all_camera_sensors method (default: True) :type render_all_camera_sensors: bool :param wait_for_page_load: Wait for the page to load (default: True) :type wait_for_page_load: bool """ # wait for page to load if self._wait_for_page: if wait_for_page_load: if not self._event_load.is_set(): print("Waiting for web page to begin loading...") self._event_load.wait() self._event_load.clear() self._wait_for_page = False # pause stream if self._pause_stream: return if self._notified: return # isaac gym API if fetch_results: self._gym.fetch_results(self._sim, True) if step_graphics: self._gym.step_graphics(self._sim) if render_all_camera_sensors: self._gym.render_all_camera_sensors(self._sim) # get image image = self._gym.get_camera_image(self._sim, self._envs[self._camera_id], self._cameras[self._camera_id], self._camera_type) if self._camera_type == gymapi.IMAGE_COLOR: self._image = image.reshape(image.shape[0], -1, 4)[..., :3] elif self._camera_type == gymapi.IMAGE_DEPTH: self._image = -image.reshape(image.shape[0], -1) minimum = 0 if np.isinf(np.min(self._image)) else np.min(self._image) maximum = 5 if np.isinf(np.max(self._image)) else np.max(self._image) self._image = np.clip(1 - (self._image - minimum) / (maximum - minimum), 0, 1) self._image = np.uint8(255 * self._image) else: raise ValueError("Unsupported camera type") # notify stream thread self._event_stream.set() self._notified = True def ik(jacobian_end_effector: torch.Tensor, current_position: torch.Tensor, current_orientation: torch.Tensor, goal_position: torch.Tensor, goal_orientation: Optional[torch.Tensor] = None, damping_factor: float = 0.05, squeeze_output: bool = True) -> torch.Tensor: """ Inverse kinematics using damped least squares method :param jacobian_end_effector: End effector's jacobian :type jacobian_end_effector: torch.Tensor :param current_position: End effector's current position :type current_position: torch.Tensor 
:param current_orientation: End effector's current orientation :type current_orientation: torch.Tensor :param goal_position: End effector's goal position :type goal_position: torch.Tensor :param goal_orientation: End effector's goal orientation (default: None) :type goal_orientation: torch.Tensor or None :param damping_factor: Damping factor (default: 0.05) :type damping_factor: float :param squeeze_output: Squeeze output (default: True) :type squeeze_output: bool :return: Change in joint angles :rtype: torch.Tensor """ if goal_orientation is None: goal_orientation = current_orientation # compute error q = torch_utils.quat_mul(goal_orientation, torch_utils.quat_conjugate(current_orientation)) error = torch.cat([goal_position - current_position, # position error q[:, 0:3] * torch.sign(q[:, 3]).unsqueeze(-1)], # orientation error dim=-1).unsqueeze(-1) # solve damped least squares (dO = J.T * V) transpose = torch.transpose(jacobian_end_effector, 1, 2) lmbda = torch.eye(6, device=jacobian_end_effector.device) * (damping_factor ** 2) if squeeze_output: return (transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error).squeeze(dim=2) else: return transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error def print_arguments(args): print("") print("Arguments") for a in args.__dict__: print(f" |-- {a}: {args.__getattribute__(a)}") def print_asset_options(asset_options: 'isaacgym.gymapi.AssetOptions', asset_name: str = ""): attrs = ["angular_damping", "armature", "collapse_fixed_joints", "convex_decomposition_from_submeshes", "default_dof_drive_mode", "density", "disable_gravity", "fix_base_link", "flip_visual_attachments", "linear_damping", "max_angular_velocity", "max_linear_velocity", "mesh_normal_mode", "min_particle_mass", "override_com", "override_inertia", "replace_cylinder_with_capsule", "tendon_limit_stiffness", "thickness", "use_mesh_materials", "use_physx_armature", "vhacd_enabled"] # vhacd_params print("\nAsset options{}".format(f" ({asset_name})" if asset_name else "")) for attr in attrs: print(" |-- {}: {}".format(attr, getattr(asset_options, attr) if hasattr(asset_options, attr) else "--")) # vhacd attributes if attr == "vhacd_enabled" and hasattr(asset_options, attr) and getattr(asset_options, attr): vhacd_attrs = ["alpha", "beta", "concavity", "convex_hull_approximation", "convex_hull_downsampling", "max_convex_hulls", "max_num_vertices_per_ch", "min_volume_per_ch", "mode", "ocl_acceleration", "pca", "plane_downsampling", "project_hull_vertices", "resolution"] print(" |-- vhacd_params:") for vhacd_attr in vhacd_attrs: print(" | |-- {}: {}".format(vhacd_attr, getattr(asset_options.vhacd_params, vhacd_attr) \ if hasattr(asset_options.vhacd_params, vhacd_attr) else "--")) def print_sim_components(gym, sim): print("") print("Sim components") print(" |-- env count:", gym.get_env_count(sim)) print(" |-- actor count:", gym.get_sim_actor_count(sim)) print(" |-- rigid body count:", gym.get_sim_rigid_body_count(sim)) print(" |-- joint count:", gym.get_sim_joint_count(sim)) print(" |-- dof count:", gym.get_sim_dof_count(sim)) print(" |-- force sensor count:", gym.get_sim_force_sensor_count(sim)) def print_env_components(gym, env): print("") print("Env components") print(" |-- actor count:", gym.get_actor_count(env)) print(" |-- rigid body count:", gym.get_env_rigid_body_count(env)) print(" |-- joint count:", gym.get_env_joint_count(env)) print(" |-- dof count:", gym.get_env_dof_count(env)) def print_actor_components(gym, env, actor): print("") print("Actor 
components") print(" |-- rigid body count:", gym.get_actor_rigid_body_count(env, actor)) print(" |-- joint count:", gym.get_actor_joint_count(env, actor)) print(" |-- dof count:", gym.get_actor_dof_count(env, actor)) print(" |-- actuator count:", gym.get_actor_actuator_count(env, actor)) print(" |-- rigid shape count:", gym.get_actor_rigid_shape_count(env, actor)) print(" |-- soft body count:", gym.get_actor_soft_body_count(env, actor)) print(" |-- tendon count:", gym.get_actor_tendon_count(env, actor)) def print_dof_properties(gymapi, props): print("") print("DOF properties") print(" |-- hasLimits:", props["hasLimits"]) print(" |-- lower:", props["lower"]) print(" |-- upper:", props["upper"]) print(" |-- driveMode:", props["driveMode"]) print(" | |-- {}: gymapi.DOF_MODE_NONE".format(int(gymapi.DOF_MODE_NONE))) print(" | |-- {}: gymapi.DOF_MODE_POS".format(int(gymapi.DOF_MODE_POS))) print(" | |-- {}: gymapi.DOF_MODE_VEL".format(int(gymapi.DOF_MODE_VEL))) print(" | |-- {}: gymapi.DOF_MODE_EFFORT".format(int(gymapi.DOF_MODE_EFFORT))) print(" |-- stiffness:", props["stiffness"]) print(" |-- damping:", props["damping"]) print(" |-- velocity (max):", props["velocity"]) print(" |-- effort (max):", props["effort"]) print(" |-- friction:", props["friction"]) print(" |-- armature:", props["armature"]) def print_links_and_dofs(gym, asset): link_dict = gym.get_asset_rigid_body_dict(asset) dof_dict = gym.get_asset_dof_dict(asset) print("") print("Links") for k in link_dict: print(f" |-- {k}: {link_dict[k]}") print("DOFs") for k in dof_dict: print(f" |-- {k}: {dof_dict[k]}")
Toni-SM/skrl/skrl/utils/omniverse_isaacgym_utils.py
from typing import Mapping, Optional import queue import numpy as np import torch from skrl import logger def _np_quat_mul(a, b): assert a.shape == b.shape shape = a.shape a = a.reshape(-1, 4) b = b.reshape(-1, 4) x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3] x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3] ww = (z1 + x1) * (x2 + y2) yy = (w1 - y1) * (w2 + z2) zz = (w1 + y1) * (w2 - z2) xx = ww + yy + zz qq = 0.5 * (xx + (z1 - x1) * (x2 - y2)) w = qq - ww + (z1 - y1) * (y2 - z2) x = qq - xx + (x1 + w1) * (x2 + w2) y = qq - yy + (w1 - x1) * (y2 + z2) z = qq - zz + (z1 + y1) * (w2 - x2) return np.stack([x, y, z, w], axis=-1).reshape(shape) def _np_quat_conjugate(a): shape = a.shape a = a.reshape(-1, 4) return np.concatenate((-a[:, :3], a[:, -1:]), axis=-1).reshape(shape) def _torch_quat_mul(a, b): assert a.shape == b.shape shape = a.shape a = a.reshape(-1, 4) b = b.reshape(-1, 4) w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3] w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3] ww = (z1 + x1) * (x2 + y2) yy = (w1 - y1) * (w2 + z2) zz = (w1 + y1) * (w2 - z2) xx = ww + yy + zz qq = 0.5 * (xx + (z1 - x1) * (x2 - y2)) w = qq - ww + (z1 - y1) * (y2 - z2) x = qq - xx + (x1 + w1) * (x2 + w2) y = qq - yy + (w1 - x1) * (y2 + z2) z = qq - zz + (z1 + y1) * (w2 - x2) return torch.stack([w, x, y, z], dim=-1).view(shape) def _torch_quat_conjugate(a): # wxyz shape = a.shape a = a.reshape(-1, 4) return torch.cat((a[:, :1], -a[:, 1:]), dim=-1).view(shape) def ik(jacobian_end_effector: torch.Tensor, current_position: torch.Tensor, current_orientation: torch.Tensor, goal_position: torch.Tensor, goal_orientation: Optional[torch.Tensor] = None, method: str = "damped least-squares", method_cfg: Mapping[str, float] = {"scale": 1, "damping": 0.05, "min_singular_value": 1e-5}, squeeze_output: bool = True,) -> torch.Tensor: """Differential inverse kinematics :param jacobian_end_effector: End effector's jacobian :type jacobian_end_effector: torch.Tensor :param current_position: End effector's current position :type current_position: torch.Tensor :param current_orientation: End effector's current orientation :type current_orientation: torch.Tensor :param goal_position: End effector's goal position :type goal_position: torch.Tensor :param goal_orientation: End effector's goal orientation (default: ``None``). If not provided, the current orientation will be used instead. :type goal_orientation: torch.Tensor, optional :param method: Differential inverse kinematics formulation (default: ``"damped least-squares"``). 
The supported methods are described in the following table: +----------------------------------+----------------------------------+ |IK Method |Method tag | +==================================+==================================+ |Damped least-squares |``"damped least-squares"`` | +----------------------------------+----------------------------------+ |Tanspose |``"transpose"`` | +----------------------------------+----------------------------------+ |Pseduoinverse |``"pseudoinverse"`` | +----------------------------------+----------------------------------+ |Singular-vale decomposition (SVD) |``"singular-vale decomposition"`` | +----------------------------------+----------------------------------+ :type method: str, optional :param method_cfg: Method configurations (default: ``{"scale": 1, "damping": 0.05, "min_singular_value": 1e-5}``) :type method_cfg: dict, optional :param squeeze_output: Squeeze output (default: ``True``) :type squeeze_output: bool, optional :return: Change in joint angles :rtype: torch.Tensor """ if goal_orientation is None: goal_orientation = current_orientation # torch if isinstance(jacobian_end_effector, torch.Tensor): # compute error q = _torch_quat_mul(goal_orientation, _torch_quat_conjugate(current_orientation)) error = torch.cat([goal_position - current_position, # position error q[:, 1:] * torch.sign(q[:, 0]).unsqueeze(-1)], # orientation error dim=-1).unsqueeze(-1) scale = method_cfg.get("scale", 1.0) # adaptive Singular Value Decomposition (SVD) if method == "singular-vale decomposition": min_singular_value = method_cfg.get("min_singular_value", 1e-5) U, S, Vh = torch.linalg.svd(jacobian_end_effector) # U: 6xd, S: dxd, V: d x num_dof inv_s = torch.where(S > min_singular_value, 1.0 / S, torch.zeros_like(S)) pseudoinverse = torch.transpose(Vh, 1, 2)[:, :, :6] @ torch.diag_embed(inv_s) @ torch.transpose(U, 1, 2) if squeeze_output: return (scale * pseudoinverse @ error).squeeze(dim=2) else: return scale * pseudoinverse @ error # jacobian pseudoinverse elif method == "pseudoinverse": pseudoinverse = torch.linalg.pinv(jacobian_end_effector) if squeeze_output: return (scale * pseudoinverse @ error).squeeze(dim=2) else: return scale * pseudoinverse @ error # jacobian transpose elif method == "transpose": transpose = torch.transpose(jacobian_end_effector, 1, 2) if squeeze_output: return (scale * transpose @ error).squeeze(dim=2) else: return scale * transpose @ error # damped least-squares elif method == "damped least-squares": damping = method_cfg.get("damping", 0.05) transpose = torch.transpose(jacobian_end_effector, 1, 2) lmbda = torch.eye(jacobian_end_effector.shape[1], device=jacobian_end_effector.device) * (damping ** 2) if squeeze_output: return (scale * transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error).squeeze(dim=2) else: return scale * transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error else: raise ValueError("Invalid IK method") # numpy # TODO: test and fix this else: # compute error q = _np_quat_mul(goal_orientation, _np_quat_conjugate(current_orientation)) error = np.concatenate([goal_position - current_position, # position error q[:, 0:3] * np.sign(q[:, 3])]) # orientation error # solve damped least squares (dO = J.T * V) transpose = np.transpose(jacobian_end_effector, 1, 2) lmbda = np.eye(6) * (method_cfg.get("damping", 0.05) ** 2) if squeeze_output: return (transpose @ np.linalg.inv(jacobian_end_effector @ transpose + lmbda) @ error) else: return transpose @ np.linalg.inv(jacobian_end_effector @ 
transpose + lmbda) @ error def get_env_instance(headless: bool = True, enable_livestream: bool = False, enable_viewport: bool = False, multi_threaded: bool = False) -> "omni.isaac.gym.vec_env.VecEnvBase": """ Instantiate a VecEnvBase-based object compatible with OmniIsaacGymEnvs :param headless: Disable UI when running (default: ``True``) :type headless: bool, optional :param enable_livestream: Whether to enable live streaming (default: ``False``) :type enable_livestream: bool, optional :param enable_viewport: Whether to enable viewport (default: ``False``) :type enable_viewport: bool, optional :param multi_threaded: Whether to return a multi-threaded environment instance (default: ``False``) :type multi_threaded: bool, optional :return: Environment instance :rtype: omni.isaac.gym.vec_env.VecEnvBase Example:: from skrl.envs.wrappers.torch import wrap_env from skrl.utils.omniverse_isaacgym_utils import get_env_instance # get environment instance env = get_env_instance(headless=True) # parse sim configuration from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig sim_config = SimConfig({"test": False, "device_id": 0, "headless": True, "multi_gpu": False, "sim_device": "gpu", "enable_livestream": False, "task": {"name": "CustomTask", "physics_engine": "physx", "env": {"numEnvs": 512, "envSpacing": 1.5, "enableDebugVis": False, "clipObservations": 1000.0, "clipActions": 1.0, "controlFrequencyInv": 4}, "sim": {"dt": 0.0083, # 1 / 120 "use_gpu_pipeline": True, "gravity": [0.0, 0.0, -9.81], "add_ground_plane": True, "use_flatcache": True, "enable_scene_query_support": False, "enable_cameras": False, "default_physics_material": {"static_friction": 1.0, "dynamic_friction": 1.0, "restitution": 0.0}, "physx": {"worker_thread_count": 4, "solver_type": 1, "use_gpu": True, "solver_position_iteration_count": 4, "solver_velocity_iteration_count": 1, "contact_offset": 0.005, "rest_offset": 0.0, "bounce_threshold_velocity": 0.2, "friction_offset_threshold": 0.04, "friction_correlation_distance": 0.025, "enable_sleeping": True, "enable_stabilization": True, "max_depenetration_velocity": 1000.0, "gpu_max_rigid_contact_count": 524288, "gpu_max_rigid_patch_count": 33554432, "gpu_found_lost_pairs_capacity": 524288, "gpu_found_lost_aggregate_pairs_capacity": 262144, "gpu_total_aggregate_pairs_capacity": 1048576, "gpu_max_soft_body_contacts": 1048576, "gpu_max_particle_contacts": 1048576, "gpu_heap_capacity": 33554432, "gpu_temp_buffer_capacity": 16777216, "gpu_max_num_partitions": 8}}}}) # import and setup custom task from custom_task import CustomTask task = CustomTask(name="CustomTask", sim_config=sim_config, env=env) env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True) # wrap the environment env = wrap_env(env, "omniverse-isaacgym") """ from omni.isaac.gym.vec_env import TaskStopException, VecEnvBase, VecEnvMT from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT class _OmniIsaacGymVecEnv(VecEnvBase): def step(self, actions): actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone() self._task.pre_physics_step(actions) for _ in range(self._task.control_frequency_inv): self._world.step(render=self._render) self.sim_frame_count += 1 observations, rewards, dones, info = self._task.post_physics_step() return {"obs": torch.clamp(observations, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()}, \ rewards.to(self._task.rl_device).clone(), 
dones.to(self._task.rl_device).clone(), info.copy() def reset(self): self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device) return self.step(actions)[0] class _OmniIsaacGymTrainerMT(TrainerMT): def run(self): pass def stop(self): pass class _OmniIsaacGymVecEnvMT(VecEnvMT): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.action_queue = queue.Queue(1) self.data_queue = queue.Queue(1) def run(self, trainer=None): super().run(_OmniIsaacGymTrainerMT() if trainer is None else trainer) def _parse_data(self, data): self._observations = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone() self._rewards = data["rew"].to(self._task.rl_device).clone() self._dones = data["reset"].to(self._task.rl_device).clone() self._info = data["extras"].copy() def step(self, actions): if self._stop: raise TaskStopException() actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).clone() self.send_actions(actions) data = self.get_data() return {"obs": self._observations}, self._rewards, self._dones, self._info def reset(self): self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device) return self.step(actions)[0] def close(self): # end stop signal to main thread self.send_actions(None) self.stop = True if multi_threaded: try: return _OmniIsaacGymVecEnvMT(headless=headless, enable_livestream=enable_livestream, enable_viewport=enable_viewport) except TypeError: logger.warning("Using an older version of Isaac Sim (2022.2.0 or earlier)") return _OmniIsaacGymVecEnvMT(headless=headless) # Isaac Sim 2022.2.0 and earlier else: try: return _OmniIsaacGymVecEnv(headless=headless, enable_livestream=enable_livestream, enable_viewport=enable_viewport) except TypeError: logger.warning("Using an older version of Isaac Sim (2022.2.0 or earlier)") return _OmniIsaacGymVecEnv(headless=headless) # Isaac Sim 2022.2.0 and earlier
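To make the `ik` call signature above concrete, here is a small, self-contained sketch using batched torch tensors. The shapes and values are purely illustrative (a batch of 2 arms with 7 DOFs each); the torch branch expects quaternions in `wxyz` order.

```python
# Hedged sketch: batched differential IK with illustrative shapes only.
import torch

from skrl.utils.omniverse_isaacgym_utils import ik

batch_size, num_dofs = 2, 7
jacobian = torch.randn(batch_size, 6, num_dofs)   # end-effector Jacobians
current_position = torch.randn(batch_size, 3)
current_orientation = torch.nn.functional.normalize(torch.randn(batch_size, 4), dim=-1)  # wxyz
goal_position = torch.randn(batch_size, 3)

# damped least-squares (default) vs. Jacobian pseudoinverse
dq_dls = ik(jacobian, current_position, current_orientation, goal_position)
dq_pinv = ik(jacobian, current_position, current_orientation, goal_position,
             method="pseudoinverse", method_cfg={"scale": 0.5})

print(dq_dls.shape, dq_pinv.shape)  # torch.Size([2, 7]) torch.Size([2, 7])
```

The returned tensor is the change in joint angles, so in a control loop one would typically apply something like `q_target = q_current + dq` before setting the DOF position targets.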
Toni-SM/skrl/skrl/utils/model_instantiators/__init__.py
Toni-SM/skrl/skrl/utils/model_instantiators/torch/__init__.py
from typing import Any, Mapping, Optional, Sequence, Tuple, Union from enum import Enum import gym import gymnasium import torch import torch.nn as nn from skrl.models.torch import Model # noqa from skrl.models.torch import CategoricalMixin, DeterministicMixin, GaussianMixin, MultivariateGaussianMixin # noqa __all__ = ["categorical_model", "deterministic_model", "gaussian_model", "multivariate_gaussian_model", "Shape"] class Shape(Enum): """ Enum to select the shape of the model's inputs and outputs """ ONE = 1 STATES = 0 OBSERVATIONS = 0 ACTIONS = -1 STATES_ACTIONS = -2 def _get_activation_function(activation: str) -> nn.Module: """Get the activation function Supported activation functions: - "elu" - "leaky_relu" - "relu" - "selu" - "sigmoid" - "softmax" - "softplus" - "softsign" - "tanh" :param activation: activation function name. If activation is an empty string, a placeholder will be returned (``torch.nn.Identity()``) :type activation: str :raises: ValueError if activation is not a valid activation function :return: activation function :rtype: nn.Module """ if not activation: return torch.nn.Identity() elif activation == "relu": return torch.nn.ReLU() elif activation == "tanh": return torch.nn.Tanh() elif activation == "sigmoid": return torch.nn.Sigmoid() elif activation == "leaky_relu": return torch.nn.LeakyReLU() elif activation == "elu": return torch.nn.ELU() elif activation == "softplus": return torch.nn.Softplus() elif activation == "softsign": return torch.nn.Softsign() elif activation == "selu": return torch.nn.SELU() elif activation == "softmax": return torch.nn.Softmax() else: raise ValueError(f"Unknown activation function: {activation}") def _get_num_units_by_shape(model: Model, shape: Shape) -> int: """Get the number of units in a layer by shape :param model: Model to get the number of units for :type model: Model :param shape: Shape of the layer :type shape: Shape or int :return: Number of units in the layer :rtype: int """ num_units = {Shape.ONE: 1, Shape.STATES: model.num_observations, Shape.ACTIONS: model.num_actions, Shape.STATES_ACTIONS: model.num_observations + model.num_actions} try: return num_units[shape] except: return shape def _generate_sequential(model: Model, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Union[str, None] = "tanh", output_scale: Optional[int] = None) -> nn.Sequential: """Generate a sequential model :param model: model to generate sequential model for :type model: Model :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: None). 
If None, the output layer will not be scaled :type output_scale: int, optional :return: sequential model :rtype: nn.Sequential """ # input layer input_layer = [nn.Linear(_get_num_units_by_shape(model, input_shape), hiddens[0])] # hidden layers hidden_layers = [] for i in range(len(hiddens) - 1): hidden_layers.append(_get_activation_function(hidden_activation[i])) hidden_layers.append(nn.Linear(hiddens[i], hiddens[i + 1])) hidden_layers.append(_get_activation_function(hidden_activation[-1])) # output layer output_layer = [nn.Linear(hiddens[-1], _get_num_units_by_shape(model, output_shape))] if output_activation is not None: output_layer.append(_get_activation_function(output_activation)) return nn.Sequential(*input_layer, *hidden_layers, *output_layer) def gaussian_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, initial_log_std: float = 0, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a Gaussian model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param clip_actions: Flag to indicate whether the actions should be clipped (default: False) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: True) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation (default: -20) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation (default: 2) :type max_log_std: float, optional :param initial_log_std: Initial value for the log standard deviation (default: 0) :type initial_log_std: float, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). 
If None, the output layer will not be scaled :type output_scale: float, optional :return: Gaussian model instance :rtype: Model """ class GaussianModel(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions, clip_log_std, min_log_std, max_log_std, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) self.log_std_parameter = nn.Parameter(metadata["initial_log_std"] \ * torch.ones(_get_num_units_by_shape(self, metadata["output_shape"]))) def compute(self, inputs, role=""): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) return output * self.instantiator_output_scale, self.log_std_parameter, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale, "initial_log_std": initial_log_std} return GaussianModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions, clip_log_std=clip_log_std, min_log_std=min_log_std, max_log_std=max_log_std) def multivariate_gaussian_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, initial_log_std: float = 0, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a multivariate Gaussian model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param clip_actions: Flag to indicate whether the actions should be clipped (default: False) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: True) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation (default: -20) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation (default: 2) :type max_log_std: float, optional :param initial_log_std: Initial value for the log standard deviation (default: 0) :type initial_log_std: float, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). If None, the output layer will not be scaled :type output_scale: float, optional :return: Multivariate Gaussian model instance :rtype: Model """ class MultivariateGaussianModel(MultivariateGaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions, clip_log_std, min_log_std, max_log_std): Model.__init__(self, observation_space, action_space, device) MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) self.log_std_parameter = nn.Parameter(metadata["initial_log_std"] \ * torch.ones(_get_num_units_by_shape(self, metadata["output_shape"]))) def compute(self, inputs, role=""): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) return output * self.instantiator_output_scale, self.log_std_parameter, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale, "initial_log_std": initial_log_std} return MultivariateGaussianModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions, clip_log_std=clip_log_std, min_log_std=min_log_std, max_log_std=max_log_std) def deterministic_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, clip_actions: bool = False, input_shape: Shape = Shape.STATES, hiddens: list = [256, 
256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a deterministic model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: False) :type clip_actions: bool, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). If None, the output layer will not be scaled :type output_scale: float, optional :return: Deterministic model instance :rtype: Model """ class DeterministicModel(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) def compute(self, inputs, role=""): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) return output * self.instantiator_output_scale, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale} return DeterministicModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions) def categorical_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, unnormalized_log_prob: bool = True, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], 
output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = None) -> Model: """Instantiate a categorical model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: True). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: None) :type output_activation: str or None, optional :return: Categorical model instance :rtype: Model """ class CategoricalModel(CategoricalMixin, Model): def __init__(self, observation_space, action_space, device, unnormalized_log_prob): Model.__init__(self, observation_space, action_space, device) CategoricalMixin.__init__(self, unnormalized_log_prob) self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"]) def compute(self, inputs, role=""): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) return output, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation} return CategoricalModel(observation_space=observation_space, action_space=action_space, device=device, unnormalized_log_prob=unnormalized_log_prob) def shared_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, structure: str = "", roles: Sequence[str] = [], parameters: Sequence[Mapping[str, Any]] = []) -> Model: """Instantiate a shared model :param observation_space: Observation/state space or shape (default: None). 
If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param structure: Shared model structure (default: ``""``). Note: this parameter is ignored for the moment :type structure: str, optional :param roles: Organized list of model roles (default: ``[]``) :type roles: sequence of strings, optional :param parameters: Organized list of model instantiator parameters (default: ``[]``) :type parameters: sequence of dict, optional :return: Shared model instance :rtype: Model """ class GaussianDeterministicModel(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, roles, metadata): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions=metadata[0]["clip_actions"], clip_log_std=metadata[0]["clip_log_std"], min_log_std=metadata[0]["min_log_std"], max_log_std=metadata[0]["max_log_std"], role=roles[0]) DeterministicMixin.__init__(self, clip_actions=metadata[1]["clip_actions"], role=roles[1]) self._roles = roles self.instantiator_input_type = metadata[0]["input_shape"].value self.instantiator_output_scales = [m["output_scale"] for m in metadata] # shared layers/network self.net = _generate_sequential(model=self, input_shape=metadata[0]["input_shape"], hiddens=metadata[0]["hiddens"][:-1], hidden_activation=metadata[0]["hidden_activation"][:-1], output_shape=metadata[0]["hiddens"][-1], output_activation=metadata[0]["hidden_activation"][-1]) # separated layers ("policy") mean_layers = [nn.Linear(metadata[0]["hiddens"][-1], _get_num_units_by_shape(self, metadata[0]["output_shape"]))] if metadata[0]["output_activation"] is not None: mean_layers.append(_get_activation_function(metadata[0]["output_activation"])) self.mean_net = nn.Sequential(*mean_layers) self.log_std_parameter = nn.Parameter(metadata[0]["initial_log_std"] \ * torch.ones(_get_num_units_by_shape(self, metadata[0]["output_shape"]))) # separated layer ("value") value_layers = [nn.Linear(metadata[1]["hiddens"][-1], _get_num_units_by_shape(self, metadata[1]["output_shape"]))] if metadata[1]["output_activation"] is not None: value_layers.append(_get_activation_function(metadata[1]["output_activation"])) self.value_net = nn.Sequential(*value_layers) def act(self, inputs, role): if role == self._roles[0]: return GaussianMixin.act(self, inputs, role) elif role == self._roles[1]: return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) if role == self._roles[0]: return self.instantiator_output_scales[0] * self.mean_net(output), self.log_std_parameter, {} elif role == self._roles[1]: return self.instantiator_output_scales[1] * self.value_net(output), {} # TODO: define the model using the 
specified structure return GaussianDeterministicModel(observation_space=observation_space, action_space=action_space, device=device, roles=roles, metadata=parameters)
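As a usage illustration for the instantiators above, the following hedged sketch builds a Gaussian policy and a deterministic value function over a simple `Box` environment. The spaces, layer sizes and role names are arbitrary, and it assumes skrl's usual `Model.act` convention of returning `(output, log-prob, extras)`.

```python
# Hedged sketch: instantiating a policy/value pair (arbitrary spaces and sizes).
import gymnasium
import torch

from skrl.utils.model_instantiators.torch import Shape, deterministic_model, gaussian_model

observation_space = gymnasium.spaces.Box(low=-1, high=1, shape=(24,))
action_space = gymnasium.spaces.Box(low=-1, high=1, shape=(4,))

policy = gaussian_model(observation_space=observation_space,
                        action_space=action_space,
                        device="cpu",
                        hiddens=[64, 64],
                        hidden_activation=["relu", "relu"],
                        output_shape=Shape.ACTIONS,
                        output_activation="tanh")

value = deterministic_model(observation_space=observation_space,
                            action_space=action_space,
                            device="cpu",
                            hiddens=[64, 64],
                            hidden_activation=["relu", "relu"],
                            output_shape=Shape.ONE,
                            output_activation=None)

states = torch.rand(8, 24)  # batch of 8 observations
actions, log_prob, _ = policy.act({"states": states}, role="policy")
values, _, _ = value.act({"states": states}, role="value")
print(actions.shape, values.shape)  # torch.Size([8, 4]) torch.Size([8, 1])
```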
Toni-SM/skrl/skrl/utils/model_instantiators/jax/__init__.py
from typing import Any, Mapping, Optional, Sequence, Tuple, Union import sys from enum import Enum import gym import gymnasium import flax.linen as nn import jax import jax.numpy as jnp from skrl.models.jax import Model # noqa from skrl.models.jax import CategoricalMixin, DeterministicMixin, GaussianMixin # noqa __all__ = ["categorical_model", "deterministic_model", "gaussian_model", "Shape"] class Shape(Enum): """ Enum to select the shape of the model's inputs and outputs """ ONE = 1 STATES = 0 OBSERVATIONS = 0 ACTIONS = -1 STATES_ACTIONS = -2 def _get_activation_function(activation: str) -> nn.Module: """Get the activation function Supported activation functions: - "elu" - "leaky_relu" - "relu" - "selu" - "sigmoid" - "softmax" - "softplus" - "softsign" - "tanh" :param activation: activation function name. If activation is an empty string, a placeholder will be returned (``lambda x: x``) :type activation: str :raises: ValueError if activation is not a valid activation function :return: activation function :rtype: nn.Module """ if not activation: return lambda x: x elif activation == "relu": return nn.relu elif activation == "tanh": return nn.tanh elif activation == "sigmoid": return nn.sigmoid elif activation == "leaky_relu": return nn.leaky_relu elif activation == "elu": return nn.elu elif activation == "softplus": return nn.softplus elif activation == "softsign": return nn.soft_sign elif activation == "selu": return nn.selu elif activation == "softmax": return nn.softmax else: raise ValueError(f"Unknown activation function: {activation}") def _get_num_units_by_shape(model: Model, shape: Shape) -> int: """Get the number of units in a layer by shape :param model: Model to get the number of units for :type model: Model :param shape: Shape of the layer :type shape: Shape or int :return: Number of units in the layer :rtype: int """ num_units = {Shape.ONE: 1, Shape.STATES: model.num_observations, Shape.ACTIONS: model.num_actions, Shape.STATES_ACTIONS: model.num_observations + model.num_actions} try: return num_units[shape] except: return shape def _generate_sequential(model: Model, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Union[str, None] = "tanh", output_scale: Optional[int] = None) -> nn.Sequential: """Generate a sequential model :param model: model to generate sequential model for :type model: Model :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: None). 
If None, the output layer will not be scaled :type output_scale: int, optional :return: sequential model :rtype: nn.Sequential """ # input layer input_layer = [nn.Dense(hiddens[0])] # hidden layers hidden_layers = [] for i in range(len(hiddens) - 1): hidden_layers.append(_get_activation_function(hidden_activation[i])) hidden_layers.append(nn.Dense(hiddens[i + 1])) hidden_layers.append(_get_activation_function(hidden_activation[-1])) # output layer output_layer = [nn.Dense(_get_num_units_by_shape(model, output_shape))] if output_activation is not None: output_layer.append(_get_activation_function(output_activation)) return nn.Sequential(input_layer + hidden_layers + output_layer) def gaussian_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, initial_log_std: float = 0, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a Gaussian model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param clip_actions: Flag to indicate whether the actions should be clipped (default: False) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: True) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation (default: -20) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation (default: 2) :type max_log_std: float, optional :param initial_log_std: Initial value for the log standard deviation (default: 0) :type initial_log_std: float, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). 
If None, the output layer will not be scaled :type output_scale: float, optional :return: Gaussian model instance :rtype: Model """ class GaussianModel(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) # override the hash method for Python versions prior to 3.8 to avoid the following error: # TypeError: Failed to hash Flax Module. The module probably contains unhashable attributes. if sys.version_info < (3, 8): def __hash__(self): return id(self) def setup(self): self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) self.log_std_parameter = self.param("log_std_parameter", lambda _: metadata["initial_log_std"] \ * jnp.ones(_get_num_units_by_shape(self, metadata["output_shape"]))) def __call__(self, inputs, role): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)) return output * self.instantiator_output_scale, self.log_std_parameter, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale, "initial_log_std": initial_log_std} return GaussianModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions, clip_log_std=clip_log_std, min_log_std=min_log_std, max_log_std=max_log_std) def deterministic_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, clip_actions: bool = False, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a deterministic model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: False) :type clip_actions: bool, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). If None, the output layer will not be scaled :type output_scale: float, optional :return: Deterministic model instance :rtype: Model """ class DeterministicModel(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) # override the hash method for Python versions prior to 3.8 to avoid the following error: # TypeError: Failed to hash Flax Module. The module probably contains unhashable attributes. if sys.version_info < (3, 8): def __hash__(self): return id(self) def setup(self): self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) def __call__(self, inputs, role): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)) return output * self.instantiator_output_scale, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale} return DeterministicModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions) def categorical_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, unnormalized_log_prob: bool = True, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = None) -> Model: """Instantiate a categorical model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). 
If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: True). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: None) :type output_activation: str or None, optional :return: Categorical model instance :rtype: Model """ class CategoricalModel(CategoricalMixin, Model): def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) CategoricalMixin.__init__(self, unnormalized_log_prob) # override the hash method for Python versions prior to 3.8 to avoid the following error: # TypeError: Failed to hash Flax Module. The module probably contains unhashable attributes. if sys.version_info < (3, 8): def __hash__(self): return id(self) def setup(self): self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"]) def __call__(self, inputs, role): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)) return output, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation} return CategoricalModel(observation_space=observation_space, action_space=action_space, device=device, unnormalized_log_prob=unnormalized_log_prob)
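A brief, hedged usage sketch (not part of the repository) of the instantiators defined above. The spaces and layer sizes are illustrative, and the snippet assumes `gaussian_model`, `deterministic_model` and `Shape` are in scope (the module's import path is outside this excerpt).

# Editor's illustrative sketch (not skrl code): building a JAX policy and critic
# with the instantiators above. Spaces and layer sizes are placeholders.
import gymnasium

observation_space = gymnasium.spaces.Box(low=-1.0, high=1.0, shape=(4,))
action_space = gymnasium.spaces.Box(low=-1.0, high=1.0, shape=(2,))

# Gaussian policy: reads states, outputs a tanh-activated mean plus a learnable log-std
policy = gaussian_model(observation_space=observation_space,
                        action_space=action_space,
                        clip_actions=True,
                        input_shape=Shape.STATES,
                        hiddens=[64, 64],
                        hidden_activation=["relu", "relu"],
                        output_shape=Shape.ACTIONS,
                        output_activation="tanh",
                        output_scale=1.0)

# Deterministic critic: concatenates states and actions, outputs a single value
critic = deterministic_model(observation_space=observation_space,
                             action_space=action_space,
                             input_shape=Shape.STATES_ACTIONS,
                             hiddens=[64, 64],
                             hidden_activation=["relu", "relu"],
                             output_shape=Shape.ONE,
                             output_activation=None)
# Note: when used inside an skrl agent, JAX models may require additional
# state-dict initialization; see the skrl documentation for the exact call.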
Toni-SM/skrl/skrl/memories/__init__.py
Toni-SM/skrl/skrl/memories/torch/base.py
from typing import List, Optional, Tuple, Union import csv import datetime import functools import operator import os import gym import gymnasium import numpy as np import torch from torch.utils.data.sampler import BatchSampler class Memory: def __init__(self, memory_size: int, num_envs: int = 1, device: Optional[Union[str, torch.device]] = None, export: bool = False, export_format: str = "pt", export_directory: str = "") -> None: """Base class representing a memory with circular buffers Buffers are torch tensors with shape (memory size, number of environments, data size). Circular buffers are implemented with two integers: a memory index and an environment index :param memory_size: Maximum number of elements in the first dimension of each internal storage :type memory_size: int :param num_envs: Number of parallel environments (default: ``1``) :type num_envs: int, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param export: Export the memory to a file (default: ``False``). If True, the memory will be exported when the memory is filled :type export: bool, optional :param export_format: Export format (default: ``"pt"``). Supported formats: torch (pt), numpy (np), comma separated values (csv) :type export_format: str, optional :param export_directory: Directory where the memory will be exported (default: ``""``). If empty, the agent's experiment directory will be used :type export_directory: str, optional :raises ValueError: The export format is not supported """ self.memory_size = memory_size self.num_envs = num_envs self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device) # internal variables self.filled = False self.env_index = 0 self.memory_index = 0 self.tensors = {} self.tensors_view = {} self.tensors_keep_dimensions = {} self.sampling_indexes = None self.all_sequence_indexes = np.concatenate([np.arange(i, memory_size * num_envs + i, num_envs) for i in range(num_envs)]) # exporting data self.export = export self.export_format = export_format self.export_directory = export_directory if not self.export_format in ["pt", "np", "csv"]: raise ValueError(f"Export format not supported ({self.export_format})") def __len__(self) -> int: """Compute and return the current (valid) size of the memory The valid size is calculated as the ``memory_size * num_envs`` if the memory is full (filled). Otherwise, the ``memory_index * num_envs + env_index`` is returned :return: Valid size :rtype: int """ return self.memory_size * self.num_envs if self.filled else self.memory_index * self.num_envs + self.env_index def _get_space_size(self, space: Union[int, Tuple[int], gym.Space, gymnasium.Space], keep_dimensions: bool = False) -> Union[Tuple, int]: """Get the size (number of elements) of a space :param space: Space or shape from which to obtain the number of elements :type space: int, tuple or list of integers, gym.Space, or gymnasium.Space :param keep_dimensions: Whether or not to keep the space dimensions (default: ``False``) :type keep_dimensions: bool, optional :raises ValueError: If the space is not supported :return: Size of the space. 
If ``keep_dimensions`` is True, the space size will be a tuple :rtype: int or tuple of int """ if type(space) in [int, float]: return (int(space),) if keep_dimensions else int(space) elif type(space) in [tuple, list]: return tuple(space) if keep_dimensions else np.prod(space) elif issubclass(type(space), gym.Space): if issubclass(type(space), gym.spaces.Discrete): return (1,) if keep_dimensions else 1 elif issubclass(type(space), gym.spaces.MultiDiscrete): return space.nvec.shape[0] elif issubclass(type(space), gym.spaces.Box): return tuple(space.shape) if keep_dimensions else np.prod(space.shape) elif issubclass(type(space), gym.spaces.Dict): if keep_dimensions: raise ValueError("keep_dimensions=True cannot be used with Dict spaces") return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) elif issubclass(type(space), gymnasium.Space): if issubclass(type(space), gymnasium.spaces.Discrete): return (1,) if keep_dimensions else 1 elif issubclass(type(space), gymnasium.spaces.MultiDiscrete): return space.nvec.shape[0] elif issubclass(type(space), gymnasium.spaces.Box): return tuple(space.shape) if keep_dimensions else np.prod(space.shape) elif issubclass(type(space), gymnasium.spaces.Dict): if keep_dimensions: raise ValueError("keep_dimensions=True cannot be used with Dict spaces") return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) raise ValueError(f"Space type {type(space)} not supported") def share_memory(self) -> None: """Share the tensors between processes """ for tensor in self.tensors.values(): if not tensor.is_cuda: tensor.share_memory_() def get_tensor_names(self) -> Tuple[str]: """Get the name of the internal tensors in alphabetical order :return: Tensor names without internal prefix (_tensor_) :rtype: tuple of strings """ return sorted(self.tensors.keys()) def get_tensor_by_name(self, name: str, keepdim: bool = True) -> torch.Tensor: """Get a tensor by its name :param name: Name of the tensor to retrieve :type name: str :param keepdim: Keep the tensor's shape (memory size, number of environments, size) (default: ``True``) If False, the returned tensor will have a shape of (memory size * number of environments, size) :type keepdim: bool, optional :raises KeyError: The tensor does not exist :return: Tensor :rtype: torch.Tensor """ return self.tensors[name] if keepdim else self.tensors_view[name] def set_tensor_by_name(self, name: str, tensor: torch.Tensor) -> None: """Set a tensor by its name :param name: Name of the tensor to set :type name: str :param tensor: Tensor to set :type tensor: torch.Tensor :raises KeyError: The tensor does not exist """ with torch.no_grad(): self.tensors[name].copy_(tensor) def create_tensor(self, name: str, size: Union[int, Tuple[int], gym.Space, gymnasium.Space], dtype: Optional[torch.dtype] = None, keep_dimensions: bool = False) -> bool: """Create a new internal tensor in memory The tensor will have a 3-components shape (memory size, number of environments, size). The internal representation will use _tensor_<name> as the name of the class property :param name: Tensor name (the name has to follow the python PEP 8 style) :type name: str :param size: Number of elements in the last dimension (effective data size). The product of the elements will be computed for sequences or gym/gymnasium spaces :type size: int, tuple or list of integers, gym.Space, or gymnasium.Space :param dtype: Data type (torch.dtype) (default: ``None``). 
If None, the global default torch data type will be used :type dtype: torch.dtype or None, optional :param keep_dimensions: Whether or not to keep the dimensions defined through the size parameter (default: ``False``) :type keep_dimensions: bool, optional :raises ValueError: The tensor name exists already but the size or dtype are different :return: True if the tensor was created, otherwise False :rtype: bool """ # compute data size size = self._get_space_size(size, keep_dimensions) # check dtype and size if the tensor exists if name in self.tensors: tensor = self.tensors[name] if tensor.size(-1) != size: raise ValueError(f"Size of tensor {name} ({size}) doesn't match the existing one ({tensor.size(-1)})") if dtype is not None and tensor.dtype != dtype: raise ValueError(f"Dtype of tensor {name} ({dtype}) doesn't match the existing one ({tensor.dtype})") return False # define tensor shape tensor_shape = (self.memory_size, self.num_envs, *size) if keep_dimensions else (self.memory_size, self.num_envs, size) view_shape = (-1, *size) if keep_dimensions else (-1, size) # create tensor (_tensor_<name>) and add it to the internal storage setattr(self, f"_tensor_{name}", torch.zeros(tensor_shape, device=self.device, dtype=dtype)) # update internal variables self.tensors[name] = getattr(self, f"_tensor_{name}") self.tensors_view[name] = self.tensors[name].view(*view_shape) self.tensors_keep_dimensions[name] = keep_dimensions # fill the tensors (float tensors) with NaN for tensor in self.tensors.values(): if torch.is_floating_point(tensor): tensor.fill_(float("nan")) return True def reset(self) -> None: """Reset the memory by cleaning internal indexes and flags Old data will be retained until overwritten, but access through the available methods will not be guaranteed Default values of the internal indexes and flags - filled: False - env_index: 0 - memory_index: 0 """ self.filled = False self.env_index = 0 self.memory_index = 0 def add_samples(self, **tensors: torch.Tensor) -> None: """Record samples in memory Samples should be a tensor with 2-components shape (number of environments, data size). All tensors must be of the same shape According to the number of environments, the following classification is made: - one environment: Store a single sample (tensors with one dimension) and increment the environment index (second index) by one - number of environments less than num_envs: Store the samples and increment the environment index (second index) by the number of the environments - number of environments equals num_envs: Store the samples and increment the memory index (first index) by one :param tensors: Sampled data as key-value arguments where the keys are the names of the tensors to be modified. Non-existing tensors will be skipped :type tensors: dict :raises ValueError: No tensors were provided or the tensors have incompatible shapes """ if not tensors: raise ValueError("No samples to be recorded in memory. 
Pass samples as key-value arguments (where key is the tensor name)") # dimensions and shapes of the tensors (assume all tensors have the dimensions of the first tensor) tmp = tensors.get("states", tensors[next(iter(tensors))]) # ask for states first dim, shape = tmp.ndim, tmp.shape # multi environment (number of environments equals num_envs) if dim == 2 and shape[0] == self.num_envs: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name][self.memory_index].copy_(tensor) self.memory_index += 1 # multi environment (number of environments less than num_envs) elif dim == 2 and shape[0] < self.num_envs: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name][self.memory_index, self.env_index:self.env_index + tensor.shape[0]].copy_(tensor) self.env_index += tensor.shape[0] # single environment - multi sample (number of environments greater than num_envs (num_envs = 1)) elif dim == 2 and self.num_envs == 1: for name, tensor in tensors.items(): if name in self.tensors: num_samples = min(shape[0], self.memory_size - self.memory_index) remaining_samples = shape[0] - num_samples # copy the first n samples self.tensors[name][self.memory_index:self.memory_index + num_samples].copy_(tensor[:num_samples].unsqueeze(dim=1)) self.memory_index += num_samples # storage remaining samples if remaining_samples > 0: self.tensors[name][:remaining_samples].copy_(tensor[num_samples:].unsqueeze(dim=1)) self.memory_index = remaining_samples # single environment elif dim == 1: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name][self.memory_index, self.env_index].copy_(tensor) self.env_index += 1 else: raise ValueError(f"Expected shape (number of environments = {self.num_envs}, data size), got {shape}") # update indexes and flags if self.env_index >= self.num_envs: self.env_index = 0 self.memory_index += 1 if self.memory_index >= self.memory_size: self.memory_index = 0 self.filled = True # export tensors to file if self.export: self.save(directory=self.export_directory, format=self.export_format) def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1, sequence_length: int = 1) -> List[List[torch.Tensor]]: """Data sampling method to be implemented by the inheriting classes :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param batch_size: Number of element to sample :type batch_size: int :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :param sequence_length: Length of each sequence (default: ``1``) :type sequence_length: int, optional :raises NotImplementedError: The method has not been implemented :return: Sampled data from tensors sorted according to their position in the list of names. 
The sampled tensors will have the following shape: (batch size, data size) :rtype: list of torch.Tensor list """ raise NotImplementedError("The sampling method (.sample()) is not implemented") def sample_by_index(self, names: Tuple[str], indexes: Union[tuple, np.ndarray, torch.Tensor], mini_batches: int = 1) -> List[List[torch.Tensor]]: """Sample data from memory according to their indexes :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param indexes: Indexes used for sampling :type indexes: tuple or list, numpy.ndarray or torch.Tensor :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (number of indexes, data size) :rtype: list of torch.Tensor list """ if mini_batches > 1: batches = BatchSampler(indexes, batch_size=len(indexes) // mini_batches, drop_last=True) return [[self.tensors_view[name][batch] for name in names] for batch in batches] return [[self.tensors_view[name][indexes] for name in names]] def sample_all(self, names: Tuple[str], mini_batches: int = 1, sequence_length: int = 1) -> List[List[torch.Tensor]]: """Sample all data from memory :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :param sequence_length: Length of each sequence (default: ``1``) :type sequence_length: int, optional :return: Sampled data from memory. The sampled tensors will have the following shape: (memory size * number of environments, data size) :rtype: list of torch.Tensor list """ # sequential order if sequence_length > 1: if mini_batches > 1: batches = BatchSampler(self.all_sequence_indexes, batch_size=len(self.all_sequence_indexes) // mini_batches, drop_last=True) return [[self.tensors_view[name][batch] for name in names] for batch in batches] return [[self.tensors_view[name][self.all_sequence_indexes] for name in names]] # default order if mini_batches > 1: indexes = np.arange(self.memory_size * self.num_envs) batches = BatchSampler(indexes, batch_size=len(indexes) // mini_batches, drop_last=True) return [[self.tensors_view[name][batch] for name in names] for batch in batches] return [[self.tensors_view[name] for name in names]] def get_sampling_indexes(self) -> Union[tuple, np.ndarray, torch.Tensor]: """Get the last indexes used for sampling :return: Last sampling indexes :rtype: tuple or list, numpy.ndarray or torch.Tensor """ return self.sampling_indexes def save(self, directory: str = "", format: str = "pt") -> None: """Save the memory to a file Supported formats: - PyTorch (pt) - NumPy (npz) - Comma-separated values (csv) :param directory: Path to the folder where the memory will be saved. 
If not provided, the directory defined in the constructor will be used :type directory: str :param format: Format of the file where the memory will be saved (default: ``"pt"``) :type format: str, optional :raises ValueError: If the format is not supported """ if not directory: directory = self.export_directory os.makedirs(os.path.join(directory, "memories"), exist_ok=True) memory_path = os.path.join(directory, "memories", \ "{}_memory_{}.{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), hex(id(self)), format)) # torch if format == "pt": torch.save({name: self.tensors[name] for name in self.get_tensor_names()}, memory_path) # numpy elif format == "npz": np.savez(memory_path, **{name: self.tensors[name].cpu().numpy() for name in self.get_tensor_names()}) # comma-separated values elif format == "csv": # open csv writer # TODO: support keeping the dimensions with open(memory_path, "a") as file: writer = csv.writer(file) names = self.get_tensor_names() # write headers headers = [[f"{name}.{i}" for i in range(self.tensors_view[name].shape[-1])] for name in names] writer.writerow([item for sublist in headers for item in sublist]) # write rows for i in range(len(self)): writer.writerow(functools.reduce(operator.iconcat, [self.tensors_view[name][i].tolist() for name in names], [])) # unsupported format else: raise ValueError(f"Unsupported format: {format}. Available formats: pt, csv, npz") def load(self, path: str) -> None: """Load the memory from a file Supported formats: - PyTorch (pt) - NumPy (npz) - Comma-separated values (csv) :param path: Path to the file where the memory will be loaded :type path: str :raises ValueError: If the format is not supported """ # torch if path.endswith(".pt"): data = torch.load(path) for name in self.get_tensor_names(): setattr(self, f"_tensor_{name}", data[name]) # numpy elif path.endswith(".npz"): data = np.load(path) for name in data: setattr(self, f"_tensor_{name}", torch.tensor(data[name])) # comma-separated values elif path.endswith(".csv"): # TODO: load the memory from a csv pass # unsupported format else: raise ValueError(f"Unsupported format: {path}")
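A short, hedged usage sketch (not part of the repository) of the circular buffer above. Concrete subclasses such as RandomMemory (later in this section) supply the sample() implementation, but tensor creation, sample recording and full retrieval work on the base class directly; values and shapes are illustrative.

# Editor's illustrative sketch (not skrl code)
import torch

from skrl.memories.torch import Memory

memory = Memory(memory_size=16, num_envs=4, device="cpu")

# one internal tensor per transition field (sizes may be ints, tuples or gym/gymnasium spaces)
memory.create_tensor(name="states", size=3, dtype=torch.float32)
memory.create_tensor(name="actions", size=1, dtype=torch.float32)
memory.create_tensor(name="rewards", size=1, dtype=torch.float32)

# record one step for all 4 environments: tensors of shape (num_envs, data size)
memory.add_samples(states=torch.randn(4, 3),
                   actions=torch.randn(4, 1),
                   rewards=torch.randn(4, 1))

# retrieve flat (memory_size * num_envs, data size) views; unwritten slots remain NaN-filled
states, actions, rewards = memory.sample_all(names=["states", "actions", "rewards"])[0]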
Toni-SM/skrl/skrl/memories/torch/__init__.py
from skrl.memories.torch.base import Memory  # isort:skip
from skrl.memories.torch.random import RandomMemory

Toni-SM/skrl/skrl/memories/torch/random.py
from typing import List, Optional, Tuple, Union import torch from skrl.memories.torch import Memory class RandomMemory(Memory): def __init__(self, memory_size: int, num_envs: int = 1, device: Optional[Union[str, torch.device]] = None, export: bool = False, export_format: str = "pt", export_directory: str = "", replacement=True) -> None: """Random sampling memory Sample a batch from memory randomly :param memory_size: Maximum number of elements in the first dimension of each internal storage :type memory_size: int :param num_envs: Number of parallel environments (default: ``1``) :type num_envs: int, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param export: Export the memory to a file (default: ``False``). If True, the memory will be exported when the memory is filled :type export: bool, optional :param export_format: Export format (default: ``"pt"``). Supported formats: torch (pt), numpy (np), comma separated values (csv) :type export_format: str, optional :param export_directory: Directory where the memory will be exported (default: ``""``). If empty, the agent's experiment directory will be used :type export_directory: str, optional :param replacement: Flag to indicate whether the sample is with or without replacement (default: ``True``). Replacement implies that a value can be selected multiple times (the batch size is always guaranteed). Sampling without replacement will return a batch of maximum memory size if the memory size is less than the requested batch size :type replacement: bool, optional :raises ValueError: The export format is not supported """ super().__init__(memory_size, num_envs, device, export, export_format, export_directory) self._replacement = replacement def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1, sequence_length: int = 1) -> List[List[torch.Tensor]]: """Sample a batch from memory randomly :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param batch_size: Number of element to sample :type batch_size: int :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :param sequence_length: Length of each sequence (default: ``1``) :type sequence_length: int, optional :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (batch size, data size) :rtype: list of torch.Tensor list """ # compute valid memory sizes size = len(self) if sequence_length > 1: sequence_indexes = torch.arange(0, self.num_envs * sequence_length, self.num_envs) size -= sequence_indexes[-1].item() # generate random indexes if self._replacement: indexes = torch.randint(0, size, (batch_size,)) else: # details about the random sampling performance can be found here: # https://discuss.pytorch.org/t/torch-equivalent-of-numpy-random-choice/16146/19 indexes = torch.randperm(size, dtype=torch.long)[:batch_size] # generate sequence indexes if sequence_length > 1: indexes = (sequence_indexes.repeat(indexes.shape[0], 1) + indexes.view(-1, 1)).view(-1) self.sampling_indexes = indexes return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)
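A hedged sketch (not part of the repository) of random batch sampling with the class above; shapes and sizes are illustrative.

# Editor's illustrative sketch (not skrl code)
import torch

from skrl.memories.torch import RandomMemory

memory = RandomMemory(memory_size=100, num_envs=2, device="cpu", replacement=False)
memory.create_tensor(name="states", size=4, dtype=torch.float32)
memory.create_tensor(name="actions", size=2, dtype=torch.float32)

# fill the buffer with (num_envs, data size) samples
for _ in range(100):
    memory.add_samples(states=torch.randn(2, 4), actions=torch.randn(2, 2))

# sample 32 transitions split into 2 mini-batches of 16, ordered as in `names`
for states, actions in memory.sample(names=["states", "actions"], batch_size=32, mini_batches=2):
    print(states.shape, actions.shape)  # torch.Size([16, 4]) torch.Size([16, 2])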
Toni-SM/skrl/skrl/memories/jax/base.py
from typing import List, Mapping, Optional, Tuple, Union import csv import datetime import functools import operator import os import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl import config # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _copyto(dst, src): """NumPy function <function copyto at 0x7f804ee03430> not yet implemented """ return dst.at[:].set(src) @jax.jit def _copyto_i(dst, src, i): return dst.at[i].set(src) @jax.jit def _copyto_i_j(dst, src, i, j): return dst.at[i, j].set(src) class Memory: def __init__(self, memory_size: int, num_envs: int = 1, device: Optional[jax.Device] = None, export: bool = False, export_format: str = "pt", # TODO: set default format for jax export_directory: str = "") -> None: """Base class representing a memory with circular buffers Buffers are jax or numpy arrays with shape (memory size, number of environments, data size). Circular buffers are implemented with two integers: a memory index and an environment index :param memory_size: Maximum number of elements in the first dimension of each internal storage :type memory_size: int :param num_envs: Number of parallel environments (default: ``1``) :type num_envs: int, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param export: Export the memory to a file (default: ``False``). If True, the memory will be exported when the memory is filled :type export: bool, optional :param export_format: Export format (default: ``"pt"``). Supported formats: torch (pt), numpy (np), comma separated values (csv) :type export_format: str, optional :param export_directory: Directory where the memory will be exported (default: ``""``). If empty, the agent's experiment directory will be used :type export_directory: str, optional :raises ValueError: The export format is not supported """ self._jax = config.jax.backend == "jax" self.memory_size = memory_size self.num_envs = num_envs if device is None: self.device = jax.devices()[0] else: self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0] # internal variables self.filled = False self.env_index = 0 self.memory_index = 0 self.tensors = {} self.tensors_view = {} self.tensors_keep_dimensions = {} self._views = True # whether the views are not array copies self.sampling_indexes = None self.all_sequence_indexes = np.concatenate([np.arange(i, memory_size * num_envs + i, num_envs) for i in range(num_envs)]) # exporting data self.export = export self.export_format = export_format self.export_directory = export_directory if not self.export_format in ["pt", "np", "csv"]: raise ValueError(f"Export format not supported ({self.export_format})") def __len__(self) -> int: """Compute and return the current (valid) size of the memory The valid size is calculated as the ``memory_size * num_envs`` if the memory is full (filled). 
Otherwise, the ``memory_index * num_envs + env_index`` is returned :return: Valid size :rtype: int """ return self.memory_size * self.num_envs if self.filled else self.memory_index * self.num_envs + self.env_index def _get_space_size(self, space: Union[int, Tuple[int], gym.Space, gymnasium.Space], keep_dimensions: bool = False) -> Union[Tuple, int]: """Get the size (number of elements) of a space :param space: Space or shape from which to obtain the number of elements :type space: int, tuple or list of integers, gym.Space, or gymnasium.Space :param keep_dimensions: Whether or not to keep the space dimensions (default: ``False``) :type keep_dimensions: bool, optional :raises ValueError: If the space is not supported :return: Size of the space. If ``keep_dimensions`` is True, the space size will be a tuple :rtype: int or tuple of int """ if type(space) in [int, float]: return (int(space),) if keep_dimensions else int(space) elif type(space) in [tuple, list]: return tuple(space) if keep_dimensions else np.prod(space) elif issubclass(type(space), gym.Space): if issubclass(type(space), gym.spaces.Discrete): return (1,) if keep_dimensions else 1 elif issubclass(type(space), gym.spaces.MultiDiscrete): return space.nvec.shape[0] elif issubclass(type(space), gym.spaces.Box): return tuple(space.shape) if keep_dimensions else np.prod(space.shape) elif issubclass(type(space), gym.spaces.Dict): if keep_dimensions: raise ValueError("keep_dimensions=True cannot be used with Dict spaces") return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) elif issubclass(type(space), gymnasium.Space): if issubclass(type(space), gymnasium.spaces.Discrete): return (1,) if keep_dimensions else 1 elif issubclass(type(space), gymnasium.spaces.MultiDiscrete): return space.nvec.shape[0] elif issubclass(type(space), gymnasium.spaces.Box): return tuple(space.shape) if keep_dimensions else np.prod(space.shape) elif issubclass(type(space), gymnasium.spaces.Dict): if keep_dimensions: raise ValueError("keep_dimensions=True cannot be used with Dict spaces") return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) raise ValueError(f"Space type {type(space)} not supported") def _get_tensors_view(self, name): return self.tensors_view[name] if self._views else self.tensors[name].reshape(-1, self.tensors[name].shape[-1]) def share_memory(self) -> None: """Share the tensors between processes """ for tensor in self.tensors.values(): pass def get_tensor_names(self) -> Tuple[str]: """Get the name of the internal tensors in alphabetical order :return: Tensor names without internal prefix (_tensor_) :rtype: tuple of strings """ return sorted(self.tensors.keys()) def get_tensor_by_name(self, name: str, keepdim: bool = True) -> Union[np.ndarray, jax.Array]: """Get a tensor by its name :param name: Name of the tensor to retrieve :type name: str :param keepdim: Keep the tensor's shape (memory size, number of environments, size) (default: ``True``) If False, the returned tensor will have a shape of (memory size * number of environments, size) :type keepdim: bool, optional :raises KeyError: The tensor does not exist :return: Tensor :rtype: np.ndarray or jax.Array """ return self.tensors[name] if keepdim else self._get_tensors_view(name) def set_tensor_by_name(self, name: str, tensor: Union[np.ndarray, jax.Array]) -> None: """Set a tensor by its name :param name: Name of the tensor to set :type name: str :param tensor: Tensor to set :type tensor: np.ndarray or jax.Array :raises KeyError: The tensor does 
not exist """ if self._jax: self.tensors[name] = _copyto(self.tensors[name], tensor) else: np.copyto(self.tensors[name], tensor) def create_tensor(self, name: str, size: Union[int, Tuple[int], gym.Space, gymnasium.Space], dtype: Optional[np.dtype] = None, keep_dimensions: bool = False) -> bool: """Create a new internal tensor in memory The tensor will have a 3-components shape (memory size, number of environments, size). The internal representation will use _tensor_<name> as the name of the class property :param name: Tensor name (the name has to follow the python PEP 8 style) :type name: str :param size: Number of elements in the last dimension (effective data size). The product of the elements will be computed for sequences or gym/gymnasium spaces :type size: int, tuple or list of integers or gym.Space :param dtype: Data type (np.dtype) (default: ``None``). If None, the global default jax.numpy.float32 data type will be used :type dtype: np.dtype or None, optional :param keep_dimensions: Whether or not to keep the dimensions defined through the size parameter (default: ``False``) :type keep_dimensions: bool, optional :raises ValueError: The tensor name exists already but the size or dtype are different :return: True if the tensor was created, otherwise False :rtype: bool """ # compute data size size = self._get_space_size(size, keep_dimensions) # check dtype and size if the tensor exists if name in self.tensors: tensor = self.tensors[name] if tensor.shape[-1] != size: raise ValueError(f"Size of tensor {name} ({size}) doesn't match the existing one ({tensor.shape[-1]})") if dtype is not None and tensor.dtype != dtype: raise ValueError(f"Dtype of tensor {name} ({dtype}) doesn't match the existing one ({tensor.dtype})") return False # define tensor shape tensor_shape = (self.memory_size, self.num_envs, *size) if keep_dimensions else (self.memory_size, self.num_envs, size) view_shape = (-1, *size) if keep_dimensions else (-1, size) # create tensor (_tensor_<name>) and add it to the internal storage if self._jax: setattr(self, f"_tensor_{name}", jnp.zeros(tensor_shape, dtype=dtype)) else: setattr(self, f"_tensor_{name}", np.zeros(tensor_shape, dtype=dtype)) # update internal variables self.tensors[name] = getattr(self, f"_tensor_{name}") self.tensors_view[name] = self.tensors[name].reshape(*view_shape) self.tensors_keep_dimensions[name] = keep_dimensions # fill the tensors (float tensors) with NaN for name, tensor in self.tensors.items(): if tensor.dtype == np.float32 or tensor.dtype == np.float64: if self._jax: self.tensors[name] = _copyto(self.tensors[name], float("nan")) else: self.tensors[name].fill(float("nan")) # check views if self._jax: self._views = False # TODO: check if views are available else: self._views = self._views and self.tensors_view[name].base is self.tensors[name] return True def reset(self) -> None: """Reset the memory by cleaning internal indexes and flags Old data will be retained until overwritten, but access through the available methods will not be guaranteed Default values of the internal indexes and flags - filled: False - env_index: 0 - memory_index: 0 """ self.filled = False self.env_index = 0 self.memory_index = 0 def add_samples(self, **tensors: Mapping[str, Union[np.ndarray, jax.Array]]) -> None: """Record samples in memory Samples should be a tensor with 2-components shape (number of environments, data size). 
All tensors must be of the same shape According to the number of environments, the following classification is made: - one environment: Store a single sample (tensors with one dimension) and increment the environment index (second index) by one - number of environments less than num_envs: Store the samples and increment the environment index (second index) by the number of the environments - number of environments equals num_envs: Store the samples and increment the memory index (first index) by one :param tensors: Sampled data as key-value arguments where the keys are the names of the tensors to be modified. Non-existing tensors will be skipped :type tensors: dict :raises ValueError: No tensors were provided or the tensors have incompatible shapes """ if not tensors: raise ValueError("No samples to be recorded in memory. Pass samples as key-value arguments (where key is the tensor name)") # dimensions and shapes of the tensors (assume all tensors have the dimensions of the first tensor) tmp = tensors.get("states", tensors[next(iter(tensors))]) # ask for states first dim, shape = tmp.ndim, tmp.shape # multi environment (number of environments equals num_envs) if dim == 2 and shape[0] == self.num_envs: if self._jax: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name] = _copyto_i(self.tensors[name], tensor, self.memory_index) else: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name][self.memory_index] = tensor self.memory_index += 1 # multi environment (number of environments less than num_envs) elif dim == 2 and shape[0] < self.num_envs: raise NotImplementedError # TODO: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name] = self.tensors[name].at[self.memory_index, self.env_index:self.env_index + tensor.shape[0]].set(tensor) self.env_index += tensor.shape[0] # single environment - multi sample (number of environments greater than num_envs (num_envs = 1)) elif dim == 2 and self.num_envs == 1: raise NotImplementedError # TODO: for name, tensor in tensors.items(): if name in self.tensors: num_samples = min(shape[0], self.memory_size - self.memory_index) remaining_samples = shape[0] - num_samples # copy the first n samples self.tensors[name] = self.tensors[name].at[self.memory_index:self.memory_index + num_samples].set(tensor[:num_samples].unsqueeze(dim=1)) self.memory_index += num_samples # storage remaining samples if remaining_samples > 0: self.tensors[name] = self.tensors[name].at[:remaining_samples].set(tensor[num_samples:].unsqueeze(dim=1)) self.memory_index = remaining_samples # single environment elif dim == 1: if self._jax: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name] = _copyto_i_j(self.tensors[name], tensor, self.memory_index, self.env_index) else: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name][self.memory_index, self.env_index] = tensor self.env_index += 1 else: raise ValueError(f"Expected shape (number of environments = {self.num_envs}, data size), got {shape}") # update indexes and flags if self.env_index >= self.num_envs: self.env_index = 0 self.memory_index += 1 if self.memory_index >= self.memory_size: self.memory_index = 0 self.filled = True # export tensors to file if self.export: self.save(directory=self.export_directory, format=self.export_format) def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1, sequence_length: int = 1) -> List[List[Union[np.ndarray, jax.Array]]]: """Data sampling method to be 
implemented by the inheriting classes :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param batch_size: Number of element to sample :type batch_size: int :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :param sequence_length: Length of each sequence (default: ``1``) :type sequence_length: int, optional :raises NotImplementedError: The method has not been implemented :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (batch size, data size) :rtype: list of np.ndarray or jax.Array list """ raise NotImplementedError("The sampling method (.sample()) is not implemented") def sample_by_index(self, names: Tuple[str], indexes: Union[tuple, np.ndarray, jax.Array], mini_batches: int = 1) -> List[List[Union[np.ndarray, jax.Array]]]: """Sample data from memory according to their indexes :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param indexes: Indexes used for sampling :type indexes: tuple or list, np.ndarray or jax.Array :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (number of indexes, data size) :rtype: list of np.ndarray or jax.Array list """ if mini_batches > 1: batches = np.array_split(indexes, mini_batches) views = [self._get_tensors_view(name) for name in names] return [[view[batch] for view in views] for batch in batches] return [[self._get_tensors_view(name)[indexes] for name in names]] def sample_all(self, names: Tuple[str], mini_batches: int = 1, sequence_length: int = 1) -> List[List[Union[np.ndarray, jax.Array]]]: """Sample all data from memory :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :param sequence_length: Length of each sequence (default: ``1``) :type sequence_length: int, optional :return: Sampled data from memory. The sampled tensors will have the following shape: (memory size * number of environments, data size) :rtype: list of np.ndarray or jax.Array list """ # sequential order if sequence_length > 1: if mini_batches > 1: batches = np.array_split(self.all_sequence_indexes, len(self.all_sequence_indexes) // mini_batches) return [[self._get_tensors_view(name)[batch] for name in names] for batch in batches] return [[self._get_tensors_view(name)[self.all_sequence_indexes] for name in names]] # default order if mini_batches > 1: indexes = np.arange(self.memory_size * self.num_envs) batches = np.array_split(indexes, mini_batches) views = [self._get_tensors_view(name) for name in names] return [[view[batch] for view in views] for batch in batches] return [[self._get_tensors_view(name) for name in names]] def get_sampling_indexes(self) -> Union[tuple, np.ndarray, jax.Array]: """Get the last indexes used for sampling :return: Last sampling indexes :rtype: tuple or list, np.ndarray or jax.Array """ return self.sampling_indexes def save(self, directory: str = "", format: str = "pt") -> None: """Save the memory to a file Supported formats: - PyTorch (pt) - NumPy (npz) - Comma-separated values (csv) :param directory: Path to the folder where the memory will be saved. 
If not provided, the directory defined in the constructor will be used :type directory: str :param format: Format of the file where the memory will be saved (default: ``"pt"``) :type format: str, optional :raises ValueError: If the format is not supported """ if not directory: directory = self.export_directory os.makedirs(os.path.join(directory, "memories"), exist_ok=True) memory_path = os.path.join(directory, "memories", \ "{}_memory_{}.{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), hex(id(self)), format)) # torch if format == "pt": import torch torch.save({name: self.tensors[name] for name in self.get_tensor_names()}, memory_path) # numpy elif format == "npz": np.savez(memory_path, **{name: self.tensors[name].cpu().numpy() for name in self.get_tensor_names()}) # comma-separated values elif format == "csv": # open csv writer # TODO: support keeping the dimensions with open(memory_path, "a") as file: writer = csv.writer(file) names = self.get_tensor_names() # write headers headers = [[f"{name}.{i}" for i in range(self.tensors[name].shape[-1])] for name in names] writer.writerow([item for sublist in headers for item in sublist]) # write rows for i in range(len(self)): writer.writerow(functools.reduce(operator.iconcat, [self.tensors[name].reshape(-1, self.tensors[name].shape[-1])[i].tolist() for name in names], [])) # unsupported format else: raise ValueError(f"Unsupported format: {format}. Available formats: pt, csv, npz") def load(self, path: str) -> None: """Load the memory from a file Supported formats: - PyTorch (pt) - NumPy (npz) - Comma-separated values (csv) :param path: Path to the file where the memory will be loaded :type path: str :raises ValueError: If the format is not supported """ # torch if path.endswith(".pt"): import torch data = torch.load(path) for name in self.get_tensor_names(): setattr(self, f"_tensor_{name}", jnp.array(data[name].cpu().numpy())) # numpy elif path.endswith(".npz"): data = np.load(path) for name in data: setattr(self, f"_tensor_{name}", jnp.array(data[name])) # comma-separated values elif path.endswith(".csv"): # TODO: load the memory from a csv pass # unsupported format else: raise ValueError(f"Unsupported format: {path}")
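A hedged sketch (not part of the repository) of the JAX memory above. It assumes skrl's `config.jax.backend` switch, which the constructor reads to decide between jax.numpy and NumPy storage, can be set before the memory is created; values are illustrative.

# Editor's illustrative sketch (not skrl code)
import numpy as np

from skrl import config
config.jax.backend = "numpy"  # assumption: "numpy" or "jax"; selects the array backend read in __init__

from skrl.memories.jax import Memory

memory = Memory(memory_size=8, num_envs=2)
memory.create_tensor(name="states", size=3, dtype=np.float32)

# record one step for both environments
memory.add_samples(states=np.ones((2, 3), dtype=np.float32))

states = memory.get_tensor_by_name("states")                # shape: (8, 2, 3)
flat = memory.get_tensor_by_name("states", keepdim=False)   # shape: (16, 3)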
Toni-SM/skrl/skrl/memories/jax/__init__.py
from skrl.memories.jax.base import Memory  # isort:skip
from skrl.memories.jax.random import RandomMemory
Toni-SM/skrl/skrl/memories/jax/random.py
from typing import List, Optional, Tuple import jax import numpy as np from skrl.memories.jax import Memory class RandomMemory(Memory): def __init__(self, memory_size: int, num_envs: int = 1, device: Optional[jax.Device] = None, export: bool = False, export_format: str = "pt", export_directory: str = "", replacement=True) -> None: """Random sampling memory Sample a batch from memory randomly :param memory_size: Maximum number of elements in the first dimension of each internal storage :type memory_size: int :param num_envs: Number of parallel environments (default: ``1``) :type num_envs: int, optional :param device: Device on which an array is or will be allocated (default: ``None``) :type device: jax.Device, optional :param export: Export the memory to a file (default: ``False``). If True, the memory will be exported when the memory is filled :type export: bool, optional :param export_format: Export format (default: ``"pt"``). Supported formats: torch (pt), numpy (np), comma separated values (csv) :type export_format: str, optional :param export_directory: Directory where the memory will be exported (default: ``""``). If empty, the agent's experiment directory will be used :type export_directory: str, optional :param replacement: Flag to indicate whether the sample is with or without replacement (default: ``True``). Replacement implies that a value can be selected multiple times (the batch size is always guaranteed). Sampling without replacement will return a batch of maximum memory size if the memory size is less than the requested batch size :type replacement: bool, optional :raises ValueError: The export format is not supported """ super().__init__(memory_size, num_envs, device, export, export_format, export_directory) self._replacement = replacement def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1) -> List[List[jax.Array]]: """Sample a batch from memory randomly :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param batch_size: Number of element to sample :type batch_size: int :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (batch size, data size) :rtype: list of jax.Array list """ # generate random indexes if self._replacement: indexes = np.random.randint(0, len(self), (batch_size,)) else: indexes = np.random.permutation(len(self))[:batch_size] return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)
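A brief, hedged sketch mirroring the PyTorch example earlier; note that this JAX variant draws indexes with NumPy and its sample() does not take a sequence_length argument. Backend selection via `config.jax.backend` is an assumption, as above.

# Editor's illustrative sketch (not skrl code)
import numpy as np

from skrl import config
config.jax.backend = "numpy"  # assumption; "jax" would store jax.numpy arrays instead

from skrl.memories.jax import RandomMemory

memory = RandomMemory(memory_size=50, num_envs=1, replacement=True)
memory.create_tensor(name="states", size=4, dtype=np.float32)
memory.create_tensor(name="rewards", size=1, dtype=np.float32)

# single-environment recording: 1-D arrays of shape (data size,)
for _ in range(50):
    memory.add_samples(states=np.random.randn(4).astype(np.float32),
                       rewards=np.random.randn(1).astype(np.float32))

[[states, rewards]] = memory.sample(names=["states", "rewards"], batch_size=16)
print(states.shape, rewards.shape)  # (16, 4) (16, 1)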
Toni-SM/skrl/tests/test_agents.py
import warnings

import hypothesis
import hypothesis.strategies as st
import pytest
import torch

from skrl.agents.torch import Agent
from skrl.agents.torch.a2c import A2C
from skrl.agents.torch.amp import AMP
from skrl.agents.torch.cem import CEM
from skrl.agents.torch.ddpg import DDPG
from skrl.agents.torch.dqn import DDQN, DQN
from skrl.agents.torch.ppo import PPO
from skrl.agents.torch.q_learning import Q_LEARNING
from skrl.agents.torch.sac import SAC
from skrl.agents.torch.sarsa import SARSA
from skrl.agents.torch.td3 import TD3
from skrl.agents.torch.trpo import TRPO

from .utils import DummyModel


@pytest.fixture
def classes_and_kwargs():
    return [(A2C, {"models": {"policy": DummyModel()}}),
            (AMP, {"models": {"policy": DummyModel()}}),
            (CEM, {"models": {"policy": DummyModel()}}),
            (DDPG, {"models": {"policy": DummyModel()}}),
            (DQN, {"models": {"policy": DummyModel()}}),
            (DDQN, {"models": {"policy": DummyModel()}}),
            (PPO, {"models": {"policy": DummyModel()}}),
            (Q_LEARNING, {"models": {"policy": DummyModel()}}),
            (SAC, {"models": {"policy": DummyModel()}}),
            (SARSA, {"models": {"policy": DummyModel()}}),
            (TD3, {"models": {"policy": DummyModel()}}),
            (TRPO, {"models": {"policy": DummyModel()}})]


def test_agent(capsys, classes_and_kwargs):
    for klass, kwargs in classes_and_kwargs:
        cfg = {"learning_starts": 1,
               "experiment": {"write_interval": 0}}
        agent: Agent = klass(cfg=cfg, **kwargs)

        agent.init()
        agent.pre_interaction(timestep=0, timesteps=1)
        # agent.act(None, timestep=0, timesteps=1)
        agent.record_transition(states=torch.tensor([]),
                                actions=torch.tensor([]),
                                rewards=torch.tensor([]),
                                next_states=torch.tensor([]),
                                terminated=torch.tensor([]),
                                truncated=torch.tensor([]),
                                infos={},
                                timestep=0,
                                timesteps=1)
        agent.post_interaction(timestep=0, timesteps=1)
Toni-SM/skrl/tests/test_examples_gymnasium.py
import os
import subprocess
import warnings

import hypothesis
import hypothesis.strategies as st
import pytest


EXAMPLE_DIR = "gymnasium"
SCRIPTS = ["ddpg_gymnasium_pendulum.py",
           "cem_gymnasium_cartpole.py",
           "dqn_gymnasium_cartpole.py",
           "q_learning_gymnasium_frozen_lake.py"]

EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples"))
COMMANDS = [f"python {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS]


@pytest.mark.parametrize("command", COMMANDS)
def test_scripts(capsys, command):
    try:
        import gymnasium
    except ImportError as e:
        warnings.warn(f"\n\nUnable to import gymnasium ({e}).\nThis test will be skipped\n")
        return

    subprocess.run(command, shell=True, check=True)
Toni-SM/skrl/tests/test_examples_isaacsim.py
import os
import subprocess
import warnings

import hypothesis
import hypothesis.strategies as st
import pytest


# See the following link for Omniverse Isaac Sim Python environment
# https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_python.html
PYTHON_ENVIRONMENT = "./python.sh"

EXAMPLE_DIR = "isaacsim"
SCRIPTS = ["cartpole_example_skrl.py"]

EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples"))
COMMANDS = [f"{PYTHON_ENVIRONMENT} {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS]


@pytest.mark.parametrize("command", COMMANDS)
def test_scripts(capsys, command):
    try:
        from omni.isaac.kit import SimulationApp
    except ImportError as e:
        warnings.warn(f"\n\nUnable to import SimulationApp ({e}).\nThis test will be skipped\n")
        return

    subprocess.run(command, shell=True, check=True)
Toni-SM/skrl/tests/test_examples_omniisaacgym.py
import os
import subprocess
import warnings

import hypothesis
import hypothesis.strategies as st
import pytest


# See the following link for Omniverse Isaac Sim Python environment
# https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_python.html
PYTHON_ENVIRONMENT = "./python.sh"

EXAMPLE_DIR = "omniisaacgym"
SCRIPTS = ["ppo_cartpole.py"]

EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples"))
COMMANDS = [f"{PYTHON_ENVIRONMENT} {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)} headless=True num_envs=64" for script in SCRIPTS]


@pytest.mark.parametrize("command", COMMANDS)
def test_scripts(capsys, command):
    try:
        import omniisaacgymenvs
    except ImportError as e:
        warnings.warn(f"\n\nUnable to import omniisaacgymenvs ({e}).\nThis test will be skipped\n")
        return

    subprocess.run(command, shell=True, check=True)
Toni-SM/skrl/tests/test_envs.py
import warnings

import hypothesis
import hypothesis.strategies as st
import pytest
import torch

from skrl.envs.torch import Wrapper, wrap_env

from .utils import DummyEnv


@pytest.fixture
def classes_and_kwargs():
    return []


@pytest.mark.parametrize("wrapper", ["gym", "gymnasium", "dm", "robosuite", \
    "isaacgym-preview2", "isaacgym-preview3", "isaacgym-preview4", "omniverse-isaacgym"])
def test_wrap_env(capsys, classes_and_kwargs, wrapper):
    env = DummyEnv(num_envs=1)

    try:
        env: Wrapper = wrap_env(env=env, wrapper=wrapper)
    except ValueError as e:
        warnings.warn(f"{e}. This test will be skipped for '{wrapper}'")
        return
    except ModuleNotFoundError as e:
        warnings.warn(f"{e}. The '{wrapper}' wrapper module is not found. This test will be skipped")
        return

    env.observation_space
    env.action_space
    env.state_space
    env.num_envs
    env.device
Toni-SM/skrl/tests/test_examples_robosuite.py
import os
import subprocess
import warnings

import hypothesis
import hypothesis.strategies as st
import pytest

EXAMPLE_DIR = "robosuite"
SCRIPTS = []
EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples"))
COMMANDS = [f"python {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS]


@pytest.mark.parametrize("command", COMMANDS)
def test_scripts(capsys, command):
    try:
        import robosuite
    except ImportError as e:
        warnings.warn(f"\n\nUnable to import robosuite ({e}).\nThis test will be skipped\n")
        return

    subprocess.run(command, shell=True, check=True)
Toni-SM/skrl/tests/test_resources_schedulers.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.resources.schedulers.torch import KLAdaptiveRL @pytest.fixture def classes_and_kwargs(): return [(KLAdaptiveRL, {})] @pytest.mark.parametrize("optimizer", [torch.optim.Adam([torch.ones((1,))], lr=0.1), torch.optim.SGD([torch.ones((1,))], lr=0.1)]) def test_step(capsys, classes_and_kwargs, optimizer): for klass, kwargs in classes_and_kwargs: scheduler = klass(optimizer, **kwargs) scheduler.step(0.0)
Toni-SM/skrl/tests/test_examples_shimmy.py
import os import subprocess import warnings import hypothesis import hypothesis.strategies as st import pytest EXAMPLE_DIR = "shimmy" SCRIPTS = ["dqn_shimmy_atari_pong.py", "sac_shimmy_dm_control_acrobot_swingup_sparse.py", "ddpg_openai_gym_compatibility_pendulum.py"] EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples")) COMMANDS = [f"python {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS] @pytest.mark.parametrize("command", COMMANDS) def test_scripts(capsys, command): try: import shimmy except ImportError as e: warnings.warn(f"\n\nUnable to import shimmy ({e}).\nThis test will be skipped\n") return subprocess.run(command, shell=True, check=True)
Toni-SM/skrl/tests/__init__.py
Toni-SM/skrl/tests/test_model_instantiators.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.models.torch import Model from skrl.utils.model_instantiators import ( Shape, categorical_model, deterministic_model, gaussian_model, multivariate_gaussian_model ) @pytest.fixture def classes_and_kwargs(): return [(categorical_model, {}), (deterministic_model, {}), (gaussian_model, {}), (multivariate_gaussian_model, {})] def test_models(capsys, classes_and_kwargs): for klass, kwargs in classes_and_kwargs: model: Model = klass(observation_space=1, action_space=1, device="cpu", **kwargs)
Toni-SM/skrl/tests/test_examples_isaacgym.py
import os import subprocess import warnings import hypothesis import hypothesis.strategies as st import pytest EXAMPLE_DIR = "isaacgym" SCRIPTS = ["ppo_cartpole.py", "trpo_cartpole.py"] EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples")) COMMANDS = [f"python {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)} headless=True num_envs=64" for script in SCRIPTS] @pytest.mark.parametrize("command", COMMANDS) def test_scripts(capsys, command): try: import isaacgymenvs except ImportError as e: warnings.warn(f"\n\nUnable to import isaacgymenvs ({e}).\nThis test will be skipped\n") return subprocess.run(command, shell=True, check=True)
Toni-SM/skrl/tests/utils.py
import random import gymnasium as gym import torch class DummyEnv(gym.Env): def __init__(self, num_envs, device = "cpu"): self.num_agents = 1 self.num_envs = num_envs self.device = torch.device(device) self.action_space = gym.spaces.Discrete(2) self.observation_space = gym.spaces.Box(low=-1, high=1, shape=(2,)) def __getattr__(self, key): if key in ["_spec_to_space", "observation_spec"]: return lambda *args, **kwargs: None return None def step(self, action): observation = self.observation_space.sample() reward = random.random() terminated = random.random() > 0.95 truncated = random.random() > 0.95 observation = torch.tensor(observation, dtype=torch.float32).view(self.num_envs, -1) reward = torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1) terminated = torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) truncated = torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) return observation, reward, terminated, truncated, {} def reset(self): observation = self.observation_space.sample() observation = torch.tensor(observation, dtype=torch.float32).view(self.num_envs, -1) return observation, {} def render(self, *args, **kwargs): pass def close(self, *args, **kwargs): pass class _DummyBaseAgent: def __init__(self): pass def record_transition(self, states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps): pass def pre_interaction(self, timestep, timesteps): pass def post_interaction(self, timestep, timesteps): pass def set_running_mode(self, mode): pass class DummyAgent(_DummyBaseAgent): def __init__(self): super().__init__() def init(self, trainer_cfg=None): pass def act(self, states, timestep, timesteps): return torch.tensor([]), None, {} def record_transition(self, states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps): pass def pre_interaction(self, timestep, timesteps): pass def post_interaction(self, timestep, timesteps): pass class DummyModel(torch.nn.Module): def __init__(self): super().__init__() self.device = torch.device("cpu") self.layer = torch.nn.Linear(1, 1) def set_mode(self, *args, **kwargs): pass def get_specification(self, *args, **kwargs): return {} def act(self, *args, **kwargs): return torch.tensor([]), None, {}
Toni-SM/skrl/tests/test_memories.py
import string import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.memories.torch import Memory, RandomMemory @pytest.fixture def classes_and_kwargs(): return [(RandomMemory, {})] @pytest.mark.parametrize("device", [None, "cpu", "cuda:0"]) def test_device(capsys, classes_and_kwargs, device): _device = torch.device(device) if device is not None else torch.device("cuda:0" if torch.cuda.is_available() else "cpu") for klass, kwargs in classes_and_kwargs: try: memory: Memory = klass(memory_size=1, device=device, **kwargs) except (RuntimeError, AssertionError) as e: with capsys.disabled(): print(e) warnings.warn(f"Invalid device: {device}. This test will be skipped") continue assert memory.device == _device # defined device @hypothesis.given(names=st.sets(st.text(alphabet=string.ascii_letters + string.digits + "_", min_size=1, max_size=10), min_size=1, max_size=10)) @hypothesis.settings(suppress_health_check=[hypothesis.HealthCheck.function_scoped_fixture], deadline=None) def test_create_tensors(capsys, classes_and_kwargs, names): for klass, kwargs in classes_and_kwargs: memory: Memory = klass(memory_size=1, **kwargs) for name in names: memory.create_tensor(name=name, size=1, dtype=torch.float32) assert memory.get_tensor_names() == sorted(names) @hypothesis.given(memory_size=st.integers(min_value=1, max_value=100), num_envs=st.integers(min_value=1, max_value=10), num_samples=st.integers(min_value=1, max_value=500)) @hypothesis.settings(suppress_health_check=[hypothesis.HealthCheck.function_scoped_fixture], deadline=None) def test_add_samples(capsys, classes_and_kwargs, memory_size, num_envs, num_samples): for klass, kwargs in classes_and_kwargs: memory: Memory = klass(memory_size=memory_size, num_envs=num_envs, **kwargs) memory.create_tensor(name="tensor_1", size=1, dtype=torch.float32) memory.create_tensor(name="tensor_2", size=2, dtype=torch.float32) # memory_index for _ in range(num_samples): memory.add_samples(tensor_1=torch.zeros((num_envs, 1))) assert memory.memory_index == num_samples % memory_size assert memory.filled == (num_samples >= memory_size) memory.reset() # memory_index, env_index for _ in range(num_samples): memory.add_samples(tensor_2=torch.zeros((2,))) assert memory.memory_index == (num_samples // num_envs) % memory_size assert memory.env_index == num_samples % num_envs assert memory.filled == (num_samples >= memory_size * num_envs)
Toni-SM/skrl/tests/test_examples_isaac_orbit.py
import os import subprocess import warnings import hypothesis import hypothesis.strategies as st import pytest # See the following link for Isaac Orbit environment # https://isaac-orbit.github.io/orbit/source/setup/installation.html PYTHON_ENVIRONMENT = "orbit -p" EXAMPLE_DIR = "isaacorbit" SCRIPTS = ["ppo_cartpole.py"] EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples")) COMMANDS = [f"{PYTHON_ENVIRONMENT} {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)} --headless --num_envs 64" for script in SCRIPTS] @pytest.mark.parametrize("command", COMMANDS) def test_scripts(capsys, command): try: from omni.isaac.kit import SimulationApp except ImportError as e: warnings.warn(f"\n\nUnable to import omni.isaac.kit ({e}).\nThis test will be skipped\n") return subprocess.run(command, shell=True, check=True)
Toni-SM/skrl/tests/test_trainers.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.trainers.torch import ManualTrainer, ParallelTrainer, SequentialTrainer, Trainer from .utils import DummyAgent, DummyEnv @pytest.fixture def classes_and_kwargs(): return [(ManualTrainer, {"cfg": {"timesteps": 100}}), (ParallelTrainer, {"cfg": {"timesteps": 100}}), (SequentialTrainer, {"cfg": {"timesteps": 100}})] def test_train(capsys, classes_and_kwargs): env = DummyEnv(num_envs=1) agent = DummyAgent() for klass, kwargs in classes_and_kwargs: trainer: Trainer = klass(env, agents=agent, **kwargs) trainer.train() def test_eval(capsys, classes_and_kwargs): env = DummyEnv(num_envs=1) agent = DummyAgent() for klass, kwargs in classes_and_kwargs: trainer: Trainer = klass(env, agents=agent, **kwargs) trainer.eval()
Toni-SM/skrl/tests/test_examples_deepmind.py
import os
import subprocess
import warnings

import hypothesis
import hypothesis.strategies as st
import pytest

EXAMPLE_DIR = "deepmind"
SCRIPTS = ["dm_suite_cartpole_swingup_ddpg.py",
           "dm_manipulation_stack_sac.py"]
EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples"))
COMMANDS = [f"python {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS]


@pytest.mark.parametrize("command", COMMANDS)
def test_scripts(capsys, command):
    try:
        import dm_control
    except ImportError as e:
        warnings.warn(f"\n\nUnable to import dm_control environments ({e}).\nThis test will be skipped\n")
        return

    subprocess.run(command, shell=True, check=True)
Toni-SM/skrl/tests/test_jax_memories_memory.py
import math
import unittest

import gym

import jax
import jax.numpy as jnp
import numpy as np

from skrl.memories.jax import Memory


class TestCase(unittest.TestCase):
    def setUp(self):
        self.devices = [jax.devices("cpu")[0], jax.devices("gpu")[0]]

        self.memory_sizes = [10, 100, 1000]
        self.num_envs = [1, 10, 100]
        self.names = ["states", "actions", "rewards", "dones"]
        self.raw_sizes = [gym.spaces.Box(-1, 1, shape=(5,)), gym.spaces.Discrete(5), 1, 1]
        self.sizes = [5, 1, 1, 1]
        self.raw_dtypes = [jnp.float32, int, float, bool]
        self.dtypes = [np.float32, np.int32, np.float32, bool]
        self.mini_batches = [1, 2, 3, 5, 7]

    def tearDown(self):
        pass

    def test_devices(self):
        for device in self.devices:
            # TODO: test
            pass

    def test_tensor_names(self):
        for memory_size, num_envs in zip(self.memory_sizes, self.num_envs):
            # create memory
            memory = Memory(memory_size=memory_size, num_envs=num_envs)

            # create tensors
            for name, size, dtype in zip(self.names, self.raw_sizes, self.raw_dtypes):
                memory.create_tensor(name, size, dtype)

            # test memory.get_tensor_names
            self.assertCountEqual(self.names, memory.get_tensor_names(), "get_tensor_names")

            # test memory.get_tensor_by_name
            for name, size, dtype in zip(self.names, self.sizes, self.dtypes):
                tensor = memory.get_tensor_by_name(name, keepdim=True)
                self.assertSequenceEqual(memory.get_tensor_by_name(name, keepdim=True).shape, (memory_size, num_envs, size), "get_tensor_by_name(..., keepdim=True)")
                self.assertSequenceEqual(memory.get_tensor_by_name(name, keepdim=False).shape, (memory_size * num_envs, size), "get_tensor_by_name(..., keepdim=False)")
                self.assertEqual(memory.get_tensor_by_name(name, keepdim=True).dtype, dtype, "get_tensor_by_name(...).dtype")

            # test memory.set_tensor_by_name
            for name, size, dtype in zip(self.names, self.sizes, self.raw_dtypes):
                new_tensor = jnp.arange(memory_size * num_envs * size).reshape(memory_size, num_envs, size).astype(dtype)
                memory.set_tensor_by_name(name, new_tensor)
                tensor = memory.get_tensor_by_name(name, keepdim=True)
                self.assertTrue((tensor == new_tensor).all().item(), "set_tensor_by_name(...)")

    def test_sample(self):
        for memory_size, num_envs in zip(self.memory_sizes, self.num_envs):
            # create memory
            memory = Memory(memory_size=memory_size, num_envs=num_envs)

            # create tensors
            for name, size, dtype in zip(self.names, self.raw_sizes, self.raw_dtypes):
                memory.create_tensor(name, size, dtype)

            # fill memory
            for name, size, dtype in zip(self.names, self.sizes, self.raw_dtypes):
                new_tensor = jnp.arange(memory_size * num_envs * size).reshape(memory_size, num_envs, size).astype(dtype)
                memory.set_tensor_by_name(name, new_tensor)

            # test memory.sample_all
            for i, mini_batches in enumerate(self.mini_batches):
                samples = memory.sample_all(self.names, mini_batches=mini_batches)
                for sample, name, size in zip(samples[i], self.names, self.sizes):
                    self.assertSequenceEqual(sample.shape, (memory_size * num_envs, size), f"sample_all(...).shape with mini_batches={mini_batches}")
                    tensor = memory.get_tensor_by_name(name, keepdim=True)
                    self.assertTrue((sample.reshape(memory_size, num_envs, size) == tensor).all().item(), f"sample_all(...) with mini_batches={mini_batches}")


if __name__ == '__main__':
    import sys

    if not sys.argv[-1] == '--debug':
        raise RuntimeError('Test can only be run manually with --debug flag')

    test = TestCase()
    test.setUp()
    for method in dir(test):
        if method.startswith('test_'):
            print('Running test: {}'.format(method))
            getattr(test, method)()
    test.tearDown()

    print('All tests passed.')
Toni-SM/skrl/tests/test_resources_noises.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.resources.noises.torch import GaussianNoise, Noise, OrnsteinUhlenbeckNoise @pytest.fixture def classes_and_kwargs(): return [(GaussianNoise, {"mean": 0, "std": 1}), (OrnsteinUhlenbeckNoise, {"theta": 0.1, "sigma": 0.2, "base_scale": 0.3})] @pytest.mark.parametrize("device", [None, "cpu", "cuda:0"]) def test_device(capsys, classes_and_kwargs, device): _device = torch.device(device) if device is not None else torch.device("cuda:0" if torch.cuda.is_available() else "cpu") for klass, kwargs in classes_and_kwargs: try: noise: Noise = klass(device=device, **kwargs) except (RuntimeError, AssertionError) as e: with capsys.disabled(): print(e) warnings.warn(f"Invalid device: {device}. This test will be skipped") continue output = noise.sample((1,)) assert noise.device == _device # defined device assert output.device == _device # runtime device @hypothesis.given(size=st.lists(st.integers(min_value=1, max_value=10), max_size=5)) @hypothesis.settings(suppress_health_check=[hypothesis.HealthCheck.function_scoped_fixture], deadline=None) def test_sample(capsys, classes_and_kwargs, size): for klass, kwargs in classes_and_kwargs: noise: Noise = klass(**kwargs) # sample output = noise.sample(size) assert output.size() == torch.Size(size) # sample like tensor = torch.rand(size, device="cpu") output = noise.sample_like(tensor) assert output.size() == torch.Size(size)
Toni-SM/skrl/tests/test_examples_gym.py
import os import subprocess import warnings import hypothesis import hypothesis.strategies as st import pytest EXAMPLE_DIR = "gym" SCRIPTS = ["ddpg_gym_pendulum.py", "cem_gym_cartpole.py", "dqn_gym_cartpole.py", "q_learning_gym_frozen_lake.py"] EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples")) COMMANDS = [f"python {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS] @pytest.mark.parametrize("command", COMMANDS) def test_scripts(capsys, command): try: import gym except ImportError as e: warnings.warn(f"\n\nUnable to import gym ({e}).\nThis test will be skipped\n") return subprocess.run(command, shell=True, check=True)
Toni-SM/skrl/tests/test_resources_preprocessors.py
import warnings import gym import gymnasium import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.resources.preprocessors.torch import RunningStandardScaler @pytest.fixture def classes_and_kwargs(): return [(RunningStandardScaler, {"size": 1})] @pytest.mark.parametrize("device", [None, "cpu", "cuda:0"]) def test_device(capsys, classes_and_kwargs, device): _device = torch.device(device) if device is not None else torch.device("cuda:0" if torch.cuda.is_available() else "cpu") for klass, kwargs in classes_and_kwargs: try: preprocessor = klass(device=device, **kwargs) except (RuntimeError, AssertionError) as e: with capsys.disabled(): print(e) warnings.warn(f"Invalid device: {device}. This test will be skipped") continue assert preprocessor.device == _device # defined device assert preprocessor(torch.ones(kwargs["size"], device=_device)).device == _device # runtime device @pytest.mark.parametrize("space_and_size", [(gym.spaces.Box(low=-1, high=1, shape=(2, 3)), 6), (gymnasium.spaces.Box(low=-1, high=1, shape=(2, 3)), 6), (gym.spaces.Discrete(n=3), 1), (gymnasium.spaces.Discrete(n=3), 1)]) def test_forward(capsys, classes_and_kwargs, space_and_size): for klass, kwargs in classes_and_kwargs: space, size = space_and_size preprocessor = klass(size=space, device="cpu") output = preprocessor(torch.rand((10, size), device="cpu")) assert output.shape == torch.Size((10, size))
Toni-SM/skrl/docs/make.bat
@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build if "%1" == "" goto help %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.http://sphinx-doc.org/ exit /b 1 ) %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd
Toni-SM/skrl/docs/requirements.txt
furo==2023.7.26 sphinx sphinx-tabs sphinx-autobuild sphinx-copybutton sphinx-notfound-page numpy
Toni-SM/skrl/docs/README.md
# Documentation

## Install Sphinx and the documentation theme (Furo)

```bash
cd docs
pip install -r requirements.txt
```

## Building the documentation

```bash
cd docs
make html
```

To rebuild the documentation automatically each time a file is changed:

```bash
cd docs
sphinx-autobuild ./source/ _build/html
```

## Useful links

- [Sphinx directives](https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html)
- [Math support in Sphinx](https://www.sphinx-doc.org/en/1.0/ext/math.html)
Toni-SM/skrl/docs/source/404.rst
:orphan: Page not found ============== .. image:: _static/data/404-light.svg :width: 50% :align: center :class: only-light :alt: 404 .. image:: _static/data/404-dark.svg :width: 50% :align: center :class: only-dark :alt: 404 .. raw:: html <br> <div style="text-align: center; font-size: 1.75rem;"> <p style="margin: 0;"><strong>404: Puzzle piece not found.</strong></p> <p style="margin: 0;">Did you look under the sofa cushions?</p> </div> <br> <br> Since version 1.0.0, the documentation structure has changed to improve content organization and to provide a better browsing experience. Navigate using the left sidebar or type in the search box to find what you are looking for.
Toni-SM/skrl/docs/source/index.rst
SKRL - Reinforcement Learning library (|version|) ================================================= .. raw:: html <a href="https://pypi.org/project/skrl"> <img alt="pypi" src="https://img.shields.io/pypi/v/skrl"> </a> <a href="https://huggingface.co/skrl"> <img alt="huggingface" src="https://img.shields.io/badge/%F0%9F%A4%97%20models-hugging%20face-F8D521"> </a> <a href="https://github.com/Toni-SM/skrl/discussions"> <img alt="discussions" src="https://img.shields.io/github/discussions/Toni-SM/skrl"> </a> <br> <a href="https://github.com/Toni-SM/skrl/blob/main/LICENSE"> <img alt="license" src="https://img.shields.io/github/license/Toni-SM/skrl"> </a> &nbsp;&nbsp;&nbsp;&nbsp; <a href="https://skrl.readthedocs.io"> <img alt="docs" src="https://readthedocs.org/projects/skrl/badge/?version=latest"> </a> <a href="https://github.com/Toni-SM/skrl/actions/workflows/python-test.yml"> <img alt="pytest" src="https://github.com/Toni-SM/skrl/actions/workflows/python-test.yml/badge.svg"> </a> <a href="https://github.com/Toni-SM/skrl/actions/workflows/pre-commit.yml"> <img alt="pre-commit" src="https://github.com/Toni-SM/skrl/actions/workflows/pre-commit.yml/badge.svg"> </a> <br><br> **skrl** is an open-source library for Reinforcement Learning written in Python (on top of `PyTorch <https://pytorch.org/>`_ and `JAX <https://jax.readthedocs.io>`_) and designed with a focus on modularity, readability, simplicity and transparency of algorithm implementation. In addition to supporting the OpenAI `Gym <https://www.gymlibrary.dev>`_ / Farama `Gymnasium <https://gymnasium.farama.org/>`_, `DeepMind <https://github.com/deepmind/dm_env>`_ and other environment interfaces, it allows loading and configuring `NVIDIA Isaac Gym <https://developer.nvidia.com/isaac-gym>`_, `NVIDIA Isaac Orbit <https://isaac-orbit.github.io/orbit/index.html>`_ and `NVIDIA Omniverse Isaac Gym <https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_gym_isaac_gym.html>`_ environments, enabling agents' simultaneous training by scopes (subsets of environments among all available environments), which may or may not share resources, in the same run. **Main features:** * PyTorch (|_1| |pytorch| |_1|) and JAX (|_1| |jax| |_1|) * Clean code * Modularity and reusability * Documented library, code and implementations * Support for Gym/Gymnasium (single and vectorized), DeepMind, NVIDIA Isaac Gym (preview 2, 3 and 4), NVIDIA Isaac Orbit, NVIDIA Omniverse Isaac Gym environments, among others * Simultaneous learning by scopes in Gym/Gymnasium (vectorized), NVIDIA Isaac Gym, NVIDIA Isaac Orbit and NVIDIA Omniverse Isaac Gym .. raw:: html <br> .. warning:: **skrl** is under **active continuous development**. Make sure you always have the latest version. Visit the `develop <https://github.com/Toni-SM/skrl/tree/develop>`_ branch or its `documentation <https://skrl.readthedocs.io/en/develop>`_ to access the latest updates to be released. | **GitHub repository:** https://github.com/Toni-SM/skrl | **Questions or discussions:** https://github.com/Toni-SM/skrl/discussions | **Citing skrl:** To cite this library (created at Mondragon Unibertsitatea) use the following reference to its article: `skrl: Modular and Flexible Library for Reinforcement Learning <http://jmlr.org/papers/v24/23-0112.html>`_. .. 
code-block:: bibtex

    @article{serrano2023skrl,
      author = {Antonio Serrano-Muñoz and Dimitrios Chrysostomou and Simon Bøgh and Nestor Arana-Arexolaleiba},
      title = {skrl: Modular and Flexible Library for Reinforcement Learning},
      journal = {Journal of Machine Learning Research},
      year = {2023},
      volume = {24},
      number = {254},
      pages = {1--9},
      url = {http://jmlr.org/papers/v24/23-0112.html}
    }

.. raw:: html

    <br><hr>

User guide
----------

To start using the library, visit the following links:

.. toctree::
    :maxdepth: 1

    intro/installation
    intro/getting_started
    intro/examples
    intro/data

.. raw:: html

    <br><hr>

Library components (overview)
-----------------------------

.. toctree::
    :caption: API
    :hidden:

    api/agents
    api/multi_agents
    api/envs
    api/memories
    api/models
    api/resources
    api/trainers
    api/utils

Agents
^^^^^^

Definition of reinforcement learning algorithms that compute an optimal policy. All agents inherit from one and only one :doc:`base class <api/agents>` (that defines a uniform interface and provides for common functionalities) but which is not tied to the implementation details of the algorithms

* :doc:`Advantage Actor Critic <api/agents/a2c>` (**A2C**)
* :doc:`Adversarial Motion Priors <api/agents/amp>` (**AMP**)
* :doc:`Cross-Entropy Method <api/agents/cem>` (**CEM**)
* :doc:`Deep Deterministic Policy Gradient <api/agents/ddpg>` (**DDPG**)
* :doc:`Double Deep Q-Network <api/agents/ddqn>` (**DDQN**)
* :doc:`Deep Q-Network <api/agents/dqn>` (**DQN**)
* :doc:`Proximal Policy Optimization <api/agents/ppo>` (**PPO**)
* :doc:`Q-learning <api/agents/q_learning>` (**Q-learning**)
* :doc:`Robust Policy Optimization <api/agents/rpo>` (**RPO**)
* :doc:`Soft Actor-Critic <api/agents/sac>` (**SAC**)
* :doc:`State Action Reward State Action <api/agents/sarsa>` (**SARSA**)
* :doc:`Twin-Delayed DDPG <api/agents/td3>` (**TD3**)
* :doc:`Trust Region Policy Optimization <api/agents/trpo>` (**TRPO**)

Multi-agents
^^^^^^^^^^^^

Definition of reinforcement learning algorithms that compute optimal policies. All agents (multi-agents) inherit from one and only one :doc:`base class <api/multi_agents>` (that defines a uniform interface and provides for common functionalities) but which is not tied to the implementation details of the algorithms

* :doc:`Independent Proximal Policy Optimization <api/multi_agents/ippo>` (**IPPO**)
* :doc:`Multi-Agent Proximal Policy Optimization <api/multi_agents/mappo>` (**MAPPO**)

Environments
^^^^^^^^^^^^

Definition of the Isaac Gym (preview 2, 3 and 4), Isaac Orbit and Omniverse Isaac Gym environment loaders, and wrappers for the Gym/Gymnasium, DeepMind, Isaac Gym, Isaac Orbit, Omniverse Isaac Gym environments, among others

* :doc:`Single-agent environment wrapping <api/envs/wrapping>` for **Gym/Gymnasium**, **DeepMind**, **Isaac Gym**, **Isaac Orbit**, **Omniverse Isaac Gym** environments, among others
* :doc:`Multi-agent environment wrapping <api/envs/multi_agents_wrapping>` for **PettingZoo** and **Bi-DexHands** environments
* Loading :doc:`Isaac Gym environments <api/envs/isaac_gym>`
* Loading :doc:`Isaac Orbit environments <api/envs/isaac_orbit>`
* Loading :doc:`Omniverse Isaac Gym environments <api/envs/omniverse_isaac_gym>`

Memories
^^^^^^^^

Generic memory definitions. Such memories are not bound to any agent and can be used for any role such as rollout buffer or experience replay memory, for example.
All memories inherit from a :doc:`base class <api/memories>` that defines a uniform interface and keeps track (in allocated tensors) of transitions with the environment or other defined data

* :doc:`Random memory <api/memories/random>`

Models
^^^^^^

Definition of helper mixins for the construction of tabular functions or function approximators using artificial neural networks. This library does not provide predefined policies but helper mixins to create discrete and continuous (stochastic or deterministic) policies in which the user only has to define the tables (tensors) or artificial neural networks. All models inherit from one :doc:`base class <api/models>` that defines a uniform interface and provides for common functionalities. In addition, it is possible to create a :doc:`shared model <api/models/shared_model>` by combining the implemented definitions

* :doc:`Tabular model <api/models/tabular>` (discrete domain)
* :doc:`Categorical model <api/models/categorical>` (discrete domain)
* :doc:`Multi-Categorical model <api/models/multicategorical>` (discrete domain)
* :doc:`Gaussian model <api/models/gaussian>` (continuous domain)
* :doc:`Multivariate Gaussian model <api/models/multivariate_gaussian>` (continuous domain)
* :doc:`Deterministic model <api/models/deterministic>` (continuous domain)

Trainers
^^^^^^^^

Definition of the procedures responsible for managing the agent's training and interaction with the environment. All trainers inherit from a :doc:`base class <api/trainers>` that defines a uniform interface and provides for common functionalities

* :doc:`Sequential trainer <api/trainers/sequential>`
* :doc:`Parallel trainer <api/trainers/parallel>`
* :doc:`Step trainer <api/trainers/step>`

Resources
^^^^^^^^^

Definition of resources used by the agents during training and/or evaluation, such as exploration noises or learning rate schedulers

**Noises:** Definition of the noises used by the agents during the exploration stage. All noises inherit from a :doc:`base class <api/resources/noises>` that defines a uniform interface

* :doc:`Gaussian <api/resources/noises/gaussian>` noise
* :doc:`Ornstein-Uhlenbeck <api/resources/noises/ornstein_uhlenbeck>` noise

**Learning rate schedulers:** Definition of learning rate schedulers. All schedulers inherit from the PyTorch :literal:`_LRScheduler` class (see `how to adjust learning rate <https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate>`_ in the PyTorch documentation for more details)

* :doc:`KL Adaptive <api/resources/schedulers/kl_adaptive>`

**Preprocessors:** Definition of preprocessors

* :doc:`Running standard scaler <api/resources/preprocessors/running_standard_scaler>`

**Optimizers:** Definition of optimizers

* :doc:`Adam <api/resources/optimizers/adam>`

Utils and configurations
^^^^^^^^^^^^^^^^^^^^^^^^

Definition of utilities and configurations

* :doc:`ML frameworks <api/config/frameworks>` configuration
* :doc:`Random seed <api/utils/seed>`
* Memory and Tensorboard :doc:`file post-processing <api/utils/postprocessing>`
* :doc:`Model instantiators <api/utils/model_instantiators>`
* :doc:`Hugging Face integration <api/utils/huggingface>`
* :doc:`Isaac Gym utils <api/utils/isaacgym_utils>`
* :doc:`Omniverse Isaac Gym utils <api/utils/omniverse_isaacgym_utils>`
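
Putting it all together
^^^^^^^^^^^^^^^^^^^^^^^

As a quick orientation, the following minimal sketch shows how the components described above are typically combined with the PyTorch backend: an environment is wrapped, deterministic policy and value function approximators are defined with the model mixins, and a DDPG agent is trained with the sequential trainer. The network sizes, hyperparameters and the Gym ``Pendulum-v1`` task are illustrative only; see :doc:`the examples <intro/examples>` for complete, tested scripts.

.. code-block:: python

    import gym

    import torch
    import torch.nn as nn

    # skrl components: agent, environment wrapper, memory, model mixins, noise and trainer
    from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG
    from skrl.envs.wrappers.torch import wrap_env
    from skrl.memories.torch import RandomMemory
    from skrl.models.torch import DeterministicMixin, Model
    from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise
    from skrl.trainers.torch import SequentialTrainer


    # deterministic actor: maps observations to actions in [-2, 2] (Pendulum-v1 action range)
    class Actor(DeterministicMixin, Model):
        def __init__(self, observation_space, action_space, device, clip_actions=False):
            Model.__init__(self, observation_space, action_space, device)
            DeterministicMixin.__init__(self, clip_actions)
            self.net = nn.Sequential(nn.Linear(self.num_observations, 400), nn.ReLU(),
                                     nn.Linear(400, 300), nn.ReLU(),
                                     nn.Linear(300, self.num_actions))

        def compute(self, inputs, role):
            return 2 * torch.tanh(self.net(inputs["states"])), {}


    # deterministic critic: maps observation-action pairs to Q-values
    class Critic(DeterministicMixin, Model):
        def __init__(self, observation_space, action_space, device, clip_actions=False):
            Model.__init__(self, observation_space, action_space, device)
            DeterministicMixin.__init__(self, clip_actions)
            self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 400), nn.ReLU(),
                                     nn.Linear(400, 300), nn.ReLU(),
                                     nn.Linear(300, 1))

        def compute(self, inputs, role):
            return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {}


    # wrap the environment and instantiate the experience replay memory
    env = wrap_env(gym.make("Pendulum-v1"))
    device = env.device
    memory = RandomMemory(memory_size=15000, num_envs=env.num_envs, device=device)

    # DDPG uses a policy, a critic and their target networks
    models = {"policy": Actor(env.observation_space, env.action_space, device),
              "target_policy": Actor(env.observation_space, env.action_space, device),
              "critic": Critic(env.observation_space, env.action_space, device),
              "target_critic": Critic(env.observation_space, env.action_space, device)}

    # configure the agent starting from its default configuration
    cfg = DDPG_DEFAULT_CONFIG.copy()
    cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device)

    agent = DDPG(models=models, memory=memory, cfg=cfg,
                 observation_space=env.observation_space, action_space=env.action_space, device=device)

    # train sequentially for a fixed number of timesteps
    trainer = SequentialTrainer(cfg={"timesteps": 15000, "headless": True}, env=env, agents=[agent])
    trainer.train()

The same structure applies to the other agents: only the models dictionary and the agent-specific configuration change.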
Toni-SM/skrl/docs/source/conf.py
import os import sys # skrl library sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) print("[DOCS] skrl library path: {}".format(sys.path[0])) import skrl # project information project = "skrl" copyright = "2021, Toni-SM" author = "Toni-SM" if skrl.__version__ != "unknown": release = version = skrl.__version__ else: release = version = "1.1.0" master_doc = "index" # general configuration extensions = [ "sphinx.ext.duration", "sphinx.ext.doctest", "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx_tabs.tabs", "sphinx_copybutton", "notfound.extension", ] # generate links to the documentation of objects in external projects intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "gym": ("https://www.gymlibrary.dev/", None), "gymnasium": ("https://gymnasium.farama.org/", None), "numpy": ("https://numpy.org/doc/stable/", None), "torch": ("https://pytorch.org/docs/stable/", None), "jax": ("https://jax.readthedocs.io/en/latest/", None), "flax": ("https://flax.readthedocs.io/en/latest/", None), "optax": ("https://optax.readthedocs.io/en/latest/", None), } pygments_style = "tango" pygments_dark_style = "zenburn" intersphinx_disabled_domains = ["std"] templates_path = ["_templates"] rst_prolog = """ .. include:: <s5defs.txt> .. |_1| unicode:: 0xA0 :trim: .. |_2| unicode:: 0xA0 0xA0 :trim: .. |_3| unicode:: 0xA0 0xA0 0xA0 :trim: .. |_4| unicode:: 0xA0 0xA0 0xA0 0xA0 :trim: .. |_5| unicode:: 0xA0 0xA0 0xA0 0xA0 0xA0 :trim: .. |jax| image:: /_static/data/logo-jax.svg :width: 28 .. |pytorch| image:: /_static/data/logo-torch.svg :width: 16 .. |br| raw:: html <br> """ # HTML output html_theme = "furo" html_title = f"<div style='text-align: center;'><strong>{project}</strong> ({version})</div>" html_scaled_image_link = False html_static_path = ["_static"] html_favicon = "_static/data/favicon.ico" html_css_files = ["css/skrl.css", "css/s5defs-roles.css"] html_theme_options = { # logo "light_logo": "data/logo-light-mode.png", "dark_logo": "data/logo-dark-mode.png", # edit button "source_repository": "https://github.com/Toni-SM/skrl", "source_branch": "../tree/main", "source_directory": "docs/source", # css "light_css_variables": { "color-brand-primary": "#FF4800", "color-brand-content": "#FF4800", }, "dark_css_variables": { "color-brand-primary": "#EAA000", "color-brand-content": "#EAA000", }, } # EPUB output epub_show_urls = "footnote" # autodoc ext autodoc_mock_imports = [ "gym", "gymnasium", "torch", "jax", "jaxlib", "flax", "optax", "tensorboard", "tqdm", "packaging", "isaacgym", ] # copybutton ext copybutton_prompt_text = r">>> |\.\.\. " copybutton_prompt_is_regexp = True # notfound ext notfound_template = "404.rst" notfound_context = { "title": "Page Not Found", "body": """ <h1>Page Not Found</h1> <p>Sorry, we couldn't find that page in skrl.</p> <p>Try using the search box or go to the homepage.</p> """, } # suppress warning messages suppress_warnings = [ "ref.python", # more than one target found for cross-reference ]
Toni-SM/skrl/docs/source/examples/gym/jax_gym_pendulum_ddpg.py
import gym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, Model from skrl.resources.noises.jax import OrnsteinUhlenbeckNoise from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixins class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact def __call__(self, inputs, role): x = nn.relu(nn.Dense(400)(inputs["states"])) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(self.num_actions)(x) # Pendulum-v1 action_space is -2 to 2 return 2 * nn.tanh(x), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1) x = nn.relu(nn.Dense(400)(x)) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.make("Pendulum-v1") except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0] print("Pendulum-v1 not found. Trying {}".format(env_id)) env = gym.make(env_id) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=15000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device) models["target_policy"] = Actor(env.observation_space, env.action_space, device) models["critic"] = Critic(env.observation_space, env.action_space, device) models["target_critic"] = Critic(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal", stddev=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg = DDPG_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device) cfg["batch_size"] = 100 cfg["random_timesteps"] = 100 cfg["learning_starts"] = 100 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/jax/Pendulum" agent = DDPG(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/jax_gym_cartpole_cem.py
import gym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.cem import CEM, CEM_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import CategoricalMixin, Model from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define model (categorical model) using mixin class Policy(CategoricalMixin, Model): def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) CategoricalMixin.__init__(self, unnormalized_log_prob) @nn.compact def __call__(self, inputs, role): x = nn.relu(nn.Dense(64)(inputs["states"])) x = nn.relu(nn.Dense(64)(x)) x = nn.Dense(self.num_actions)(x) return x, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.make("CartPole-v0") except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("CartPole-v")][0] print("CartPole-v0 not found. Trying {}".format(env_id)) env = gym.make(env_id) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=1000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's model (function approximator). # CEM requires 1 model, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/cem.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal", stddev=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/cem.html#configuration-and-hyperparameters cfg = CEM_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1000 cfg["learning_starts"] = 100 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1000 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/jax/CartPole" agent = CEM(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_td3.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import GaussianNoise from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixin class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.linear_layer_1 = nn.Linear(self.num_observations, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(inputs["states"])) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.linear_layer_1 = nn.Linear(self.num_observations + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device) models["target_policy"] = Actor(env.observation_space, env.action_space, device) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg = TD3_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device) cfg["smooth_regularization_clip"] = 0.5 cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 1000 cfg["learning_starts"] = 1000 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = TD3(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_trpo_rnn.py
import gym import numpy as np import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.trpo import TRPO_DEFAULT_CONFIG from skrl.agents.torch.trpo import TRPO_RNN as TRPO from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(rnn_output)), self.log_std_parameter, {"rnn": [hidden_states]} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, 
num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 1)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# TRPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["value"] = Value(env.observation_space, env.action_space, device, num_envs=env.num_envs) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#configuration-and-hyperparameters cfg = TRPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.9 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["grad_norm_clip"] = 0.5 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = TRPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_td3_gru.py
import gym
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F

# import the skrl components to build the RL system
from skrl.agents.torch.td3 import TD3_DEFAULT_CONFIG
from skrl.agents.torch.td3 import TD3_RNN as TD3
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, Model
from skrl.resources.noises.torch import GaussianNoise
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define models (deterministic models) using mixin
class Actor(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 num_envs=1, num_layers=1, hidden_size=400, sequence_length=20):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.num_envs = num_envs
        self.num_layers = num_layers
        self.hidden_size = hidden_size  # Hout
        self.sequence_length = sequence_length

        self.gru = nn.GRU(input_size=self.num_observations,
                          hidden_size=self.hidden_size,
                          num_layers=self.num_layers,
                          batch_first=True)  # batch_first -> (batch, sequence, features)

        self.linear_layer_1 = nn.Linear(self.hidden_size, 400)
        self.linear_layer_2 = nn.Linear(400, 300)
        self.action_layer = nn.Linear(300, self.num_actions)

    def get_specification(self):
        # batch size (N) is the number of envs
        return {"rnn": {"sequence_length": self.sequence_length,
                        "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}}  # hidden states (D ∗ num_layers, N, Hout)

    def compute(self, inputs, role):
        states = inputs["states"]
        terminated = inputs.get("terminated", None)
        hidden_states = inputs["rnn"][0]

        # training
        if self.training:
            rnn_input = states.view(-1, self.sequence_length, states.shape[-1])  # (N, L, Hin): N=batch_size, L=sequence_length
            hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1])  # (D * num_layers, N, L, Hout)
            # get the hidden states corresponding to the initial sequence
            sequence_index = 1 if role == "target_policy" else 0  # target networks act on the next state of the environment
            hidden_states = hidden_states[:,:,sequence_index,:].contiguous()  # (D * num_layers, N, Hout)

            # reset the RNN state in the middle of a sequence
            if terminated is not None and torch.any(terminated):
                rnn_outputs = []
                terminated = terminated.view(-1, self.sequence_length)
                indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]

                for i in range(len(indexes) - 1):
                    i0, i1 = indexes[i], indexes[i + 1]
                    rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
                    hidden_states[:, (terminated[:,i1-1]), :] = 0
                    rnn_outputs.append(rnn_output)

                rnn_output = torch.cat(rnn_outputs, dim=1)
            # no need to reset the RNN state in the sequence
            else:
                rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
        # rollout
        else:
            rnn_input = states.view(-1, 1, states.shape[-1])  # (N, L, Hin): N=num_envs, L=1
            rnn_output, hidden_states = self.gru(rnn_input, hidden_states)

        # flatten the RNN output
        rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1)  # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)

        x = F.relu(self.linear_layer_1(rnn_output))
        x = F.relu(self.linear_layer_2(x))

        # Pendulum-v1 action_space is -2 to 2
        return 2 * torch.tanh(self.action_layer(x)), {"rnn": [hidden_states]}

class Critic(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 num_envs=1, num_layers=1, hidden_size=400, sequence_length=20):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.num_envs = num_envs
        self.num_layers = num_layers
        self.hidden_size = hidden_size  # Hout
        self.sequence_length = sequence_length

        self.gru = nn.GRU(input_size=self.num_observations,
                          hidden_size=self.hidden_size,
                          num_layers=self.num_layers,
                          batch_first=True)  # batch_first -> (batch, sequence, features)

        self.linear_layer_1 = nn.Linear(self.hidden_size + self.num_actions, 400)
        self.linear_layer_2 = nn.Linear(400, 300)
        self.linear_layer_3 = nn.Linear(300, 1)

    def get_specification(self):
        # batch size (N) is the number of envs
        return {"rnn": {"sequence_length": self.sequence_length,
                        "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}}  # hidden states (D ∗ num_layers, N, Hout)

    def compute(self, inputs, role):
        states = inputs["states"]
        terminated = inputs.get("terminated", None)
        hidden_states = inputs["rnn"][0]

        # critic is only used during training
        rnn_input = states.view(-1, self.sequence_length, states.shape[-1])  # (N, L, Hin): N=batch_size, L=sequence_length
        hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1])  # (D * num_layers, N, L, Hout)
        # get the hidden states corresponding to the initial sequence
        sequence_index = 1 if role in ["target_critic_1", "target_critic_2"] else 0  # target networks act on the next state of the environment
        hidden_states = hidden_states[:,:,sequence_index,:].contiguous()  # (D * num_layers, N, Hout)

        # reset the RNN state in the middle of a sequence
        if terminated is not None and torch.any(terminated):
            rnn_outputs = []
            terminated = terminated.view(-1, self.sequence_length)
            indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]

            for i in range(len(indexes) - 1):
                i0, i1 = indexes[i], indexes[i + 1]
                rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
                hidden_states[:, (terminated[:,i1-1]), :] = 0
                rnn_outputs.append(rnn_output)

            rnn_output = torch.cat(rnn_outputs, dim=1)
        # no need to reset the RNN state in the sequence
        else:
            rnn_output, hidden_states = self.gru(rnn_input, hidden_states)

        # flatten the RNN output
        rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1)  # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)

        x = F.relu(self.linear_layer_1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1)))
        x = F.relu(self.linear_layer_2(x))

        return self.linear_layer_3(x), {"rnn": [hidden_states]}


# environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py)
class NoVelocityWrapper(gym.ObservationWrapper):
    def observation(self, observation):
        # observation: x, y, angular velocity
        return observation * np.array([1, 1, 0])

gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1")))

# load and wrap the gym environment
env = gym.make("PendulumNoVel-v1")
env = wrap_env(env)

device = env.device


# instantiate a memory as experience replay
memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False)


# instantiate the agent's models (function approximators).
# TD3 requires 6 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models
models = {}
models["policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs)
models["target_policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs)
models["critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs)
models["critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs)
models["target_critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs)
models["target_critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs)

# initialize models' parameters (weights and biases)
for model in models.values():
    model.init_parameters(method_name="normal_", mean=0.0, std=0.1)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters
cfg = TD3_DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device)
cfg["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device)
cfg["smooth_regularization_clip"] = 0.5
cfg["discount_factor"] = 0.98
cfg["batch_size"] = 100
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 1000
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 75
cfg["experiment"]["checkpoint_interval"] = 750
cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel"

agent = TD3(models=models,
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 15000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
Toni-SM/skrl/docs/source/examples/gym/jax_gym_cartpole_dqn.py
import gym

# import the skrl components to build the RL system
from skrl import config
from skrl.agents.jax.dqn import DQN, DQN_DEFAULT_CONFIG
from skrl.envs.wrappers.jax import wrap_env
from skrl.memories.jax import RandomMemory
from skrl.trainers.jax import SequentialTrainer
from skrl.utils import set_seed
from skrl.utils.model_instantiators.jax import Shape, deterministic_model


config.jax.backend = "numpy"  # or "jax"


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# load and wrap the gym environment.
# note: the environment version may change depending on the gym version
try:
    env = gym.make("CartPole-v0")
except gym.error.DeprecatedEnv as e:
    env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("CartPole-v")][0]
    print("CartPole-v0 not found. Trying {}".format(env_id))
    env = gym.make(env_id)
env = wrap_env(env)

device = env.device


# instantiate a memory as experience replay
memory = RandomMemory(memory_size=50000, num_envs=env.num_envs, device=device, replacement=False)


# instantiate the agent's models (function approximators) using the model instantiator utility.
# DQN requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#models
models = {}
models["q_network"] = deterministic_model(observation_space=env.observation_space,
                                          action_space=env.action_space,
                                          device=device,
                                          clip_actions=False,
                                          input_shape=Shape.OBSERVATIONS,
                                          hiddens=[64, 64],
                                          hidden_activation=["relu", "relu"],
                                          output_shape=Shape.ACTIONS,
                                          output_activation=None,
                                          output_scale=1.0)
models["target_q_network"] = deterministic_model(observation_space=env.observation_space,
                                                 action_space=env.action_space,
                                                 device=device,
                                                 clip_actions=False,
                                                 input_shape=Shape.OBSERVATIONS,
                                                 hiddens=[64, 64],
                                                 hidden_activation=["relu", "relu"],
                                                 output_shape=Shape.ACTIONS,
                                                 output_activation=None,
                                                 output_scale=1.0)

# instantiate models' state dict
for role, model in models.items():
    model.init_state_dict(role)

# initialize models' parameters (weights and biases)
for model in models.values():
    model.init_parameters(method_name="normal", stddev=0.1)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#configuration-and-hyperparameters
cfg = DQN_DEFAULT_CONFIG.copy()
cfg["learning_starts"] = 100
cfg["exploration"]["final_epsilon"] = 0.04
cfg["exploration"]["timesteps"] = 1500
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 1000
cfg["experiment"]["checkpoint_interval"] = 5000
cfg["experiment"]["directory"] = "runs/jax/CartPole"

agent = DQN(models=models,
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 50000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_sac_gru.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.sac import SAC_DEFAULT_CONFIG from skrl.agents.torch.sac import SAC_RNN as SAC from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Actor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(rnn_output)) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), self.log_std_parameter, {"rnn": [hidden_states]} class Critic(DeterministicMixin, 
Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # critic is only used during training rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence sequence_index = 1 if role in ["target_critic_1", "target_critic_2"] else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg = SAC_DEFAULT_CONFIG.copy() cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 cfg["learn_entropy"] = True # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = SAC(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulum_ddpg.py
import gym

import torch
import torch.nn as nn
import torch.nn.functional as F

# import the skrl components to build the RL system
from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, Model
from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define models (deterministic models) using mixin
class Actor(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.linear_layer_1 = nn.Linear(self.num_observations, 400)
        self.linear_layer_2 = nn.Linear(400, 300)
        self.action_layer = nn.Linear(300, self.num_actions)

    def compute(self, inputs, role):
        x = F.relu(self.linear_layer_1(inputs["states"]))
        x = F.relu(self.linear_layer_2(x))
        # Pendulum-v1 action_space is -2 to 2
        return 2 * torch.tanh(self.action_layer(x)), {}

class Critic(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.linear_layer_1 = nn.Linear(self.num_observations + self.num_actions, 400)
        self.linear_layer_2 = nn.Linear(400, 300)
        self.linear_layer_3 = nn.Linear(300, 1)

    def compute(self, inputs, role):
        x = F.relu(self.linear_layer_1(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)))
        x = F.relu(self.linear_layer_2(x))
        return self.linear_layer_3(x), {}


# load and wrap the gym environment.
# note: the environment version may change depending on the gym version
try:
    env = gym.make("Pendulum-v1")
except gym.error.DeprecatedEnv as e:
    env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0]
    print("Pendulum-v1 not found. Trying {}".format(env_id))
    env = gym.make(env_id)
env = wrap_env(env)

device = env.device


# instantiate a memory as experience replay
memory = RandomMemory(memory_size=15000, num_envs=env.num_envs, device=device, replacement=False)


# instantiate the agent's models (function approximators).
# DDPG requires 4 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models
models = {}
models["policy"] = Actor(env.observation_space, env.action_space, device)
models["target_policy"] = Actor(env.observation_space, env.action_space, device)
models["critic"] = Critic(env.observation_space, env.action_space, device)
models["target_critic"] = Critic(env.observation_space, env.action_space, device)

# initialize models' parameters (weights and biases)
for model in models.values():
    model.init_parameters(method_name="normal_", mean=0.0, std=0.1)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters
cfg = DDPG_DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device)
cfg["batch_size"] = 100
cfg["random_timesteps"] = 100
cfg["learning_starts"] = 100
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 75
cfg["experiment"]["checkpoint_interval"] = 750
cfg["experiment"]["directory"] = "runs/torch/Pendulum"

agent = DDPG(models=models,
             memory=memory,
             cfg=cfg,
             observation_space=env.observation_space,
             action_space=env.action_space,
             device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 15000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
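
# --- illustrative sketch, not part of the original example: evaluating a trained agent ---
# skrl agents expose `load()` for restoring checkpoints and trainers expose `eval()`.
# The checkpoint path below is a hypothetical placeholder: checkpoints are written under
# cfg["experiment"]["directory"] inside an auto-generated experiment folder (EXPERIMENT_NAME).
#
# agent.load("runs/torch/Pendulum/EXPERIMENT_NAME/checkpoints/best_agent.pt")
# trainer.eval()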
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_trpo.py
import gym
import numpy as np

import torch
import torch.nn as nn

# import the skrl components to build the RL system
from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define models (stochastic and deterministic models) using mixins
class Policy(GaussianMixin, Model):
    def __init__(self, observation_space, action_space, device,
                 clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, self.num_actions))
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        # Pendulum-v1 action_space is -2 to 2
        return 2 * torch.tanh(self.net(inputs["states"])), self.log_std_parameter, {}

class Value(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, 1))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


# environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py)
class NoVelocityWrapper(gym.ObservationWrapper):
    def observation(self, observation):
        # observation: x, y, angular velocity
        return observation * np.array([1, 1, 0])

gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1")))

# load and wrap the gym environment
env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False)
env = wrap_env(env)

device = env.device


# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device)


# instantiate the agent's models (function approximators).
# TRPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#models
models = {}
models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True)
models["value"] = Value(env.observation_space, env.action_space, device)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#configuration-and-hyperparameters
cfg = TRPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 1024  # memory_size
cfg["learning_epochs"] = 10
cfg["mini_batches"] = 32
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 1e-3
cfg["grad_norm_clip"] = 0.5
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 500
cfg["experiment"]["checkpoint_interval"] = 5000
cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel"

agent = TRPO(models=models,
             memory=memory,
             cfg=cfg,
             observation_space=env.observation_space,
             action_space=env.action_space,
             device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 100000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulum_ppo.py
import gym

import torch
import torch.nn as nn

# import the skrl components to build the RL system
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define models (stochastic and deterministic models) using mixins
class Policy(GaussianMixin, Model):
    def __init__(self, observation_space, action_space, device,
                 clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, self.num_actions))
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        # Pendulum-v1 action_space is -2 to 2
        return 2 * torch.tanh(self.net(inputs["states"])), self.log_std_parameter, {}

class Value(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, 1))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


# load and wrap the gym environment.
# note: the environment version may change depending on the gym version
try:
    env = gym.vector.make("Pendulum-v1", num_envs=4, asynchronous=False)
except gym.error.DeprecatedEnv as e:
    env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0]
    print("Pendulum-v1 not found. Trying {}".format(env_id))
    env = gym.vector.make(env_id, num_envs=4, asynchronous=False)
env = wrap_env(env)

device = env.device


# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device)


# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True)
models["value"] = Value(env.observation_space, env.action_space, device)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 1024  # memory_size
cfg["learning_epochs"] = 10
cfg["mini_batches"] = 32
cfg["discount_factor"] = 0.9
cfg["lambda"] = 0.95
cfg["learning_rate"] = 1e-3
cfg["learning_rate_scheduler"] = KLAdaptiveRL
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg["grad_norm_clip"] = 0.5
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = False
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 0.5
cfg["kl_threshold"] = 0
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 500
cfg["experiment"]["checkpoint_interval"] = 5000
cfg["experiment"]["directory"] = "runs/torch/Pendulum"

agent = PPO(models=models,
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 100000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulum_td3.py
import gym

import torch
import torch.nn as nn
import torch.nn.functional as F

# import the skrl components to build the RL system
from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, Model
from skrl.resources.noises.torch import GaussianNoise
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define models (deterministic models) using mixin
class Actor(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.linear_layer_1 = nn.Linear(self.num_observations, 400)
        self.linear_layer_2 = nn.Linear(400, 300)
        self.action_layer = nn.Linear(300, self.num_actions)

    def compute(self, inputs, role):
        x = F.relu(self.linear_layer_1(inputs["states"]))
        x = F.relu(self.linear_layer_2(x))
        # Pendulum-v1 action_space is -2 to 2
        return 2 * torch.tanh(self.action_layer(x)), {}

class Critic(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.linear_layer_1 = nn.Linear(self.num_observations + self.num_actions, 400)
        self.linear_layer_2 = nn.Linear(400, 300)
        self.linear_layer_3 = nn.Linear(300, 1)

    def compute(self, inputs, role):
        x = F.relu(self.linear_layer_1(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)))
        x = F.relu(self.linear_layer_2(x))
        return self.linear_layer_3(x), {}


# load and wrap the gym environment.
# note: the environment version may change depending on the gym version
try:
    env = gym.make("Pendulum-v1")
except gym.error.DeprecatedEnv as e:
    env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0]
    print("Pendulum-v1 not found. Trying {}".format(env_id))
    env = gym.make(env_id)
env = wrap_env(env)

device = env.device


# instantiate a memory as experience replay
memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False)


# instantiate the agent's models (function approximators).
# TD3 requires 6 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models
models = {}
models["policy"] = Actor(env.observation_space, env.action_space, device)
models["target_policy"] = Actor(env.observation_space, env.action_space, device)
models["critic_1"] = Critic(env.observation_space, env.action_space, device)
models["critic_2"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_1"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_2"] = Critic(env.observation_space, env.action_space, device)

# initialize models' parameters (weights and biases)
for model in models.values():
    model.init_parameters(method_name="normal_", mean=0.0, std=0.1)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters
cfg = TD3_DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device)
cfg["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device)
cfg["smooth_regularization_clip"] = 0.5
cfg["discount_factor"] = 0.98
cfg["batch_size"] = 100
cfg["random_timesteps"] = 1000
cfg["learning_starts"] = 1000
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 75
cfg["experiment"]["checkpoint_interval"] = 750
cfg["experiment"]["directory"] = "runs/torch/Pendulum"

agent = TD3(models=models,
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 15000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulum_sac.py
import gym

import torch
import torch.nn as nn
import torch.nn.functional as F

# import the skrl components to build the RL system
from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define models (stochastic and deterministic models) using mixins
class Actor(GaussianMixin, Model):
    def __init__(self, observation_space, action_space, device,
                 clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)

        self.linear_layer_1 = nn.Linear(self.num_observations, 400)
        self.linear_layer_2 = nn.Linear(400, 300)
        self.action_layer = nn.Linear(300, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        x = F.relu(self.linear_layer_1(inputs["states"]))
        x = F.relu(self.linear_layer_2(x))
        # Pendulum-v1 action_space is -2 to 2
        return 2 * torch.tanh(self.action_layer(x)), self.log_std_parameter, {}

class Critic(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.linear_layer_1 = nn.Linear(self.num_observations + self.num_actions, 400)
        self.linear_layer_2 = nn.Linear(400, 300)
        self.linear_layer_3 = nn.Linear(300, 1)

    def compute(self, inputs, role):
        x = F.relu(self.linear_layer_1(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)))
        x = F.relu(self.linear_layer_2(x))
        return self.linear_layer_3(x), {}


# load and wrap the gym environment.
# note: the environment version may change depending on the gym version
try:
    env = gym.make("Pendulum-v1")
except gym.error.DeprecatedEnv as e:
    env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0]
    print("Pendulum-v1 not found. Trying {}".format(env_id))
    env = gym.make(env_id)
env = wrap_env(env)

device = env.device


# instantiate a memory as experience replay
memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False)


# instantiate the agent's models (function approximators).
# SAC requires 5 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models
models = {}
models["policy"] = Actor(env.observation_space, env.action_space, device, clip_actions=True)
models["critic_1"] = Critic(env.observation_space, env.action_space, device)
models["critic_2"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_1"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_2"] = Critic(env.observation_space, env.action_space, device)

# initialize models' parameters (weights and biases)
for model in models.values():
    model.init_parameters(method_name="normal_", mean=0.0, std=0.1)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters
cfg = SAC_DEFAULT_CONFIG.copy()
cfg["discount_factor"] = 0.98
cfg["batch_size"] = 100
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 1000
cfg["learn_entropy"] = True
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 75
cfg["experiment"]["checkpoint_interval"] = 750
cfg["experiment"]["directory"] = "runs/torch/Pendulum"

agent = SAC(models=models,
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 15000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_ddpg_lstm.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.ddpg import DDPG_DEFAULT_CONFIG from skrl.agents.torch.ddpg import DDPG_RNN as DDPG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixin class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence sequence_index = 1 if role == "target_policy" else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # rollout else: rnn_input = 
states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(rnn_output)) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), {"rnn": [rnn_states[0], rnn_states[1]]} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # critic is only used during training rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence sequence_index = 1 if role == "target_critic" else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))) 
x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {"rnn": [rnn_states[0], rnn_states[1]]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). # DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg = DDPG_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device) cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = DDPG(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_ppo_gru.py
import gym import numpy as np import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO_DEFAULT_CONFIG from skrl.agents.torch.ppo import PPO_RNN as PPO from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(rnn_output)), self.log_std_parameter, {"rnn": [hidden_states]} class Value(DeterministicMixin, Model): def __init__(self, 
observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 1)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["value"] = Value(env.observation_space, env.action_space, device, num_envs=env.num_envs) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.9 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["grad_norm_clip"] = 0.5 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = False cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 0.5 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_taxi_sarsa.py
import gym

import torch

# import the skrl components to build the RL system
from skrl.agents.torch.sarsa import SARSA, SARSA_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.models.torch import Model, TabularMixin
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define model (tabular model) using mixin
class EpsilonGreedyPolicy(TabularMixin, Model):
    def __init__(self, observation_space, action_space, device, num_envs=1, epsilon=0.1):
        Model.__init__(self, observation_space, action_space, device)
        TabularMixin.__init__(self, num_envs)

        self.epsilon = epsilon
        self.q_table = torch.ones((num_envs, self.num_observations, self.num_actions),
                                  dtype=torch.float32, device=self.device)

    def compute(self, inputs, role):
        actions = torch.argmax(self.q_table[torch.arange(self.num_envs).view(-1, 1), inputs["states"]],
                               dim=-1, keepdim=True).view(-1,1)

        # choose random actions for exploration according to epsilon
        indexes = (torch.rand(inputs["states"].shape[0], device=self.device) < self.epsilon).nonzero().view(-1)
        if indexes.numel():
            actions[indexes] = torch.randint(self.num_actions, (indexes.numel(), 1), device=self.device)
        return actions, {}


# load and wrap the gym environment.
# note: the environment version may change depending on the gym version
try:
    env = gym.make("Taxi-v3")
except gym.error.DeprecatedEnv as e:
    env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Taxi-v")][0]
    print("Taxi-v3 not found. Trying {}".format(env_id))
    env = gym.make(env_id)
env = wrap_env(env)

device = env.device


# instantiate the agent's model (table)
# SARSA requires 1 model, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/sarsa.html#models
models = {}
models["policy"] = EpsilonGreedyPolicy(env.observation_space, env.action_space, device, num_envs=env.num_envs, epsilon=0.1)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/sarsa.html#configuration-and-hyperparameters
cfg = SARSA_DEFAULT_CONFIG.copy()
cfg["discount_factor"] = 0.999
cfg["alpha"] = 0.4
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 1600
cfg["experiment"]["checkpoint_interval"] = 8000
cfg["experiment"]["directory"] = "runs/torch/Taxi"

agent = SARSA(models=models,
              memory=None,
              cfg=cfg,
              observation_space=env.observation_space,
              action_space=env.action_space,
              device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 80000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
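
# --- illustrative sketch, not part of the original example ---
# The indexing in EpsilonGreedyPolicy.compute() above can be checked in isolation with
# toy sizes (the values below are assumptions, not taken from the example): greedy actions
# come from an argmax over each environment's Q-table row, and a random subset of
# environments (probability epsilon) is overridden with random actions.
import torch

num_envs, num_obs, num_actions, epsilon = 4, 500, 6, 0.1
q_table = torch.ones((num_envs, num_obs, num_actions))
states = torch.randint(num_obs, (num_envs, 1))  # one discrete state per environment

actions = torch.argmax(q_table[torch.arange(num_envs).view(-1, 1), states],
                       dim=-1, keepdim=True).view(-1, 1)
indexes = (torch.rand(num_envs) < epsilon).nonzero().view(-1)
if indexes.numel():
    actions[indexes] = torch.randint(num_actions, (indexes.numel(), 1))
print(actions.shape)  # torch.Size([4, 1])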
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_ppo.py
import gym import numpy as np import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(inputs["states"])), self.log_std_parameter, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True) models["value"] = Value(env.observation_space, env.action_space, device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.9 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["grad_norm_clip"] = 0.5 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = False cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 0.5 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_ddpg_rnn.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.ddpg import DDPG_DEFAULT_CONFIG from skrl.agents.torch.ddpg import DDPG_RNN as DDPG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixin class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence sequence_index = 1 if role == "target_policy" else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(rnn_output)) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), {"rnn": [hidden_states]} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, 
action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # critic is only used during training rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence sequence_index = 1 if role == "target_critic" else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg = DDPG_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device) cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = DDPG(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_sac.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Actor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.linear_layer_1 = nn.Linear(self.num_observations, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(inputs["states"])) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), self.log_std_parameter, {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.linear_layer_1 = nn.Linear(self.num_observations + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, clip_actions=True) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg = SAC_DEFAULT_CONFIG.copy() cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 cfg["learn_entropy"] = True # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = SAC(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_frozen_lake_vector_q_learning.py
import gym import torch # import the skrl components to build the RL system from skrl.agents.torch.q_learning import Q_LEARNING, Q_LEARNING_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.models.torch import Model, TabularMixin from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define model (tabular model) using mixin class EpilonGreedyPolicy(TabularMixin, Model): def __init__(self, observation_space, action_space, device, num_envs=1, epsilon=0.1): Model.__init__(self, observation_space, action_space, device) TabularMixin.__init__(self, num_envs) self.epsilon = epsilon self.q_table = torch.ones((num_envs, self.num_observations, self.num_actions), dtype=torch.float32, device=self.device) def compute(self, inputs, role): actions = torch.argmax(self.q_table[torch.arange(self.num_envs).view(-1, 1), inputs["states"]], dim=-1, keepdim=True).view(-1,1) # choose random actions for exploration according to epsilon indexes = (torch.rand(inputs["states"].shape[0], device=self.device) < self.epsilon).nonzero().view(-1) if indexes.numel(): actions[indexes] = torch.randint(self.num_actions, (indexes.numel(), 1), device=self.device) return actions, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.vector.make("FrozenLake-v0", num_envs=10, asynchronous=False) except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("FrozenLake-v")][0] print("FrozenLake-v0 not found. Trying {}".format(env_id)) env = gym.vector.make(env_id, num_envs=10, asynchronous=False) env = wrap_env(env) device = env.device # instantiate the agent's model (table) # Q-learning requires 1 model, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/q_learning.html#models models = {} models["policy"] = EpilonGreedyPolicy(env.observation_space, env.action_space, device, num_envs=env.num_envs, epsilon=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/q_learning.html#configuration-and-hyperparameters cfg = Q_LEARNING_DEFAULT_CONFIG.copy() cfg["discount_factor"] = 0.999 cfg["alpha"] = 0.4 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1600 cfg["experiment"]["checkpoint_interval"] = 8000 cfg["experiment"]["directory"] = "runs/torch/FrozenLake" agent = Q_LEARNING(models=models, memory=None, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 80000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulum_vector_ddpg.py
import gym import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixin class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.linear_layer_1 = nn.Linear(self.num_observations, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(inputs["states"])) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.linear_layer_1 = nn.Linear(self.num_observations + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False) except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0] print("Pendulum-v1 not found. Trying {}".format(env_id)) env = gym.vector.make(env_id, num_envs=10, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=100000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device) models["target_policy"] = Actor(env.observation_space, env.action_space, device) models["critic"] = Critic(env.observation_space, env.action_space, device) models["target_critic"] = Critic(env.observation_space, env.action_space, device) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg = DDPG_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device) cfg["batch_size"] = 100 cfg["random_timesteps"] = 100 cfg["learning_starts"] = 100 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1000 cfg["experiment"]["checkpoint_interval"] = 1000 cfg["experiment"]["directory"] = "runs/torch/Pendulum" agent = DDPG(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_sac_rnn.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.sac import SAC_DEFAULT_CONFIG from skrl.agents.torch.sac import SAC_RNN as SAC from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Actor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(rnn_output)) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), self.log_std_parameter, {"rnn": [hidden_states]} class Critic(DeterministicMixin, 
Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # critic is only used during training rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence sequence_index = 1 if role in ["target_critic_1", "target_critic_2"] else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg = SAC_DEFAULT_CONFIG.copy() cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 cfg["learn_entropy"] = True # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = SAC(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_frozen_lake_q_learning.py
import gym import torch # import the skrl components to build the RL system from skrl.agents.torch.q_learning import Q_LEARNING, Q_LEARNING_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.models.torch import Model, TabularMixin from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define model (tabular model) using mixin class EpilonGreedyPolicy(TabularMixin, Model): def __init__(self, observation_space, action_space, device, num_envs=1, epsilon=0.1): Model.__init__(self, observation_space, action_space, device) TabularMixin.__init__(self, num_envs) self.epsilon = epsilon self.q_table = torch.ones((num_envs, self.num_observations, self.num_actions), dtype=torch.float32, device=self.device) def compute(self, inputs, role): actions = torch.argmax(self.q_table[torch.arange(self.num_envs).view(-1, 1), inputs["states"]], dim=-1, keepdim=True).view(-1,1) # choose random actions for exploration according to epsilon indexes = (torch.rand(inputs["states"].shape[0], device=self.device) < self.epsilon).nonzero().view(-1) if indexes.numel(): actions[indexes] = torch.randint(self.num_actions, (indexes.numel(), 1), device=self.device) return actions, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.make("FrozenLake-v0") except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("FrozenLake-v")][0] print("FrozenLake-v0 not found. Trying {}".format(env_id)) env = gym.make(env_id) env = wrap_env(env) device = env.device # instantiate the agent's model (table) # Q-learning requires 1 model, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/q_learning.html#models models = {} models["policy"] = EpilonGreedyPolicy(env.observation_space, env.action_space, device, num_envs=env.num_envs, epsilon=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/q_learning.html#configuration-and-hyperparameters cfg = Q_LEARNING_DEFAULT_CONFIG.copy() cfg["discount_factor"] = 0.999 cfg["alpha"] = 0.4 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1600 cfg["experiment"]["checkpoint_interval"] = 8000 cfg["experiment"]["directory"] = "runs/torch/FrozenLake" agent = Q_LEARNING(models=models, memory=None, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 80000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_ddpg_gru.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.ddpg import DDPG_DEFAULT_CONFIG from skrl.agents.torch.ddpg import DDPG_RNN as DDPG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixin class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence sequence_index = 1 if role == "target_policy" else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(rnn_output)) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), {"rnn": [hidden_states]} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, 
action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # critic is only used during training rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence sequence_index = 1 if role == "target_critic" else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg = DDPG_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device) cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = DDPG(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_td3_lstm.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.td3 import TD3_DEFAULT_CONFIG from skrl.agents.torch.td3 import TD3_RNN as TD3 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import GaussianNoise from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixin class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence sequence_index = 1 if role == "target_policy" else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # rollout else: rnn_input = states.view(-1, 1, 
states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(rnn_output)) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), {"rnn": [rnn_states[0], rnn_states[1]]} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # critic is only used during training rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence sequence_index = 1 if role in ["target_critic_1", "target_critic_2"] else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(torch.cat([rnn_output, inputs["taken_actions"]], 
dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {"rnn": [rnn_states[0], rnn_states[1]]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). # TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg = TD3_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device) cfg["smooth_regularization_clip"] = 0.5 cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = TD3(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/jax_gym_pendulum_vector_ddpg.py
import gym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, Model from skrl.resources.noises.jax import OrnsteinUhlenbeckNoise from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixins class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact def __call__(self, inputs, role): x = nn.relu(nn.Dense(400)(inputs["states"])) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(self.num_actions)(x) # Pendulum-v1 action_space is -2 to 2 return 2 * nn.tanh(x), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1) x = nn.relu(nn.Dense(400)(x)) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False) except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0] print("Pendulum-v1 not found. Trying {}".format(env_id)) env = gym.vector.make(env_id, num_envs=10, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=100000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device) models["target_policy"] = Actor(env.observation_space, env.action_space, device) models["critic"] = Critic(env.observation_space, env.action_space, device) models["target_critic"] = Critic(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal", stddev=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg = DDPG_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device) cfg["batch_size"] = 100 cfg["random_timesteps"] = 100 cfg["learning_starts"] = 100 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/jax/Pendulum" agent = DDPG(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
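# [Editor's note: illustrative sketch, not part of the original example] A tiny
# NumPy-only illustration (independent of skrl) of the Ornstein-Uhlenbeck process
# used above for exploration noise: each sample is pulled back toward the mean by
# theta, so the noise is temporally correlated rather than white. This is the
# standard Euler discretization of the process.
import numpy as np

theta, sigma, mu, dt = 0.15, 0.1, 0.0, 1.0
rng = np.random.default_rng(0)
x, trace = 0.0, []
for _ in range(5):
    x = x + theta * (mu - x) * dt + sigma * np.sqrt(dt) * rng.standard_normal()
    trace.append(x)
# `trace` drifts smoothly around `mu` instead of jumping independently at every step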
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_ddpg.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixin class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.linear_layer_1 = nn.Linear(self.num_observations, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(inputs["states"])) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.linear_layer_1 = nn.Linear(self.num_observations + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device) models["target_policy"] = Actor(env.observation_space, env.action_space, device) models["critic"] = Critic(env.observation_space, env.action_space, device) models["target_critic"] = Critic(env.observation_space, env.action_space, device) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg = DDPG_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device) cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 1000 cfg["learning_starts"] = 1000 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = DDPG(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
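# [Editor's note: illustrative sketch, not part of the original example] What the
# NoVelocityWrapper above does to a single Pendulum observation
# [cos(theta), sin(theta), angular velocity]: the element-wise mask keeps the angle
# components and zeroes the velocity, so the task becomes partially observable.
import numpy as np

observation = np.array([0.955, 0.296, 1.23])   # example [cos, sin, angular velocity]
masked = observation * np.array([1, 1, 0])     # -> [0.955, 0.296, 0.0]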
Toni-SM/skrl/docs/source/examples/gym/jax_gym_cartpole_vector_dqn.py
import gym # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.dqn import DQN, DQN_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed from skrl.utils.model_instantiators.jax import Shape, deterministic_model config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.vector.make("CartPole-v0", num_envs=5, asynchronous=False) except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("CartPole-v")][0] print("CartPole-v0 not found. Trying {}".format(env_id)) env = gym.vector.make(env_id, num_envs=5, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=200000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators) using the model instantiator utility. # DQN requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#models models = {} models["q_network"] = deterministic_model(observation_space=env.observation_space, action_space=env.action_space, device=device, clip_actions=False, input_shape=Shape.OBSERVATIONS, hiddens=[64, 64], hidden_activation=["relu", "relu"], output_shape=Shape.ACTIONS, output_activation=None, output_scale=1.0) models["target_q_network"] = deterministic_model(observation_space=env.observation_space, action_space=env.action_space, device=device, clip_actions=False, input_shape=Shape.OBSERVATIONS, hiddens=[64, 64], hidden_activation=["relu", "relu"], output_shape=Shape.ACTIONS, output_activation=None, output_scale=1.0) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal", stddev=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#configuration-and-hyperparameters cfg = DQN_DEFAULT_CONFIG.copy() cfg["learning_starts"] = 100 cfg["exploration"]["final_epsilon"] = 0.04 cfg["exploration"]["timesteps"] = 1500 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1000 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/jax/CartPole" agent = DQN(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 50000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_taxi_vector_sarsa.py
import gym

import torch

# import the skrl components to build the RL system
from skrl.agents.torch.sarsa import SARSA, SARSA_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.models.torch import Model, TabularMixin
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define model (tabular model) using mixin
class EpsilonGreedyPolicy(TabularMixin, Model):
    def __init__(self, observation_space, action_space, device, num_envs=1, epsilon=0.1):
        Model.__init__(self, observation_space, action_space, device)
        TabularMixin.__init__(self, num_envs)

        self.epsilon = epsilon
        self.q_table = torch.ones((num_envs, self.num_observations, self.num_actions),
                                  dtype=torch.float32, device=self.device)

    def compute(self, inputs, role):
        actions = torch.argmax(self.q_table[torch.arange(self.num_envs).view(-1, 1), inputs["states"]],
                               dim=-1, keepdim=True).view(-1, 1)

        # choose random actions for exploration according to epsilon
        indexes = (torch.rand(inputs["states"].shape[0], device=self.device) < self.epsilon).nonzero().view(-1)
        if indexes.numel():
            actions[indexes] = torch.randint(self.num_actions, (indexes.numel(), 1), device=self.device)
        return actions, {}


# load and wrap the gym environment.
# note: the environment version may change depending on the gym version
try:
    env = gym.vector.make("Taxi-v3", num_envs=10, asynchronous=False)
except gym.error.DeprecatedEnv as e:
    env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Taxi-v")][0]
    print("Taxi-v3 not found. Trying {}".format(env_id))
    env = gym.vector.make(env_id, num_envs=10, asynchronous=False)
env = wrap_env(env)

device = env.device


# instantiate the agent's model (table)
# SARSA requires 1 model, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/sarsa.html#models
models = {}
models["policy"] = EpsilonGreedyPolicy(env.observation_space, env.action_space, device, num_envs=env.num_envs, epsilon=0.1)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/sarsa.html#configuration-and-hyperparameters
cfg = SARSA_DEFAULT_CONFIG.copy()
cfg["discount_factor"] = 0.999
cfg["alpha"] = 0.4
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 1600
cfg["experiment"]["checkpoint_interval"] = 8000
cfg["experiment"]["directory"] = "runs/torch/Taxi"

agent = SARSA(models=models,
              memory=None,
              cfg=cfg,
              observation_space=env.observation_space,
              action_space=env.action_space,
              device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 80000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
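# [Editor's note: illustrative sketch, not part of the original example] A
# self-contained check of the epsilon-greedy selection implemented in
# EpsilonGreedyPolicy.compute() above, on a toy 2-env / 3-state / 4-action table
# (torch only, no environment needed).
import torch

num_envs, num_states, num_actions, epsilon = 2, 3, 4, 0.1
q_table = torch.rand((num_envs, num_states, num_actions))
states = torch.tensor([[0], [2]])  # one discrete state per environment

greedy = torch.argmax(q_table[torch.arange(num_envs).view(-1, 1), states], dim=-1, keepdim=True).view(-1, 1)
explore = (torch.rand(states.shape[0]) < epsilon).nonzero().view(-1)
actions = greedy.clone()
if explore.numel():
    actions[explore] = torch.randint(num_actions, (explore.numel(), 1))
# `actions` has shape (num_envs, 1): greedy with probability 1 - epsilon, random otherwise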
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_trpo_lstm.py
import gym import numpy as np import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.trpo import TRPO_DEFAULT_CONFIG from skrl.agents.torch.trpo import TRPO_RNN as TRPO from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # rollout else: rnn_input = 
states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(rnn_output)), self.log_std_parameter, {"rnn": [rnn_states[0], rnn_states[1]]} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 1)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), {"rnn": [rnn_states[0], rnn_states[1]]} # environment observation wrapper used to mask velocity. 
Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). # TRPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["value"] = Value(env.observation_space, env.action_space, device, num_envs=env.num_envs) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#configuration-and-hyperparameters cfg = TRPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.9 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["grad_norm_clip"] = 0.5 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = TRPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
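# [Editor's note: illustrative sketch, not part of the original example] How the
# sequence-splitting index computation inside Policy.compute() / Value.compute()
# above behaves: with a termination at step 2 of a 4-step sequence, the LSTM is
# run on sub-sequences [0:3] and [3:4], and its hidden/cell states are zeroed in
# between.
import torch

sequence_length = 4
terminated = torch.tensor([[False, False, True, False],
                           [False, False, False, False]])  # a batch of 2 sequences
indexes = [0] + (terminated[:, :-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [sequence_length]
# indexes == [0, 3, 4] -> the LSTM processes chunks [0:3] and [3:4]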
Toni-SM/skrl/docs/source/examples/gym/torch_gym_cartpole_cem.py
import gym

import torch.nn as nn
import torch.nn.functional as F

# import the skrl components to build the RL system
from skrl.agents.torch.cem import CEM, CEM_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import CategoricalMixin, Model
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define model (categorical model) using mixin
class Policy(CategoricalMixin, Model):
    def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True):
        Model.__init__(self, observation_space, action_space, device)
        CategoricalMixin.__init__(self, unnormalized_log_prob)

        self.linear_layer_1 = nn.Linear(self.num_observations, 64)
        self.linear_layer_2 = nn.Linear(64, 64)
        self.output_layer = nn.Linear(64, self.num_actions)

    def compute(self, inputs, role):
        x = F.relu(self.linear_layer_1(inputs["states"]))
        x = F.relu(self.linear_layer_2(x))
        return self.output_layer(x), {}


# load and wrap the gym environment.
# note: the environment version may change depending on the gym version
try:
    env = gym.make("CartPole-v0")
except gym.error.DeprecatedEnv as e:
    env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("CartPole-v")][0]
    print("CartPole-v0 not found. Trying {}".format(env_id))
    env = gym.make(env_id)
env = wrap_env(env)

device = env.device


# instantiate a memory as experience replay
memory = RandomMemory(memory_size=1000, num_envs=env.num_envs, device=device, replacement=False)


# instantiate the agent's model (function approximator).
# CEM requires 1 model, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/cem.html#models
models = {}
models["policy"] = Policy(env.observation_space, env.action_space, device)

# initialize models' parameters (weights and biases)
for model in models.values():
    model.init_parameters(method_name="normal_", mean=0.0, std=0.1)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/cem.html#configuration-and-hyperparameters
cfg = CEM_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 1000
cfg["learning_starts"] = 100
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 1000
cfg["experiment"]["checkpoint_interval"] = 5000
cfg["experiment"]["directory"] = "runs/torch/CartPole"

agent = CEM(models=models,
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 100000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
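# [Editor's note: illustrative sketch, not part of the original example] The Policy
# above returns unnormalized log-probabilities (logits); roughly speaking, the
# categorical mixin turns them into a discrete action distribution the way torch
# does here.
import torch

logits = torch.tensor([[1.0, -0.5]])              # one sample, two discrete actions
dist = torch.distributions.Categorical(logits=logits)
action = dist.sample()                            # 0 or 1, sampled from softmax(logits)
log_prob = dist.log_prob(action)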
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_ppo_lstm.py
import gym import numpy as np import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO_DEFAULT_CONFIG from skrl.agents.torch.ppo import PPO_RNN as PPO from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, 
(hidden_states, cell_states)) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(rnn_output)), self.log_std_parameter, {"rnn": [rnn_states[0], rnn_states[1]]} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 1)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), {"rnn": [rnn_states[0], rnn_states[1]]} # environment observation wrapper used to mask velocity. 
Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). # PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["value"] = Value(env.observation_space, env.action_space, device, num_envs=env.num_envs) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.9 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["grad_norm_clip"] = 0.5 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = False cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 0.5 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_ppo_rnn.py
import gym import numpy as np import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO_DEFAULT_CONFIG from skrl.agents.torch.ppo import PPO_RNN as PPO from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(rnn_output)), self.log_std_parameter, {"rnn": [hidden_states]} class Value(DeterministicMixin, Model): def __init__(self, 
observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 1)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["value"] = Value(env.observation_space, env.action_space, device, num_envs=env.num_envs) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.9 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["grad_norm_clip"] = 0.5 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = False cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 0.5 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_sac_lstm.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.sac import SAC_DEFAULT_CONFIG from skrl.agents.torch.sac import SAC_RNN as SAC from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Actor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # rollout else: rnn_input = 
states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(rnn_output)) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), self.log_std_parameter, {"rnn": [rnn_states[0], rnn_states[1]]} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # critic is only used during training rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence sequence_index = 1 if role in ["target_critic_1", "target_critic_2"] else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = 
F.relu(self.linear_layer_1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {"rnn": [rnn_states[0], rnn_states[1]]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). # SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg = SAC_DEFAULT_CONFIG.copy() cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 cfg["learn_entropy"] = True # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = SAC(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
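# [Editor's note: illustrative sketch, not part of the original example] The
# critics above pick the initial LSTM state from a different position of the
# stored sequence depending on the role: position 0 for the critics and position 1
# for the target critics, which act on the next state of the environment.
# Shape-wise, that selection looks like this.
import torch

D_num_layers, N, L, Hout = 1, 5, 20, 400
hidden_states = torch.zeros(D_num_layers, N, L, Hout)
critic_init = hidden_states[:, :, 0, :].contiguous()         # (D * num_layers, N, Hout)
target_critic_init = hidden_states[:, :, 1, :].contiguous()  # next step's stored state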
Toni-SM/skrl/docs/source/examples/gym/torch_gym_cartpole_dqn.py
import gym # import the skrl components to build the RL system from skrl.agents.torch.dqn import DQN, DQN_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed from skrl.utils.model_instantiators.torch import Shape, deterministic_model # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.make("CartPole-v0") except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("CartPole-v")][0] print("CartPole-v0 not found. Trying {}".format(env_id)) env = gym.make(env_id) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=50000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators) using the model instantiator utility. # DQN requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#models models = {} models["q_network"] = deterministic_model(observation_space=env.observation_space, action_space=env.action_space, device=device, clip_actions=False, input_shape=Shape.OBSERVATIONS, hiddens=[64, 64], hidden_activation=["relu", "relu"], output_shape=Shape.ACTIONS, output_activation=None, output_scale=1.0) models["target_q_network"] = deterministic_model(observation_space=env.observation_space, action_space=env.action_space, device=device, clip_actions=False, input_shape=Shape.OBSERVATIONS, hiddens=[64, 64], hidden_activation=["relu", "relu"], output_shape=Shape.ACTIONS, output_activation=None, output_scale=1.0) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#configuration-and-hyperparameters cfg = DQN_DEFAULT_CONFIG.copy() cfg["learning_starts"] = 100 cfg["exploration"]["final_epsilon"] = 0.04 cfg["exploration"]["timesteps"] = 1500 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1000 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/CartPole" agent = DQN(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 50000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
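# [Editor's note: illustrative sketch, not part of the original example] The
# deterministic_model(...) instantiator calls above build, roughly, the same kind
# of model as this hand-written version using the mixin pattern found elsewhere in
# these examples (a sketch only; the instantiator supports more options).
import torch.nn as nn
import torch.nn.functional as F

from skrl.models.torch import DeterministicMixin, Model


class QNetwork(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.fc1 = nn.Linear(self.num_observations, 64)
        self.fc2 = nn.Linear(64, 64)
        self.out = nn.Linear(64, self.num_actions)

    def compute(self, inputs, role):
        x = F.relu(self.fc1(inputs["states"]))
        x = F.relu(self.fc2(x))
        return self.out(x), {}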
Toni-SM/skrl/docs/source/examples/gym/torch_gym_cartpole_vector_dqn.py
import gym # import the skrl components to build the RL system from skrl.agents.torch.dqn import DQN, DQN_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed from skrl.utils.model_instantiators.torch import Shape, deterministic_model # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.vector.make("CartPole-v0", num_envs=5, asynchronous=False) except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("CartPole-v")][0] print("CartPole-v0 not found. Trying {}".format(env_id)) env = gym.vector.make(env_id, num_envs=5, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=200000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators) using the model instantiator utility. # DQN requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#models models = {} models["q_network"] = deterministic_model(observation_space=env.observation_space, action_space=env.action_space, device=device, clip_actions=False, input_shape=Shape.OBSERVATIONS, hiddens=[64, 64], hidden_activation=["relu", "relu"], output_shape=Shape.ACTIONS, output_activation=None, output_scale=1.0) models["target_q_network"] = deterministic_model(observation_space=env.observation_space, action_space=env.action_space, device=device, clip_actions=False, input_shape=Shape.OBSERVATIONS, hiddens=[64, 64], hidden_activation=["relu", "relu"], output_shape=Shape.ACTIONS, output_activation=None, output_scale=1.0) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#configuration-and-hyperparameters cfg = DQN_DEFAULT_CONFIG.copy() cfg["learning_starts"] = 100 cfg["exploration"]["final_epsilon"] = 0.04 cfg["exploration"]["timesteps"] = 1500 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1000 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/CartPole" agent = DQN(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 50000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
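# [Editor's note: illustrative sketch, not part of the original example] The
# try/except fallback above depends on the ids registered in the installed gym
# version; something like this lists the CartPole variants that would be
# considered (old-style gym registry API, the same one used in the example).
import gym

cartpole_ids = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("CartPole-v")]
print(cartpole_ids)  # e.g. ['CartPole-v0', 'CartPole-v1'] depending on the gym version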
Toni-SM/skrl/docs/source/examples/gym/jax_gym_pendulum_td3.py
import gym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.td3 import TD3, TD3_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, Model from skrl.resources.noises.jax import GaussianNoise from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixin class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact def __call__(self, inputs, role): x = nn.relu(nn.Dense(400)(inputs["states"])) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(self.num_actions)(x) # Pendulum-v1 action_space is -2 to 2 return 2 * nn.tanh(x), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1) x = nn.relu(nn.Dense(400)(x)) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.make("Pendulum-v1") except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0] print("Pendulum-v1 not found. Trying {}".format(env_id)) env = gym.make(env_id) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device) models["target_policy"] = Actor(env.observation_space, env.action_space, device) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal", stddev=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg = TD3_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device) cfg["smooth_regularization_clip"] = 0.5 cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 1000 cfg["learning_starts"] = 1000 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/jax/Pendulum" agent = TD3(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/jax_gym_pendulum_ppo.py
import gym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.resources.schedulers.jax import KLAdaptiveRL from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.relu(nn.Dense(64)(inputs["states"])) x = nn.relu(nn.Dense(64)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) # Pendulum-v1 action_space is -2 to 2 return 2 * nn.tanh(x), log_std, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.relu(nn.Dense(64)(inputs["states"])) x = nn.relu(nn.Dense(64)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.vector.make("Pendulum-v1", num_envs=4, asynchronous=False) except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0] print("Pendulum-v1 not found. Trying {}".format(env_id)) env = gym.vector.make(env_id, num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True) models["value"] = Value(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.9 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["grad_norm_clip"] = 0.5 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = False cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 0.5 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/jax/Pendulum" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/jax_gym_pendulum_sac.py
import gym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.sac import SAC, SAC_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Actor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-5, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.relu(nn.Dense(400)(inputs["states"])) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) # Pendulum-v1 action_space is -2 to 2 return 2 * nn.tanh(x), log_std, {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1) x = nn.relu(nn.Dense(400)(x)) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.make("Pendulum-v1") except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0] print("Pendulum-v1 not found. Trying {}".format(env_id)) env = gym.make(env_id) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, clip_actions=True) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal", stddev=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg = SAC_DEFAULT_CONFIG.copy() cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 cfg["learn_entropy"] = True # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/jax/Pendulum" agent = SAC(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_trpo_gru.py
import gym import numpy as np import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.trpo import TRPO_DEFAULT_CONFIG from skrl.agents.torch.trpo import TRPO_RNN as TRPO from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(rnn_output)), self.log_std_parameter, {"rnn": [hidden_states]} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, 
num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 1)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# TRPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["value"] = Value(env.observation_space, env.action_space, device, num_envs=env.num_envs) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#configuration-and-hyperparameters cfg = TRPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.9 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["grad_norm_clip"] = 0.5 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = TRPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
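# --- optional sanity-check sketch (added commentary, not part of the original example) ---
# a small, hedged check that the NoVelocityWrapper defined above really zeroes the
# angular-velocity component of the Pendulum observation (cos(theta), sin(theta), angular velocity).
# it can be pasted before training; env.reset() may return either `obs` or `(obs, info)`
# depending on the gym version, so both cases are handled.
# check_env = gym.make("PendulumNoVel-v1")
# ret = check_env.reset()
# obs = ret[0] if isinstance(ret, tuple) else ret
# assert obs[2] == 0, "angular velocity should be masked to 0"
# check_env.close()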
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_td3_rnn.py
import gym import numpy as np import torch import torch.nn as nn import torch.nn.functional as F # import the skrl components to build the RL system from skrl.agents.torch.td3 import TD3_DEFAULT_CONFIG from skrl.agents.torch.td3 import TD3_RNN as TD3 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import GaussianNoise from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixin class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence sequence_index = 1 if role == "target_policy" else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(rnn_output)) x = F.relu(self.linear_layer_2(x)) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.action_layer(x)), {"rnn": [hidden_states]} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, 
device, clip_actions=False, num_envs=1, num_layers=1, hidden_size=400, sequence_length=20): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.linear_layer_1 = nn.Linear(self.hidden_size + self.num_actions, 400) self.linear_layer_2 = nn.Linear(400, 300) self.linear_layer_3 = nn.Linear(300, 1) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # critic is only used during training rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence sequence_index = 1 if role in ["target_critic_1", "target_critic_2"] else 0 # target networks act on the next state of the environment hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = F.relu(self.linear_layer_1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))) x = F.relu(self.linear_layer_2(x)) return self.linear_layer_3(x), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.make("PendulumNoVel-v1") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=20000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_policy"] = Actor(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device, num_envs=env.num_envs) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg = TD3_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device) cfg["smooth_regularization_clip"] = 0.5 cfg["discount_factor"] = 0.98 cfg["batch_size"] = 100 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 1000 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = TD3(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
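# --- optional shape-check sketch (added commentary, not part of the original example) ---
# a small illustration of the hidden-state layout documented in the RNN models above:
# with batch_first=True the input is (N, L, Hin), the output is (N, L, D * Hout) and the hidden
# state is (D * num_layers, N, Hout), matching the sizes declared in get_specification().
# it reuses the torch / torch.nn imports from the example above.
_N, _L, _Hin, _Hout, _layers = 4, 20, 3, 400, 1           # num_envs, sequence_length, obs dim, hidden size, layers
_demo_rnn = nn.RNN(input_size=_Hin, hidden_size=_Hout, num_layers=_layers, batch_first=True)
_x = torch.zeros(_N, _L, _Hin)                            # (N, L, Hin)
_h0 = torch.zeros(_layers, _N, _Hout)                     # (D * num_layers, N, Hout), D=1 (unidirectional)
_y, _hn = _demo_rnn(_x, _h0)
assert _y.shape == (_N, _L, _Hout) and _hn.shape == (_layers, _N, _Hout)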
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulum_trpo.py
import gym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(inputs["states"])), self.log_std_parameter, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.vector.make("Pendulum-v1", num_envs=4, asynchronous=False) except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0] print("Pendulum-v1 not found. Trying {}".format(env_id)) env = gym.vector.make(env_id, num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# TRPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True) models["value"] = Value(env.observation_space, env.action_space, device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#configuration-and-hyperparameters cfg = TRPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["grad_norm_clip"] = 0.5 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/Pendulum" agent = TRPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
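# --- optional note (added commentary, not part of the original example) ---
# the policies in these Pendulum examples return `2 * tanh(...)` because the Pendulum-v1
# torque is bounded to [-2, 2]; this can be confirmed directly from the action space:
# print(gym.make("Pendulum-v1").action_space.low, gym.make("Pendulum-v1").action_space.high)  # [-2.] [2.]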
Toni-SM/skrl/docs/source/examples/bidexhands/torch_bidexhands_shadow_hand_over_mappo.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.envs.loaders.torch import load_bidexhands_env from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.multi_agents.torch.mappo import MAPPO, MAPPO_DEFAULT_CONFIG from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # load and wrap the environment env = load_bidexhands_env(task_name="ShadowHandOver") env = wrap_env(env, wrapper="bidexhands") device = env.device # instantiate memories as rollout buffer (any memory can be used for this) memories = {} for agent_name in env.possible_agents: memories[agent_name] = RandomMemory(memory_size=24, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# MAPPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/multi_agents/mappo.html#models models = {} for agent_name in env.possible_agents: models[agent_name] = {} models[agent_name]["policy"] = Policy(env.observation_space(agent_name), env.action_space(agent_name), device) models[agent_name]["value"] = Value(env.shared_observation_space(agent_name), env.action_space(agent_name), device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/multi_agents/mappo.html#configuration-and-hyperparameters cfg = MAPPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 24 # memory_size cfg["learning_epochs"] = 5 cfg["mini_batches"] = 6 # 24 * 4096 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 3e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.001 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": next(iter(env.observation_spaces.values())), "device": device} cfg["shared_state_preprocessor"] = RunningStandardScaler cfg["shared_state_preprocessor_kwargs"] = { "size": next(iter(env.shared_observation_spaces.values())), "device": device } cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 180 cfg["experiment"]["checkpoint_interval"] = 1800 cfg["experiment"]["directory"] = "runs/torch/ShadowHandOver" agent = MAPPO(possible_agents=env.possible_agents, models=models, memories=memories, cfg=cfg, observation_spaces=env.observation_spaces, action_spaces=env.action_spaces, device=device, shared_observation_spaces=env.shared_observation_spaces) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 36000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
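# --- optional note on the mini-batch arithmetic (added commentary, not part of the original example) ---
# the comment "24 * 4096 / 16384" above assumes the task's default of 4096 parallel environments:
# each update gathers rollouts * num_envs = 24 * 4096 = 98304 samples per agent, and splitting
# them into mini-batches of 16384 samples yields 98304 / 16384 = 6, the value set in cfg["mini_batches"].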
Toni-SM/skrl/docs/source/examples/bidexhands/jax_bidexhands_shadow_hand_over_mappo.py
import isaacgym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.envs.loaders.jax import load_bidexhands_env from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.multi_agents.jax.mappo import MAPPO, MAPPO_DEFAULT_CONFIG from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.resources.schedulers.jax import KLAdaptiveRL from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(512)(inputs["states"])) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return x, log_std, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(512)(inputs["states"])) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the environment env = load_bidexhands_env(task_name="ShadowHandOver") env = wrap_env(env, wrapper="bidexhands") device = env.device # instantiate memories as rollout buffer (any memory can be used for this) memories = {} for agent_name in env.possible_agents: memories[agent_name] = RandomMemory(memory_size=24, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# MAPPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/multi_agents/mappo.html#models models = {} for agent_name in env.possible_agents: models[agent_name] = {} models[agent_name]["policy"] = Policy(env.observation_space(agent_name), env.action_space(agent_name), device) models[agent_name]["value"] = Value(env.shared_observation_space(agent_name), env.action_space(agent_name), device) # instantiate models' state dict for agent_name in env.possible_agents: for role, model in models[agent_name].items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/multi_agents/mappo.html#configuration-and-hyperparameters cfg = MAPPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 24 # memory_size cfg["learning_epochs"] = 5 cfg["mini_batches"] = 6 # 24 * 4096 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 3e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.001 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": next(iter(env.observation_spaces.values())), "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 180 cfg["experiment"]["checkpoint_interval"] = 1800 cfg["experiment"]["directory"] = "runs/jax/ShadowHandOver" agent = MAPPO(possible_agents=env.possible_agents, models=models, memories=memories, cfg=cfg, observation_spaces=env.observation_spaces, action_spaces=env.action_spaces, device=device, shared_observation_spaces=env.shared_observation_spaces) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 36000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/bidexhands/torch_bidexhands_shadow_hand_over_ippo.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.envs.loaders.torch import load_bidexhands_env from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.multi_agents.torch.ippo import IPPO, IPPO_DEFAULT_CONFIG from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # load and wrap the environment env = load_bidexhands_env(task_name="ShadowHandOver") env = wrap_env(env, wrapper="bidexhands") device = env.device # instantiate memories as rollout buffer (any memory can be used for this) memories = {} for agent_name in env.possible_agents: memories[agent_name] = RandomMemory(memory_size=24, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# IPPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/multi_agents/ippo.html#models models = {} for agent_name in env.possible_agents: models[agent_name] = {} models[agent_name]["policy"] = Policy(env.observation_space(agent_name), env.action_space(agent_name), device) models[agent_name]["value"] = Value(env.observation_space(agent_name), env.action_space(agent_name), device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/multi_agents/ippo.html#configuration-and-hyperparameters cfg = IPPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 24 # memory_size cfg["learning_epochs"] = 5 cfg["mini_batches"] = 6 # 24 * 4096 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 3e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.001 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": next(iter(env.observation_spaces.values())), "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 180 cfg["experiment"]["checkpoint_interval"] = 1800 cfg["experiment"]["directory"] = "runs/torch/ShadowHandOver" agent = IPPO(possible_agents=env.possible_agents, models=models, memories=memories, cfg=cfg, observation_spaces=env.observation_spaces, action_spaces=env.action_spaces, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 36000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/bidexhands/jax_bidexhands_shadow_hand_over_ippo.py
import isaacgym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.envs.loaders.jax import load_bidexhands_env from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.multi_agents.jax.ippo import IPPO, IPPO_DEFAULT_CONFIG from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.resources.schedulers.jax import KLAdaptiveRL from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(512)(inputs["states"])) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return x, log_std, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(512)(inputs["states"])) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the environment env = load_bidexhands_env(task_name="ShadowHandOver") env = wrap_env(env, wrapper="bidexhands") device = env.device # instantiate memories as rollout buffer (any memory can be used for this) memories = {} for agent_name in env.possible_agents: memories[agent_name] = RandomMemory(memory_size=24, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# IPPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/multi_agents/ippo.html#models models = {} for agent_name in env.possible_agents: models[agent_name] = {} models[agent_name]["policy"] = Policy(env.observation_space(agent_name), env.action_space(agent_name), device) models[agent_name]["value"] = Value(env.observation_space(agent_name), env.action_space(agent_name), device) # instantiate models' state dict for agent_name in env.possible_agents: for role, model in models[agent_name].items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/multi_agents/ippo.html#configuration-and-hyperparameters cfg = IPPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 24 # memory_size cfg["learning_epochs"] = 5 cfg["mini_batches"] = 6 # 24 * 4096 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 3e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.001 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": next(iter(env.observation_spaces.values())), "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 180 cfg["experiment"]["checkpoint_interval"] = 1800 cfg["experiment"]["directory"] = "runs/jax/ShadowHandOver" agent = IPPO(possible_agents=env.possible_agents, models=models, memories=memories, cfg=cfg, observation_spaces=env.observation_spaces, action_spaces=env.action_spaces, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 36000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ingenuity_ppo.py
import isaacgym import isaacgymenvs import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility seed = set_seed() # e.g. `set_seed(42)` for fixed seed # define shared model (stochastic and deterministic models) using mixins class Shared(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 256), nn.ELU(), nn.Linear(256, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU()) self.mean_layer = nn.Linear(128, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) self.value_layer = nn.Linear(128, 1) def act(self, inputs, role): if role == "policy": return GaussianMixin.act(self, inputs, role) elif role == "value": return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if role == "policy": return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {} elif role == "value": return self.value_layer(self.net(inputs["states"])), {} # load and wrap the Isaac Gym environment using the easy-to-use API from NVIDIA env = isaacgymenvs.make(seed=seed, task="Ingenuity", num_envs=4096, sim_device="cuda:0", rl_device="cuda:0", graphics_device_id=0, headless=True) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Shared(env.observation_space, env.action_space, device) models["value"] = models["policy"] # same instance: shared model # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 16 # memory_size cfg["learning_epochs"] = 8 cfg["mini_batches"] = 4 # 16 * 4096 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.016} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 40 cfg["experiment"]["checkpoint_interval"] = 400 cfg["experiment"]["directory"] = "runs/torch/Ingenuity" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 8000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train() # # --------------------------------------------------------- # # comment the code above: `trainer.train()`, and... # # uncomment the following lines to evaluate a trained agent # # --------------------------------------------------------- # from skrl.utils.huggingface import download_model_from_huggingface # # download the trained agent's checkpoint from Hugging Face Hub and load it # path = download_model_from_huggingface("skrl/IsaacGymEnvs-Ingenuity-PPO", filename="agent.pt") # agent.load(path) # # start evaluation # trainer.eval()
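# --- optional sketch of the shared-model role dispatch (added commentary, not part of the original example) ---
# the Shared class above routes calls by role: "policy" uses the Gaussian head and "value" the
# deterministic head, so a single network body serves both. a minimal, hedged illustration with
# dummy observations (it assumes skrl's mixin convention of returning a 3-element tuple from act()):
# dummy_states = torch.randn(4, env.observation_space.shape[0], device=device)
# actions, log_prob, _ = models["policy"].act({"states": dummy_states}, role="policy")
# values, _, _ = models["value"].act({"states": dummy_states}, role="value")
# print(actions.shape, values.shape)  # expected: (4, num_actions) and (4, 1)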
Toni-SM/skrl/docs/source/examples/isaacgym/torch_factory_task_nut_bolt_pick_ppo.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define shared model (stochastic and deterministic models) using mixins class Shared(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, 64), nn.ELU()) self.mean_layer = nn.Linear(64, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) self.value_layer = nn.Linear(64, 1) def act(self, inputs, role): if role == "policy": return GaussianMixin.act(self, inputs, role) elif role == "value": return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if role == "policy": return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {} elif role == "value": return self.value_layer(self.net(inputs["states"])), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="FactoryTaskNutBoltPick") env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=120, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Shared(env.observation_space, env.action_space, device) models["value"] = models["policy"] # same instance: shared model # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 120 # memory_size cfg["learning_epochs"] = 8 cfg["mini_batches"] = 30 # 120 * 128 / 512 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-4 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0.016 cfg["rewards_shaper"] = None cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 614 cfg["experiment"]["checkpoint_interval"] = 6144 cfg["experiment"]["directory"] = "runs/torch/FactoryTaskNutBoltPick" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 122880, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train() # # --------------------------------------------------------- # # comment the code above: `trainer.train()`, and... # # uncomment the following lines to evaluate a trained agent # # --------------------------------------------------------- # from skrl.utils.huggingface import download_model_from_huggingface # # download the trained agent's checkpoint from Hugging Face Hub and load it # path = download_model_from_huggingface("skrl/IsaacGymEnvs-FactoryTaskNutBoltPick-PPO", filename="agent.pt") # agent.load(path) # # start evaluation # trainer.eval()
Toni-SM/skrl/docs/source/examples/isaacgym/trpo_cartpole.py
import isaacgym

import torch
import torch.nn as nn

# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.wrappers.torch import wrap_env
from skrl.envs.loaders.torch import load_isaacgym_env_preview4
from skrl.utils import set_seed


# set the seed for reproducibility
set_seed(42)


# Define the models (stochastic and deterministic models) for the agent using mixins.
# - Policy: takes as input the environment's observation/state and returns an action
# - Value: takes the state as input and provides a value to guide the policy
class Policy(GaussianMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
                                 nn.ELU(),
                                 nn.Linear(32, 32),
                                 nn.ELU(),
                                 nn.Linear(32, self.num_actions))
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), self.log_std_parameter, {}

class Value(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
                                 nn.ELU(),
                                 nn.Linear(32, 32),
                                 nn.ELU(),
                                 nn.Linear(32, 1))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


# Load and wrap the Isaac Gym environment
env = load_isaacgym_env_preview4(task_name="Cartpole")   # preview 3 and 4 use the same loader
env = wrap_env(env)

device = env.device


# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)


# Instantiate the agent's models (function approximators).
# TRPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#models
models_trpo = {}
models_trpo["policy"] = Policy(env.observation_space, env.action_space, device)
models_trpo["value"] = Value(env.observation_space, env.action_space, device)


# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#configuration-and-hyperparameters
cfg_trpo = TRPO_DEFAULT_CONFIG.copy()
cfg_trpo["rollouts"] = 16  # memory_size
cfg_trpo["learning_epochs"] = 8
cfg_trpo["mini_batches"] = 1
cfg_trpo["discount_factor"] = 0.99
cfg_trpo["lambda"] = 0.95
cfg_trpo["learning_rate"] = 3e-4
cfg_trpo["grad_norm_clip"] = 1.0
cfg_trpo["value_loss_scale"] = 2.0
cfg_trpo["state_preprocessor"] = RunningStandardScaler
cfg_trpo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg_trpo["value_preprocessor"] = RunningStandardScaler
cfg_trpo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints every 16 and 80 timesteps respectively
cfg_trpo["experiment"]["write_interval"] = 16
cfg_trpo["experiment"]["checkpoint_interval"] = 80

agent = TRPO(models=models_trpo,
             memory=memory,
             cfg=cfg_trpo,
             observation_space=env.observation_space,
             action_space=env.action_space,
             device=device)


# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 1600, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)

# start training
trainer.train()
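
# # ---------------------------------------------------------
# # minimal evaluation sketch: comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate an agent trained with this script.
# # the checkpoint path is a placeholder (no experiment directory is set above, so
# # skrl typically writes checkpoints under `runs/<experiment_name>/checkpoints/`);
# # adjust it to your run
# # ---------------------------------------------------------
# path = "runs/<experiment_name>/checkpoints/best_agent.pt"  # placeholder path
# agent.load(path)

# # start evaluation
# trainer.eval()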
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ant_ddpg_td3_sac_parallel_unshared_memory.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.noises.torch import GaussianNoise, OrnsteinUhlenbeckNoise from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import ParallelTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class StochasticActor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-5, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class DeterministicActor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) def compute(self, inputs, role): return self.net(inputs["states"]), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} if __name__ == '__main__': # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=192) env = wrap_env(env) device = env.device # instantiate memories as experience replay (unique for each agents). # scopes (192 envs): DDPG 64, TD3 64 and SAC 64 memory_ddpg = RandomMemory(memory_size=15625, num_envs=64, device=device) memory_td3 = RandomMemory(memory_size=15625, num_envs=64, device=device) memory_sac = RandomMemory(memory_size=15625, num_envs=64, device=device) # instantiate the agents' models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models_ddpg = {} models_ddpg["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_ddpg["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_ddpg["critic"] = Critic(env.observation_space, env.action_space, device) models_ddpg["target_critic"] = Critic(env.observation_space, env.action_space, device) # TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models_td3 = {} models_td3["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_td3["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_td3["critic_1"] = Critic(env.observation_space, env.action_space, device) models_td3["critic_2"] = Critic(env.observation_space, env.action_space, device) models_td3["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models_td3["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models_sac = {} models_sac["policy"] = StochasticActor(env.observation_space, env.action_space, device, clip_actions=True) models_sac["critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["critic_2"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # configure and instantiate the agents (visit their documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg_ddpg = DDPG_DEFAULT_CONFIG.copy() cfg_ddpg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=0.5, device=device) cfg_ddpg["gradient_steps"] = 1 cfg_ddpg["batch_size"] = 4096 cfg_ddpg["discount_factor"] = 0.99 cfg_ddpg["polyak"] = 0.005 cfg_ddpg["actor_learning_rate"] = 5e-4 cfg_ddpg["critic_learning_rate"] = 5e-4 cfg_ddpg["random_timesteps"] = 80 cfg_ddpg["learning_starts"] = 80 cfg_ddpg["state_preprocessor"] = RunningStandardScaler cfg_ddpg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg_ddpg["experiment"]["write_interval"] = 800 cfg_ddpg["experiment"]["checkpoint_interval"] = 8000 cfg_ddpg["experiment"]["directory"] = "runs/torch/Ant" # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg_td3 = TD3_DEFAULT_CONFIG.copy() cfg_td3["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg_td3["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device) cfg_td3["smooth_regularization_clip"] = 0.5 cfg_td3["gradient_steps"] = 1 cfg_td3["batch_size"] = 4096 cfg_td3["discount_factor"] = 0.99 cfg_td3["polyak"] = 0.005 cfg_td3["actor_learning_rate"] = 5e-4 cfg_td3["critic_learning_rate"] = 5e-4 cfg_td3["random_timesteps"] = 80 cfg_td3["learning_starts"] = 80 cfg_td3["state_preprocessor"] = RunningStandardScaler cfg_td3["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) 
cfg_td3["experiment"]["write_interval"] = 800 cfg_td3["experiment"]["checkpoint_interval"] = 8000 cfg_td3["experiment"]["directory"] = "runs/torch/Ant" # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg_sac = SAC_DEFAULT_CONFIG.copy() cfg_sac["gradient_steps"] = 1 cfg_sac["batch_size"] = 4096 cfg_sac["discount_factor"] = 0.99 cfg_sac["polyak"] = 0.005 cfg_sac["actor_learning_rate"] = 5e-4 cfg_sac["critic_learning_rate"] = 5e-4 cfg_sac["random_timesteps"] = 80 cfg_sac["learning_starts"] = 80 cfg_sac["grad_norm_clip"] = 0 cfg_sac["learn_entropy"] = True cfg_sac["entropy_learning_rate"] = 5e-3 cfg_sac["initial_entropy_value"] = 1.0 cfg_sac["state_preprocessor"] = RunningStandardScaler cfg_sac["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg_sac["experiment"]["write_interval"] = 800 cfg_sac["experiment"]["checkpoint_interval"] = 8000 cfg_sac["experiment"]["directory"] = "runs/torch/Ant" agent_ddpg = DDPG(models=models_ddpg, memory=memory_ddpg, cfg=cfg_ddpg, observation_space=env.observation_space, action_space=env.action_space, device=device) agent_td3 = TD3(models=models_td3, memory=memory_td3, cfg=cfg_td3, observation_space=env.observation_space, action_space=env.action_space, device=device) agent_sac = SAC(models=models_sac, memory=memory_sac, cfg=cfg_sac, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer and define the agent scopes cfg_trainer = {"timesteps": 160000, "headless": True} trainer = ParallelTrainer(cfg=cfg_trainer, env=env, agents=[agent_ddpg, agent_td3, agent_sac], agents_scope=[64, 64, 64]) # scopes (192 envs): DDPG 64, TD3 64 and SAC 64 # start training trainer.train()
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ant_td3.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import GaussianNoise from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixins class DeterministicActor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) def compute(self, inputs, role): return self.net(inputs["states"]), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=64) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models = {} models["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg = TD3_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg["smooth_regularization_noise"] = GaussianNoise(0, 0.1, device=device) cfg["smooth_regularization_clip"] = 0.5 cfg["gradient_steps"] = 1 cfg["batch_size"] = 4096 cfg["discount_factor"] = 0.99 cfg["polyak"] = 0.005 cfg["actor_learning_rate"] = 5e-4 cfg["critic_learning_rate"] = 5e-4 cfg["random_timesteps"] = 80 cfg["learning_starts"] = 80 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 800 cfg["experiment"]["checkpoint_interval"] = 8000 cfg["experiment"]["directory"] = "runs/torch/Ant" agent = TD3(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 160000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
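
# # ---------------------------------------------------------
# # minimal evaluation sketch: comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate an agent trained with this script.
# # the checkpoint path is a placeholder (skrl typically writes checkpoints under
# # `runs/torch/Ant/<experiment_name>/checkpoints/`); adjust it to your run
# # ---------------------------------------------------------
# path = "runs/torch/Ant/<experiment_name>/checkpoints/best_agent.pt"  # placeholder path
# agent.load(path)

# # start evaluation
# trainer.eval()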
Toni-SM/skrl/docs/source/examples/isaacgym/jax_cartpole_ppo.py
import isaacgym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.loaders.jax import load_isaacgym_env_preview4 from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.resources.schedulers.jax import KLAdaptiveRL from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(32)(inputs["states"])) x = nn.elu(nn.Dense(32)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return x, log_std, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(32)(inputs["states"])) x = nn.elu(nn.Dense(32)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Cartpole") env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device) models["value"] = Value(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 16 # memory_size cfg["learning_epochs"] = 8 cfg["mini_batches"] = 1 # 16 * 512 / 8192 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 3e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 2.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.1 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 16 cfg["experiment"]["checkpoint_interval"] = 80 cfg["experiment"]["directory"] = "runs/jax/Cartpole" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 1600, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train() # # --------------------------------------------------------- # # comment the code above: `trainer.train()`, and... # # uncomment the following lines to evaluate a trained agent # # --------------------------------------------------------- # from skrl.utils.huggingface import download_model_from_huggingface # # download the trained agent's checkpoint from Hugging Face Hub and load it # path = download_model_from_huggingface("skrl/IsaacGymEnvs-Cartpole-PPO", filename="agent.pickle") # agent.load(path) # # start evaluation # trainer.eval()
Toni-SM/skrl/docs/source/examples/isaacgym/jax_shadow_hand_ppo.py
import isaacgym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.loaders.jax import load_isaacgym_env_preview4 from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.resources.schedulers.jax import KLAdaptiveRL from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(512)(inputs["states"])) x = nn.elu(nn.Dense(512)(x)) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return x, log_std, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(512)(inputs["states"])) x = nn.elu(nn.Dense(512)(x)) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="ShadowHand") env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=8, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device) models["value"] = Value(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 8 # memory_size cfg["learning_epochs"] = 5 cfg["mini_batches"] = 4 # 8 * 16384 / 32768 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 5e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.016} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 2.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 200 cfg["experiment"]["checkpoint_interval"] = 2000 cfg["experiment"]["directory"] = "runs/jax/ShadowHand" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 40000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
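
# # ---------------------------------------------------------
# # minimal evaluation sketch: comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate an agent trained with this script.
# # the checkpoint path is a placeholder (skrl typically writes JAX checkpoints as
# # pickled files under `runs/jax/ShadowHand/<experiment_name>/checkpoints/`); adjust it to your run
# # ---------------------------------------------------------
# path = "runs/jax/ShadowHand/<experiment_name>/checkpoints/best_agent.pickle"  # placeholder path
# agent.load(path)

# # start evaluation
# trainer.eval()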
Toni-SM/skrl/docs/source/examples/isaacgym/jax_factory_task_nut_bolt_place_ppo.py
import isaacgym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.loaders.jax import load_isaacgym_env_preview4 from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(256)(inputs["states"])) x = nn.elu(nn.Dense(128)(x)) x = nn.elu(nn.Dense(64)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return x, log_std, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(256)(inputs["states"])) x = nn.elu(nn.Dense(128)(x)) x = nn.elu(nn.Dense(64)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="FactoryTaskNutBoltPlace") env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=120, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device) models["value"] = Value(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 120 # memory_size cfg["learning_epochs"] = 8 cfg["mini_batches"] = 30 # 120 * 128 / 512 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-4 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0.016 cfg["rewards_shaper"] = None cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 614 cfg["experiment"]["checkpoint_interval"] = 6144 cfg["experiment"]["directory"] = "runs/jax/FactoryTaskNutBoltPlace" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 122880, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train() # # --------------------------------------------------------- # # comment the code above: `trainer.train()`, and... # # uncomment the following lines to evaluate a trained agent # # --------------------------------------------------------- # from skrl.utils.huggingface import download_model_from_huggingface # # download the trained agent's checkpoint from Hugging Face Hub and load it # path = download_model_from_huggingface("skrl/IsaacGymEnvs-FactoryTaskNutBoltPlace-PPO", filename="agent.pickle") # agent.load(path) # # start evaluation # trainer.eval()
Toni-SM/skrl/docs/source/examples/isaacgym/torch_allegro_hand_ppo.py
import isaacgym import isaacgymenvs import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility seed = set_seed() # e.g. `set_seed(42)` for fixed seed # define shared model (stochastic and deterministic models) using mixins class Shared(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU()) self.mean_layer = nn.Linear(128, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) self.value_layer = nn.Linear(128, 1) def act(self, inputs, role): if role == "policy": return GaussianMixin.act(self, inputs, role) elif role == "value": return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if role == "policy": return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {} elif role == "value": return self.value_layer(self.net(inputs["states"])), {} # load and wrap the Isaac Gym environment using the easy-to-use API from NVIDIA env = isaacgymenvs.make(seed=seed, task="AllegroHand", num_envs=16384, sim_device="cuda:0", rl_device="cuda:0", graphics_device_id=0, headless=True) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=8, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Shared(env.observation_space, env.action_space, device) models["value"] = models["policy"] # same instance: shared model # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 8 # memory_size cfg["learning_epochs"] = 5 cfg["mini_batches"] = 4 # 8 * 16384 / 32768 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 5e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.016} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 2.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 200 cfg["experiment"]["checkpoint_interval"] = 2000 cfg["experiment"]["directory"] = "runs/torch/AllegroHand" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 40000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
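
# # ---------------------------------------------------------
# # minimal evaluation sketch: comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate an agent trained with this script.
# # the checkpoint path is a placeholder (skrl typically writes checkpoints under
# # `runs/torch/AllegroHand/<experiment_name>/checkpoints/`); adjust it to your run
# # ---------------------------------------------------------
# path = "runs/torch/AllegroHand/<experiment_name>/checkpoints/best_agent.pt"  # placeholder path
# agent.load(path)

# # start evaluation
# trainer.eval()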
Toni-SM/skrl/docs/source/examples/isaacgym/torch_humanoid_ppo.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define shared model (stochastic and deterministic models) using mixins class Shared(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 400), nn.ELU(), nn.Linear(400, 200), nn.ELU(), nn.Linear(200, 100), nn.ELU()) self.mean_layer = nn.Linear(100, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) self.value_layer = nn.Linear(100, 1) def act(self, inputs, role): if role == "policy": return GaussianMixin.act(self, inputs, role) elif role == "value": return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if role == "policy": return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {} elif role == "value": return self.value_layer(self.net(inputs["states"])), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Humanoid") env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=32, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Shared(env.observation_space, env.action_space, device) models["value"] = models["policy"] # same instance: shared model # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 32 # memory_size cfg["learning_epochs"] = 5 cfg["mini_batches"] = 4 # 32 * 4096 / 32768 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 5e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 2.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 160 cfg["experiment"]["checkpoint_interval"] = 1600 cfg["experiment"]["directory"] = "runs/torch/Humanoid" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 32000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train() # # --------------------------------------------------------- # # comment the code above: `trainer.train()`, and... # # uncomment the following lines to evaluate a trained agent # # --------------------------------------------------------- # from skrl.utils.huggingface import download_model_from_huggingface # # download the trained agent's checkpoint from Hugging Face Hub and load it # path = download_model_from_huggingface("skrl/IsaacGymEnvs-Humanoid-PPO", filename="agent.pt") # agent.load(path) # # start evaluation # trainer.eval()
Toni-SM/skrl/docs/source/examples/isaacgym/jax_ant_sac.py
import isaacgym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.sac import SAC, SAC_DEFAULT_CONFIG from skrl.envs.loaders.jax import load_isaacgym_env_preview4 from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class StochasticActor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-5, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.relu(nn.Dense(512)(inputs["states"])) x = nn.relu(nn.Dense(256)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return nn.tanh(x), log_std, {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1) x = nn.relu(nn.Dense(512)(x)) x = nn.relu(nn.Dense(256)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=64) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models = {} models["policy"] = StochasticActor(env.observation_space, env.action_space, device, clip_actions=True) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg = SAC_DEFAULT_CONFIG.copy() cfg["gradient_steps"] = 1 cfg["batch_size"] = 4096 cfg["discount_factor"] = 0.99 cfg["polyak"] = 0.005 cfg["actor_learning_rate"] = 5e-4 cfg["critic_learning_rate"] = 5e-4 cfg["random_timesteps"] = 80 cfg["learning_starts"] = 80 cfg["grad_norm_clip"] = 0 cfg["learn_entropy"] = True cfg["entropy_learning_rate"] = 5e-3 cfg["initial_entropy_value"] = 1.0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 800 cfg["experiment"]["checkpoint_interval"] = 8000 cfg["experiment"]["directory"] = "runs/jax/Ant" agent = SAC(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 160000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
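
# # ---------------------------------------------------------
# # minimal evaluation sketch: comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate an agent trained with this script.
# # the checkpoint path is a placeholder (skrl typically writes JAX checkpoints as
# # pickled files under `runs/jax/Ant/<experiment_name>/checkpoints/`); adjust it to your run
# # ---------------------------------------------------------
# path = "runs/jax/Ant/<experiment_name>/checkpoints/best_agent.pickle"  # placeholder path
# agent.load(path)

# # start evaluation
# trainer.eval()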
Toni-SM/skrl/docs/source/examples/isaacgym/jax_ingenuity_ppo.py
import isaacgym import isaacgymenvs import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.resources.schedulers.jax import KLAdaptiveRL from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility seed = set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(256)(inputs["states"])) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return x, log_std, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(256)(inputs["states"])) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the Isaac Gym environment using the easy-to-use API from NVIDIA env = isaacgymenvs.make(seed=seed, task="Ingenuity", num_envs=4096, sim_device="cuda:0", rl_device="cuda:0", graphics_device_id=0, headless=True) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device) models["value"] = Value(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 16 # memory_size cfg["learning_epochs"] = 8 cfg["mini_batches"] = 4 # 16 * 4096 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.016} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 40 cfg["experiment"]["checkpoint_interval"] = 400 cfg["experiment"]["directory"] = "runs/jax/Ingenuity" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 8000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train() # # --------------------------------------------------------- # # comment the code above: `trainer.train()`, and... # # uncomment the following lines to evaluate a trained agent # # --------------------------------------------------------- # from skrl.utils.huggingface import download_model_from_huggingface # # download the trained agent's checkpoint from Hugging Face Hub and load it # path = download_model_from_huggingface("skrl/IsaacGymEnvs-Ingenuity-PPO", filename="agent.pickle") # agent.load(path) # # start evaluation # trainer.eval()
Toni-SM/skrl/docs/source/examples/isaacgym/torch_franka_cube_stack_ppo.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define shared model (stochastic and deterministic models) using mixins class Shared(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, 64), nn.ELU()) self.mean_layer = nn.Linear(64, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) self.value_layer = nn.Linear(64, 1) def act(self, inputs, role): if role == "policy": return GaussianMixin.act(self, inputs, role) elif role == "value": return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if role == "policy": return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {} elif role == "value": return self.value_layer(self.net(inputs["states"])), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="FrankaCubeStack") env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=32, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Shared(env.observation_space, env.action_space, device) models["value"] = models["policy"] # same instance: shared model # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 32 # memory_size cfg["learning_epochs"] = 5 cfg["mini_batches"] = 16 # 32 * 8192 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 5e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 2.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = None cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1600 cfg["experiment"]["checkpoint_interval"] = 16000 cfg["experiment"]["directory"] = "runs/torch/FrankaCubeStack" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 320000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
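
# # ---------------------------------------------------------
# # minimal evaluation sketch: comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate an agent trained with this script.
# # the checkpoint path is a placeholder (skrl typically writes checkpoints under
# # `runs/torch/FrankaCubeStack/<experiment_name>/checkpoints/`); adjust it to your run
# # ---------------------------------------------------------
# path = "runs/torch/FrankaCubeStack/<experiment_name>/checkpoints/best_agent.pt"  # placeholder path
# agent.load(path)

# # start evaluation
# trainer.eval()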
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ant_sac.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class StochasticActor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-5, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=64) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models = {} models["policy"] = StochasticActor(env.observation_space, env.action_space, device, clip_actions=True) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg = SAC_DEFAULT_CONFIG.copy() cfg["gradient_steps"] = 1 cfg["batch_size"] = 4096 cfg["discount_factor"] = 0.99 cfg["polyak"] = 0.005 cfg["actor_learning_rate"] = 5e-4 cfg["critic_learning_rate"] = 5e-4 cfg["random_timesteps"] = 80 cfg["learning_starts"] = 80 cfg["grad_norm_clip"] = 0 cfg["learn_entropy"] = True cfg["entropy_learning_rate"] = 5e-3 cfg["initial_entropy_value"] = 1.0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 800 cfg["experiment"]["checkpoint_interval"] = 8000 cfg["experiment"]["directory"] = "runs/torch/Ant" agent = SAC(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 160000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
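# # ---------------------------------------------------------
# # optional: evaluate a trained agent instead of training
# # (comment out `trainer.train()` above and uncomment these lines).
# # minimal sketch: the run folder is a placeholder and the file
# # name assumes skrl's default checkpoint naming
# # ---------------------------------------------------------
# agent.load("runs/torch/Ant/<run-folder>/checkpoints/best_agent.pt")
# trainer.eval()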
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ant_ppo.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define shared model (stochastic and deterministic models) using mixins class Shared(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, 64), nn.ELU()) self.mean_layer = nn.Linear(64, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) self.value_layer = nn.Linear(64, 1) def act(self, inputs, role): if role == "policy": return GaussianMixin.act(self, inputs, role) elif role == "value": return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if role == "policy": return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {} elif role == "value": return self.value_layer(self.net(inputs["states"])), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant") env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Shared(env.observation_space, env.action_space, device) models["value"] = models["policy"] # same instance: shared model # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 16 # memory_size cfg["learning_epochs"] = 4 cfg["mini_batches"] = 2 # 16 * 4096 / 32768 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 3e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 40 cfg["experiment"]["checkpoint_interval"] = 400 cfg["experiment"]["directory"] = "runs/torch/Ant" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 8000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train() # # --------------------------------------------------------- # # comment the code above: `trainer.train()`, and... # # uncomment the following lines to evaluate a trained agent # # --------------------------------------------------------- # from skrl.utils.huggingface import download_model_from_huggingface # # download the trained agent's checkpoint from Hugging Face Hub and load it # path = download_model_from_huggingface("skrl/IsaacGymEnvs-Ant-PPO", filename="agent.pt") # agent.load(path) # # start evaluation # trainer.eval()
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ant_ddpg_td3_sac_sequential_shared_memory.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.noises.torch import GaussianNoise, OrnsteinUhlenbeckNoise from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class StochasticActor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-5, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class DeterministicActor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) def compute(self, inputs, role): return self.net(inputs["states"]), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=64) env = wrap_env(env) device = env.device # instantiate a memory as experience replay (unique to all agents) memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device) # instantiate the agents' models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models_ddpg = {} models_ddpg["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_ddpg["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_ddpg["critic"] = Critic(env.observation_space, env.action_space, device) models_ddpg["target_critic"] = Critic(env.observation_space, env.action_space, device) # TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models_td3 = {} models_td3["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_td3["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_td3["critic_1"] = Critic(env.observation_space, env.action_space, device) models_td3["critic_2"] = Critic(env.observation_space, env.action_space, device) models_td3["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models_td3["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models_sac = {} models_sac["policy"] = StochasticActor(env.observation_space, env.action_space, device, clip_actions=True) models_sac["critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["critic_2"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # configure and instantiate the agents (visit their documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg_ddpg = DDPG_DEFAULT_CONFIG.copy() cfg_ddpg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=0.5, device=device) cfg_ddpg["gradient_steps"] = 1 cfg_ddpg["batch_size"] = 4096 cfg_ddpg["discount_factor"] = 0.99 cfg_ddpg["polyak"] = 0.005 cfg_ddpg["actor_learning_rate"] = 5e-4 cfg_ddpg["critic_learning_rate"] = 5e-4 cfg_ddpg["random_timesteps"] = 80 cfg_ddpg["learning_starts"] = 80 cfg_ddpg["state_preprocessor"] = RunningStandardScaler cfg_ddpg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg_ddpg["experiment"]["write_interval"] = 800 cfg_ddpg["experiment"]["checkpoint_interval"] = 8000 cfg_ddpg["experiment"]["directory"] = "runs/torch/Ant" # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg_td3 = TD3_DEFAULT_CONFIG.copy() cfg_td3["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg_td3["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device) cfg_td3["smooth_regularization_clip"] = 0.5 cfg_td3["gradient_steps"] = 1 cfg_td3["batch_size"] = 4096 cfg_td3["discount_factor"] = 0.99 cfg_td3["polyak"] = 0.005 cfg_td3["actor_learning_rate"] = 5e-4 cfg_td3["critic_learning_rate"] = 5e-4 cfg_td3["random_timesteps"] = 80 cfg_td3["learning_starts"] = 80 cfg_td3["state_preprocessor"] = RunningStandardScaler cfg_td3["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) 
cfg_td3["experiment"]["write_interval"] = 800 cfg_td3["experiment"]["checkpoint_interval"] = 8000 cfg_td3["experiment"]["directory"] = "runs/torch/Ant" # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg_sac = SAC_DEFAULT_CONFIG.copy() cfg_sac["gradient_steps"] = 1 cfg_sac["batch_size"] = 4096 cfg_sac["discount_factor"] = 0.99 cfg_sac["polyak"] = 0.005 cfg_sac["actor_learning_rate"] = 5e-4 cfg_sac["critic_learning_rate"] = 5e-4 cfg_sac["random_timesteps"] = 80 cfg_sac["learning_starts"] = 80 cfg_sac["grad_norm_clip"] = 0 cfg_sac["learn_entropy"] = True cfg_sac["entropy_learning_rate"] = 5e-3 cfg_sac["initial_entropy_value"] = 1.0 cfg_sac["state_preprocessor"] = RunningStandardScaler cfg_sac["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg_sac["experiment"]["write_interval"] = 800 cfg_sac["experiment"]["checkpoint_interval"] = 8000 cfg_sac["experiment"]["directory"] = "runs/torch/Ant" agent_ddpg = DDPG(models=models_ddpg, memory=memory, # shared memory cfg=cfg_ddpg, observation_space=env.observation_space, action_space=env.action_space, device=device) agent_td3 = TD3(models=models_td3, memory=memory, # shared memory cfg=cfg_td3, observation_space=env.observation_space, action_space=env.action_space, device=device) agent_sac = SAC(models=models_sac, memory=memory, # shared memory cfg=cfg_sac, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 160000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent_ddpg, agent_td3, agent_sac], agents_scope=[]) # start training trainer.train()
Toni-SM/skrl/docs/source/examples/isaacgym/jax_trifinger_ppo.py
import isaacgym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.loaders.jax import load_isaacgym_env_preview4 from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.resources.schedulers.jax import KLAdaptiveRL from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(256)(inputs["states"])) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return x, log_std, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(256)(inputs["states"])) x = nn.elu(nn.Dense(256)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.elu(nn.Dense(128)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Trifinger") env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=8, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device) models["value"] = Value(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 8 # memory_size cfg["learning_epochs"] = 4 cfg["mini_batches"] = 8 # 8 * 16384 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 3e-4 cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 2.0 cfg["kl_threshold"] = 0.016 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 800 cfg["experiment"]["checkpoint_interval"] = 8000 cfg["experiment"]["directory"] = "runs/jax/Trifinger" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 160000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
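# # ---------------------------------------------------------
# # optional: evaluate a trained agent instead of training
# # (comment out `trainer.train()` above and uncomment these lines).
# # minimal sketch: the checkpoint path is a placeholder pointing to
# # a file written under the experiment directory configured above
# # ---------------------------------------------------------
# agent.load("runs/jax/Trifinger/<run-folder>/checkpoints/<checkpoint-file>")
# trainer.eval()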