file_path (string, 21–207 chars) | content (string, 5–1.02M chars) | size (int64, 5–1.02M) | lang (9 classes) | avg_line_length (float64, 1.33–100) | max_line_length (int64, 4–993) | alphanum_fraction (float64, 0.27–0.93)
---|---|---|---|---|---|---
Toni-SM/skrl/docs/source/examples/isaacorbit/torch_humanoid_ppo.py | import torch
import torch.nn as nn
# import the skrl components to build the RL system
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.torch import load_isaac_orbit_env
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define shared model (stochastic and deterministic models) using mixins
class Shared(GaussianMixin, DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-5, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 400),
nn.ELU(),
nn.Linear(400, 200),
nn.ELU(),
nn.Linear(200, 100),
nn.ELU())
self.mean_layer = nn.Linear(100, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
self.value_layer = nn.Linear(100, 1)
def act(self, inputs, role):
if role == "policy":
return GaussianMixin.act(self, inputs, role)
elif role == "value":
return DeterministicMixin.act(self, inputs, role)
def compute(self, inputs, role):
if role == "policy":
return torch.tanh(self.mean_layer(self.net(inputs["states"]))), self.log_std_parameter, {}
elif role == "value":
return self.value_layer(self.net(inputs["states"])), {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Humanoid-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=32, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Shared(env.observation_space, env.action_space, device)
models["value"] = models["policy"] # same instance: shared model
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 32 # memory_size
cfg["learning_epochs"] = 8
cfg["mini_batches"] = 8 # 32 * 1024 / 4096
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 3e-4
cfg["learning_rate_scheduler"] = KLAdaptiveRL
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 4.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = lambda rewards, *args, **kwargs: rewards * 0.01
cfg["time_limit_bootstrap"] = False
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 80
cfg["experiment"]["checkpoint_interval"] = 800
cfg["experiment"]["directory"] = "runs/torch/Isaac-Humanoid-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 16000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
# # ---------------------------------------------------------
# # comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate a trained agent
# # ---------------------------------------------------------
# from skrl.utils.huggingface import download_model_from_huggingface
# # download the trained agent's checkpoint from Hugging Face Hub and load it
# path = download_model_from_huggingface("skrl/IsaacOrbit-Isaac-Humanoid-v0-PPO", filename="agent.pt")
# agent.load(path)
# # start evaluation
# trainer.eval()
| 5,082 | Python | 37.801526 | 102 | 0.668634 |
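The `rollouts`/`mini_batches` pair in the script above follows the usual skrl PPO sizing: the rollout buffer holds `rollouts × num_envs` transitions per update, and `mini_batches` splits that into the desired gradient-step batch size. A minimal sketch of that arithmetic, assuming the 1,024 parallel environments implied by the `32 * 1024 / 4096` comment:

```python
# sizing sketch (assumes num_envs = 1024, as implied by the "32 * 1024 / 4096" comment)
rollouts = 32          # cfg["rollouts"] == memory_size
num_envs = 1024        # parallel environments in Isaac-Humanoid-v0 (assumption)
desired_batch = 4096   # transitions per gradient step

total_transitions = rollouts * num_envs            # 32768 samples collected per update
mini_batches = total_transitions // desired_batch
print(mini_batches)                                # -> 8, matching cfg["mini_batches"]
```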
Toni-SM/skrl/docs/source/examples/isaacorbit/jax_ant_sac.py | """
Notes for Isaac Sim 2022.2.1 or earlier (Python 3.7 environment):
* Python 3.7 is only supported up to jax<=0.3.25.
See: https://github.com/google/jax/blob/main/CHANGELOG.md#jaxlib-041-dec-13-2022.
* Builds for jaxlib<=0.3.25 are only available up to NVIDIA CUDA 11 and cuDNN 8.2 versions.
See: https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
and search for `cuda11/jaxlib-0.3.25+cuda11.cudnn82-cp37-cp37m-manylinux2014_x86_64.whl`.
* The `jax.Device = jax.xla.Device` statement is required by skrl to support jax<0.4.3.
* Models require overloading the `__hash__` method to avoid "TypeError: Failed to hash Flax Module".
"""
import flax.linen as nn
import jax
import jax.numpy as jnp
jax.Device = jax.xla.Device # for Isaac Sim 2022.2.1 or earlier
# import the skrl components to build the RL system
from skrl import config
from skrl.agents.jax.sac import SAC, SAC_DEFAULT_CONFIG
from skrl.envs.loaders.jax import load_isaac_orbit_env
from skrl.envs.wrappers.jax import wrap_env
from skrl.memories.jax import RandomMemory
from skrl.models.jax import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.jax import RunningStandardScaler
from skrl.trainers.jax import SequentialTrainer
from skrl.utils import set_seed
config.jax.backend = "jax" # or "numpy"
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (stochastic and deterministic models) using mixins
class StochasticActor(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False,
clip_log_std=True, min_log_std=-5, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.relu(nn.Dense(512)(inputs["states"]))
x = nn.relu(nn.Dense(256)(x))
x = nn.Dense(self.num_actions)(x)
log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions))
return nn.tanh(x), log_std, {}
class Critic(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)
x = nn.relu(nn.Dense(512)(x))
x = nn.relu(nn.Dense(256)(x))
x = nn.Dense(1)(x)
return x, {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Ant-v0", num_envs=64)
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# SAC requires 5 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models
models = {}
models["policy"] = StochasticActor(env.observation_space, env.action_space, device)
models["critic_1"] = Critic(env.observation_space, env.action_space, device)
models["critic_2"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_1"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_2"] = Critic(env.observation_space, env.action_space, device)
# instantiate models' state dict
for role, model in models.items():
model.init_state_dict(role)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters
cfg = SAC_DEFAULT_CONFIG.copy()
cfg["gradient_steps"] = 1
cfg["batch_size"] = 4096
cfg["discount_factor"] = 0.99
cfg["polyak"] = 0.005
cfg["actor_learning_rate"] = 5e-4
cfg["critic_learning_rate"] = 5e-4
cfg["random_timesteps"] = 80
cfg["learning_starts"] = 80
cfg["grad_norm_clip"] = 0
cfg["learn_entropy"] = True
cfg["entropy_learning_rate"] = 5e-3
cfg["initial_entropy_value"] = 1.0
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 800
cfg["experiment"]["checkpoint_interval"] = 8000
cfg["experiment"]["directory"] = "runs/jax/Isaac-Ant-v0"
agent = SAC(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 160000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 5,362 | Python | 38.725926 | 102 | 0.705334 |
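For the off-policy agents, `memory_size` in `RandomMemory` is counted per environment, so the replay capacity scales with `num_envs`. A quick sketch of the resulting capacity under that reading (an assumption about skrl's memory layout, not something stated in the script):

```python
# replay-capacity sketch (assumes RandomMemory allocates memory_size slots per env)
memory_size = 15625
num_envs = 64
total_capacity = memory_size * num_envs
print(total_capacity)  # -> 1_000_000 stored transitions
```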
Toni-SM/skrl/docs/source/examples/isaacorbit/torch_ant_sac.py | import torch
import torch.nn as nn
# import the skrl components to build the RL system
from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG
from skrl.envs.loaders.torch import load_isaac_orbit_env
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (stochastic and deterministic models) using mixins
class StochasticActor(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-5, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, self.num_actions),
nn.Tanh())
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
class Critic(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, 1))
def compute(self, inputs, role):
return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Ant-v0", num_envs=64)
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# SAC requires 5 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models
models = {}
models["policy"] = StochasticActor(env.observation_space, env.action_space, device)
models["critic_1"] = Critic(env.observation_space, env.action_space, device)
models["critic_2"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_1"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_2"] = Critic(env.observation_space, env.action_space, device)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters
cfg = SAC_DEFAULT_CONFIG.copy()
cfg["gradient_steps"] = 1
cfg["batch_size"] = 4096
cfg["discount_factor"] = 0.99
cfg["polyak"] = 0.005
cfg["actor_learning_rate"] = 5e-4
cfg["critic_learning_rate"] = 5e-4
cfg["random_timesteps"] = 80
cfg["learning_starts"] = 80
cfg["grad_norm_clip"] = 0
cfg["learn_entropy"] = True
cfg["entropy_learning_rate"] = 5e-3
cfg["initial_entropy_value"] = 1.0
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 800
cfg["experiment"]["checkpoint_interval"] = 8000
cfg["experiment"]["directory"] = "runs/torch/Isaac-Ant-v0"
agent = SAC(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 160000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 4,436 | Python | 39.336363 | 93 | 0.672904 |
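Unlike the PPO examples, this SAC script does not carry the commented-out evaluation block. A hedged sketch of the equivalent step, reusing `agent` and `trainer` from the script above (the checkpoint path is hypothetical; point it at a file actually written under `cfg["experiment"]["directory"]` during training):

```python
# evaluation sketch: run after training, with `agent` and `trainer` defined as above
# NOTE: the path below is hypothetical; substitute a real checkpoint from
# runs/torch/Isaac-Ant-v0/<experiment_name>/checkpoints/
agent.load("runs/torch/Isaac-Ant-v0/experiment/checkpoints/best_agent.pt")
trainer.eval()
```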
Toni-SM/skrl/docs/source/examples/isaacorbit/torch_ant_ppo.py | import torch
import torch.nn as nn
# import the skrl components to build the RL system
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.torch import load_isaac_orbit_env
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define shared model (stochastic and deterministic models) using mixins
class Shared(GaussianMixin, DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU())
self.mean_layer = nn.Linear(64, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
self.value_layer = nn.Linear(64, 1)
def act(self, inputs, role):
if role == "policy":
return GaussianMixin.act(self, inputs, role)
elif role == "value":
return DeterministicMixin.act(self, inputs, role)
def compute(self, inputs, role):
if role == "policy":
return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {}
elif role == "value":
return self.value_layer(self.net(inputs["states"])), {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Ant-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Shared(env.observation_space, env.action_space, device)
models["value"] = models["policy"] # same instance: shared model
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 16 # memory_size
cfg["learning_epochs"] = 8
cfg["mini_batches"] = 4 # 16 * 1024 / 4096
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 3e-4
cfg["learning_rate_scheduler"] = KLAdaptiveRL
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 1.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = lambda rewards, *args, **kwargs: rewards * 0.1
cfg["time_limit_bootstrap"] = True
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 40
cfg["experiment"]["checkpoint_interval"] = 400
cfg["experiment"]["directory"] = "runs/torch/Isaac-Ant-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 8000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
# # ---------------------------------------------------------
# # comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate a trained agent
# # ---------------------------------------------------------
# from skrl.utils.huggingface import download_model_from_huggingface
# # download the trained agent's checkpoint from Hugging Face Hub and load it
# path = download_model_from_huggingface("skrl/IsaacOrbit-Isaac-Ant-v0-PPO", filename="agent.pt")
# agent.load(path)
# # start evaluation
# trainer.eval()
| 5,051 | Python | 37.564885 | 101 | 0.667195 |
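The `Shared` class dispatches on `role`, so one network backbone serves both the policy and the value function. A minimal sketch of querying it directly, assuming the script above has already built `models`, `env` and `device` (the batch size of 4, the use of `torch.randn`, and indexing the first element of the returned tuple as the action/value tensor are illustrative assumptions):

```python
import torch

# query the shared model under both roles (illustrative shapes only)
obs = torch.randn(4, env.observation_space.shape[0], device=device)
policy_out = models["policy"].act({"states": obs}, role="policy")  # sampled actions come first
value_out = models["value"].act({"states": obs}, role="value")     # state values come first
print(policy_out[0].shape, value_out[0].shape)
```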
Toni-SM/skrl/docs/source/examples/isaacorbit/jax_lift_franka_ppo.py | """
Notes for Isaac Sim 2022.2.1 or earlier (Python 3.7 environment):
* Python 3.7 is only supported up to jax<=0.3.25.
See: https://github.com/google/jax/blob/main/CHANGELOG.md#jaxlib-041-dec-13-2022.
* Builds for jaxlib<=0.3.25 are only available up to NVIDIA CUDA 11 and cuDNN 8.2 versions.
See: https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
and search for `cuda11/jaxlib-0.3.25+cuda11.cudnn82-cp37-cp37m-manylinux2014_x86_64.whl`.
* The `jax.Device = jax.xla.Device` statement is required by skrl to support jax<0.4.3.
* Models require overloading the `__hash__` method to avoid "TypeError: Failed to hash Flax Module".
"""
import flax.linen as nn
import jax
import jax.numpy as jnp
jax.Device = jax.xla.Device # for Isaac Sim 2022.2.1 or earlier
# import the skrl components to build the RL system
from skrl import config
from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.jax import load_isaac_orbit_env
from skrl.envs.wrappers.jax import wrap_env
from skrl.memories.jax import RandomMemory
from skrl.models.jax import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.jax import RunningStandardScaler
from skrl.resources.schedulers.jax import KLAdaptiveLR
from skrl.trainers.jax import SequentialTrainer
from skrl.utils import set_seed
config.jax.backend = "jax" # or "numpy"
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (stochastic and deterministic models) using mixins
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(256)(inputs["states"]))
x = nn.elu(nn.Dense(128)(x))
x = nn.elu(nn.Dense(64)(x))
x = nn.Dense(self.num_actions)(x)
log_std = self.param("log_std", lambda _: jnp.ones(self.num_actions))
return x, log_std, {}
class Value(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(256)(inputs["states"]))
x = nn.elu(nn.Dense(128)(x))
x = nn.elu(nn.Dense(64)(x))
x = nn.Dense(1)(x)
return x, {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Lift-Franka-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=96, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Policy(env.observation_space, env.action_space, device)
models["value"] = Value(env.observation_space, env.action_space, device)
# instantiate models' state dict
for role, model in models.items():
model.init_state_dict(role)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 96 # memory_size
cfg["learning_epochs"] = 5
cfg["mini_batches"] = 4 # 96 * 4096 / 98304
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 1e-3
cfg["learning_rate_scheduler"] = KLAdaptiveLR
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01, "min_lr": 1e-5}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.01
cfg["value_loss_scale"] = 1.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = None
cfg["time_limit_bootstrap"] = True
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 336
cfg["experiment"]["checkpoint_interval"] = 3360
cfg["experiment"]["directory"] = "runs/jax/Isaac-Lift-Franka-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 67200, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
# # ---------------------------------------------------------
# # comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate a trained agent
# # ---------------------------------------------------------
# from skrl.utils.huggingface import download_model_from_huggingface
# # download the trained agent's checkpoint from Hugging Face Hub and load it
# path = download_model_from_huggingface("skrl/IsaacOrbit-Isaac-Lift-Franka-v0-PPO", filename="agent.pickle")
# agent.load(path)
# # start evaluation
# trainer.eval()
| 6,089 | Python | 37.789809 | 109 | 0.688783 |
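With `rollouts = 96` and `timesteps = 67200`, the trainer performs a fixed number of PPO updates, and the write/checkpoint intervals above are multiples of the rollout length. A small bookkeeping sketch, assuming one trainer timestep corresponds to one step of the vectorized environment (all 4,096 envs at once, as implied by the `96 * 4096 / 98304` comment):

```python
# bookkeeping sketch (assumes one trainer timestep == one step of the vectorized env)
timesteps = 67200
rollouts = 96
num_envs = 4096                              # assumption, implied by the mini_batches comment

ppo_updates = timesteps // rollouts          # -> 700 policy/value updates
samples_total = timesteps * num_envs         # -> 275,251,200 environment transitions
checkpoints = timesteps // 3360              # -> 20 checkpoints at the configured interval
print(ppo_updates, samples_total, checkpoints)
```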
Toni-SM/skrl/docs/source/examples/isaacorbit/torch_lift_franka_ppo.py | import torch
import torch.nn as nn
# import the skrl components to build the RL system
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.torch import load_isaac_orbit_env
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.resources.schedulers.torch import KLAdaptiveLR
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define shared model (stochastic and deterministic models) using mixins
class Shared(GaussianMixin, DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU())
self.mean_layer = nn.Linear(64, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.ones(self.num_actions))
self.value_layer = nn.Linear(64, 1)
def act(self, inputs, role):
if role == "policy":
return GaussianMixin.act(self, inputs, role)
elif role == "value":
return DeterministicMixin.act(self, inputs, role)
def compute(self, inputs, role):
if role == "policy":
return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {}
elif role == "value":
return self.value_layer(self.net(inputs["states"])), {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Lift-Franka-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=96, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Shared(env.observation_space, env.action_space, device)
models["value"] = models["policy"] # same instance: shared model
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 96 # memory_size
cfg["learning_epochs"] = 5
cfg["mini_batches"] = 4 # 96 * 4096 / 98304
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 1e-3
cfg["learning_rate_scheduler"] = KLAdaptiveLR
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01, "min_lr": 1e-5}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.01
cfg["value_loss_scale"] = 1.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = None
cfg["time_limit_bootstrap"] = True
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 336
cfg["experiment"]["checkpoint_interval"] = 3360
cfg["experiment"]["directory"] = "runs/torch/Isaac-Lift-Franka-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 67200, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
# # ---------------------------------------------------------
# # comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate a trained agent
# # ---------------------------------------------------------
# from skrl.utils.huggingface import download_model_from_huggingface
# # download the trained agent's checkpoint from Hugging Face Hub and load it
# path = download_model_from_huggingface("skrl/IsaacOrbit-Isaac-Lift-Franka-v0-PPO", filename="agent.pt")
# agent.load(path)
# # start evaluation
# trainer.eval()
| 5,052 | Python | 37.572519 | 105 | 0.667854 |
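In the PyTorch PPO scripts the policy and value function are one module (`models["value"] = models["policy"]`), whereas the JAX scripts keep separate `Policy` and `Value` models. A quick check of that sharing, assuming the script above has run far enough to build `models`:

```python
# sanity-check sketch: both roles point at the same torch.nn.Module instance
shared = models["policy"] is models["value"]
n_params = sum(p.numel() for p in models["policy"].parameters())
print(shared, n_params)  # -> True, and a single shared parameter count for both roles
```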
Toni-SM/skrl/docs/source/examples/isaacorbit/jax_ant_ddpg.py | """
Notes for Isaac Sim 2022.2.1 or earlier (Python 3.7 environment):
* Python 3.7 is only supported up to jax<=0.3.25.
See: https://github.com/google/jax/blob/main/CHANGELOG.md#jaxlib-041-dec-13-2022.
* Builds for jaxlib<=0.3.25 are only available up to NVIDIA CUDA 11 and cuDNN 8.2 versions.
See: https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
and search for `cuda11/jaxlib-0.3.25+cuda11.cudnn82-cp37-cp37m-manylinux2014_x86_64.whl`.
* The `jax.Device = jax.xla.Device` statement is required by skrl to support jax<0.4.3.
* Models require overloading the `__hash__` method to avoid "TypeError: Failed to hash Flax Module".
"""
import flax.linen as nn
import jax
import jax.numpy as jnp
jax.Device = jax.xla.Device # for Isaac Sim 2022.2.1 or earlier
# import the skrl components to build the RL system
from skrl import config
from skrl.agents.jax.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.envs.loaders.jax import load_isaac_orbit_env
from skrl.envs.wrappers.jax import wrap_env
from skrl.memories.jax import RandomMemory
from skrl.models.jax import DeterministicMixin, Model
from skrl.resources.noises.jax import OrnsteinUhlenbeckNoise
from skrl.resources.preprocessors.jax import RunningStandardScaler
from skrl.trainers.jax import SequentialTrainer
from skrl.utils import set_seed
config.jax.backend = "jax" # or "numpy"
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (deterministic models) using mixins
class DeterministicActor(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.relu(nn.Dense(512)(inputs["states"]))
x = nn.relu(nn.Dense(256)(x))
x = nn.Dense(self.num_actions)(x)
return nn.tanh(x), {}
class Critic(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)
x = nn.relu(nn.Dense(512)(x))
x = nn.relu(nn.Dense(256)(x))
x = nn.Dense(1)(x)
return x, {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Ant-v0", num_envs=64)
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# DDPG requires 4 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models
models = {}
models["policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models["critic"] = Critic(env.observation_space, env.action_space, device)
models["target_critic"] = Critic(env.observation_space, env.action_space, device)
# instantiate models' state dict
for role, model in models.items():
model.init_state_dict(role)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters
cfg = DDPG_DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=0.5, device=device)
cfg["gradient_steps"] = 1
cfg["batch_size"] = 4096
cfg["discount_factor"] = 0.99
cfg["polyak"] = 0.005
cfg["actor_learning_rate"] = 5e-4
cfg["critic_learning_rate"] = 5e-4
cfg["random_timesteps"] = 80
cfg["learning_starts"] = 80
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 800
cfg["experiment"]["checkpoint_interval"] = 8000
cfg["experiment"]["directory"] = "runs/jax/Isaac-Ant-v0"
agent = DDPG(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 160000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 5,110 | Python | 38.315384 | 106 | 0.71272 |
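`polyak = 0.005` controls the soft update of the target networks, i.e. an exponential moving average of the online parameters. A conceptual sketch of that update on a parameter pytree (an illustration of the formula only, not skrl's internal implementation):

```python
import jax
import jax.numpy as jnp

def soft_update(target_params, online_params, polyak=0.005):
    """target <- polyak * online + (1 - polyak) * target, applied leaf by leaf."""
    return jax.tree_util.tree_map(
        lambda t, o: (1.0 - polyak) * t + polyak * o, target_params, online_params)

# toy parameters to show the effect of one soft update
target = {"w": jnp.zeros(3)}
online = {"w": jnp.ones(3)}
print(soft_update(target, online)["w"])  # -> [0.005 0.005 0.005]
```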
Toni-SM/skrl/docs/source/examples/isaacorbit/jax_reach_franka_ppo.py | """
Notes for Isaac Sim 2022.2.1 or earlier (Python 3.7 environment):
* Python 3.7 is only supported up to jax<=0.3.25.
See: https://github.com/google/jax/blob/main/CHANGELOG.md#jaxlib-041-dec-13-2022.
* Builds for jaxlib<=0.3.25 are only available up to NVIDIA CUDA 11 and cuDNN 8.2 versions.
See: https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
and search for `cuda11/jaxlib-0.3.25+cuda11.cudnn82-cp37-cp37m-manylinux2014_x86_64.whl`.
* The `jax.Device = jax.xla.Device` statement is required by skrl to support jax<0.4.3.
* Models require overloading the `__hash__` method to avoid "TypeError: Failed to hash Flax Module".
"""
import flax.linen as nn
import jax
import jax.numpy as jnp
jax.Device = jax.xla.Device # for Isaac Sim 2022.2.1 or earlier
# import the skrl components to build the RL system
from skrl import config
from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.jax import load_isaac_orbit_env
from skrl.envs.wrappers.jax import wrap_env
from skrl.memories.jax import RandomMemory
from skrl.models.jax import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.jax import RunningStandardScaler
from skrl.resources.schedulers.jax import KLAdaptiveRL
from skrl.trainers.jax import SequentialTrainer
from skrl.utils import set_seed
config.jax.backend = "jax" # or "numpy"
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (stochastic and deterministic models) using mixins
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(256)(inputs["states"]))
x = nn.elu(nn.Dense(128)(x))
x = nn.elu(nn.Dense(64)(x))
x = nn.Dense(self.num_actions)(x)
log_std = self.param("log_std", lambda _: 0.5 * jnp.ones(self.num_actions))
return nn.tanh(x), log_std, {}
class Value(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(256)(inputs["states"]))
x = nn.elu(nn.Dense(128)(x))
x = nn.elu(nn.Dense(64)(x))
x = nn.Dense(1)(x)
return x, {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Reach-Franka-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Policy(env.observation_space, env.action_space, device)
models["value"] = Value(env.observation_space, env.action_space, device)
# instantiate models' state dict
for role, model in models.items():
model.init_state_dict(role)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 16 # memory_size
cfg["learning_epochs"] = 8
cfg["mini_batches"] = 8 # 16 * 2048 / 4096
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 3e-4
cfg["learning_rate_scheduler"] = KLAdaptiveRL
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 2.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = None
cfg["time_limit_bootstrap"] = False
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 80
cfg["experiment"]["checkpoint_interval"] = 800
cfg["experiment"]["directory"] = "runs/jax/Isaac-Reach-Franka-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 16000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
# # ---------------------------------------------------------
# # comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate a trained agent
# # ---------------------------------------------------------
# from skrl.utils.huggingface import download_model_from_huggingface
# # download the trained agent's checkpoint from Hugging Face Hub and load it
# path = download_model_from_huggingface("skrl/IsaacOrbit-Isaac-Reach-Franka-v0-PPO", filename="agent.pickle")
# agent.load(path)
# # start evaluation
# trainer.eval()
| 6,088 | Python | 37.783439 | 110 | 0.688896 |
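This policy initializes `log_std` to `0.5 * ones`, whereas the Ant and Humanoid examples start it at zero; the initial per-action standard deviation is simply `exp(log_std)`. A one-liner to make that concrete:

```python
import math
# initial exploration width implied by the log_std initializations used in these examples
print(math.exp(0.5), math.exp(0.0))  # -> ~1.65 for Reach-Franka vs 1.0 for Ant/Humanoid
```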
Toni-SM/skrl/docs/source/examples/isaacorbit/jax_humanoid_ppo.py | """
Notes for Isaac Sim 2022.2.1 or earlier (Python 3.7 environment):
* Python 3.7 is only supported up to jax<=0.3.25.
See: https://github.com/google/jax/blob/main/CHANGELOG.md#jaxlib-041-dec-13-2022.
* Builds for jaxlib<=0.3.25 are only available up to NVIDIA CUDA 11 and cuDNN 8.2 versions.
See: https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
and search for `cuda11/jaxlib-0.3.25+cuda11.cudnn82-cp37-cp37m-manylinux2014_x86_64.whl`.
* The `jax.Device = jax.xla.Device` statement is required by skrl to support jax<0.4.3.
* Models require overloading the `__hash__` method to avoid "TypeError: Failed to hash Flax Module".
"""
import flax.linen as nn
import jax
import jax.numpy as jnp
jax.Device = jax.xla.Device # for Isaac Sim 2022.2.1 or earlier
# import the skrl components to build the RL system
from skrl import config
from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.jax import load_isaac_orbit_env
from skrl.envs.wrappers.jax import wrap_env
from skrl.memories.jax import RandomMemory
from skrl.models.jax import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.jax import RunningStandardScaler
from skrl.resources.schedulers.jax import KLAdaptiveRL
from skrl.trainers.jax import SequentialTrainer
from skrl.utils import set_seed
config.jax.backend = "jax" # or "numpy"
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (stochastic and deterministic models) using mixins
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False,
clip_log_std=True, min_log_std=-5, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(400)(inputs["states"]))
x = nn.elu(nn.Dense(200)(x))
x = nn.elu(nn.Dense(100)(x))
x = nn.Dense(self.num_actions)(x)
log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions))
return nn.tanh(x), log_std, {}
class Value(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(400)(inputs["states"]))
x = nn.elu(nn.Dense(200)(x))
x = nn.elu(nn.Dense(100)(x))
x = nn.Dense(1)(x)
return x, {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Humanoid-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=32, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Policy(env.observation_space, env.action_space, device)
models["value"] = Value(env.observation_space, env.action_space, device)
# instantiate models' state dict
for role, model in models.items():
model.init_state_dict(role)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 32 # memory_size
cfg["learning_epochs"] = 8
cfg["mini_batches"] = 8 # 32 * 1024 / 4096
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 3e-4
cfg["learning_rate_scheduler"] = KLAdaptiveRL
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 4.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = lambda rewards, *args, **kwargs: rewards * 0.01
cfg["time_limit_bootstrap"] = False
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 80
cfg["experiment"]["checkpoint_interval"] = 800
cfg["experiment"]["directory"] = "runs/jax/Isaac-Humanoid-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 16000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
# # ---------------------------------------------------------
# # comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate a trained agent
# # ---------------------------------------------------------
# from skrl.utils.huggingface import download_model_from_huggingface
# # download the trained agent's checkpoint from Hugging Face Hub and load it
# path = download_model_from_huggingface("skrl/IsaacOrbit-Isaac-Humanoid-v0-PPO", filename="agent.pickle")
# agent.load(path)
# # start evaluation
# trainer.eval()
| 6,115 | Python | 37.955414 | 106 | 0.689125 |
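`rewards_shaper` is a callable applied to the rewards before they are stored; the lambda above scales them by 0.01. An equivalent, slightly more explicit form is sketched below (the `*args, **kwargs` mirror the signature used in the script; the clipping variant only illustrates the hook and is not a recommended setting):

```python
def scale_rewards(rewards, *args, **kwargs):
    # same behaviour as the lambda used above
    return rewards * 0.01

def clip_rewards(rewards, *args, **kwargs):
    # illustrative alternative: bound the rewards instead of scaling them
    return rewards.clip(-1.0, 1.0)

cfg["rewards_shaper"] = scale_rewards
```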
Toni-SM/skrl/docs/source/examples/isaacorbit/torch_reach_franka_ppo.py | import torch
import torch.nn as nn
# import the skrl components to build the RL system
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.torch import load_isaac_orbit_env
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define shared model (stochastic and deterministic models) using mixins
class Shared(GaussianMixin, DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU())
self.mean_layer = nn.Linear(64, self.num_actions)
self.log_std_parameter = nn.Parameter(0.5 * torch.ones(self.num_actions))
self.value_layer = nn.Linear(64, 1)
def act(self, inputs, role):
if role == "policy":
return GaussianMixin.act(self, inputs, role)
elif role == "value":
return DeterministicMixin.act(self, inputs, role)
def compute(self, inputs, role):
if role == "policy":
return torch.tanh(self.mean_layer(self.net(inputs["states"]))), self.log_std_parameter, {}
elif role == "value":
return self.value_layer(self.net(inputs["states"])), {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Reach-Franka-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Shared(env.observation_space, env.action_space, device)
models["value"] = models["policy"] # same instance: shared model
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 16 # memory_size
cfg["learning_epochs"] = 8
cfg["mini_batches"] = 8 # 16 * 2048 / 4096
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 3e-4
cfg["learning_rate_scheduler"] = KLAdaptiveRL
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 2.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = None
cfg["time_limit_bootstrap"] = False
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 80
cfg["experiment"]["checkpoint_interval"] = 800
cfg["experiment"]["directory"] = "runs/torch/Isaac-Reach-Franka-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 16000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
# # ---------------------------------------------------------
# # comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate a trained agent
# # ---------------------------------------------------------
# from skrl.utils.huggingface import download_model_from_huggingface
# # download the trained agent's checkpoint from Hugging Face Hub and load it
# path = download_model_from_huggingface("skrl/IsaacOrbit-Isaac-Reach-Franka-v0-PPO", filename="agent.pt")
# agent.load(path)
# # start evaluation
# trainer.eval()
| 5,054 | Python | 37.587786 | 106 | 0.668184 |
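`RunningStandardScaler` keeps running estimates of the observation mean and variance and standardizes inputs with them. A conceptual PyTorch sketch of that behaviour (simplified; skrl's actual class also handles inverse transforms, clipping and train/eval modes):

```python
import torch

class TinyRunningScaler:
    """Simplified running standardizer: x -> (x - mean) / sqrt(var + eps)."""
    def __init__(self, size, eps=1e-8):
        self.mean = torch.zeros(size)
        self.var = torch.ones(size)
        self.count = eps
        self.eps = eps

    def __call__(self, x):
        # update running statistics with the parallel (Welford-style) variance formula
        batch_mean, batch_var, n = x.mean(0), x.var(0, unbiased=False), x.shape[0]
        delta = batch_mean - self.mean
        total = self.count + n
        self.mean = self.mean + delta * n / total
        self.var = (self.var * self.count + batch_var * n
                    + delta ** 2 * self.count * n / total) / total
        self.count = total
        return (x - self.mean) / torch.sqrt(self.var + self.eps)

scaler = TinyRunningScaler(size=4)
print(scaler(torch.randn(32, 4)).shape)  # standardized batch, same shape as the input
```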
Toni-SM/skrl/docs/source/examples/isaacorbit/torch_ant_ddpg.py | import torch
import torch.nn as nn
# import the skrl components to build the RL system
from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG
from skrl.envs.loaders.torch import load_isaac_orbit_env
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, Model
from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (deterministic models) using mixins
class DeterministicActor(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, self.num_actions),
nn.Tanh())
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
class Critic(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, 1))
def compute(self, inputs, role):
return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Ant-v0", num_envs=64)
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# DDPG requires 4 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models
models = {}
models["policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models["critic"] = Critic(env.observation_space, env.action_space, device)
models["target_critic"] = Critic(env.observation_space, env.action_space, device)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters
cfg = DDPG_DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=0.5, device=device)
cfg["gradient_steps"] = 1
cfg["batch_size"] = 4096
cfg["discount_factor"] = 0.99
cfg["polyak"] = 0.005
cfg["actor_learning_rate"] = 5e-4
cfg["critic_learning_rate"] = 5e-4
cfg["random_timesteps"] = 80
cfg["learning_starts"] = 80
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 800
cfg["experiment"]["checkpoint_interval"] = 8000
cfg["experiment"]["directory"] = "runs/torch/Isaac-Ant-v0"
agent = DDPG(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 160000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 4,201 | Python | 39.019047 | 106 | 0.67841 |
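The exploration noise configured above is an Ornstein-Uhlenbeck process (`theta=0.15`, `sigma=0.1`, scaled by `base_scale=0.5`), i.e. temporally correlated noise that decays back toward zero. A conceptual sketch of that process in plain PyTorch (an illustration of the recursion with an assumed `dt=1`, not skrl's `OrnsteinUhlenbeckNoise` implementation):

```python
import torch

def ou_sequence(steps, size, theta=0.15, sigma=0.1, base_scale=0.5, dt=1.0):
    """Temporally correlated noise: x += theta * (0 - x) * dt + sigma * sqrt(dt) * N(0, 1)."""
    x = torch.zeros(size)
    out = []
    for _ in range(steps):
        x = x + theta * (0.0 - x) * dt + sigma * (dt ** 0.5) * torch.randn(size)
        out.append(base_scale * x)
    return torch.stack(out)

noise = ou_sequence(steps=100, size=8)   # e.g. 8 action dimensions
print(noise.shape, noise.abs().mean())   # correlated, zero-mean exploration noise
```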
Toni-SM/skrl/docs/source/examples/isaacorbit/jax_ant_ppo.py | """
Notes for Isaac Sim 2022.2.1 or earlier (Python 3.7 environment):
* Python 3.7 is only supported up to jax<=0.3.25.
See: https://github.com/google/jax/blob/main/CHANGELOG.md#jaxlib-041-dec-13-2022.
* Builds for jaxlib<=0.3.25 are only available up to NVIDIA CUDA 11 and cuDNN 8.2 versions.
See: https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
and search for `cuda11/jaxlib-0.3.25+cuda11.cudnn82-cp37-cp37m-manylinux2014_x86_64.whl`.
* The `jax.Device = jax.xla.Device` statement is required by skrl to support jax<0.4.3.
* Models require overloading the `__hash__` method to avoid "TypeError: Failed to hash Flax Module".
"""
import flax.linen as nn
import jax
import jax.numpy as jnp
jax.Device = jax.xla.Device # for Isaac Sim 2022.2.1 or earlier
# import the skrl components to build the RL system
from skrl import config
from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.jax import load_isaac_orbit_env
from skrl.envs.wrappers.jax import wrap_env
from skrl.memories.jax import RandomMemory
from skrl.models.jax import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.jax import RunningStandardScaler
from skrl.resources.schedulers.jax import KLAdaptiveRL
from skrl.trainers.jax import SequentialTrainer
from skrl.utils import set_seed
config.jax.backend = "jax" # or "numpy"
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (stochastic and deterministic models) using mixins
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(256)(inputs["states"]))
x = nn.elu(nn.Dense(128)(x))
x = nn.elu(nn.Dense(64)(x))
x = nn.Dense(self.num_actions)(x)
log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions))
return x, log_std, {}
class Value(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(256)(inputs["states"]))
x = nn.elu(nn.Dense(128)(x))
x = nn.elu(nn.Dense(64)(x))
x = nn.Dense(1)(x)
return x, {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Ant-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Policy(env.observation_space, env.action_space, device)
models["value"] = Value(env.observation_space, env.action_space, device)
# instantiate models' state dict
for role, model in models.items():
model.init_state_dict(role)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 16 # memory_size
cfg["learning_epochs"] = 8
cfg["mini_batches"] = 4 # 16 * 1024 / 4096
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 3e-4
cfg["learning_rate_scheduler"] = KLAdaptiveRL
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 1.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = lambda rewards, *args, **kwargs: rewards * 0.1
cfg["time_limit_bootstrap"] = True
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 40
cfg["experiment"]["checkpoint_interval"] = 400
cfg["experiment"]["directory"] = "runs/jax/Isaac-Ant-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 8000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
# # ---------------------------------------------------------
# # comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate a trained agent
# # ---------------------------------------------------------
# from skrl.utils.huggingface import download_model_from_huggingface
# # download the trained agent's checkpoint from Hugging Face Hub and load it
# path = download_model_from_huggingface("skrl/IsaacOrbit-Isaac-Ant-v0-PPO", filename="agent.pickle")
# agent.load(path)
# # start evaluation
# trainer.eval()
| 6,088 | Python | 37.783439 | 102 | 0.688239 |
Toni-SM/skrl/docs/source/examples/isaacorbit/jax_ant_td3.py | """
Notes for Isaac Sim 2022.2.1 or earlier (Python 3.7 environment):
* Python 3.7 is only supported up to jax<=0.3.25.
See: https://github.com/google/jax/blob/main/CHANGELOG.md#jaxlib-041-dec-13-2022.
* Builds for jaxlib<=0.3.25 are only available up to NVIDIA CUDA 11 and cuDNN 8.2 versions.
See: https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
and search for `cuda11/jaxlib-0.3.25+cuda11.cudnn82-cp37-cp37m-manylinux2014_x86_64.whl`.
* The `jax.Device = jax.xla.Device` statement is required by skrl to support jax<0.4.3.
* Models require overloading the `__hash__` method to avoid "TypeError: Failed to hash Flax Module".
"""
import flax.linen as nn
import jax
import jax.numpy as jnp
jax.Device = jax.xla.Device # for Isaac Sim 2022.2.1 or earlier
# import the skrl components to build the RL system
from skrl import config
from skrl.agents.jax.td3 import TD3, TD3_DEFAULT_CONFIG
from skrl.envs.loaders.jax import load_isaac_orbit_env
from skrl.envs.wrappers.jax import wrap_env
from skrl.memories.jax import RandomMemory
from skrl.models.jax import DeterministicMixin, Model
from skrl.resources.noises.jax import GaussianNoise
from skrl.resources.preprocessors.jax import RunningStandardScaler
from skrl.trainers.jax import SequentialTrainer
from skrl.utils import set_seed
config.jax.backend = "jax" # or "numpy"
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (deterministic models) using mixins
class DeterministicActor(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.relu(nn.Dense(512)(inputs["states"]))
x = nn.relu(nn.Dense(256)(x))
x = nn.Dense(self.num_actions)(x)
return nn.tanh(x), {}
class Critic(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)
x = nn.relu(nn.Dense(512)(x))
x = nn.relu(nn.Dense(256)(x))
x = nn.Dense(1)(x)
return x, {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Ant-v0", num_envs=64)
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# TD3 requires 6 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models
models = {}
models["policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device)
models["critic_1"] = Critic(env.observation_space, env.action_space, device)
models["critic_2"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_1"] = Critic(env.observation_space, env.action_space, device)
models["target_critic_2"] = Critic(env.observation_space, env.action_space, device)
# instantiate models' state dict
for role, model in models.items():
model.init_state_dict(role)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters
cfg = TD3_DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device)
cfg["smooth_regularization_noise"] = GaussianNoise(0, 0.1, device=device)
cfg["smooth_regularization_clip"] = 0.5
cfg["gradient_steps"] = 1
cfg["batch_size"] = 4096
cfg["discount_factor"] = 0.99
cfg["polyak"] = 0.005
cfg["actor_learning_rate"] = 5e-4
cfg["critic_learning_rate"] = 5e-4
cfg["random_timesteps"] = 80
cfg["learning_starts"] = 80
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 800
cfg["experiment"]["checkpoint_interval"] = 8000
cfg["experiment"]["directory"] = "runs/jax/Isaac-Ant-v0"
agent = TD3(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 160000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 5,327 | Python | 38.761194 | 102 | 0.713723 |
Toni-SM/skrl/docs/source/examples/isaacorbit/torch_cartpole_ppo.py | import torch
import torch.nn as nn
# import the skrl components to build the RL system
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.torch import load_isaac_orbit_env
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define shared model (stochastic and deterministic models) using mixins
class Shared(GaussianMixin, DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
nn.ELU(),
nn.Linear(32, 32),
nn.ELU())
self.mean_layer = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.ones(self.num_actions))
self.value_layer = nn.Linear(32, 1)
def act(self, inputs, role):
if role == "policy":
return GaussianMixin.act(self, inputs, role)
elif role == "value":
return DeterministicMixin.act(self, inputs, role)
def compute(self, inputs, role):
if role == "policy":
return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {}
elif role == "value":
return self.value_layer(self.net(inputs["states"])), {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Shared(env.observation_space, env.action_space, device)
models["value"] = models["policy"] # same instance: shared model
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 16 # memory_size
cfg["learning_epochs"] = 8
cfg["mini_batches"] = 1 # 16 * 512 / 8192
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 3e-4
cfg["learning_rate_scheduler"] = KLAdaptiveRL
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 2.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = None
cfg["time_limit_bootstrap"] = True
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 16
cfg["experiment"]["checkpoint_interval"] = 80
cfg["experiment"]["directory"] = "runs/torch/Isaac-Cartpole-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 1600, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
# # ---------------------------------------------------------
# # comment the code above: `trainer.train()`, and...
# # uncomment the following lines to evaluate a trained agent
# # ---------------------------------------------------------
# from skrl.utils.huggingface import download_model_from_huggingface
# # download the trained agent's checkpoint from Hugging Face Hub and load it
# path = download_model_from_huggingface("skrl/IsaacOrbit-Isaac-Cartpole-v0-PPO", filename="agent.pt")
# agent.load(path)
# # start evaluation
# trainer.eval()
| 4,922 | Python | 37.16279 | 102 | 0.677164 |
Toni-SM/skrl/docs/source/examples/isaacorbit/jax_velocity_anymal_c_ppo.py | """
Notes for Isaac Sim 2022.2.1 or earlier (Python 3.7 environment):
* Python 3.7 is only supported up to jax<=0.3.25.
See: https://github.com/google/jax/blob/main/CHANGELOG.md#jaxlib-041-dec-13-2022.
* Builds for jaxlib<=0.3.25 are only available up to NVIDIA CUDA 11 and cuDNN 8.2 versions.
See: https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
and search for `cuda11/jaxlib-0.3.25+cuda11.cudnn82-cp37-cp37m-manylinux2014_x86_64.whl`.
* The `jax.Device = jax.xla.Device` statement is required by skrl to support jax<0.4.3.
* Models require overloading the `__hash__` method to avoid "TypeError: Failed to hash Flax Module".
"""
import flax.linen as nn
import jax
import jax.numpy as jnp
jax.Device = jax.xla.Device # for Isaac Sim 2022.2.1 or earlier
# import the skrl components to build the RL system
from skrl import config
from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.envs.loaders.jax import load_isaac_orbit_env
from skrl.envs.wrappers.jax import wrap_env
from skrl.memories.jax import RandomMemory
from skrl.models.jax import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.jax import RunningStandardScaler
from skrl.resources.schedulers.jax import KLAdaptiveRL
from skrl.trainers.jax import SequentialTrainer
from skrl.utils import set_seed
config.jax.backend = "jax" # or "numpy"
# seed for reproducibility
set_seed() # e.g. `set_seed(42)` for fixed seed
# define models (stochastic and deterministic models) using mixins
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(128)(inputs["states"]))
x = nn.elu(nn.Dense(128)(x))
x = nn.elu(nn.Dense(128)(x))
x = nn.Dense(self.num_actions)(x)
log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions))
return x, log_std, {}
class Value(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def __hash__(self): # for Isaac Sim 2022.2.1 or earlier
return id(self)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.elu(nn.Dense(128)(inputs["states"]))
x = nn.elu(nn.Dense(128)(x))
x = nn.elu(nn.Dense(128)(x))
x = nn.Dense(1)(x)
return x, {}
# load and wrap the Isaac Orbit environment
env = load_isaac_orbit_env(task_name="Isaac-Velocity-Anymal-C-v0")
env = wrap_env(env)
device = env.device
# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=24, num_envs=env.num_envs, device=device)
# instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models
models = {}
models["policy"] = Policy(env.observation_space, env.action_space, device)
models["value"] = Value(env.observation_space, env.action_space, device)
# instantiate models' state dict
for role, model in models.items():
model.init_state_dict(role)
# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 24 # memory_size
cfg["learning_epochs"] = 5
cfg["mini_batches"] = 4 # 24 * 4096 / 24576
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 1e-3
cfg["learning_rate_scheduler"] = KLAdaptiveRL
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01}
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 1.0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = True
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 1.0
cfg["kl_threshold"] = 0
cfg["rewards_shaper"] = None
cfg["time_limit_bootstrap"] = False
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 60
cfg["experiment"]["checkpoint_interval"] = 600
cfg["experiment"]["directory"] = "runs/jax/Isaac-Velocity-Anymal-C-v0"
agent = PPO(models=models,
memory=memory,
cfg=cfg,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 12000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
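# # ---------------------------------------------------------
# # evaluation sketch (not part of the original example):
# # comment the `trainer.train()` call above and uncomment the following
# # lines to evaluate a previously trained agent; the checkpoint path and
# # file name are hypothetical placeholders for a checkpoint written under
# # cfg["experiment"]["directory"] during training
# # ---------------------------------------------------------
# agent.load("./runs/jax/Isaac-Velocity-Anymal-C-v0/<experiment_name>/checkpoints/best_agent.pickle")  # hypothetical path
# trainer.eval()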
| 5,528 | Python | 37.664335 | 102 | 0.700253 |
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_omniverse_isaacgym_env.py | import torch
import numpy as np
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omniisaacgymenvs.robots.articulations.franka import Franka as Robot
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import get_prim_at_path
from skrl.utils import omniverse_isaacgym_utils
# post_physics_step calls
# - get_observations()
# - get_states()
# - calculate_metrics()
# - is_done()
# - get_extras()
TASK_CFG = {"test": False,
"device_id": 0,
"headless": True,
"sim_device": "gpu",
"enable_livestream": False,
"warp": False,
"seed": 42,
"task": {"name": "ReachingFranka",
"physics_engine": "physx",
"env": {"numEnvs": 1024,
"envSpacing": 1.5,
"episodeLength": 100,
"enableDebugVis": False,
"clipObservations": 1000.0,
"clipActions": 1.0,
"controlFrequencyInv": 4,
"actionScale": 2.5,
"dofVelocityScale": 0.1,
"controlSpace": "cartesian"},
"sim": {"dt": 0.0083, # 1 / 120
"use_gpu_pipeline": True,
"gravity": [0.0, 0.0, -9.81],
"add_ground_plane": True,
"use_flatcache": True,
"enable_scene_query_support": False,
"enable_cameras": False,
"default_physics_material": {"static_friction": 1.0,
"dynamic_friction": 1.0,
"restitution": 0.0},
"physx": {"worker_thread_count": 4,
"solver_type": 1,
"use_gpu": True,
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"contact_offset": 0.005,
"rest_offset": 0.0,
"bounce_threshold_velocity": 0.2,
"friction_offset_threshold": 0.04,
"friction_correlation_distance": 0.025,
"enable_sleeping": True,
"enable_stabilization": True,
"max_depenetration_velocity": 1000.0,
"gpu_max_rigid_contact_count": 524288,
"gpu_max_rigid_patch_count": 33554432,
"gpu_found_lost_pairs_capacity": 524288,
"gpu_found_lost_aggregate_pairs_capacity": 262144,
"gpu_total_aggregate_pairs_capacity": 1048576,
"gpu_max_soft_body_contacts": 1048576,
"gpu_max_particle_contacts": 1048576,
"gpu_heap_capacity": 33554432,
"gpu_temp_buffer_capacity": 16777216,
"gpu_max_num_partitions": 8},
"robot": {"override_usd_defaults": False,
"fixed_base": False,
"enable_self_collisions": False,
"enable_gyroscopic_forces": True,
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.005,
"stabilization_threshold": 0.001,
"density": -1,
"max_depenetration_velocity": 1000.0,
"contact_offset": 0.005,
"rest_offset": 0.0},
"target": {"override_usd_defaults": False,
"fixed_base": True,
"make_kinematic": True,
"enable_self_collisions": False,
"enable_gyroscopic_forces": True,
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.005,
"stabilization_threshold": 0.001,
"density": -1,
"max_depenetration_velocity": 1000.0,
"contact_offset": 0.005,
"rest_offset": 0.0}}}}
class RobotView(ArticulationView):
def __init__(self, prim_paths_expr: str, name: str = "robot_view") -> None:
super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)
class ReachingFrankaTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.dt = 1 / 120.0
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._action_scale = self._task_cfg["env"]["actionScale"]
self._dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self._control_space = self._task_cfg["env"]["controlSpace"]
# observation and action space
self._num_observations = 18
if self._control_space == "joint":
self._num_actions = 7
elif self._control_space == "cartesian":
self._num_actions = 3
else:
raise ValueError("Invalid control space: {}".format(self._control_space))
self._end_effector_link = "panda_leftfinger"
RLTask.__init__(self, name, env)
def set_up_scene(self, scene) -> None:
self.get_robot()
self.get_target()
super().set_up_scene(scene)
# robot view
self._robots = RobotView(prim_paths_expr="/World/envs/.*/robot", name="robot_view")
scene.add(self._robots)
# end-effectors view
self._end_effectors = RigidPrimView(prim_paths_expr="/World/envs/.*/robot/{}".format(self._end_effector_link), name="end_effector_view")
scene.add(self._end_effectors)
# hands view (cartesian)
if self._control_space == "cartesian":
self._hands = RigidPrimView(prim_paths_expr="/World/envs/.*/robot/panda_hand", name="hand_view", reset_xform_properties=False)
scene.add(self._hands)
# target view
self._targets = RigidPrimView(prim_paths_expr="/World/envs/.*/target", name="target_view", reset_xform_properties=False)
scene.add(self._targets)
self.init_data()
def get_robot(self):
robot = Robot(prim_path=self.default_zero_env_path + "/robot",
translation=torch.tensor([0.0, 0.0, 0.0]),
orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]),
name="robot")
self._sim_config.apply_articulation_settings("robot", get_prim_at_path(robot.prim_path), self._sim_config.parse_actor_config("robot"))
def get_target(self):
target = DynamicSphere(prim_path=self.default_zero_env_path + "/target",
name="target",
radius=0.025,
color=torch.tensor([1, 0, 0]))
self._sim_config.apply_articulation_settings("target", get_prim_at_path(target.prim_path), self._sim_config.parse_actor_config("target"))
target.set_collision_enabled(False)
def init_data(self) -> None:
self.robot_default_dof_pos = torch.tensor(np.radians([0, -45, 0, -135, 0, 90, 45, 0, 0]), device=self._device, dtype=torch.float32)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
if self._control_space == "cartesian":
self.jacobians = torch.zeros((self._num_envs, 10, 6, 9), device=self._device)
self.hand_pos, self.hand_rot = torch.zeros((self._num_envs, 3), device=self._device), torch.zeros((self._num_envs, 4), device=self._device)
def get_observations(self) -> dict:
robot_dof_pos = self._robots.get_joint_positions(clone=False)
robot_dof_vel = self._robots.get_joint_velocities(clone=False)
end_effector_pos, end_effector_rot = self._end_effectors.get_world_poses(clone=False)
target_pos, target_rot = self._targets.get_world_poses(clone=False)
dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) \
/ (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0
dof_vel_scaled = robot_dof_vel * self._dof_vel_scale
generalization_noise = torch.rand((dof_vel_scaled.shape[0], 7), device=self._device) + 0.5
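        # observation layout: [0] normalized episode progress, [1:8] joint positions scaled to [-1, 1],
        # [8:15] scaled joint velocities (multiplied by noise in [0.5, 1.5) for generalization),
        # [15:18] target position expressed in the environment-local frame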
self.obs_buf[:, 0] = self.progress_buf / self._max_episode_length
self.obs_buf[:, 1:8] = dof_pos_scaled[:, :7]
self.obs_buf[:, 8:15] = dof_vel_scaled[:, :7] * generalization_noise
self.obs_buf[:, 15:18] = target_pos - self._env_pos
# compute distance for calculate_metrics() and is_done()
self._computed_distance = torch.norm(end_effector_pos - target_pos, dim=-1)
if self._control_space == "cartesian":
self.jacobians = self._robots.get_jacobians(clone=False)
self.hand_pos, self.hand_rot = self._hands.get_world_poses(clone=False)
self.hand_pos -= self._env_pos
return {self._robots.name: {"obs_buf": self.obs_buf}}
def pre_physics_step(self, actions) -> None:
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
env_ids_int32 = torch.arange(self._robots.count, dtype=torch.int32, device=self._device)
if self._control_space == "joint":
targets = self.robot_dof_targets[:, :7] + self.robot_dof_speed_scales[:7] * self.dt * self.actions * self._action_scale
elif self._control_space == "cartesian":
goal_position = self.hand_pos + actions / 100.0
delta_dof_pos = omniverse_isaacgym_utils.ik(jacobian_end_effector=self.jacobians[:, 8 - 1, :, :7], # franka hand index: 8
current_position=self.hand_pos,
current_orientation=self.hand_rot,
goal_position=goal_position,
goal_orientation=None)
targets = self.robot_dof_targets[:, :7] + delta_dof_pos
self.robot_dof_targets[:, :7] = torch.clamp(targets, self.robot_dof_lower_limits[:7], self.robot_dof_upper_limits[:7])
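        # keep the two gripper (finger) joints at a zero position target: the task does not actuate the gripper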
self.robot_dof_targets[:, 7:] = 0
self._robots.set_joint_position_targets(self.robot_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids) -> None:
indices = env_ids.to(dtype=torch.int32)
# reset robot
pos = torch.clamp(self.robot_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_robot_dofs), device=self._device) - 0.5),
self.robot_dof_lower_limits, self.robot_dof_upper_limits)
dof_pos = torch.zeros((len(indices), self._robots.num_dof), device=self._device)
dof_pos[:, :] = pos
dof_pos[:, 7:] = 0
dof_vel = torch.zeros((len(indices), self._robots.num_dof), device=self._device)
self.robot_dof_targets[env_ids, :] = pos
self.robot_dof_pos[env_ids, :] = pos
self._robots.set_joint_position_targets(self.robot_dof_targets[env_ids], indices=indices)
self._robots.set_joint_positions(dof_pos, indices=indices)
self._robots.set_joint_velocities(dof_vel, indices=indices)
# reset target
pos = (torch.rand((len(env_ids), 3), device=self._device) - 0.5) * 2 \
* torch.tensor([0.25, 0.25, 0.10], device=self._device) \
+ torch.tensor([0.50, 0.00, 0.20], device=self._device)
self._targets.set_world_poses(pos + self._env_pos[env_ids], indices=indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self.num_robot_dofs = self._robots.num_dof
self.robot_dof_pos = torch.zeros((self.num_envs, self.num_robot_dofs), device=self._device)
dof_limits = self._robots.get_dof_limits()
self.robot_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.robot_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.robot_dof_speed_scales = torch.ones_like(self.robot_dof_lower_limits)
self.robot_dof_targets = torch.zeros((self._num_envs, self.num_robot_dofs), dtype=torch.float, device=self._device)
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
self.rew_buf[:] = -self._computed_distance
def is_done(self) -> None:
self.reset_buf.fill_(0)
# target reached
self.reset_buf = torch.where(self._computed_distance <= 0.035, torch.ones_like(self.reset_buf), self.reset_buf)
# max episode length
self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
| 14,470 | Python | 50.682143 | 152 | 0.517554 |
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_real_skrl_eval.py | import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
# Define only the policy for evaluation
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
# Load the environment
from reaching_franka_real_env import ReachingFranka
control_space = "joint" # joint or cartesian
motion_type = "waypoint" # waypoint or impedance
camera_tracking = False # True for USB-camera tracking
env = ReachingFranka(robot_ip="172.16.0.2",
device="cpu",
control_space=control_space,
motion_type=motion_type,
camera_tracking=camera_tracking)
# wrap the environment
env = wrap_env(env)
device = env.device
# Instantiate the agent's policy.
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# log to TensorBoard every 32 timesteps and ignore checkpoints
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 0
agent = PPO(models=models_ppo,
memory=None,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# load checkpoints
if control_space == "joint":
agent.load("./agent_joint.pt")
elif control_space == "cartesian":
agent.load("./agent_cartesian.pt")
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 1000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start evaluation
trainer.eval()
| 3,319 | Python | 36.30337 | 102 | 0.664357 |
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_real_env.py | import gym
import time
import threading
import numpy as np
from packaging import version
import frankx
class ReachingFranka(gym.Env):
def __init__(self, robot_ip="172.16.0.2", device="cuda:0", control_space="joint", motion_type="waypoint", camera_tracking=False):
# gym API
        self._deprecated_api = version.parse(gym.__version__) < version.parse("0.25.0")
self.device = device
self.control_space = control_space # joint or cartesian
self.motion_type = motion_type # waypoint or impedance
if self.control_space == "cartesian" and self.motion_type == "impedance":
# The operation of this mode (Cartesian-impedance) was adjusted later without being able to test it on the real robot.
# Dangerous movements may occur for the operator and the robot.
# Comment the following line of code if you want to proceed with this mode.
raise ValueError("See comment in the code to proceed with this mode")
pass
# camera tracking (disabled by default)
self.camera_tracking = camera_tracking
if self.camera_tracking:
threading.Thread(target=self._update_target_from_camera).start()
# spaces
self.observation_space = gym.spaces.Box(low=-1000, high=1000, shape=(18,), dtype=np.float32)
if self.control_space == "joint":
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(7,), dtype=np.float32)
elif self.control_space == "cartesian":
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32)
else:
raise ValueError("Invalid control space:", self.control_space)
# init real franka
print("Connecting to robot at {}...".format(robot_ip))
self.robot = frankx.Robot(robot_ip)
self.robot.set_default_behavior()
self.robot.recover_from_errors()
# the robot's response can be better managed by independently setting the following properties, for example:
# - self.robot.velocity_rel = 0.2
# - self.robot.acceleration_rel = 0.1
# - self.robot.jerk_rel = 0.01
self.robot.set_dynamic_rel(0.25)
self.gripper = self.robot.get_gripper()
print("Robot connected")
self.motion = None
self.motion_thread = None
self.dt = 1 / 120.0
self.action_scale = 2.5
self.dof_vel_scale = 0.1
self.max_episode_length = 100
self.robot_dof_speed_scales = 1
self.target_pos = np.array([0.65, 0.2, 0.2])
self.robot_default_dof_pos = np.radians([0, -45, 0, -135, 0, 90, 45])
self.robot_dof_lower_limits = np.array([-2.8973, -1.7628, -2.8973, -3.0718, -2.8973, -0.0175, -2.8973])
self.robot_dof_upper_limits = np.array([ 2.8973, 1.7628, 2.8973, -0.0698, 2.8973, 3.7525, 2.8973])
self.progress_buf = 1
self.obs_buf = np.zeros((18,), dtype=np.float32)
def _update_target_from_camera(self):
pixel_to_meter = 1.11 / 375 # m/px: adjust for custom cases
import cv2
cap = cv2.VideoCapture(0)
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
# convert to HSV and remove noise
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
hsv = cv2.medianBlur(hsv, 15)
# color matching in HSV
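            # keep pixels whose (OpenCV) hue falls in [80, 100], i.e. roughly cyan/light-blue tones;
            # adjust these bounds to match the color of the tracked target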
mask = cv2.inRange(hsv, np.array([80, 100, 100]), np.array([100, 255, 255]))
M = cv2.moments(mask)
if M["m00"]:
x = M["m10"] / M["m00"]
y = M["m01"] / M["m00"]
# real-world position (fixed z to 0.2 meters)
pos = np.array([pixel_to_meter * (y - 185), pixel_to_meter * (x - 320), 0.2])
if self is not None:
self.target_pos = pos
# draw target
frame = cv2.circle(frame, (int(x), int(y)), 30, (0,0,255), 2)
frame = cv2.putText(frame, str(np.round(pos, 4).tolist()), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA)
# show images
cv2.imshow("frame", frame)
cv2.imshow("mask", mask)
k = cv2.waitKey(1) & 0xFF
if k == ord('q'):
cap.release()
def _get_observation_reward_done(self):
# get robot state
try:
robot_state = self.robot.get_state(read_once=True)
except frankx.InvalidOperationException:
robot_state = self.robot.get_state(read_once=False)
# observation
robot_dof_pos = np.array(robot_state.q)
robot_dof_vel = np.array(robot_state.dq)
end_effector_pos = np.array(robot_state.O_T_EE[-4:-1])
dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0
dof_vel_scaled = robot_dof_vel * self.dof_vel_scale
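        # observation layout: [0] normalized episode progress, [1:8] joint positions scaled to [-1, 1],
        # [8:15] scaled joint velocities, [15:18] target position (relative to the robot base)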
self.obs_buf[0] = self.progress_buf / float(self.max_episode_length)
self.obs_buf[1:8] = dof_pos_scaled
self.obs_buf[8:15] = dof_vel_scaled
self.obs_buf[15:18] = self.target_pos
# reward
distance = np.linalg.norm(end_effector_pos - self.target_pos)
reward = -distance
# done
done = self.progress_buf >= self.max_episode_length - 1
done = done or distance <= 0.075
print("Distance:", distance)
if done:
print("Target or Maximum episode length reached")
time.sleep(1)
return self.obs_buf, reward, done
def reset(self):
print("Reseting...")
# end current motion
if self.motion is not None:
self.motion.finish()
self.motion_thread.join()
self.motion = None
self.motion_thread = None
# open/close gripper
# self.gripper.open()
# self.gripper.clamp()
# go to 1) safe position, 2) random position
self.robot.move(frankx.JointMotion(self.robot_default_dof_pos.tolist()))
dof_pos = self.robot_default_dof_pos + 0.25 * (np.random.rand(7) - 0.5)
self.robot.move(frankx.JointMotion(dof_pos.tolist()))
# get target position from prompt
if not self.camera_tracking:
while True:
try:
print("Enter target position (X, Y, Z) in meters")
raw = input("or press [Enter] key for a random target position: ")
if raw:
self.target_pos = np.array([float(p) for p in raw.replace(' ', '').split(',')])
else:
noise = (2 * np.random.rand(3) - 1) * np.array([0.25, 0.25, 0.10])
self.target_pos = np.array([0.5, 0.0, 0.2]) + noise
print("Target position:", self.target_pos)
break
except ValueError:
print("Invalid input. Try something like: 0.65, 0.0, 0.2")
# initial pose
affine = frankx.Affine(frankx.Kinematics.forward(dof_pos.tolist()))
affine = affine * frankx.Affine(x=0, y=0, z=-0.10335, a=np.pi/2)
# motion type
if self.motion_type == "waypoint":
self.motion = frankx.WaypointMotion([frankx.Waypoint(affine)], return_when_finished=False)
elif self.motion_type == "impedance":
self.motion = frankx.ImpedanceMotion(500, 50)
else:
raise ValueError("Invalid motion type:", self.motion_type)
self.motion_thread = self.robot.move_async(self.motion)
if self.motion_type == "impedance":
self.motion.target = affine
input("Press [Enter] to continue")
self.progress_buf = 0
observation, reward, done = self._get_observation_reward_done()
        if self._deprecated_api:
return observation
else:
return observation, {}
def step(self, action):
self.progress_buf += 1
# control space
# joint
if self.control_space == "joint":
# get robot state
try:
robot_state = self.robot.get_state(read_once=True)
except frankx.InvalidOperationException:
robot_state = self.robot.get_state(read_once=False)
# forward kinematics
dof_pos = np.array(robot_state.q) + (self.robot_dof_speed_scales * self.dt * action * self.action_scale)
affine = frankx.Affine(self.robot.forward_kinematics(dof_pos.flatten().tolist()))
affine = affine * frankx.Affine(x=0, y=0, z=-0.10335, a=np.pi/2)
# cartesian
elif self.control_space == "cartesian":
action /= 100.0
if self.motion_type == "waypoint":
affine = frankx.Affine(x=action[0], y=action[1], z=action[2])
elif self.motion_type == "impedance":
# get robot pose
try:
robot_pose = self.robot.current_pose(read_once=True)
except frankx.InvalidOperationException:
robot_pose = self.robot.current_pose(read_once=False)
affine = robot_pose * frankx.Affine(x=action[0], y=action[1], z=action[2])
# motion type
# waypoint motion
if self.motion_type == "waypoint":
if self.control_space == "joint":
self.motion.set_next_waypoint(frankx.Waypoint(affine))
elif self.control_space == "cartesian":
self.motion.set_next_waypoint(frankx.Waypoint(affine, frankx.Waypoint.Relative))
# impedance motion
elif self.motion_type == "impedance":
self.motion.target = affine
else:
raise ValueError("Invalid motion type:", self.motion_type)
# the use of time.sleep is for simplicity. This does not guarantee control at a specific frequency
time.sleep(0.1) # lower frequency, at 30Hz there are discontinuities
observation, reward, done = self._get_observation_reward_done()
        if self._deprecated_api:
return observation, reward, done, {}
else:
return observation, reward, done, done, {}
def render(self, *args, **kwargs):
pass
def close(self):
pass
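# # ---------------------------------------------------------
# # minimal manual-rollout sketch (not part of the original file):
# # it drives the real robot with random actions, so use it only to verify
# # connectivity; the robot IP and the episode handling are assumptions
# # ---------------------------------------------------------
# if __name__ == "__main__":
#     env = ReachingFranka(robot_ip="172.16.0.2", control_space="joint", motion_type="waypoint")
#     observation = env.reset()  # returns (observation, info) with gym >= 0.25
#     for _ in range(env.max_episode_length):
#         action = env.action_space.sample()
#         observation, reward, done, *extra = env.step(action)  # 4-tuple (old gym API) or 5-tuple (new one)
#         if done:
#             break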
| 10,370 | Python | 38.888461 | 144 | 0.568274 |
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_isaacgym_skrl_eval.py | import isaacgym
import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
# Define only the policy for evaluation
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
# instantiate and configure the task
headless = True # set headless to False for rendering
from reaching_franka_isaacgym_env import ReachingFrankaTask, TASK_CFG
TASK_CFG["headless"] = headless
TASK_CFG["env"]["numEnvs"] = 64
TASK_CFG["env"]["controlSpace"] = "joint" # "joint" or "cartesian"
env = ReachingFrankaTask(cfg=TASK_CFG)
# wrap the environment
env = wrap_env(env, "isaacgym-preview4")
device = env.device
# Instantiate the agent's policy.
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# log to TensorBoard every 32 timesteps and ignore checkpoints
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 0
agent = PPO(models=models_ppo,
memory=None,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# load checkpoints
if TASK_CFG["env"]["controlSpace"] == "joint":
agent.load("./agent_joint_isaacgym.pt")
elif TASK_CFG["env"]["controlSpace"] == "cartesian":
agent.load("./agent_cartesian_isaacgym.pt")
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 1000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start evaluation
trainer.eval()
| 3,288 | Python | 35.544444 | 102 | 0.678528 |
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_omniverse_isaacgym_skrl_eval.py | import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.utils.omniverse_isaacgym_utils import get_env_instance
from skrl.envs.torch import wrap_env
from skrl.utils import set_seed
# Seed for reproducibility
seed = set_seed() # e.g. `set_seed(42)` for fixed seed
# Define only the policy for evaluation
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
# instantiate VecEnvBase and set up the task
headless = False # set headless to False for rendering
env = get_env_instance(headless=headless)
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from reaching_franka_omniverse_isaacgym_env import ReachingFrankaTask, TASK_CFG
TASK_CFG["seed"] = seed
TASK_CFG["headless"] = headless
TASK_CFG["task"]["env"]["numEnvs"] = 64
TASK_CFG["task"]["env"]["controlSpace"] = "joint" # "joint" or "cartesian"
sim_config = SimConfig(TASK_CFG)
task = ReachingFrankaTask(name="ReachingFranka", sim_config=sim_config, env=env)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True)
# wrap the environment
env = wrap_env(env, "omniverse-isaacgym")
device = env.device
# Instantiate the agent's policy.
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# log to TensorBoard every 32 timesteps and ignore checkpoints
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 0
agent = PPO(models=models_ppo,
memory=None,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# load checkpoints
if TASK_CFG["task"]["env"]["controlSpace"] == "joint":
agent.load("./agent_joint.pt")
elif TASK_CFG["task"]["env"]["controlSpace"] == "cartesian":
agent.load("./agent_cartesian.pt")
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 5000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start evaluation
trainer.eval()
| 3,789 | Python | 37.282828 | 102 | 0.68778 |
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_isaacgym_env.py | import os
import numpy as np
import torch
from isaacgym import gymtorch, gymapi
# isaacgymenvs (VecTask class)
import sys
import isaacgymenvs
sys.path.append(list(isaacgymenvs.__path__)[0])
from tasks.base.vec_task import VecTask
from skrl.utils import isaacgym_utils
TASK_CFG = {"name": "ReachingFranka",
"physics_engine": "physx",
"rl_device": "cuda:0",
"sim_device": "cuda:0",
"graphics_device_id": 0,
"headless": False,
"virtual_screen_capture": False,
"force_render": True,
"env": {"numEnvs": 1024,
"envSpacing": 1.5,
"episodeLength": 100,
"enableDebugVis": False,
"clipObservations": 1000.0,
"clipActions": 1.0,
"controlFrequencyInv": 4,
"actionScale": 2.5,
"dofVelocityScale": 0.1,
"controlSpace": "cartesian",
"enableCameraSensors": False},
"sim": {"dt": 0.0083, # 1 / 120
"substeps": 1,
"up_axis": "z",
"use_gpu_pipeline": True,
"gravity": [0.0, 0.0, -9.81],
"physx": {"num_threads": 4,
"solver_type": 1,
"use_gpu": True,
"num_position_iterations": 4,
"num_velocity_iterations": 1,
"contact_offset": 0.005,
"rest_offset": 0.0,
"bounce_threshold_velocity": 0.2,
"max_depenetration_velocity": 1000.0,
"default_buffer_size_multiplier": 5.0,
"max_gpu_contact_pairs": 1048576,
"num_subscenes": 4,
"contact_collection": 0}},
"task": {"randomize": False}}
class ReachingFrankaTask(VecTask):
def __init__(self, cfg):
self.cfg = cfg
rl_device = cfg["rl_device"]
sim_device = cfg["sim_device"]
graphics_device_id = cfg["graphics_device_id"]
headless = cfg["headless"]
virtual_screen_capture = cfg["virtual_screen_capture"]
force_render = cfg["force_render"]
self.dt = 1 / 120.0
self._action_scale = self.cfg["env"]["actionScale"]
self._dof_vel_scale = self.cfg["env"]["dofVelocityScale"]
self._control_space = self.cfg["env"]["controlSpace"]
self.max_episode_length = self.cfg["env"]["episodeLength"] # name required for VecTask
self.debug_viz = self.cfg["env"]["enableDebugVis"]
# observation and action space
self.cfg["env"]["numObservations"] = 18
if self._control_space == "joint":
self.cfg["env"]["numActions"] = 7
elif self._control_space == "cartesian":
self.cfg["env"]["numActions"] = 3
else:
raise ValueError("Invalid control space: {}".format(self._control_space))
self._end_effector_link = "panda_leftfinger"
# setup VecTask
super().__init__(config=self.cfg,
rl_device=rl_device,
sim_device=sim_device,
graphics_device_id=graphics_device_id,
headless=headless,
virtual_screen_capture=virtual_screen_capture,
force_render=force_render)
# tensors and views: DOFs, roots, rigid bodies
dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)
root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)
rigid_body_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)
self.root_state = gymtorch.wrap_tensor(root_state_tensor)
self.rigid_body_state = gymtorch.wrap_tensor(rigid_body_state_tensor)
self.dof_pos = self.dof_state.view(self.num_envs, -1, 2)[..., 0]
self.dof_vel = self.dof_state.view(self.num_envs, -1, 2)[..., 1]
self.root_pos = self.root_state[:, 0:3].view(self.num_envs, -1, 3)
self.root_rot = self.root_state[:, 3:7].view(self.num_envs, -1, 4)
self.root_vel_lin = self.root_state[:, 7:10].view(self.num_envs, -1, 3)
self.root_vel_ang = self.root_state[:, 10:13].view(self.num_envs, -1, 3)
self.rigid_body_pos = self.rigid_body_state[:, 0:3].view(self.num_envs, -1, 3)
self.rigid_body_rot = self.rigid_body_state[:, 3:7].view(self.num_envs, -1, 4)
self.rigid_body_vel_lin = self.rigid_body_state[:, 7:10].view(self.num_envs, -1, 3)
self.rigid_body_vel_ang = self.rigid_body_state[:, 10:13].view(self.num_envs, -1, 3)
# tensors and views: jacobian
if self._control_space == "cartesian":
jacobian_tensor = self.gym.acquire_jacobian_tensor(self.sim, "robot")
self.jacobian = gymtorch.wrap_tensor(jacobian_tensor)
self.jacobian_end_effector = self.jacobian[:, self.rigid_body_dict_robot[self._end_effector_link] - 1, :, :7]
self.reset_idx(torch.arange(self.num_envs, device=self.device))
def create_sim(self):
self.sim_params.up_axis = gymapi.UP_AXIS_Z
self.sim_params.gravity.x = 0
self.sim_params.gravity.y = 0
self.sim_params.gravity.z = -9.81
self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)
self._create_ground_plane()
self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs)))
def _create_ground_plane(self):
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
self.gym.add_ground(self.sim, plane_params)
def _create_envs(self, num_envs, spacing, num_per_row):
lower = gymapi.Vec3(-spacing, -spacing, 0.0)
upper = gymapi.Vec3(spacing, spacing, spacing)
asset_root = os.path.join(os.path.dirname(os.path.abspath(isaacgymenvs.__file__)), "../assets")
robot_asset_file = "urdf/franka_description/robots/franka_panda.urdf"
# robot asset
asset_options = gymapi.AssetOptions()
asset_options.flip_visual_attachments = True
asset_options.fix_base_link = True
asset_options.collapse_fixed_joints = True
asset_options.disable_gravity = True
asset_options.thickness = 0.001
asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS
asset_options.use_mesh_materials = True
robot_asset = self.gym.load_asset(self.sim, asset_root, robot_asset_file, asset_options)
# target asset
asset_options = gymapi.AssetOptions()
asset_options.fix_base_link = True
asset_options.collapse_fixed_joints = False
asset_options.disable_gravity = True
asset_options.thickness = 0.001
asset_options.use_mesh_materials = True
target_asset = self.gym.create_sphere(self.sim, 0.025, asset_options)
robot_dof_stiffness = torch.tensor([400, 400, 400, 400, 400, 400, 400, 1.0e6, 1.0e6], dtype=torch.float32, device=self.device)
robot_dof_damping = torch.tensor([80, 80, 80, 80, 80, 80, 80, 1.0e2, 1.0e2], dtype=torch.float, device=self.device)
# set robot dof properties
robot_dof_props = self.gym.get_asset_dof_properties(robot_asset)
self.robot_dof_lower_limits = []
self.robot_dof_upper_limits = []
for i in range(9):
robot_dof_props["driveMode"][i] = gymapi.DOF_MODE_POS
if self.physics_engine == gymapi.SIM_PHYSX:
robot_dof_props["stiffness"][i] = robot_dof_stiffness[i]
robot_dof_props["damping"][i] = robot_dof_damping[i]
else:
robot_dof_props["stiffness"][i] = 7000.0
robot_dof_props["damping"][i] = 50.0
self.robot_dof_lower_limits.append(robot_dof_props["lower"][i])
self.robot_dof_upper_limits.append(robot_dof_props["upper"][i])
self.robot_dof_lower_limits = torch.tensor(self.robot_dof_lower_limits, device=self.device)
self.robot_dof_upper_limits = torch.tensor(self.robot_dof_upper_limits, device=self.device)
self.robot_dof_speed_scales = torch.ones_like(self.robot_dof_lower_limits)
robot_dof_props["effort"][7] = 200
robot_dof_props["effort"][8] = 200
self.handle_targets = []
self.handle_robots = []
self.handle_envs = []
indexes_sim_robot = []
indexes_sim_target = []
for i in range(self.num_envs):
# create env instance
env_ptr = self.gym.create_env(self.sim, lower, upper, num_per_row)
# create robot instance
pose = gymapi.Transform()
pose.p = gymapi.Vec3(0.0, 0.0, 0.0)
pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1)
robot_actor = self.gym.create_actor(env=env_ptr,
asset=robot_asset,
pose=pose,
name="robot",
group=i, # collision group
filter=1, # mask off collision
segmentationId=0)
self.gym.set_actor_dof_properties(env_ptr, robot_actor, robot_dof_props)
indexes_sim_robot.append(self.gym.get_actor_index(env_ptr, robot_actor, gymapi.DOMAIN_SIM))
# create target instance
pose = gymapi.Transform()
pose.p = gymapi.Vec3(0.5, 0.0, 0.2)
pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1)
target_actor = self.gym.create_actor(env=env_ptr,
asset=target_asset,
pose=pose,
name="target",
group=i + 1, # collision group
filter=1, # mask off collision
segmentationId=1)
indexes_sim_target.append(self.gym.get_actor_index(env_ptr, target_actor, gymapi.DOMAIN_SIM))
self.gym.set_rigid_body_color(env_ptr, target_actor, 0, gymapi.MESH_VISUAL, gymapi.Vec3(1., 0., 0.))
self.handle_envs.append(env_ptr)
self.handle_robots.append(robot_actor)
self.handle_targets.append(target_actor)
self.indexes_sim_robot = torch.tensor(indexes_sim_robot, dtype=torch.int32, device=self.device)
self.indexes_sim_target = torch.tensor(indexes_sim_target, dtype=torch.int32, device=self.device)
self.num_robot_dofs = self.gym.get_asset_dof_count(robot_asset)
self.rigid_body_dict_robot = self.gym.get_asset_rigid_body_dict(robot_asset)
self.init_data()
def init_data(self):
self.robot_default_dof_pos = torch.tensor(np.radians([0, -45, 0, -135, 0, 90, 45, 0, 0]), device=self.device, dtype=torch.float32)
self.robot_dof_targets = torch.zeros((self.num_envs, self.num_robot_dofs), device=self.device, dtype=torch.float32)
if self._control_space == "cartesian":
self.end_effector_pos = torch.zeros((self.num_envs, 3), device=self.device)
self.end_effector_rot = torch.zeros((self.num_envs, 4), device=self.device)
def compute_reward(self):
self.rew_buf[:] = -self._computed_distance
self.reset_buf.fill_(0)
# target reached
self.reset_buf = torch.where(self._computed_distance <= 0.035, torch.ones_like(self.reset_buf), self.reset_buf)
# max episode length
self.reset_buf = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
# double restart correction (why?, is it necessary?)
self.rew_buf = torch.where(self.progress_buf == 0, -0.75 * torch.ones_like(self.reset_buf), self.rew_buf)
self.reset_buf = torch.where(self.progress_buf == 0, torch.zeros_like(self.reset_buf), self.reset_buf)
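        # note: the two assignments above appear to guard freshly reset environments
        # (progress_buf == 0): the reward is overridden with a fixed value and the reset
        # flag is cleared so an episode is not terminated again on its very first step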
def compute_observations(self):
self.gym.refresh_dof_state_tensor(self.sim)
self.gym.refresh_actor_root_state_tensor(self.sim)
self.gym.refresh_rigid_body_state_tensor(self.sim)
if self._control_space == "cartesian":
self.gym.refresh_jacobian_tensors(self.sim)
robot_dof_pos = self.dof_pos
robot_dof_vel = self.dof_vel
self.end_effector_pos = self.rigid_body_pos[:, self.rigid_body_dict_robot[self._end_effector_link]]
self.end_effector_rot = self.rigid_body_rot[:, self.rigid_body_dict_robot[self._end_effector_link]]
target_pos = self.root_pos[:, 1]
target_rot = self.root_rot[:, 1]
dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) \
/ (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0
dof_vel_scaled = robot_dof_vel * self._dof_vel_scale
generalization_noise = torch.rand((dof_vel_scaled.shape[0], 7), device=self.device) + 0.5
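        # generalization_noise is sampled from [0.5, 1.5) and multiplies the scaled joint
        # velocities below, presumably to randomize the observation for sim-to-real transfer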
self.obs_buf[:, 0] = self.progress_buf / self.max_episode_length
self.obs_buf[:, 1:8] = dof_pos_scaled[:, :7]
self.obs_buf[:, 8:15] = dof_vel_scaled[:, :7] * generalization_noise
self.obs_buf[:, 15:18] = target_pos
# compute distance for compute_reward()
self._computed_distance = torch.norm(self.end_effector_pos - target_pos, dim=-1)
def reset_idx(self, env_ids):
# reset robot
pos = torch.clamp(self.robot_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_robot_dofs), device=self.device) - 0.5),
self.robot_dof_lower_limits, self.robot_dof_upper_limits)
pos[:, 7:] = 0
self.robot_dof_targets[env_ids, :] = pos[:]
self.dof_pos[env_ids, :] = pos[:]
self.dof_vel[env_ids, :] = 0
indexes = self.indexes_sim_robot[env_ids]
self.gym.set_dof_position_target_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.robot_dof_targets),
gymtorch.unwrap_tensor(indexes),
len(env_ids))
self.gym.set_dof_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.dof_state),
gymtorch.unwrap_tensor(indexes),
len(env_ids))
# reset targets
pos = (torch.rand((len(env_ids), 3), device=self.device) - 0.5) * 2
pos[:, 0] = 0.50 + pos[:, 0] * 0.25
pos[:, 1] = 0.00 + pos[:, 1] * 0.25
pos[:, 2] = 0.20 + pos[:, 2] * 0.10
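        # with the scaling above, targets are sampled uniformly from a box around (0.5, 0.0, 0.2) m:
        # x in [0.25, 0.75], y in [-0.25, 0.25], z in [0.10, 0.30]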
self.root_pos[env_ids, 1, :] = pos[:]
indexes = self.indexes_sim_target[env_ids]
self.gym.set_actor_root_state_tensor_indexed(self.sim,
gymtorch.unwrap_tensor(self.root_state),
gymtorch.unwrap_tensor(indexes),
len(env_ids))
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def pre_physics_step(self, actions):
actions = actions.clone().to(self.device)
if self._control_space == "joint":
targets = self.robot_dof_targets[:, :7] + self.robot_dof_speed_scales[:7] * self.dt * actions * self._action_scale
elif self._control_space == "cartesian":
goal_position = self.end_effector_pos + actions / 100.0
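            # the action is interpreted as a Cartesian position delta scaled to centimeters
            # (a component of 1.0 moves the goal position by 0.01 m in a single step)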
delta_dof_pos = isaacgym_utils.ik(jacobian_end_effector=self.jacobian_end_effector,
current_position=self.end_effector_pos,
current_orientation=self.end_effector_rot,
goal_position=goal_position,
goal_orientation=None)
targets = self.robot_dof_targets[:, :7] + delta_dof_pos
self.robot_dof_targets[:, :7] = torch.clamp(targets, self.robot_dof_lower_limits[:7], self.robot_dof_upper_limits[:7])
self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.robot_dof_targets))
def post_physics_step(self):
self.progress_buf += 1
env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(env_ids) > 0:
self.reset_idx(env_ids)
self.compute_observations()
self.compute_reward()
| 17,190 | Python | 46.09863 | 151 | 0.553054 |
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_omniverse_isaacgym_skrl_train.py | import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.utils.omniverse_isaacgym_utils import get_env_instance
from skrl.envs.torch import wrap_env
from skrl.utils import set_seed
# Seed for reproducibility
seed = set_seed() # e.g. `set_seed(42)` for fixed seed
# Define the models (stochastic and deterministic models) for the agent using helper mixins.
# - Policy: takes as input the environment's observation/state and returns an action
# - Value: takes the state as input and provides a value to guide the policy
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
class Value(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, 1))
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
# instantiate VecEnvBase and set up the task
headless = True # set headless to False for rendering
env = get_env_instance(headless=headless)
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from reaching_franka_omniverse_isaacgym_env import ReachingFrankaTask, TASK_CFG
TASK_CFG["seed"] = seed
TASK_CFG["headless"] = headless
TASK_CFG["task"]["env"]["numEnvs"] = 1024
TASK_CFG["task"]["env"]["controlSpace"] = "joint" # "joint" or "cartesian"
sim_config = SimConfig(TASK_CFG)
task = ReachingFrankaTask(name="ReachingFranka", sim_config=sim_config, env=env)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True)
# wrap the environment
env = wrap_env(env, "omniverse-isaacgym")
device = env.device
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# Instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
models_ppo["value"] = Value(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["rollouts"] = 16
cfg_ppo["learning_epochs"] = 8
cfg_ppo["mini_batches"] = 8
cfg_ppo["discount_factor"] = 0.99
cfg_ppo["lambda"] = 0.95
cfg_ppo["learning_rate"] = 5e-4
cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["grad_norm_clip"] = 1.0
cfg_ppo["ratio_clip"] = 0.2
cfg_ppo["value_clip"] = 0.2
cfg_ppo["clip_predicted_values"] = True
cfg_ppo["entropy_loss_scale"] = 0.0
cfg_ppo["value_loss_scale"] = 2.0
cfg_ppo["kl_threshold"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg_ppo["value_preprocessor"] = RunningStandardScaler
cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# log to TensorBoard every 32 timesteps and write checkpoints every 250 timesteps
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 250
agent = PPO(models=models_ppo,
memory=memory,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 5000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
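# minimal evaluation sketch (commented out, not part of the original example);
# the checkpoint path is illustrative and depends on the generated experiment directory:
# agent.load("./runs/<experiment_name>/checkpoints/best_agent.pt")
# trainer = SequentialTrainer(cfg={"timesteps": 1000, "headless": True}, env=env, agents=agent)
# trainer.eval()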
| 5,539 | Python | 40.037037 | 102 | 0.67449 |
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_isaacgym_skrl_train.py | import isaacgym
import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
from skrl.utils import set_seed
# set the seed for reproducibility
set_seed(42)
# Define the models (stochastic and deterministic models) for the agent using helper mixins.
# - Policy: takes as input the environment's observation/state and returns an action
# - Value: takes the state as input and provides a value to guide the policy
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
class Value(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, 1))
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
# instantiate and configure the task
headless = True # set headless to False for rendering
from reaching_franka_isaacgym_env import ReachingFrankaTask, TASK_CFG
TASK_CFG["headless"] = headless
TASK_CFG["env"]["numEnvs"] = 1024
TASK_CFG["env"]["controlSpace"] = "joint" # "joint" or "cartesian"
env = ReachingFrankaTask(cfg=TASK_CFG)
# wrap the environment
env = wrap_env(env, "isaacgym-preview4")
device = env.device
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# Instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
models_ppo["value"] = Value(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["rollouts"] = 16
cfg_ppo["learning_epochs"] = 8
cfg_ppo["mini_batches"] = 8
cfg_ppo["discount_factor"] = 0.99
cfg_ppo["lambda"] = 0.95
cfg_ppo["learning_rate"] = 5e-4
cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["grad_norm_clip"] = 1.0
cfg_ppo["ratio_clip"] = 0.2
cfg_ppo["value_clip"] = 0.2
cfg_ppo["clip_predicted_values"] = True
cfg_ppo["entropy_loss_scale"] = 0.0
cfg_ppo["value_loss_scale"] = 2.0
cfg_ppo["kl_threshold"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg_ppo["value_preprocessor"] = RunningStandardScaler
cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# log to TensorBoard every 5 timesteps and write checkpoints every 250 timesteps
cfg_ppo["experiment"]["write_interval"] = 5
cfg_ppo["experiment"]["checkpoint_interval"] = 250
agent = PPO(models=models_ppo,
memory=memory,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 5000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 5,118 | Python | 38.076336 | 102 | 0.667448 |
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_real_env.py | import time
import numpy as np
import gymnasium as gym
import libiiwa
class ReachingIiwa(gym.Env):
def __init__(self, control_space="joint"):
self.control_space = control_space # joint or cartesian
# spaces
self.observation_space = gym.spaces.Box(low=-1000, high=1000, shape=(18,), dtype=np.float32)
if self.control_space == "joint":
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(7,), dtype=np.float32)
elif self.control_space == "cartesian":
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32)
else:
raise ValueError("Invalid control space:", self.control_space)
# init iiwa
print("Connecting to robot...")
self.robot = libiiwa.LibIiwa()
self.robot.set_control_interface(libiiwa.ControlInterface.CONTROL_INTERFACE_SERVO)
self.robot.set_desired_joint_velocity_rel(0.5)
self.robot.set_desired_joint_acceleration_rel(0.5)
self.robot.set_desired_joint_jerk_rel(0.5)
self.robot.set_desired_cartesian_velocity(10)
self.robot.set_desired_cartesian_acceleration(10)
self.robot.set_desired_cartesian_jerk(10)
print("Robot connected")
self.motion = None
self.motion_thread = None
self.dt = 1 / 120.0
self.action_scale = 2.5
self.dof_vel_scale = 0.1
self.max_episode_length = 100
self.robot_dof_speed_scales = 1
self.target_pos = np.array([0.65, 0.2, 0.2])
self.robot_default_dof_pos = np.radians([0, 0, 0, -90, 0, 90, 0])
self.robot_dof_lower_limits = np.array([-2.9671, -2.0944, -2.9671, -2.0944, -2.9671, -2.0944, -3.0543])
self.robot_dof_upper_limits = np.array([ 2.9671, 2.0944, 2.9671, 2.0944, 2.9671, 2.0944, 3.0543])
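        # joint limits in radians, matching the nominal LBR iiwa limits of
        # +/-170, +/-120, +/-170, +/-120, +/-170, +/-120 and +/-175 degrees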
self.progress_buf = 1
self.obs_buf = np.zeros((18,), dtype=np.float32)
def _get_observation_reward_done(self):
# get robot state
robot_state = self.robot.get_state(refresh=True)
# observation
robot_dof_pos = robot_state["joint_position"]
robot_dof_vel = robot_state["joint_velocity"]
end_effector_pos = robot_state["cartesian_position"]
dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0
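        # normalize joint positions to [-1, 1] using the limits above so the real-robot
        # observation matches the scaling used during training in simulation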
dof_vel_scaled = robot_dof_vel * self.dof_vel_scale
self.obs_buf[0] = self.progress_buf / float(self.max_episode_length)
self.obs_buf[1:8] = dof_pos_scaled
self.obs_buf[8:15] = dof_vel_scaled
self.obs_buf[15:18] = self.target_pos
# reward
distance = np.linalg.norm(end_effector_pos - self.target_pos)
reward = -distance
# done
done = self.progress_buf >= self.max_episode_length - 1
done = done or distance <= 0.075
print("Distance:", distance)
if done:
print("Target or Maximum episode length reached")
time.sleep(1)
return self.obs_buf, reward, done
def reset(self):
print("Reseting...")
# go to 1) safe position, 2) random position
self.robot.command_joint_position(self.robot_default_dof_pos)
time.sleep(3)
dof_pos = self.robot_default_dof_pos + 0.25 * (np.random.rand(7) - 0.5)
self.robot.command_joint_position(dof_pos)
time.sleep(1)
# get target position from prompt
while True:
try:
print("Enter target position (X, Y, Z) in meters")
raw = input("or press [Enter] key for a random target position: ")
if raw:
self.target_pos = np.array([float(p) for p in raw.replace(' ', '').split(',')])
else:
noise = (2 * np.random.rand(3) - 1) * np.array([0.1, 0.2, 0.2])
self.target_pos = np.array([0.6, 0.0, 0.4]) + noise
print("Target position:", self.target_pos)
break
except ValueError:
print("Invalid input. Try something like: 0.65, 0.0, 0.4")
input("Press [Enter] to continue")
self.progress_buf = 0
observation, reward, done = self._get_observation_reward_done()
return observation, {}
def step(self, action):
self.progress_buf += 1
# get robot state
robot_state = self.robot.get_state(refresh=True)
# control space
# joint
if self.control_space == "joint":
dof_pos = robot_state["joint_position"] + (self.robot_dof_speed_scales * self.dt * action * self.action_scale)
self.robot.command_joint_position(dof_pos)
# cartesian
elif self.control_space == "cartesian":
end_effector_pos = robot_state["cartesian_position"] + action / 100.0
self.robot.command_cartesian_pose(end_effector_pos)
# the use of time.sleep is for simplicity. It does not guarantee control at a specific frequency
time.sleep(1 / 30.0)
observation, reward, terminated = self._get_observation_reward_done()
return observation, reward, terminated, False, {}
def render(self, *args, **kwargs):
pass
def close(self):
pass
| 5,314 | Python | 35.404109 | 144 | 0.589198 |
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_omniverse_isaacgym_skrl_eval.py | import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.utils.omniverse_isaacgym_utils import get_env_instance
from skrl.envs.torch import wrap_env
from skrl.utils import set_seed
# Seed for reproducibility
seed = set_seed() # e.g. `set_seed(42)` for fixed seed
# Define only the policy for evaluation
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
# instantiate VecEnvBase and set up the task
headless = False # set headless to False for rendering
env = get_env_instance(headless=headless)
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from reaching_iiwa_omniverse_isaacgym_env import ReachingIiwaTask, TASK_CFG
TASK_CFG["seed"] = seed
TASK_CFG["headless"] = headless
TASK_CFG["task"]["env"]["numEnvs"] = 64
TASK_CFG["task"]["env"]["controlSpace"] = "joint" # "joint" or "cartesian"
sim_config = SimConfig(TASK_CFG)
task = ReachingIiwaTask(name="ReachingIiwa", sim_config=sim_config, env=env)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True)
# wrap the environment
env = wrap_env(env, "omniverse-isaacgym")
device = env.device
# Instantiate the agent's policy.
# PPO requires 2 models during training; only the policy is needed for evaluation (visit its documentation for more details)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# log to TensorBoard every 32 timesteps and ignore checkpoints
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 0
agent = PPO(models=models_ppo,
memory=None,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# load checkpoints
if TASK_CFG["task"]["env"]["controlSpace"] == "joint":
agent.load("./agent_joint.pt")
elif TASK_CFG["task"]["env"]["controlSpace"] == "cartesian":
agent.load("./agent_cartesian.pt")
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 5000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start evaluation
trainer.eval()
| 3,781 | Python | 37.20202 | 102 | 0.68712 |
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_real_ros2_env.py | import time
import numpy as np
import gymnasium as gym
import rclpy
from rclpy.node import Node
from rclpy.qos import QoSPresetProfiles
import sensor_msgs.msg
import geometry_msgs.msg
import libiiwa_msgs.srv
class ReachingIiwa(gym.Env):
def __init__(self, control_space="joint"):
self.control_space = control_space # joint or cartesian
# spaces
self.observation_space = gym.spaces.Box(low=-1000, high=1000, shape=(18,), dtype=np.float32)
if self.control_space == "joint":
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(7,), dtype=np.float32)
elif self.control_space == "cartesian":
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32)
else:
raise ValueError("Invalid control space:", self.control_space)
# initialize the ROS node
rclpy.init()
self.node = Node(self.__class__.__name__)
import threading
threading.Thread(target=self._spin).start()
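        # spin the node in a background thread so subscriber callbacks keep updating
        # self.robot_state while reset()/step() block in the main thread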
# create publishers
self.pub_command_joint = self.node.create_publisher(sensor_msgs.msg.JointState, '/iiwa/command/joint', QoSPresetProfiles.SYSTEM_DEFAULT.value)
self.pub_command_cartesian = self.node.create_publisher(geometry_msgs.msg.Pose, '/iiwa/command/cartesian', QoSPresetProfiles.SYSTEM_DEFAULT.value)
# keep compatibility with libiiwa Python API
self.robot_state = {"joint_position": np.zeros((7,)),
"joint_velocity": np.zeros((7,)),
"cartesian_position": np.zeros((3,))}
# create subscribers
self.node.create_subscription(msg_type=sensor_msgs.msg.JointState,
topic='/iiwa/state/joint_states',
callback=self._callback_joint_states,
qos_profile=QoSPresetProfiles.SYSTEM_DEFAULT.value)
self.node.create_subscription(msg_type=geometry_msgs.msg.Pose,
topic='/iiwa/state/end_effector_pose',
callback=self._callback_end_effector_pose,
qos_profile=QoSPresetProfiles.SYSTEM_DEFAULT.value)
# service clients
client_control_interface = self.node.create_client(libiiwa_msgs.srv.SetString, '/iiwa/set_control_interface')
client_control_interface.wait_for_service()
request = libiiwa_msgs.srv.SetString.Request()
request.data = "SERVO" # or "servo"
client_control_interface.call(request)
client_joint_velocity_rel = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_joint_velocity_rel')
client_joint_acceleration_rel = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_joint_acceleration_rel')
client_joint_jerk_rel = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_joint_jerk_rel')
client_cartesian_velocity = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_cartesian_velocity')
client_cartesian_acceleration = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_cartesian_acceleration')
client_cartesian_jerk = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_cartesian_jerk')
client_joint_velocity_rel.wait_for_service()
client_joint_acceleration_rel.wait_for_service()
client_joint_jerk_rel.wait_for_service()
client_cartesian_velocity.wait_for_service()
client_cartesian_acceleration.wait_for_service()
client_cartesian_jerk.wait_for_service()
request = libiiwa_msgs.srv.SetNumber.Request()
request.data = 0.5
client_joint_velocity_rel.call(request)
client_joint_acceleration_rel.call(request)
client_joint_jerk_rel.call(request)
request.data = 10.0
client_cartesian_velocity.call(request)
client_cartesian_acceleration.call(request)
client_cartesian_jerk.call(request)
print("Robot connected")
self.motion = None
self.motion_thread = None
self.dt = 1 / 120.0
self.action_scale = 2.5
self.dof_vel_scale = 0.1
self.max_episode_length = 100
self.robot_dof_speed_scales = 1
self.target_pos = np.array([0.65, 0.2, 0.2])
self.robot_default_dof_pos = np.radians([0, 0, 0, -90, 0, 90, 0])
self.robot_dof_lower_limits = np.array([-2.9671, -2.0944, -2.9671, -2.0944, -2.9671, -2.0944, -3.0543])
self.robot_dof_upper_limits = np.array([ 2.9671, 2.0944, 2.9671, 2.0944, 2.9671, 2.0944, 3.0543])
self.progress_buf = 1
self.obs_buf = np.zeros((18,), dtype=np.float32)
def _spin(self):
rclpy.spin(self.node)
def _callback_joint_states(self, msg):
self.robot_state["joint_position"] = np.array(msg.position)
self.robot_state["joint_velocity"] = np.array(msg.velocity)
def _callback_end_effector_pose(self, msg):
        position = msg.position
        self.robot_state["cartesian_position"] = np.array([position.x, position.y, position.z])
def _get_observation_reward_done(self):
# observation
robot_dof_pos = self.robot_state["joint_position"]
robot_dof_vel = self.robot_state["joint_velocity"]
end_effector_pos = self.robot_state["cartesian_position"]
dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0
dof_vel_scaled = robot_dof_vel * self.dof_vel_scale
self.obs_buf[0] = self.progress_buf / float(self.max_episode_length)
self.obs_buf[1:8] = dof_pos_scaled
self.obs_buf[8:15] = dof_vel_scaled
self.obs_buf[15:18] = self.target_pos
# reward
distance = np.linalg.norm(end_effector_pos - self.target_pos)
reward = -distance
# done
done = self.progress_buf >= self.max_episode_length - 1
done = done or distance <= 0.075
print("Distance:", distance)
if done:
print("Target or Maximum episode length reached")
time.sleep(1)
return self.obs_buf, reward, done
def reset(self):
print("Reseting...")
# go to 1) safe position, 2) random position
msg = sensor_msgs.msg.JointState()
msg.position = self.robot_default_dof_pos.tolist()
self.pub_command_joint.publish(msg)
time.sleep(3)
msg.position = (self.robot_default_dof_pos + 0.25 * (np.random.rand(7) - 0.5)).tolist()
self.pub_command_joint.publish(msg)
time.sleep(1)
# get target position from prompt
while True:
try:
print("Enter target position (X, Y, Z) in meters")
raw = input("or press [Enter] key for a random target position: ")
if raw:
self.target_pos = np.array([float(p) for p in raw.replace(' ', '').split(',')])
else:
noise = (2 * np.random.rand(3) - 1) * np.array([0.1, 0.2, 0.2])
self.target_pos = np.array([0.6, 0.0, 0.4]) + noise
print("Target position:", self.target_pos)
break
except ValueError:
print("Invalid input. Try something like: 0.65, 0.0, 0.4")
input("Press [Enter] to continue")
self.progress_buf = 0
observation, reward, done = self._get_observation_reward_done()
return observation, {}
def step(self, action):
self.progress_buf += 1
# control space
# joint
if self.control_space == "joint":
joint_positions = self.robot_state["joint_position"] + (self.robot_dof_speed_scales * self.dt * action * self.action_scale)
msg = sensor_msgs.msg.JointState()
msg.position = joint_positions.tolist()
self.pub_command_joint.publish(msg)
# cartesian
elif self.control_space == "cartesian":
end_effector_pos = self.robot_state["cartesian_position"] + action / 100.0
msg = geometry_msgs.msg.Pose()
msg.position.x = end_effector_pos[0]
msg.position.y = end_effector_pos[1]
msg.position.z = end_effector_pos[2]
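            # the orientation is filled with NaN, presumably so the libiiwa driver keeps the
            # current end-effector orientation and only the position is commanded (assumption)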
msg.orientation.x = np.nan
msg.orientation.y = np.nan
msg.orientation.z = np.nan
msg.orientation.w = np.nan
self.pub_command_cartesian.publish(msg)
# the use of time.sleep is for simplicity. It does not guarantee control at a specific frequency
time.sleep(1 / 30.0)
observation, reward, terminated = self._get_observation_reward_done()
return observation, reward, terminated, False, {}
def render(self, *args, **kwargs):
pass
def close(self):
# shutdown the node
self.node.destroy_node()
rclpy.shutdown()
| 9,047 | Python | 40.315068 | 154 | 0.607605 |
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_real_skrl_eval.py | import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
# Define only the policy for evaluation
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
# Load the environment
from reaching_iiwa_real_env import ReachingIiwa
control_space = "joint" # joint or cartesian
env = ReachingIiwa(control_space=control_space)
# wrap the environment
env = wrap_env(env)
device = env.device
# Instantiate the agent's policy.
# PPO requires 2 models during training; only the policy is needed for evaluation (visit its documentation for more details)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# log to TensorBoard every 32 timesteps and ignore checkpoints
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 0
agent = PPO(models=models_ppo,
memory=None,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# load checkpoints
if control_space == "joint":
agent.load("./agent_joint.pt")
elif control_space == "cartesian":
agent.load("./agent_cartesian.pt")
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 1000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start evaluation
trainer.eval()
| 3,027 | Python | 35.481927 | 102 | 0.676577 |
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_omniverse_isaacgym_skrl_train.py | import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
from skrl.memories.torch import RandomMemory
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.schedulers.torch import KLAdaptiveRL
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.utils.omniverse_isaacgym_utils import get_env_instance
from skrl.envs.torch import wrap_env
from skrl.utils import set_seed
# Seed for reproducibility
seed = set_seed() # e.g. `set_seed(42)` for fixed seed
# Define the models (stochastic and deterministic models) for the agent using helper mixins.
# - Policy: takes as input the environment's observation/state and returns an action
# - Value: takes the state as input and provides a value to guide the policy
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
class Value(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, 1))
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
# instantiate VecEnvBase and set up the task
headless = True # set headless to False for rendering
env = get_env_instance(headless=headless)
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
from reaching_iiwa_omniverse_isaacgym_env import ReachingIiwaTask, TASK_CFG
TASK_CFG["seed"] = seed
TASK_CFG["headless"] = headless
TASK_CFG["task"]["env"]["numEnvs"] = 1024
TASK_CFG["task"]["env"]["controlSpace"] = "joint" # "joint" or "cartesian"
sim_config = SimConfig(TASK_CFG)
task = ReachingIiwaTask(name="ReachingIiwa", sim_config=sim_config, env=env)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True)
# wrap the environment
env = wrap_env(env, "omniverse-isaacgym")
device = env.device
# Instantiate a RandomMemory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
# Instantiate the agent's models (function approximators).
# PPO requires 2 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
models_ppo["value"] = Value(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["rollouts"] = 16
cfg_ppo["learning_epochs"] = 8
cfg_ppo["mini_batches"] = 8
cfg_ppo["discount_factor"] = 0.99
cfg_ppo["lambda"] = 0.95
cfg_ppo["learning_rate"] = 5e-4
cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL
cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["grad_norm_clip"] = 1.0
cfg_ppo["ratio_clip"] = 0.2
cfg_ppo["value_clip"] = 0.2
cfg_ppo["clip_predicted_values"] = True
cfg_ppo["entropy_loss_scale"] = 0.0
cfg_ppo["value_loss_scale"] = 2.0
cfg_ppo["kl_threshold"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg_ppo["value_preprocessor"] = RunningStandardScaler
cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device}
# log to TensorBoard every 32 timesteps and write checkpoints every 250 timesteps
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 250
agent = PPO(models=models_ppo,
memory=memory,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 5000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start training
trainer.train()
| 5,531 | Python | 39.977777 | 102 | 0.674019 |
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_real_ros_ros2_skrl_eval.py | import torch
import torch.nn as nn
# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env
# Define only the policy for evaluation
class Policy(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
nn.ELU(),
nn.Linear(256, 128),
nn.ELU(),
nn.Linear(128, 64),
nn.ELU(),
nn.Linear(64, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
# Load the environment according to the ROS version
def get_active_ros_version():
import os
if os.environ.get("ROS_DISTRO"):
return "ROS2" if os.environ.get("AMENT_PREFIX_PATH") else "ROS"
return ""
active_ros_version = get_active_ros_version()
if active_ros_version == "ROS":
from reaching_iiwa_real_ros_env import ReachingIiwa
elif active_ros_version == "ROS2":
from reaching_iiwa_real_ros2_env import ReachingIiwa
else:
print("No active ROS version found")
exit()
control_space = "joint" # joint or cartesian
env = ReachingIiwa(control_space=control_space)
# wrap the environment
env = wrap_env(env)
device = env.device
# Instantiate the agent's policy.
# PPO requires 2 models during training; only the policy is needed for evaluation (visit its documentation for more details)
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)
# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# log to TensorBoard every 32 timesteps and ignore checkpoints
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 0
agent = PPO(models=models_ppo,
memory=None,
cfg=cfg_ppo,
observation_space=env.observation_space,
action_space=env.action_space,
device=device)
# load checkpoints
if control_space == "joint":
agent.load("./agent_joint.pt")
elif control_space == "cartesian":
agent.load("./agent_cartesian.pt")
# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 1000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
# start evaluation
trainer.eval()
| 3,461 | Python | 34.690721 | 102 | 0.676394 |
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_omniverse_isaacgym_env.py | import torch
import numpy as np
from omniisaacgymenvs.tasks.base.rl_task import RLTask
from omni.isaac.core.prims import RigidPrimView
from omni.isaac.core.articulations import ArticulationView
from omni.isaac.core.objects import DynamicSphere
from omni.isaac.core.utils.prims import get_prim_at_path
from robots.iiwa14 import Iiwa14 as Robot
from skrl.utils import omniverse_isaacgym_utils
# post_physics_step calls
# - get_observations()
# - get_states()
# - calculate_metrics()
# - is_done()
# - get_extras()
TASK_CFG = {"test": False,
"device_id": 0,
"headless": True,
"sim_device": "gpu",
"enable_livestream": False,
"warp": False,
"seed": 42,
"task": {"name": "ReachingIiwa",
"physics_engine": "physx",
"env": {"numEnvs": 1024,
"envSpacing": 1.5,
"episodeLength": 100,
"enableDebugVis": False,
"clipObservations": 1000.0,
"clipActions": 1.0,
"controlFrequencyInv": 4,
"actionScale": 2.5,
"dofVelocityScale": 0.1,
"controlSpace": "cartesian"},
"sim": {"dt": 0.0083, # 1 / 120
"use_gpu_pipeline": True,
"gravity": [0.0, 0.0, -9.81],
"add_ground_plane": True,
"use_flatcache": True,
"enable_scene_query_support": False,
"enable_cameras": False,
"default_physics_material": {"static_friction": 1.0,
"dynamic_friction": 1.0,
"restitution": 0.0},
"physx": {"worker_thread_count": 4,
"solver_type": 1,
"use_gpu": True,
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"contact_offset": 0.005,
"rest_offset": 0.0,
"bounce_threshold_velocity": 0.2,
"friction_offset_threshold": 0.04,
"friction_correlation_distance": 0.025,
"enable_sleeping": True,
"enable_stabilization": True,
"max_depenetration_velocity": 1000.0,
"gpu_max_rigid_contact_count": 524288,
"gpu_max_rigid_patch_count": 33554432,
"gpu_found_lost_pairs_capacity": 524288,
"gpu_found_lost_aggregate_pairs_capacity": 262144,
"gpu_total_aggregate_pairs_capacity": 1048576,
"gpu_max_soft_body_contacts": 1048576,
"gpu_max_particle_contacts": 1048576,
"gpu_heap_capacity": 33554432,
"gpu_temp_buffer_capacity": 16777216,
"gpu_max_num_partitions": 8},
"robot": {"override_usd_defaults": False,
"fixed_base": False,
"enable_self_collisions": False,
"enable_gyroscopic_forces": True,
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.005,
"stabilization_threshold": 0.001,
"density": -1,
"max_depenetration_velocity": 1000.0,
"contact_offset": 0.005,
"rest_offset": 0.0},
"target": {"override_usd_defaults": False,
"fixed_base": True,
"make_kinematic": True,
"enable_self_collisions": False,
"enable_gyroscopic_forces": True,
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"sleep_threshold": 0.005,
"stabilization_threshold": 0.001,
"density": -1,
"max_depenetration_velocity": 1000.0,
"contact_offset": 0.005,
"rest_offset": 0.0}}}}
class RobotView(ArticulationView):
def __init__(self, prim_paths_expr: str, name: str = "robot_view") -> None:
super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False)
class ReachingIiwaTask(RLTask):
def __init__(self, name, sim_config, env, offset=None) -> None:
self._sim_config = sim_config
self._cfg = sim_config.config
self._task_cfg = sim_config.task_config
self.dt = 1 / 120.0
self._num_envs = self._task_cfg["env"]["numEnvs"]
self._env_spacing = self._task_cfg["env"]["envSpacing"]
self._action_scale = self._task_cfg["env"]["actionScale"]
self._dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"]
self._max_episode_length = self._task_cfg["env"]["episodeLength"]
self._control_space = self._task_cfg["env"]["controlSpace"]
# observation and action space
self._num_observations = 18
if self._control_space == "joint":
self._num_actions = 7
elif self._control_space == "cartesian":
self._num_actions = 3
else:
raise ValueError("Invalid control space: {}".format(self._control_space))
self._end_effector_link = "iiwa_link_7"
RLTask.__init__(self, name, env)
def set_up_scene(self, scene) -> None:
self.get_robot()
self.get_target()
super().set_up_scene(scene)
# robot view
self._robots = RobotView(prim_paths_expr="/World/envs/.*/robot", name="robot_view")
scene.add(self._robots)
# end-effectors view
self._end_effectors = RigidPrimView(prim_paths_expr="/World/envs/.*/robot/{}".format(self._end_effector_link), name="end_effector_view")
scene.add(self._end_effectors)
# target view
self._targets = RigidPrimView(prim_paths_expr="/World/envs/.*/target", name="target_view", reset_xform_properties=False)
scene.add(self._targets)
self.init_data()
def get_robot(self):
robot = Robot(prim_path=self.default_zero_env_path + "/robot",
translation=torch.tensor([0.0, 0.0, 0.0]),
orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]),
name="robot")
self._sim_config.apply_articulation_settings("robot", get_prim_at_path(robot.prim_path), self._sim_config.parse_actor_config("robot"))
def get_target(self):
target = DynamicSphere(prim_path=self.default_zero_env_path + "/target",
name="target",
radius=0.025,
color=torch.tensor([1, 0, 0]))
self._sim_config.apply_articulation_settings("target", get_prim_at_path(target.prim_path), self._sim_config.parse_actor_config("target"))
target.set_collision_enabled(False)
def init_data(self) -> None:
self.robot_default_dof_pos = torch.tensor(np.radians([0, 0, 0, -90, 0, 90, 0]), device=self._device, dtype=torch.float32)
self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device)
if self._control_space == "cartesian":
self.jacobians = torch.zeros((self._num_envs, 7, 6, 7), device=self._device)
self.end_effector_pos, self.end_effector_rot = torch.zeros((self._num_envs, 3), device=self._device), torch.zeros((self._num_envs, 4), device=self._device)
def get_observations(self) -> dict:
robot_dof_pos = self._robots.get_joint_positions(clone=False)
robot_dof_vel = self._robots.get_joint_velocities(clone=False)
end_effector_pos, end_effector_rot = self._end_effectors.get_world_poses(clone=False)
target_pos, target_rot = self._targets.get_world_poses(clone=False)
dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) \
/ (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0
dof_vel_scaled = robot_dof_vel * self._dof_vel_scale
generalization_noise = torch.rand((dof_vel_scaled.shape[0], 7), device=self._device) + 0.5
self.obs_buf[:, 0] = self.progress_buf / self._max_episode_length
self.obs_buf[:, 1:8] = dof_pos_scaled
self.obs_buf[:, 8:15] = dof_vel_scaled * generalization_noise
self.obs_buf[:, 15:18] = target_pos - self._env_pos
# compute distance for calculate_metrics() and is_done()
self._computed_distance = torch.norm(end_effector_pos - target_pos, dim=-1)
if self._control_space == "cartesian":
self.jacobians = self._robots.get_jacobians(clone=False)
self.end_effector_pos, self.end_effector_rot = end_effector_pos, end_effector_rot
self.end_effector_pos -= self._env_pos
return {self._robots.name: {"obs_buf": self.obs_buf}}
def pre_physics_step(self, actions) -> None:
reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)
if len(reset_env_ids) > 0:
self.reset_idx(reset_env_ids)
self.actions = actions.clone().to(self._device)
env_ids_int32 = torch.arange(self._robots.count, dtype=torch.int32, device=self._device)
if self._control_space == "joint":
targets = self.robot_dof_targets + self.robot_dof_speed_scales * self.dt * self.actions * self._action_scale
elif self._control_space == "cartesian":
goal_position = self.end_effector_pos + actions / 100.0
delta_dof_pos = omniverse_isaacgym_utils.ik(jacobian_end_effector=self.jacobians[:, 7 - 1, :, :7], # iiwa_link_7 index: 7
current_position=self.end_effector_pos,
current_orientation=self.end_effector_rot,
goal_position=goal_position,
goal_orientation=None)
targets = self.robot_dof_targets[:, :7] + delta_dof_pos
self.robot_dof_targets = torch.clamp(targets, self.robot_dof_lower_limits, self.robot_dof_upper_limits)
self._robots.set_joint_position_targets(self.robot_dof_targets, indices=env_ids_int32)
def reset_idx(self, env_ids) -> None:
indices = env_ids.to(dtype=torch.int32)
# reset robot
pos = torch.clamp(self.robot_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_robot_dofs), device=self._device) - 0.5),
self.robot_dof_lower_limits, self.robot_dof_upper_limits)
dof_pos = torch.zeros((len(indices), self._robots.num_dof), device=self._device)
dof_pos[:] = pos
dof_vel = torch.zeros((len(indices), self._robots.num_dof), device=self._device)
self.robot_dof_targets[env_ids, :] = pos
self.robot_dof_pos[env_ids, :] = pos
self._robots.set_joint_position_targets(self.robot_dof_targets[env_ids], indices=indices)
self._robots.set_joint_positions(dof_pos, indices=indices)
self._robots.set_joint_velocities(dof_vel, indices=indices)
# reset target
pos = (torch.rand((len(env_ids), 3), device=self._device) - 0.5) * 2 \
* torch.tensor([0.10, 0.20, 0.20], device=self._device) \
+ torch.tensor([0.60, 0.00, 0.40], device=self._device)
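        # targets are sampled in a box of +/- (0.10, 0.20, 0.20) m around (0.60, 0.00, 0.40) m,
        # the same region used for random targets in the real-robot environments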
self._targets.set_world_poses(pos + self._env_pos[env_ids], indices=indices)
# bookkeeping
self.reset_buf[env_ids] = 0
self.progress_buf[env_ids] = 0
def post_reset(self):
self.num_robot_dofs = self._robots.num_dof
self.robot_dof_pos = torch.zeros((self.num_envs, self.num_robot_dofs), device=self._device)
dof_limits = self._robots.get_dof_limits()
self.robot_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device)
self.robot_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device)
self.robot_dof_speed_scales = torch.ones_like(self.robot_dof_lower_limits)
self.robot_dof_targets = torch.zeros((self._num_envs, self.num_robot_dofs), dtype=torch.float, device=self._device)
# randomize all envs
indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device)
self.reset_idx(indices)
def calculate_metrics(self) -> None:
self.rew_buf[:] = -self._computed_distance
def is_done(self) -> None:
self.reset_buf.fill_(0)
# target reached
self.reset_buf = torch.where(self._computed_distance <= 0.035, torch.ones_like(self.reset_buf), self.reset_buf)
# max episode length
self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
| 14,113 | Python | 50.137681 | 167 | 0.517395 |
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_real_ros_env.py | import time
import numpy as np
import gymnasium as gym
import rospy
import sensor_msgs.msg
import geometry_msgs.msg
import libiiwa_msgs.srv
class ReachingIiwa(gym.Env):
def __init__(self, control_space="joint"):
self.control_space = control_space # joint or cartesian
# spaces
self.observation_space = gym.spaces.Box(low=-1000, high=1000, shape=(18,), dtype=np.float32)
if self.control_space == "joint":
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(7,), dtype=np.float32)
elif self.control_space == "cartesian":
self.action_space = gym.spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32)
else:
raise ValueError("Invalid control space:", self.control_space)
# create publishers
self.pub_command_joint = rospy.Publisher('/iiwa/command/joint', sensor_msgs.msg.JointState, queue_size=1)
self.pub_command_cartesian = rospy.Publisher('/iiwa/command/cartesian', geometry_msgs.msg.Pose, queue_size=1)
# keep compatibility with libiiwa Python API
self.robot_state = {"joint_position": np.zeros((7,)),
"joint_velocity": np.zeros((7,)),
"cartesian_position": np.zeros((3,))}
# create subscribers
rospy.Subscriber('/iiwa/state/joint_states', sensor_msgs.msg.JointState, self._callback_joint_states)
rospy.Subscriber('/iiwa/state/end_effector_pose', geometry_msgs.msg.Pose, self._callback_end_effector_pose)
# create service clients
rospy.wait_for_service('/iiwa/set_control_interface')
proxy = rospy.ServiceProxy('/iiwa/set_control_interface', libiiwa_msgs.srv.SetString)
proxy("SERVO") # or "servo"
rospy.wait_for_service('/iiwa/set_desired_joint_velocity_rel')
rospy.wait_for_service('/iiwa/set_desired_joint_acceleration_rel')
rospy.wait_for_service('/iiwa/set_desired_joint_jerk_rel')
proxy = rospy.ServiceProxy('/iiwa/set_desired_joint_velocity_rel', libiiwa_msgs.srv.SetNumber)
proxy(0.5)
proxy = rospy.ServiceProxy('/iiwa/set_desired_joint_acceleration_rel', libiiwa_msgs.srv.SetNumber)
proxy(0.5)
proxy = rospy.ServiceProxy('/iiwa/set_desired_joint_jerk_rel', libiiwa_msgs.srv.SetNumber)
proxy(0.5)
rospy.wait_for_service('/iiwa/set_desired_cartesian_velocity')
rospy.wait_for_service('/iiwa/set_desired_cartesian_acceleration')
rospy.wait_for_service('/iiwa/set_desired_cartesian_jerk')
proxy = rospy.ServiceProxy('/iiwa/set_desired_cartesian_velocity', libiiwa_msgs.srv.SetNumber)
proxy(10.0)
proxy = rospy.ServiceProxy('/iiwa/set_desired_cartesian_acceleration', libiiwa_msgs.srv.SetNumber)
proxy(10.0)
proxy = rospy.ServiceProxy('/iiwa/set_desired_cartesian_jerk', libiiwa_msgs.srv.SetNumber)
proxy(10.0)
# initialize the ROS node
rospy.init_node(self.__class__.__name__)
print("Robot connected")
self.motion = None
self.motion_thread = None
self.dt = 1 / 120.0
self.action_scale = 2.5
self.dof_vel_scale = 0.1
self.max_episode_length = 100
self.robot_dof_speed_scales = 1
self.target_pos = np.array([0.65, 0.2, 0.2])
self.robot_default_dof_pos = np.radians([0, 0, 0, -90, 0, 90, 0])
self.robot_dof_lower_limits = np.array([-2.9671, -2.0944, -2.9671, -2.0944, -2.9671, -2.0944, -3.0543])
self.robot_dof_upper_limits = np.array([ 2.9671, 2.0944, 2.9671, 2.0944, 2.9671, 2.0944, 3.0543])
self.progress_buf = 1
self.obs_buf = np.zeros((18,), dtype=np.float32)
def _callback_joint_states(self, msg):
self.robot_state["joint_position"] = np.array(msg.position)
self.robot_state["joint_velocity"] = np.array(msg.velocity)
def _callback_end_effector_pose(self, msg):
        position = msg.position
        self.robot_state["cartesian_position"] = np.array([position.x, position.y, position.z])
def _get_observation_reward_done(self):
# observation
robot_dof_pos = self.robot_state["joint_position"]
robot_dof_vel = self.robot_state["joint_velocity"]
end_effector_pos = self.robot_state["cartesian_position"]
dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0
dof_vel_scaled = robot_dof_vel * self.dof_vel_scale
self.obs_buf[0] = self.progress_buf / float(self.max_episode_length)
self.obs_buf[1:8] = dof_pos_scaled
self.obs_buf[8:15] = dof_vel_scaled
self.obs_buf[15:18] = self.target_pos
# reward
distance = np.linalg.norm(end_effector_pos - self.target_pos)
reward = -distance
# done
done = self.progress_buf >= self.max_episode_length - 1
done = done or distance <= 0.075
print("Distance:", distance)
if done:
print("Target or Maximum episode length reached")
time.sleep(1)
return self.obs_buf, reward, done
def reset(self):
print("Reseting...")
# go to 1) safe position, 2) random position
msg = sensor_msgs.msg.JointState()
msg.position = self.robot_default_dof_pos.tolist()
self.pub_command_joint.publish(msg)
time.sleep(3)
msg.position = (self.robot_default_dof_pos + 0.25 * (np.random.rand(7) - 0.5)).tolist()
self.pub_command_joint.publish(msg)
time.sleep(1)
# get target position from prompt
while True:
try:
print("Enter target position (X, Y, Z) in meters")
raw = input("or press [Enter] key for a random target position: ")
if raw:
self.target_pos = np.array([float(p) for p in raw.replace(' ', '').split(',')])
else:
noise = (2 * np.random.rand(3) - 1) * np.array([0.1, 0.2, 0.2])
self.target_pos = np.array([0.6, 0.0, 0.4]) + noise
print("Target position:", self.target_pos)
break
except ValueError:
print("Invalid input. Try something like: 0.65, 0.0, 0.4")
input("Press [Enter] to continue")
self.progress_buf = 0
observation, reward, done = self._get_observation_reward_done()
return observation, {}
def step(self, action):
self.progress_buf += 1
# control space
# joint
if self.control_space == "joint":
joint_positions = self.robot_state["joint_position"] + (self.robot_dof_speed_scales * self.dt * action * self.action_scale)
msg = sensor_msgs.msg.JointState()
msg.position = joint_positions.tolist()
self.pub_command_joint.publish(msg)
# cartesian
elif self.control_space == "cartesian":
end_effector_pos = self.robot_state["cartesian_position"] + action / 100.0
msg = geometry_msgs.msg.Pose()
msg.position.x = end_effector_pos[0]
msg.position.y = end_effector_pos[1]
msg.position.z = end_effector_pos[2]
msg.orientation.x = np.nan
msg.orientation.y = np.nan
msg.orientation.z = np.nan
msg.orientation.w = np.nan
self.pub_command_cartesian.publish(msg)
# the use of time.sleep is for simplicity. It does not guarantee control at a specific frequency
time.sleep(1 / 30.0)
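        # a stricter alternative (sketch, not used here): create a rate object once, e.g. in __init__,
        #   self.control_rate = rospy.Rate(30)
        # and call `self.control_rate.sleep()` here instead of time.sleep.
        # rospy.Rate compensates for the time spent in the loop body, keeping a more regular control frequency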
observation, reward, terminated = self._get_observation_reward_done()
return observation, reward, terminated, False, {}
def render(self, *args, **kwargs):
pass
def close(self):
pass
| 7,831 | Python | 39.371134 | 144 | 0.605542 |
Toni-SM/skrl/docs/source/snippets/utils_postprocessing.py | # [start-memory_file_iterator-torch]
from skrl.utils import postprocessing
# assuming there is a directory called "memories" with Torch files in it
memory_iterator = postprocessing.MemoryFileIterator("memories/*.pt")
for filename, data in memory_iterator:
filename # str: basename of the current file
data # dict: keys are the names of the memory tensors in the file.
# Tensor shapes are (memory size, number of envs, specific content size)
# example of simple usage:
# print the filenames of all memories and their tensor shapes
print("\nfilename:", filename)
print(" |-- states:", data['states'].shape)
print(" |-- actions:", data['actions'].shape)
print(" |-- rewards:", data['rewards'].shape)
print(" |-- next_states:", data['next_states'].shape)
print(" |-- dones:", data['dones'].shape)
# [end-memory_file_iterator-torch]
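# a small follow-up sketch: computing a per-file statistic from the loaded tensors
memory_iterator = postprocessing.MemoryFileIterator("memories/*.pt")
for filename, data in memory_iterator:
    print(filename, "- mean reward:", data["rewards"].mean().item())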
# [start-memory_file_iterator-numpy]
from skrl.utils import postprocessing
# assuming there is a directory called "memories" with NumPy files in it
memory_iterator = postprocessing.MemoryFileIterator("memories/*.npz")
for filename, data in memory_iterator:
filename # str: basename of the current file
data # dict: keys are the names of the memory arrays in the file.
# Array shapes are (memory size, number of envs, specific content size)
# example of simple usage:
# print the filenames of all memories and their array shapes
print("\nfilename:", filename)
print(" |-- states:", data['states'].shape)
print(" |-- actions:", data['actions'].shape)
print(" |-- rewards:", data['rewards'].shape)
print(" |-- next_states:", data['next_states'].shape)
print(" |-- dones:", data['dones'].shape)
# [end-memory_file_iterator-numpy]
# [start-memory_file_iterator-csv]
from skrl.utils import postprocessing
# assuming there is a directory called "memories" with CSV files in it
memory_iterator = postprocessing.MemoryFileIterator("memories/*.csv")
for filename, data in memory_iterator:
filename # str: basename of the current file
data # dict: keys are the names of the memory list of lists extracted from the file.
# List lengths are (memory size * number of envs) and
# sublist lengths are (specific content size)
# example of simple usage:
# print the filenames of all memories and their list lengths
print("\nfilename:", filename)
print(" |-- states:", len(data['states']))
print(" |-- actions:", len(data['actions']))
print(" |-- rewards:", len(data['rewards']))
print(" |-- next_states:", len(data['next_states']))
print(" |-- dones:", len(data['dones']))
# [end-memory_file_iterator-csv]
# [start-tensorboard_file_iterator-list]
from skrl.utils import postprocessing
# assuming there is a directory called "runs" with experiments and Tensorboard files in it
tensorboard_iterator = postprocessing.TensorboardFileIterator("runs/*/events.out.tfevents.*", \
tags=["Reward / Total reward (mean)"])
for dirname, data in tensorboard_iterator:
dirname # str: path of the directory (experiment name) containing the Tensorboard file
data # dict: keys are the tags, values are lists of [step, value] pairs
# example of simple usage:
# print the directory name and the value length for the "Reward / Total reward (mean)" tag
print("\ndirname:", dirname)
for tag, values in data.items():
print(" |-- tag:", tag)
print(" | |-- value length:", len(values))
# [end-tensorboard_file_iterator-list]
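# a small follow-up sketch: converting the [step, value] pairs to NumPy arrays
# (e.g. to compute statistics or plot training curves)
import numpy as np
tensorboard_iterator = postprocessing.TensorboardFileIterator("runs/*/events.out.tfevents.*", \
                                                              tags=["Reward / Total reward (mean)"])
for dirname, data in tensorboard_iterator:
    for tag, values in data.items():
        steps, rewards = np.array(values).T
        print(dirname, tag, "- last recorded value:", rewards[-1])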
| 3,582 | Python | 40.66279 | 95 | 0.676159 |
Toni-SM/skrl/docs/source/snippets/shared_model.py | # [start-mlp-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, GaussianMixin, DeterministicMixin
# define the shared model
class SharedModel(GaussianMixin, DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction, role="policy")
DeterministicMixin.__init__(self, clip_actions, role="value")
# shared layers/network
self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
nn.ELU(),
nn.Linear(32, 32),
nn.ELU())
# separated layers ("policy")
self.mean_layer = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
# separated layer ("value")
self.value_layer = nn.Linear(32, 1)
# override the .act(...) method to disambiguate its call
def act(self, inputs, role):
if role == "policy":
return GaussianMixin.act(self, inputs, role)
elif role == "value":
return DeterministicMixin.act(self, inputs, role)
# forward the input to compute model output according to the specified role
def compute(self, inputs, role):
if role == "policy":
return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {}
elif role == "value":
return self.value_layer(self.net(inputs["states"])), {}
# instantiate the shared model and pass the same instance to the other key
models = {}
models["policy"] = SharedModel(env.observation_space, env.action_space, env.device)
models["value"] = models["policy"]
# [end-mlp-torch]
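# a minimal usage sketch: the same instance answers both roles
# (random observations are used here purely for illustration)
states = torch.rand(16, models["policy"].num_observations, device=env.device)
actions, log_prob, _ = models["policy"].act({"states": states}, role="policy")
values, _, _ = models["value"].act({"states": states}, role="value")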
| 1,974 | Python | 39.306122 | 116 | 0.624113 |
Toni-SM/skrl/docs/source/snippets/noises.py | # [start-base-class-torch]
from typing import Union, Tuple
import torch
from skrl.resources.noises.torch import Noise
class CustomNoise(Noise):
def __init__(self, device: Union[str, torch.device] = "cuda:0") -> None:
"""
:param device: Device on which a torch tensor is or will be allocated (default: "cuda:0")
:type device: str or torch.device, optional
"""
super().__init__(device)
def sample(self, size: Union[Tuple[int], torch.Size]) -> torch.Tensor:
"""Sample noise
:param size: Shape of the sampled tensor
:type size: tuple or list of integers, or torch.Size
:return: Sampled noise
:rtype: torch.Tensor
"""
# ================================
# - sample noise
# ================================
# [end-base-class-torch]
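# a concrete (illustrative) subclass of the template above: uniform noise in [minimum, maximum)
import torch
from skrl.resources.noises.torch import Noise
class UniformNoise(Noise):
    def __init__(self, minimum=-1.0, maximum=1.0, device="cuda:0"):
        super().__init__(device)
        self.minimum = minimum
        self.maximum = maximum
    def sample(self, size):
        # torch.rand draws from [0, 1); scale and shift the samples to [minimum, maximum)
        return (self.maximum - self.minimum) * torch.rand(size, device=self.device) + self.minimum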
# [start-base-class-jax]
from typing import Optional, Union, Tuple
import numpy as np
import jaxlib
import jax.numpy as jnp
from skrl.resources.noises.jax import Noise
class CustomNoise(Noise):
def __init__(self, device: Optional[Union[str, jaxlib.xla_extension.Device]] = None) -> None:
"""Custom noise
:param device: Device on which a jax array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"``
:type device: str or jaxlib.xla_extension.Device, optional
"""
super().__init__(device)
def sample(self, size: Tuple[int]) -> Union[np.ndarray, jnp.ndarray]:
"""Sample noise
:param size: Shape of the sampled tensor
:type size: tuple or list of integers
:return: Sampled noise
:rtype: np.ndarray or jnp.ndarray
"""
# ================================
# - sample noise
# ================================
# [end-base-class-jax]
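# a concrete (illustrative) subclass of the template above: uniform noise in [minimum, maximum).
# NumPy's RNG is used here for simplicity; skrl's built-in JAX noises manage JAX PRNG keys internally
import numpy as np
import jax.numpy as jnp
from skrl.resources.noises.jax import Noise
class UniformNoise(Noise):
    def __init__(self, minimum=-1.0, maximum=1.0, device=None):
        super().__init__(device)
        self.minimum = minimum
        self.maximum = maximum
    def sample(self, size):
        # sample on the host with NumPy and hand the result to JAX
        return jnp.asarray(np.random.uniform(self.minimum, self.maximum, size))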
# =============================================================================
# [torch-start-gaussian]
from skrl.resources.noises.torch import GaussianNoise
cfg = DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = GaussianNoise(mean=0, std=0.2, device="cuda:0")
# [torch-end-gaussian]
# [jax-start-gaussian]
from skrl.resources.noises.jax import GaussianNoise
cfg = DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = GaussianNoise(mean=0, std=0.2)
# [jax-end-gaussian]
# =============================================================================
# [torch-start-ornstein-uhlenbeck]
from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise
cfg = DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.2, base_scale=1.0, device="cuda:0")
# [torch-end-ornstein-uhlenbeck]
# [jax-start-ornstein-uhlenbeck]
from skrl.resources.noises.jax import OrnsteinUhlenbeckNoise
cfg = DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.2, base_scale=1.0)
# [jax-end-ornstein-uhlenbeck]
| 2,976 | Python | 28.77 | 108 | 0.589718 |
Toni-SM/skrl/docs/source/snippets/gaussian_model.py | # [start-definition-torch]
class GaussianModel(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
# [end-definition-torch]
# [start-definition-jax]
class GaussianModel(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
# [end-definition-jax]
# =============================================================================
# [start-mlp-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, GaussianMixin
# define the model
class MLP(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions),
nn.Tanh())
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum")
# [end-mlp-sequential-torch]
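# a minimal sketch of querying the instantiated policy directly
# (skrl agents do this internally; the random observations below are just for illustration)
states = torch.rand(32, policy.num_observations, device=env.device)
actions, log_prob, outputs = policy.act({"states": states}, role="policy")
# actions: sampled actions, log_prob: log-probabilities (reduced according to `reduction`),
# outputs: dictionary with extra values such as the mean actions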
# [start-mlp-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, GaussianMixin
# define the model
class MLP(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.fc1 = nn.Linear(self.num_observations, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
x = self.fc1(inputs["states"])
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return torch.tanh(x), self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum")
# [end-mlp-functional-torch]
# [start-mlp-setup-jax]
import jax.numpy as jnp
import flax.linen as nn
from skrl.models.jax import Model, GaussianMixin
# define the model
class MLP(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
def setup(self):
self.fc1 = nn.Dense(64)
self.fc2 = nn.Dense(32)
self.fc3 = nn.Dense(self.num_actions)
self.log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions))
def __call__(self, inputs, role):
x = self.fc1(inputs["states"])
x = nn.relu(x)
x = self.fc2(x)
x = nn.relu(x)
x = self.fc3(x)
return nn.tanh(x), self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum")
# initialize model's state dict
policy.init_state_dict("policy")
# [end-mlp-setup-jax]
# [start-mlp-compact-jax]
import jax.numpy as jnp
import flax.linen as nn
from skrl.models.jax import Model, GaussianMixin
# define the model
class MLP(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.Dense(64)(inputs["states"])
x = nn.relu(x)
x = nn.Dense(32)(x)
x = nn.relu(x)
x = nn.Dense(self.num_actions)(x)
log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions))
return nn.tanh(x), log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum")
# initialize model's state dict
policy.init_state_dict("policy")
# [end-mlp-compact-jax]
# =============================================================================
# [start-cnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, GaussianMixin
# define the model
class CNN(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.net = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 16),
nn.Tanh(),
nn.Linear(16, 64),
nn.Tanh(),
nn.Linear(64, 32),
nn.Tanh(),
nn.Linear(32, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
# permute (samples, width * height * channels) -> (samples, channels, width, height)
return self.net(inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)), self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum")
# [end-cnn-sequential-torch]
# [start-cnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, GaussianMixin
# define the model
class CNN(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.conv1 = nn.Conv2d(3, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 16)
self.fc3 = nn.Linear(16, 64)
self.fc4 = nn.Linear(64, 32)
self.fc5 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
# permute (samples, width * height * channels) -> (samples, channels, width, height)
x = inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
x = torch.flatten(x, start_dim=1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = torch.tanh(x)
x = self.fc3(x)
x = torch.tanh(x)
x = self.fc4(x)
x = torch.tanh(x)
x = self.fc5(x)
return x, self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum")
# [end-cnn-functional-torch]
# [start-cnn-setup-jax]
import jax.numpy as jnp
import flax.linen as nn
from skrl.models.jax import Model, GaussianMixin
# define the model
class CNN(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
def setup(self):
self.conv1 = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID")
self.conv2 = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID")
self.conv3 = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID")
self.fc1 = nn.Dense(512)
self.fc2 = nn.Dense(16)
self.fc3 = nn.Dense(64)
self.fc4 = nn.Dense(32)
self.fc5 = nn.Dense(self.num_actions)
self.log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions))
def __call__(self, inputs, role):
x = inputs["states"].reshape((-1, *self.observation_space.shape))
x = self.conv1(x)
x = nn.relu(x)
x = self.conv2(x)
x = nn.relu(x)
x = self.conv3(x)
x = nn.relu(x)
x = x.reshape((x.shape[0], -1))
x = self.fc1(x)
x = nn.relu(x)
x = self.fc2(x)
x = nn.tanh(x)
x = self.fc3(x)
x = nn.tanh(x)
x = self.fc4(x)
x = nn.tanh(x)
x = self.fc5(x)
return nn.tanh(x), self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum")
# initialize model's state dict
policy.init_state_dict("policy")
# [end-cnn-setup-jax]
# [start-cnn-compact-jax]
import jax.numpy as jnp
import flax.linen as nn
from skrl.models.jax import Model, GaussianMixin
# define the model
class CNN(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = inputs["states"].reshape((-1, *self.observation_space.shape))
x = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID")(x)
x = nn.relu(x)
x = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID")(x)
x = nn.relu(x)
x = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID")(x)
x = nn.relu(x)
x = x.reshape((x.shape[0], -1))
x = nn.Dense(512)(x)
x = nn.relu(x)
x = nn.Dense(16)(x)
x = nn.tanh(x)
x = nn.Dense(64)(x)
x = nn.tanh(x)
x = nn.Dense(32)(x)
x = nn.tanh(x)
x = nn.Dense(self.num_actions)(x)
log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions))
return nn.tanh(x), log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum")
# initialize model's state dict
policy.init_state_dict("policy")
# [end-cnn-compact-jax]
# =============================================================================
# [start-rnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, GaussianMixin
# define the model
class RNN(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions),
nn.Tanh())
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), self.log_std_parameter, {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-sequential-torch]
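# a minimal rollout-time sketch (skrl agents allocate and carry the RNN states automatically).
# zero-initialized hidden states are built from the sizes reported by get_specification()
policy.eval()  # switch to evaluation mode so compute() takes the rollout branch (sequences of length 1)
rnn_states = [torch.zeros(size, device=env.device) for size in policy.get_specification()["rnn"]["sizes"]]
states = torch.rand(env.num_envs, policy.num_observations, device=env.device)
actions, log_prob, outputs = policy.act({"states": states, "rnn": rnn_states}, role="policy")
rnn_states = outputs["rnn"]  # carry the updated hidden states over to the next step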
# [start-rnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, GaussianMixin
# define the model
class RNN(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return torch.tanh(x), self.log_std_parameter, {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-functional-torch]
# =============================================================================
# [start-gru-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, GaussianMixin
# define the model
class GRU(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions),
nn.Tanh())
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), self.log_std_parameter, {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-sequential-torch]
# [start-gru-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, GaussianMixin
# define the model
class GRU(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return torch.tanh(x), self.log_std_parameter, {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-functional-torch]
# =============================================================================
# [start-lstm-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, GaussianMixin
# define the model
class LSTM(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions),
nn.Tanh())
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), self.log_std_parameter, {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-sequential-torch]
# [start-lstm-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, GaussianMixin
# define the model
class LSTM(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return torch.tanh(x), self.log_std_parameter, {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-functional-torch]
| 41,575 | Python | 41.038423 | 146 | 0.566133 |
Toni-SM/skrl/docs/source/snippets/model_mixin.py | # [start-model-torch]
from typing import Optional, Union, Mapping, Sequence, Tuple, Any
import gym, gymnasium
import torch
from skrl.models.torch import Model
class CustomModel(Model):
def __init__(self,
observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
device: Optional[Union[str, torch.device]] = None) -> None:
"""Custom model
:param observation_space: Observation/state space or shape.
The ``num_observations`` property will contain the size of that space
:type observation_space: int, sequence of int, gym.Space, gymnasium.Space
:param action_space: Action space or shape.
The ``num_actions`` property will contain the size of that space
:type action_space: int, sequence of int, gym.Space, gymnasium.Space
:param device: Device on which a torch tensor is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"``
:type device: str or torch.device, optional
"""
super().__init__(observation_space, action_space, device)
# =====================================
# - define custom attributes and others
# =====================================
def act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Act according to the specified behavior
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function for stochastic models
or None for deterministic models. The third component is a dictionary containing extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dictionary
"""
# ==============================
# - act in response to the state
# ==============================
# [end-model-torch]
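# a concrete (illustrative) sketch of the template above: a deterministic custom model
# that maps observations to actions through a single linear layer
import torch.nn as nn
from skrl.models.torch import Model
class LinearModel(Model):
    def __init__(self, observation_space, action_space, device=None):
        super().__init__(observation_space, action_space, device)
        self.layer = nn.Linear(self.num_observations, self.num_actions)
    def act(self, inputs, role=""):
        # deterministic output: no log-probability (None) and no extra outputs ({})
        return self.layer(inputs["states"]), None, {}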
# [start-model-jax]
from typing import Optional, Union, Mapping, Sequence, Tuple, Any
import gym, gymnasium
import flax
import jaxlib
import jax.numpy as jnp
from skrl.models.jax import Model
class CustomModel(Model):
def __init__(self,
observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
device: Optional[Union[str, jaxlib.xla_extension.Device]] = None,
parent: Optional[Any] = None,
name: Optional[str] = None) -> None:
"""Custom model
:param observation_space: Observation/state space or shape.
The ``num_observations`` property will contain the size of that space
:type observation_space: int, sequence of int, gym.Space, gymnasium.Space
:param action_space: Action space or shape.
The ``num_actions`` property will contain the size of that space
:type action_space: int, sequence of int, gym.Space, gymnasium.Space
:param device: Device on which a jax array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"``
:type device: str or jaxlib.xla_extension.Device, optional
:param parent: The parent Module of this Module (default: ``None``).
It is a Flax reserved attribute
:type parent: str, optional
:param name: The name of this Module (default: ``None``).
It is a Flax reserved attribute
:type name: str, optional
"""
Model.__init__(self, observation_space, action_space, device, parent, name)
# =====================================
# - define custom attributes and others
# =====================================
flax.linen.Module.__post_init__(self)
def act(self,
inputs: Mapping[str, Union[jnp.ndarray, Any]],
role: str = "",
params: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, Union[jnp.ndarray, None], Mapping[str, Union[jnp.ndarray, Any]]]:
"""Act according to the specified behavior
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically jnp.ndarray
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:param params: Parameters used to compute the output (default: ``None``).
If ``None``, internal parameters will be used
:type params: jnp.array
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function.
The third component is a dictionary containing the mean actions ``"mean_actions"``
and extra output values
:rtype: tuple of jnp.ndarray, jnp.ndarray or None, and dictionary
"""
# ==============================
# - act in response to the state
# ==============================
# [end-model-jax]
# =============================================================================
# [start-mixin-torch]
from typing import Union, Mapping, Tuple, Any
import torch
class CustomMixin:
def __init__(self, role: str = "") -> None:
"""Custom mixin
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
"""
# =====================================
# - define custom attributes and others
# =====================================
def act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Act according to the specified behavior
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function for stochastic models
or None for deterministic models. The third component is a dictionary containing extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dictionary
"""
# ==============================
# - act in response to the state
# ==============================
# [end-mixin-torch]
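# a minimal sketch of how such a mixin is typically combined with a Model subclass
# (the same pattern used by skrl's built-in mixins such as DeterministicMixin)
from skrl.models.torch import Model
class CustomModel(CustomMixin, Model):
    def __init__(self, observation_space, action_space, device=None):
        Model.__init__(self, observation_space, action_space, device)
        CustomMixin.__init__(self, role="")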
# [start-mixin-jax]
from typing import Optional, Union, Mapping, Tuple, Any
import flax
import jax.numpy as jnp
class CustomMixin:
def __init__(self, role: str = "") -> None:
"""Custom mixin
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
"""
# =====================================
# - define custom attributes and others
# =====================================
flax.linen.Module.__post_init__(self)
def act(self,
inputs: Mapping[str, Union[jnp.ndarray, Any]],
role: str = "",
params: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, Union[jnp.ndarray, None], Mapping[str, Union[jnp.ndarray, Any]]]:
"""Act according to the specified behavior
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically jnp.ndarray
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:param params: Parameters used to compute the output (default: ``None``).
If ``None``, internal parameters will be used
:type params: jnp.array
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function.
The third component is a dictionary containing the mean actions ``"mean_actions"``
and extra output values
:rtype: tuple of jnp.ndarray, jnp.ndarray or None, and dictionary
"""
# ==============================
# - act in response to the state
# ==============================
# [end-mixin-jax]
| 9,778 | Python | 43.857798 | 137 | 0.562896 |
Toni-SM/skrl/docs/source/snippets/multivariate_gaussian_model.py | # [start-definition-torch]
class MultivariateGaussianModel(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device=None,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
# [end-definition-torch]
# =============================================================================
# [start-mlp-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class MLP(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions),
nn.Tanh())
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2)
# [end-mlp-sequential-torch]
# [start-mlp-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class MLP(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.fc1 = nn.Linear(self.num_observations, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
x = self.fc1(inputs["states"])
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return torch.tanh(x), self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2)
# [end-mlp-functional-torch]
# =============================================================================
# [start-cnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class CNN(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.net = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 16),
nn.Tanh(),
nn.Linear(16, 64),
nn.Tanh(),
nn.Linear(64, 32),
nn.Tanh(),
nn.Linear(32, self.num_actions))
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
# permute (samples, width * height * channels) -> (samples, channels, width, height)
return self.net(inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)), self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2)
# [end-cnn-sequential-torch]
# [start-cnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class CNN(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device,
clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.conv1 = nn.Conv2d(3, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 16)
self.fc3 = nn.Linear(16, 64)
self.fc4 = nn.Linear(64, 32)
self.fc5 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def compute(self, inputs, role):
# permute (samples, width * height * channels) -> (samples, channels, width, height)
x = inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
x = torch.flatten(x, start_dim=1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = torch.tanh(x)
x = self.fc3(x)
x = torch.tanh(x)
x = self.fc4(x)
x = torch.tanh(x)
x = self.fc5(x)
return x, self.log_std_parameter, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2)
# [end-cnn-functional-torch]
# =============================================================================
# [start-rnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class RNN(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions),
nn.Tanh())
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), self.log_std_parameter, {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-sequential-torch]
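# Usage sketch (an assumption, not part of the snippet above): a single rollout
# step done by hand to illustrate the expected input/output layout. skrl's
# RNN-capable agents manage the hidden states automatically; this also assumes
# the wrapped environment follows the gymnasium API (reset() -> (states, infos)).
import torch

policy.eval()  # so that compute() takes the rollout branch (self.training == False)
sizes = policy.get_specification()["rnn"]["sizes"]
rnn_states = [torch.zeros(size, device=env.device) for size in sizes]  # (D * num_layers, N, Hout)
states, infos = env.reset()
actions, log_prob, outputs = policy.act({"states": states, "rnn": rnn_states}, role="policy")
rnn_states = outputs["rnn"]  # carry the updated hidden states to the next step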
# [start-rnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class RNN(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return torch.tanh(x), self.log_std_parameter, {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-functional-torch]
# =============================================================================
# [start-gru-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class GRU(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions),
nn.Tanh())
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), self.log_std_parameter, {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-sequential-torch]
# [start-gru-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class GRU(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return torch.tanh(x), self.log_std_parameter, {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-functional-torch]
# =============================================================================
# [start-lstm-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class LSTM(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions),
nn.Tanh())
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), self.log_std_parameter, {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-sequential-torch]
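# Note (editorial): unlike the RNN/GRU models above, the LSTM specification
# reports two tensors, so inputs["rnn"] must provide both the hidden and the
# cell states, and compute() returns both of them back, e.g. (sketch):
#   sizes = policy.get_specification()["rnn"]["sizes"]   # [(L, N, Hout), (L, N, Hcell)]
#   rnn_states = [torch.zeros(size, device=env.device) for size in sizes]
#   _, _, outputs = policy.act({"states": states, "rnn": rnn_states}, role="policy")
#   rnn_states = outputs["rnn"]                           # [hidden_states, cell_states]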
# [start-lstm-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultivariateGaussianMixin
# define the model
class LSTM(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, self.num_actions)
self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return torch.tanh(x), self.log_std_parameter, {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=True,
clip_log_std=True,
min_log_std=-20,
max_log_std=2,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-functional-torch]
| 34,003 | Python | 42.876129 | 146 | 0.563186 |
Toni-SM/skrl/docs/source/snippets/multi_agents_basic_usage.py | # [start-ippo-torch]
# import the agent and its default configuration
from skrl.multi_agents.torch.ippo import IPPO, IPPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
for agent_name in env.possible_agents:
models[agent_name] = {}
models[agent_name]["policy"] = ...
models[agent_name]["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = IPPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memories <memories>)
agent = IPPO(possible_agents=env.possible_agents,
models=models,
memory=memories, # only required during training
cfg=cfg_agent,
observation_spaces=env.observation_spaces,
action_spaces=env.action_spaces,
device=env.device)
# [end-ippo-torch]
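# Sketch of the assumed <memories> argument (defined before instantiating the
# agent): one rollout memory per agent, keyed by agent name. The memory class
# and size below are illustrative; any skrl memory can be used.
from skrl.memories.torch import RandomMemory

memories = {}
for agent_name in env.possible_agents:
    memories[agent_name] = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=env.device)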
# [start-ippo-jax]
# import the agent and its default configuration
from skrl.multi_agents.jax.ippo import IPPO, IPPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
for agent_name in env.possible_agents:
models[agent_name] = {}
models[agent_name]["policy"] = ...
models[agent_name]["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = IPPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memories <memories>)
agent = IPPO(possible_agents=env.possible_agents,
models=models,
memory=memories, # only required during training
cfg=cfg_agent,
observation_spaces=env.observation_spaces,
action_spaces=env.action_spaces,
device=env.device)
# [end-ippo-jax]
# [start-mappo-torch]
# import the agent and its default configuration
from skrl.multi_agents.torch.mappo import MAPPO, MAPPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
for agent_name in env.possible_agents:
models[agent_name] = {}
models[agent_name]["policy"] = ...
models[agent_name]["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = MAPPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memories <memories>)
agent = MAPPO(possible_agents=env.possible_agents,
models=models,
memory=memories, # only required during training
cfg=cfg_agent,
observation_spaces=env.observation_spaces,
action_spaces=env.action_spaces,
device=env.device,
shared_observation_spaces=env.shared_observation_spaces)
# [end-mappo-torch]
# [start-mappo-jax]
# import the agent and its default configuration
from skrl.multi_agents.jax.mappo import MAPPO, MAPPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
for agent_name in env.possible_agents:
models[agent_name] = {}
models[agent_name]["policy"] = ...
models[agent_name]["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = MAPPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memories <memories>)
agent = MAPPO(possible_agents=env.possible_agents,
models=models,
memory=memories, # only required during training
cfg=cfg_agent,
observation_spaces=env.observation_spaces,
action_spaces=env.action_spaces,
device=env.device,
shared_observation_spaces=env.shared_observation_spaces)
# [end-mappo-jax]
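# Note (editorial): compared to IPPO, MAPPO additionally consumes a global/shared
# observation for its centralized value function, hence the extra
# shared_observation_spaces argument taken from the wrapped multi-agent environment.
# Training then proceeds as in the single-agent case, e.g. (sketch, torch shown;
# skrl.trainers.jax.SequentialTrainer is the analogous choice for the JAX blocks):
#   from skrl.trainers.torch import SequentialTrainer
#   trainer = SequentialTrainer(cfg={"timesteps": 100000, "headless": True}, env=env, agents=agent)
#   trainer.train()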
| 3,674 | Python | 32.715596 | 70 | 0.66957 |
Toni-SM/skrl/docs/source/snippets/categorical_model.py | # [start-definition-torch]
class CategoricalModel(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
# [end-definition-torch]
# [start-definition-jax]
class CategoricalModel(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
CategoricalMixin.__init__(self, unnormalized_log_prob)
# [end-definition-jax]
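# Clarification (editorial): with unnormalized_log_prob=True the mixin interprets
# the model's output as unnormalized logits (any real number); with False it is
# expected to be a normalized probability distribution over the actions. In the
# PyTorch implementation this corresponds roughly to building
#   torch.distributions.Categorical(logits=net_output)   # unnormalized_log_prob=True
#   torch.distributions.Categorical(probs=net_output)    # unnormalized_log_prob=False
# and the JAX implementation behaves analogously.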
# =============================================================================
# [start-mlp-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, CategoricalMixin
# define the model
class MLP(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True)
# [end-mlp-sequential-torch]
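# Usage sketch (an assumption, not part of the snippet above): querying the
# discrete policy with a random observation batch. Actions are integer indices
# into the action space and log_prob holds their log-probabilities.
import torch

states = torch.rand((env.num_envs, policy.num_observations), device=env.device)
actions, log_prob, outputs = policy.act({"states": states}, role="policy")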
# [start-mlp-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, CategoricalMixin
# define the model
class MLP(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.fc1 = nn.Linear(self.num_observations, 64)
self.fc2 = nn.Linear(64, 32)
self.logits = nn.Linear(32, self.num_actions)
def compute(self, inputs, role):
x = self.fc1(inputs["states"])
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.logits(x), {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True)
# [end-mlp-functional-torch]
# [start-mlp-setup-jax]
import flax.linen as nn
from skrl.models.jax import Model, CategoricalMixin
# define the model
class MLP(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
CategoricalMixin.__init__(self, unnormalized_log_prob)
def setup(self):
self.fc1 = nn.Dense(64)
self.fc2 = nn.Dense(32)
self.fc3 = nn.Dense(self.num_actions)
def __call__(self, inputs, role):
x = self.fc1(inputs["states"])
x = nn.relu(x)
x = self.fc2(x)
x = nn.relu(x)
x = self.fc3(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True)
# initialize model's state dict
policy.init_state_dict("policy")
# [end-mlp-setup-jax]
# [start-mlp-compact-jax]
import flax.linen as nn
from skrl.models.jax import Model, CategoricalMixin
# define the model
class MLP(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
CategoricalMixin.__init__(self, unnormalized_log_prob)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.Dense(64)(inputs["states"])
x = nn.relu(x)
x = nn.Dense(32)(x)
x = nn.relu(x)
x = nn.Dense(self.num_actions)(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True)
# initialize model's state dict
policy.init_state_dict("policy")
# [end-mlp-compact-jax]
# =============================================================================
# [start-cnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, CategoricalMixin
# define the model
class CNN(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.net = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 16),
nn.Tanh(),
nn.Linear(16, 64),
nn.Tanh(),
nn.Linear(64, 32),
nn.Tanh(),
nn.Linear(32, self.num_actions))
def compute(self, inputs, role):
        # restore the image shape from the flattened observations and move the channels first: (samples, height * width * channels) -> (samples, channels, height, width)
return self.net(inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)), {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True)
# [end-cnn-sequential-torch]
# [start-cnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, CategoricalMixin
# define the model
class CNN(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.conv1 = nn.Conv2d(3, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 16)
self.fc3 = nn.Linear(16, 64)
self.fc4 = nn.Linear(64, 32)
self.fc5 = nn.Linear(32, self.num_actions)
def compute(self, inputs, role):
        # restore the image shape from the flattened observations and move the channels first: (samples, height * width * channels) -> (samples, channels, height, width)
x = inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
x = torch.flatten(x, start_dim=1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = torch.tanh(x)
x = self.fc3(x)
x = torch.tanh(x)
x = self.fc4(x)
x = torch.tanh(x)
x = self.fc5(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True)
# [end-cnn-functional-torch]
# [start-cnn-setup-jax]
import flax.linen as nn
from skrl.models.jax import Model, CategoricalMixin
# define the model
class CNN(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
CategoricalMixin.__init__(self, unnormalized_log_prob)
def setup(self):
self.conv1 = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID")
self.conv2 = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID")
self.conv3 = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID")
self.fc1 = nn.Dense(512)
self.fc2 = nn.Dense(16)
self.fc3 = nn.Dense(64)
self.fc4 = nn.Dense(32)
self.fc5 = nn.Dense(self.num_actions)
def __call__(self, inputs, role):
x = inputs["states"].reshape((-1, *self.observation_space.shape))
x = self.conv1(x)
x = nn.relu(x)
x = self.conv2(x)
x = nn.relu(x)
x = self.conv3(x)
x = nn.relu(x)
x = x.reshape((x.shape[0], -1))
x = self.fc1(x)
x = nn.relu(x)
x = self.fc2(x)
x = nn.tanh(x)
x = self.fc3(x)
x = nn.tanh(x)
x = self.fc4(x)
x = nn.tanh(x)
x = self.fc5(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True)
# initialize model's state dict
policy.init_state_dict("policy")
# [end-cnn-setup-jax]
# [start-cnn-compact-jax]
import flax.linen as nn
from skrl.models.jax import Model, CategoricalMixin
# define the model
class CNN(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
CategoricalMixin.__init__(self, unnormalized_log_prob)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = inputs["states"].reshape((-1, *self.observation_space.shape))
x = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID")(x)
x = nn.relu(x)
x = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID")(x)
x = nn.relu(x)
x = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID")(x)
x = nn.relu(x)
x = x.reshape((x.shape[0], -1))
x = nn.Dense(512)(x)
x = nn.relu(x)
x = nn.Dense(16)(x)
x = nn.tanh(x)
x = nn.Dense(64)(x)
x = nn.tanh(x)
x = nn.Dense(32)(x)
x = nn.tanh(x)
x = nn.Dense(self.num_actions)(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True)
# initialize model's state dict
policy.init_state_dict("policy")
# [end-cnn-compact-jax]
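# Note (editorial): the PyTorch CNN snippets above permute the observations to
# channels-first (N, C, H, W) because torch.nn.Conv2d expects that layout, while
# the Flax versions only reshape to (N, H, W, C), since flax.linen.Conv works on
# channels-last inputs by default. The 1024 input features of the first dense
# layer assume a 64x64x3 image observation.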
# =============================================================================
# [start-rnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, CategoricalMixin
# define the model
class RNN(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-sequential-torch]
# [start-rnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, CategoricalMixin
# define the model
class RNN(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.logits = nn.Linear(32, self.num_actions)
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.logits(x), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-functional-torch]
# =============================================================================
# [start-gru-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, CategoricalMixin
# define the model
class GRU(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-sequential-torch]
# [start-gru-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, CategoricalMixin
# define the model
class GRU(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.logits = nn.Linear(32, self.num_actions)
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.logits(x), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-functional-torch]
# =============================================================================
# [start-lstm-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, CategoricalMixin
# define the model
class LSTM(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-sequential-torch]
# [start-lstm-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, CategoricalMixin
# define the model
class LSTM(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.logits = nn.Linear(32, self.num_actions)
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.logits(x), {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-functional-torch]
| 36,351 | Python | 40.356086 | 146 | 0.572556 |
Toni-SM/skrl/docs/source/snippets/agents_basic_usage.py | # [torch-start-a2c]
# import the agent and its default configuration
from skrl.agents.torch.a2c import A2C, A2C_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = A2C_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = A2C(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-a2c]
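# Training sketch (an assumption, not part of the snippet above): once
# instantiated, any of the agents in this file is trained by passing it,
# together with the environment, to a trainer.
from skrl.trainers.torch import SequentialTrainer

cfg_trainer = {"timesteps": 100000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
trainer.train()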
# [jax-start-a2c]
# import the agent and its default configuration
from skrl.agents.jax.a2c import A2C, A2C_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = A2C_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = A2C(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [jax-end-a2c]
# [torch-start-a2c-rnn]
# import the agent and its default configuration
from skrl.agents.torch.a2c import A2C_RNN as A2C, A2C_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = A2C_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = A2C(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-a2c-rnn]
# =============================================================================
# [torch-start-amp]
# import the agent and its default configuration
from skrl.agents.torch.amp import AMP, AMP_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
models["discriminator"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = AMP_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
# (assuming defined memories for the motion dataset <motion_dataset> and the reply buffer <reply_buffer>)
# (assuming defined methods to collect reference motions <collect_reference_motions> and observations <collect_observation>)
agent = AMP(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
amp_observation_space=env.amp_observation_space,
motion_dataset=motion_dataset,
reply_buffer=reply_buffer,
collect_reference_motions=collect_reference_motions,
collect_observation=collect_observation)
# [torch-end-amp]
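# Sketch of the assumed auxiliary objects (sizes are illustrative): the motion
# dataset and the reply buffer are regular skrl memories (defined before
# instantiating the agent), while collect_reference_motions and
# collect_observation are task-specific callables providing AMP observations.
from skrl.memories.torch import RandomMemory

motion_dataset = RandomMemory(memory_size=200000, device=env.device)
reply_buffer = RandomMemory(memory_size=1000000, device=env.device)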
# =============================================================================
# [torch-start-cem]
# import the agent and its default configuration
from skrl.agents.torch.cem import CEM, CEM_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
# adjust some configuration if necessary
cfg_agent = CEM_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = CEM(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-cem]
# [jax-start-cem]
# import the agent and its default configuration
from skrl.agents.jax.cem import CEM, CEM_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
# adjust some configuration if necessary
cfg_agent = CEM_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = CEM(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [jax-end-cem]
# =============================================================================
# [torch-start-ddpg]
# import the agent and its default configuration
from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["target_policy"] = ... # only required during training
models["critic"] = ... # only required during training
models["target_critic"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = DDPG_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = DDPG(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-ddpg]
# [jax-start-ddpg]
# import the agent and its default configuration
from skrl.agents.jax.ddpg import DDPG, DDPG_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["target_policy"] = ... # only required during training
models["critic"] = ... # only required during training
models["target_critic"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = DDPG_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = DDPG(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [jax-end-ddpg]
# [torch-start-ddpg-rnn]
# import the agent and its default configuration
from skrl.agents.torch.ddpg import DDPG_RNN as DDPG, DDPG_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["target_policy"] = ... # only required during training
models["critic"] = ... # only required during training
models["target_critic"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = DDPG_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = DDPG(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-ddpg-rnn]
# =============================================================================
# [torch-start-ddqn]
# import the agent and its default configuration
from skrl.agents.torch.dqn import DDQN, DDQN_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["q_network"] = ...
models["target_q_network"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = DDQN_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = DDQN(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-ddqn]
# [jax-start-ddqn]
# import the agent and its default configuration
from skrl.agents.jax.dqn import DDQN, DDQN_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["q_network"] = ...
models["target_q_network"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = DDQN_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = DDQN(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [jax-end-ddqn]
# =============================================================================
# [torch-start-dqn]
# import the agent and its default configuration
from skrl.agents.torch.dqn import DQN, DQN_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["q_network"] = ...
models["target_q_network"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = DQN_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = DQN(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-dqn]
# [jax-start-dqn]
# import the agent and its default configuration
from skrl.agents.jax.dqn import DQN, DQN_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["q_network"] = ...
models["target_q_network"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = DQN_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = DQN(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [jax-end-dqn]
# =============================================================================
# [torch-start-ppo]
# import the agent and its default configuration
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = PPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = PPO(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-ppo]
# [jax-start-ppo]
# import the agent and its default configuration
from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = PPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = PPO(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [jax-end-ppo]
# [torch-start-ppo-rnn]
# import the agent and its default configuration
from skrl.agents.torch.ppo import PPO_RNN as PPO, PPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = PPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = PPO(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-ppo-rnn]
# =============================================================================
# [torch-start-q-learning]
# import the agent and its default configuration
from skrl.agents.torch.q_learning import Q_LEARNING, Q_LEARNING_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
# adjust some configuration if necessary
cfg_agent = Q_LEARNING_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env>)
agent = Q_LEARNING(models=models,
memory=None,
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-q-learning]
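# The Q-Learning agent expects a tabular policy. A minimal epsilon-greedy sketch, following
# skrl's tabular examples (assumptions: discrete observation/action spaces, and that storing
# the Q-table as a tensor attribute is enough for the agent to locate and update it)
import torch
from skrl.models.torch import Model, TabularMixin
class EpsilonGreedyPolicy(TabularMixin, Model):
    def __init__(self, observation_space, action_space, device, num_envs=1, epsilon=0.1):
        Model.__init__(self, observation_space, action_space, device)
        TabularMixin.__init__(self, num_envs)
        self.epsilon = epsilon
        # one Q-table per environment: (num_envs, number of states, number of actions)
        self.q_table = torch.ones((num_envs, self.num_observations, self.num_actions),
                                  dtype=torch.float32, device=self.device)
    def compute(self, inputs, role):
        states = inputs["states"]
        # greedy actions from the Q-table
        actions = torch.argmax(self.q_table[torch.arange(self.num_envs, device=self.device).view(-1, 1), states],
                               dim=-1, keepdim=True).view(-1, 1)
        # replace some actions with random ones for exploration
        explore = torch.rand(states.shape[0], device=self.device) < self.epsilon
        actions[explore] = torch.randint(self.num_actions, (int(explore.sum()), 1), device=self.device)
        return actions, {}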
# =============================================================================
# [torch-start-rpo-with-rpo]
class Policy(GaussianMixin, Model):
...
def compute(self, inputs, role):
# compute the mean actions using the neural network
mean_actions = self.net(inputs["states"])
# perturb the mean actions by adding a randomized uniform sample
rpo_alpha = inputs["alpha"]
perturbation = torch.zeros_like(mean_actions).uniform_(-rpo_alpha, rpo_alpha)
mean_actions += perturbation
return mean_actions, self.log_std_parameter, {}
# [torch-end-rpo-with-rpo]
# [jax-start-rpo-with-rpo]
class Policy(GaussianMixin, Model):
...
def __call__(self, inputs, role):
# compute the mean actions using the neural network
mean_actions = ...
log_std = ...
# perturb the mean actions by adding a randomized uniform sample
rpo_alpha = inputs["alpha"]
perturbation = jax.random.uniform(inputs["key"], mean_actions.shape, minval=-rpo_alpha, maxval=rpo_alpha)
mean_actions += perturbation
return mean_actions, log_std, {}
# [jax-end-rpo-with-rpo]
# [torch-start-rpo-without-rpo]
class Policy(GaussianMixin, Model):
...
def compute(self, inputs, role):
# compute the mean actions using the neural network
mean_actions = self.net(inputs["states"])
# perturb the mean actions by adding a randomized uniform sample
rpo_alpha = 0.5
perturbation = torch.zeros_like(mean_actions).uniform_(-rpo_alpha, rpo_alpha)
mean_actions += perturbation
return mean_actions, self.log_std_parameter, {}
# [torch-end-rpo-without-rpo]
# [jax-start-rpo-without-rpo]
class Policy(GaussianMixin, Model):
...
def __call__(self, inputs, role):
# compute the mean actions using the neural network
mean_actions = ...
log_std = ...
# perturb the mean actions by adding a randomized uniform sample
rpo_alpha = 0.5
perturbation = jax.random.uniform(inputs["key"], mean_actions.shape, minval=-rpo_alpha, maxval=rpo_alpha)
mean_actions += perturbation
return mean_actions, log_std, {}
# [jax-end-rpo-without-rpo]
# [torch-start-rpo]
# import the agent and its default configuration
from skrl.agents.torch.rpo import RPO, RPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = RPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = RPO(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-rpo]
# [jax-start-rpo]
# import the agent and its default configuration
from skrl.agents.jax.rpo import RPO, RPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = RPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = RPO(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [jax-end-rpo]
# [torch-start-rpo-rnn]
# import the agent and its default configuration
from skrl.agents.torch.rpo import RPO_RNN as RPO, RPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = RPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = RPO(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-rpo-rnn]
# =============================================================================
# [torch-start-sac]
# import the agent and its default configuration
from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["critic_1"] = ... # only required during training
models["critic_2"] = ... # only required during training
models["target_critic_1"] = ... # only required during training
models["target_critic_2"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = SAC_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = SAC(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-sac]
# [jax-start-sac]
# import the agent and its default configuration
from skrl.agents.jax.sac import SAC, SAC_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["critic_1"] = ... # only required during training
models["critic_2"] = ... # only required during training
models["target_critic_1"] = ... # only required during training
models["target_critic_2"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = SAC_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = SAC(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [jax-end-sac]
# [torch-start-sac-rnn]
# import the agent and its default configuration
from skrl.agents.torch.sac import SAC_RNN as SAC, SAC_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["critic_1"] = ... # only required during training
models["critic_2"] = ... # only required during training
models["target_critic_1"] = ... # only required during training
models["target_critic_2"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = SAC_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = SAC(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-sac-rnn]
# =============================================================================
# [torch-start-sarsa]
# import the agent and its default configuration
from skrl.agents.torch.sarsa import SARSA, SARSA_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
# adjust some configuration if necessary
cfg_agent = SARSA_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env>)
agent = SARSA(models=models,
memory=None,
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-sarsa]
# =============================================================================
# [torch-start-td3]
# import the agent and its default configuration
from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["target_policy"] = ... # only required during training
models["critic_1"] = ... # only required during training
models["critic_2"] = ... # only required during training
models["target_critic_1"] = ... # only required during training
models["target_critic_2"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = TD3_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = TD3(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-td3]
# [jax-start-td3]
# import the agent and its default configuration
from skrl.agents.jax.td3 import TD3, TD3_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["target_policy"] = ... # only required during training
models["critic_1"] = ... # only required during training
models["critic_2"] = ... # only required during training
models["target_critic_1"] = ... # only required during training
models["target_critic_2"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = TD3_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = TD3(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [jax-end-td3]
# [torch-start-td3-rnn]
# import the agent and its default configuration
from skrl.agents.torch.td3 import TD3_RNN as TD3, TD3_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["target_policy"] = ... # only required during training
models["critic_1"] = ... # only required during training
models["critic_2"] = ... # only required during training
models["target_critic_1"] = ... # only required during training
models["target_critic_2"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = TD3_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = TD3(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-td3-rnn]
# =============================================================================
# [torch-start-trpo]
# import the agent and its default configuration
from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = TRPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = TRPO(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-trpo]
# [torch-start-trpo-rnn]
# import the agent and its default configuration
from skrl.agents.torch.trpo import TRPO_RNN as TRPO, TRPO_DEFAULT_CONFIG
# instantiate the agent's models
models = {}
models["policy"] = ...
models["value"] = ... # only required during training
# adjust some configuration if necessary
cfg_agent = TRPO_DEFAULT_CONFIG.copy()
cfg_agent["<KEY>"] = ...
# instantiate the agent
# (assuming a defined environment <env> and memory <memory>)
agent = TRPO(models=models,
memory=memory, # only required during training
cfg=cfg_agent,
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# [torch-end-trpo-rnn]
| 25,726 | Python | 30.840346 | 113 | 0.645378 |
Toni-SM/skrl/docs/source/snippets/memories.py | # [start-base-class-torch]
from typing import Union, Tuple, List
import torch
from skrl.memories.torch import Memory
class CustomMemory(Memory):
def __init__(self, memory_size: int, num_envs: int = 1, device: Union[str, torch.device] = "cuda:0") -> None:
"""Custom memory
:param memory_size: Maximum number of elements in the first dimension of each internal storage
:type memory_size: int
:param num_envs: Number of parallel environments (default: 1)
:type num_envs: int, optional
:param device: Device on which a torch tensor is or will be allocated (default: "cuda:0")
:type device: str or torch.device, optional
"""
super().__init__(memory_size, num_envs, device)
def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1) -> List[List[torch.Tensor]]:
"""Sample a batch from memory
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
        :param batch_size: Number of elements to sample
:type batch_size: int
:param mini_batches: Number of mini-batches to sample (default: 1)
:type mini_batches: int, optional
:return: Sampled data from tensors sorted according to their position in the list of names.
The sampled tensors will have the following shape: (batch size, data size)
:rtype: list of torch.Tensor list
"""
# ================================
# - sample a batch from memory.
# It is possible to generate only the sampling indexes and call self.sample_by_index(...)
# ================================
# [end-base-class-torch]
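# A minimal concrete sketch of the sampling logic described above (assumptions: uniform
# random sampling with replacement over the stored transitions is acceptable, and the
# base class helpers len(self) and self.sample_by_index(...) behave as in skrl's memories)
class UniformSamplingMemory(CustomMemory):
    def sample(self, names, batch_size, mini_batches=1):
        # generate the sampling indexes and delegate the actual gathering to the base class
        indexes = torch.randint(0, len(self), (batch_size,), device=self.device)
        return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)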
# [start-base-class-jax]
from typing import Optional, Union, Tuple, List
import jaxlib
import jax.numpy as jnp
from skrl.memories.jax import Memory
class CustomMemory(Memory):
def __init__(self, memory_size: int,
num_envs: int = 1,
device: Optional[jaxlib.xla_extension.Device] = None) -> None:
"""Custom memory
:param memory_size: Maximum number of elements in the first dimension of each internal storage
:type memory_size: int
:param num_envs: Number of parallel environments (default: 1)
:type num_envs: int, optional
:param device: Device on which an array is or will be allocated (default: None)
:type device: jaxlib.xla_extension.Device, optional
"""
super().__init__(memory_size, num_envs, device)
def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1) -> List[List[jnp.ndarray]]:
"""Sample a batch from memory
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
        :param batch_size: Number of elements to sample
:type batch_size: int
:param mini_batches: Number of mini-batches to sample (default: 1)
:type mini_batches: int, optional
:return: Sampled data from tensors sorted according to their position in the list of names.
The sampled tensors will have the following shape: (batch size, data size)
:rtype: list of jnp.ndarray list
"""
# ================================
# - sample a batch from memory.
# It is possible to generate only the sampling indexes and call self.sample_by_index(...)
# ================================
# [end-base-class-jax]
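# The equivalent sketch for the JAX memory (assumptions: sample_by_index and len(self)
# are available as in the torch implementation, and numpy indexes are accepted)
import numpy as np
class UniformSamplingMemory(CustomMemory):
    def sample(self, names, batch_size, mini_batches=1):
        # generate random indexes (with replacement) and gather the data through the base class
        indexes = np.random.randint(0, len(self), size=batch_size)
        return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)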
# =============================================================================
# [start-random-torch]
# import the memory class
from skrl.memories.torch import RandomMemory
# instantiate the memory (assumes there is a wrapped environment: env)
memory = RandomMemory(memory_size=1000, num_envs=env.num_envs, device=env.device)
# [end-random-torch]
# [start-random-jax]
# import the memory class
from skrl.memories.jax import RandomMemory
# instantiate the memory (assumes there is a wrapped environment: env)
memory = RandomMemory(memory_size=1000, num_envs=env.num_envs, device=env.device)
# [end-random-jax]
| 4,112 | Python | 38.171428 | 113 | 0.625486 |
Toni-SM/skrl/docs/source/snippets/data.py | # [start-tensorboard-configuration]
DEFAULT_CONFIG = {
# ...
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-tensorboard-configuration]
# [start-wandb-configuration]
DEFAULT_CONFIG = {
# ...
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-wandb-configuration]
# [start-checkpoint-configuration]
DEFAULT_CONFIG = {
# ...
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-checkpoint-configuration]
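# A minimal sketch of adjusting the experiment block shown above (assumptions: cfg is an
# agent configuration dict such as PPO_DEFAULT_CONFIG.copy(), and the directory, experiment
# and wandb project names below are placeholders)
cfg["experiment"]["directory"] = "./runs"
cfg["experiment"]["experiment_name"] = "my-experiment"
cfg["experiment"]["write_interval"] = 500
cfg["experiment"]["checkpoint_interval"] = 5000
cfg["experiment"]["wandb"] = True
cfg["experiment"]["wandb_kwargs"] = {"project": "my-project"}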
# [start-checkpoint-load-agent-torch]
from skrl.agents.torch.ppo import PPO
# Instantiate the agent
agent = PPO(models=models, # models dict
memory=memory, # memory instance, or None if not required
cfg=agent_cfg, # configuration dict (preprocessors, learning rate schedulers, etc.)
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# Load the checkpoint
agent.load("./runs/22-09-29_22-48-49-816281_DDPG/checkpoints/agent_1200.pt")
# [end-checkpoint-load-agent-torch]
# [start-checkpoint-load-agent-jax]
from skrl.agents.jax.ppo import PPO
# Instantiate the agent
agent = PPO(models=models, # models dict
memory=memory, # memory instance, or None if not required
cfg=agent_cfg, # configuration dict (preprocessors, learning rate schedulers, etc.)
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# Load the checkpoint
agent.load("./runs/22-09-29_22-48-49-816281_DDPG/checkpoints/agent_1200.pickle")
# [end-checkpoint-load-agent-jax]
# [start-checkpoint-load-model-torch]
import torch.nn as nn
from skrl.models.torch import Model, DeterministicMixin
# Define the model
class Policy(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
# Instantiate the model
policy = Policy(env.observation_space, env.action_space, env.device, clip_actions=True)
# Load the checkpoint
policy.load("./runs/22-09-29_22-48-49-816281_DDPG/checkpoints/2500_policy.pt")
# [end-checkpoint-load-model-torch]
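# A minimal usage sketch for the loaded model (assumption: <states> is a batch of observations
# from a wrapped environment; the first element of the returned tuple contains the actions)
actions, _, _ = policy.act({"states": states}, role="policy")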
# [start-checkpoint-load-model-jax]
import flax.linen as nn
from skrl.models.jax import Model, DeterministicMixin
# Define the model
class Policy(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.Dense(32)(inputs["states"])
x = nn.relu(x)
x = nn.Dense(32)(x)
x = nn.relu(x)
x = nn.Dense(self.num_actions)(x)
return x, {}
# Instantiate the model
policy = Policy(env.observation_space, env.action_space, env.device, clip_actions=True)
# Load the checkpoint
policy.load("./runs/22-09-29_22-48-49-816281_DDPG/checkpoints/2500_policy.pickle")
# [end-checkpoint-load-model-jax]
# [start-checkpoint-load-huggingface-torch]
from skrl.agents.torch.ppo import PPO
from skrl.utils.huggingface import download_model_from_huggingface
# Instantiate the agent
agent = PPO(models=models, # models dict
memory=memory, # memory instance, or None if not required
cfg=agent_cfg, # configuration dict (preprocessors, learning rate schedulers, etc.)
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# Load the checkpoint from Hugging Face Hub
path = download_model_from_huggingface("skrl/OmniIsaacGymEnvs-Cartpole-PPO", filename="agent.pt")
agent.load(path)
# [end-checkpoint-load-huggingface-torch]
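# A minimal evaluation sketch for the downloaded agent (assumptions: a wrapped environment
# <env> is available and 1000 timesteps are enough for the evaluation run)
from skrl.trainers.torch import SequentialTrainer
trainer = SequentialTrainer(env=env, agents=agent, cfg={"timesteps": 1000, "headless": True})
trainer.eval()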
# [start-checkpoint-load-huggingface-jax]
from skrl.agents.jax.ppo import PPO
from skrl.utils.huggingface import download_model_from_huggingface
# Instantiate the agent
agent = PPO(models=models, # models dict
memory=memory, # memory instance, or None if not required
cfg=agent_cfg, # configuration dict (preprocessors, learning rate schedulers, etc.)
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# Load the checkpoint from Hugging Face Hub
path = download_model_from_huggingface("skrl/OmniIsaacGymEnvs-Cartpole-PPO", filename="agent.pickle")
agent.load(path)
# [end-checkpoint-load-huggingface-jax]
# [start-checkpoint-migrate-agent-torch]
from skrl.agents.torch.ppo import PPO
# Instantiate the agent
agent = PPO(models=models, # models dict
memory=memory, # memory instance, or None if not required
cfg=agent_cfg, # configuration dict (preprocessors, learning rate schedulers, etc.)
observation_space=env.observation_space,
action_space=env.action_space,
device=env.device)
# Migrate a rl_games checkpoint
agent.migrate(path="./runs/Cartpole/nn/Cartpole.pth")
# [end-checkpoint-migrate-agent-torch]
# [start-checkpoint-migrate-model-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, DeterministicMixin
# Define the model
class Policy(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
nn.ReLU(),
nn.Linear(32, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
# Instantiate the model
policy = Policy(env.observation_space, env.action_space, env.device, clip_actions=True)
# Migrate a rl_games checkpoint (only the model)
policy.migrate(path="./runs/Cartpole/nn/Cartpole.pth")
# or migrate a stable-baselines3 checkpoint
policy.migrate(path="./ddpg_pendulum.zip")
# or migrate a checkpoint of any other library
state_dict = torch.load("./external_model.pt")
policy.migrate(state_dict=state_dict)
# [end-checkpoint-migrate-model-torch]
# [start-export-memory-torch]
from skrl.memories.torch import RandomMemory
# Instantiate a memory and enable its export
memory = RandomMemory(memory_size=16,
num_envs=env.num_envs,
device=device,
export=True,
export_format="pt",
export_directory="./memories")
# [end-export-memory-torch]
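# A minimal sketch of creating tensors and adding data so the memory has something to export
# (assumptions: <states> and <actions> are batches produced by a wrapped environment <env>)
import torch
memory.create_tensor(name="states", size=env.observation_space, dtype=torch.float32)
memory.create_tensor(name="actions", size=env.action_space, dtype=torch.float32)
memory.add_samples(states=states, actions=actions)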
# [start-export-memory-jax]
from skrl.memories.jax import RandomMemory
# Instantiate a memory and enable its export
memory = RandomMemory(memory_size=16,
num_envs=env.num_envs,
device=device,
export=True,
export_format="np",
export_directory="./memories")
# [end-export-memory-jax]
| 9,058 | Python | 35.091633 | 101 | 0.643851 |
Toni-SM/skrl/docs/source/snippets/isaacgym_utils.py | import math
from isaacgym import gymapi
from skrl.utils import isaacgym_utils
# create a web viewer instance
web_viewer = isaacgym_utils.WebViewer()
# configure and create simulation
sim_params = gymapi.SimParams()
sim_params.up_axis = gymapi.UP_AXIS_Z
sim_params.gravity = gymapi.Vec3(0.0, 0.0, -9.8)
sim_params.physx.solver_type = 1
sim_params.physx.num_position_iterations = 4
sim_params.physx.num_velocity_iterations = 1
sim_params.physx.use_gpu = True
sim_params.use_gpu_pipeline = True
gym = gymapi.acquire_gym()
sim = gym.create_sim(compute_device=0, graphics_device=0, type=gymapi.SIM_PHYSX, params=sim_params)
# setup num_envs and env's grid
num_envs = 1
spacing = 2.0
env_lower = gymapi.Vec3(-spacing, -spacing, 0.0)
env_upper = gymapi.Vec3(spacing, 0.0, spacing)
# add ground plane
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
gym.add_ground(sim, plane_params)
envs = []
cameras = []
for i in range(num_envs):
# create env
env = gym.create_env(sim, env_lower, env_upper, int(math.sqrt(num_envs)))
# add sphere
pose = gymapi.Transform()
pose.p, pose.r = gymapi.Vec3(0.0, 0.0, 1.0), gymapi.Quat(0.0, 0.0, 0.0, 1.0)
gym.create_actor(env, gym.create_sphere(sim, 0.2, None), pose, "sphere", i, 0)
# add camera
cam_props = gymapi.CameraProperties()
cam_props.width, cam_props.height = 300, 300
cam_handle = gym.create_camera_sensor(env, cam_props)
gym.set_camera_location(cam_handle, env, gymapi.Vec3(1, 1, 1), gymapi.Vec3(0, 0, 0))
envs.append(env)
cameras.append(cam_handle)
# setup web viewer
web_viewer.setup(gym, sim, envs, cameras)
gym.prepare_sim(sim)
for i in range(100000):
gym.simulate(sim)
# render the scene
web_viewer.render(fetch_results=True,
step_graphics=True,
render_all_camera_sensors=True,
wait_for_page_load=True)
| 1,927 | Python | 26.942029 | 99 | 0.677218 |
Toni-SM/skrl/docs/source/snippets/deterministic_model.py | # [start-definition-torch]
class DeterministicModel(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
# [end-definition-torch]
# [start-definition-jax]
class DeterministicModel(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
# [end-definition-jax]
# =============================================================================
# [start-mlp-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, DeterministicMixin
# define the model
class MLP(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, 1))
def compute(self, inputs, role):
return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {}
# instantiate the model (assumes there is a wrapped environment: env)
critic = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False)
# [end-mlp-sequential-torch]
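# A minimal usage sketch of the critic defined above (assumptions: <states> and <actions> are
# batches from a wrapped environment; the first element of the returned tuple holds the values)
values, _, _ = critic.act({"states": states, "taken_actions": actions}, role="critic")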
# [start-mlp-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, DeterministicMixin
# define the model
class MLP(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.fc1 = nn.Linear(self.num_observations + self.num_actions, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 1)
def compute(self, inputs, role):
x = self.fc1(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1))
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.fc3(x), {}
# instantiate the model (assumes there is a wrapped environment: env)
critic = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False)
# [end-mlp-functional-torch]
# [start-mlp-setup-jax]
import jax.numpy as jnp
import flax.linen as nn
from skrl.models.jax import Model, DeterministicMixin
# define the model
class MLP(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def setup(self):
self.fc1 = nn.Dense(64)
self.fc2 = nn.Dense(32)
self.fc3 = nn.Dense(1)
def __call__(self, inputs, role):
x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)
x = self.fc1(x)
x = nn.relu(x)
x = self.fc2(x)
x = nn.relu(x)
x = self.fc3(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
critic = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False)
# initialize model's state dict
critic.init_state_dict("critic")
# [end-mlp-setup-jax]
# [start-mlp-compact-jax]
import jax.numpy as jnp
import flax.linen as nn
from skrl.models.jax import Model, DeterministicMixin
# define the model
class MLP(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)
x = nn.relu(nn.Dense(64)(x))
x = nn.relu(nn.Dense(32)(x))
x = nn.Dense(1)(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
critic = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False)
# initialize model's state dict
critic.init_state_dict("critic")
# [end-mlp-compact-jax]
# =============================================================================
# [start-cnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, DeterministicMixin
# define the model
class CNN(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.features_extractor = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 16),
nn.Tanh())
self.net = nn.Sequential(nn.Linear(16 + self.num_actions, 64),
nn.Tanh(),
nn.Linear(64, 32),
nn.Tanh(),
nn.Linear(32, 1))
def compute(self, inputs, role):
# permute (samples, width * height * channels) -> (samples, channels, width, height)
x = self.features_extractor(inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2))
return self.net(torch.cat([x, inputs["taken_actions"]], dim=1)), {}
# instantiate the model (assumes there is a wrapped environment: env)
critic = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False)
# [end-cnn-sequential-torch]
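# Note: the in_features of the first linear layer (1024 above) depends on the image size.
# A minimal sketch to recompute it for convolutions without padding (assumption: 64x64 RGB
# observations, which is what yields the 1024 features used above)
def conv_output_size(size, kernel, stride):
    return (size - kernel) // stride + 1
side = conv_output_size(conv_output_size(conv_output_size(64, 8, 4), 4, 2), 3, 1)  # 64 -> 15 -> 6 -> 4
flattened_features = 64 * side * side  # 64 channels from the last convolution -> 1024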
# [start-cnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, DeterministicMixin
# define the model
class CNN(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.conv1 = nn.Conv2d(3, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 16)
self.fc3 = nn.Linear(16 + self.num_actions, 64)
self.fc4 = nn.Linear(64, 32)
self.fc5 = nn.Linear(32, 1)
def compute(self, inputs, role):
# permute (samples, width * height * channels) -> (samples, channels, width, height)
x = inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
x = torch.flatten(x, start_dim=1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = torch.tanh(x)
x = self.fc3(torch.cat([x, inputs["taken_actions"]], dim=1))
x = torch.tanh(x)
x = self.fc4(x)
x = torch.tanh(x)
x = self.fc5(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
critic = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False)
# [end-cnn-functional-torch]
# [start-cnn-setup-jax]
import jax.numpy as jnp
import flax.linen as nn
from skrl.models.jax import Model, DeterministicMixin
# define the model
class CNN(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
def setup(self):
self.conv1 = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID")
self.conv2 = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID")
self.conv3 = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID")
self.fc1 = nn.Dense(512)
self.fc2 = nn.Dense(16)
self.fc3 = nn.Dense(64)
self.fc4 = nn.Dense(32)
self.fc5 = nn.Dense(1)
def __call__(self, inputs, role):
x = inputs["states"].reshape((-1, *self.observation_space.shape))
x = self.conv1(x)
x = nn.relu(x)
x = self.conv2(x)
x = nn.relu(x)
x = self.conv3(x)
x = nn.relu(x)
x = x.reshape((x.shape[0], -1))
x = self.fc1(x)
x = nn.relu(x)
x = self.fc2(x)
x = nn.tanh(x)
x = jnp.concatenate([x, inputs["taken_actions"]], axis=-1)
x = self.fc3(x)
x = nn.tanh(x)
x = self.fc4(x)
x = nn.tanh(x)
x = self.fc5(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
critic = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False)
# initialize model's state dict
critic.init_state_dict("critic")
# [end-cnn-setup-jax]
# [start-cnn-compact-jax]
import jax.numpy as jnp
import flax.linen as nn
from skrl.models.jax import Model, DeterministicMixin
# define the model
class CNN(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = inputs["states"].reshape((-1, *self.observation_space.shape))
x = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID")(x)
x = nn.relu(x)
x = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID")(x)
x = nn.relu(x)
x = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID")(x)
x = nn.relu(x)
x = x.reshape((x.shape[0], -1))
x = nn.Dense(512)(x)
x = nn.relu(x)
x = nn.Dense(16)(x)
x = nn.tanh(x)
x = jnp.concatenate([x, inputs["taken_actions"]], axis=-1)
x = nn.Dense(64)(x)
x = nn.tanh(x)
x = nn.Dense(32)(x)
x = nn.tanh(x)
x = nn.Dense(1)(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
critic = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False)
# initialize model's state dict
critic.init_state_dict("critic")
# [end-cnn-compact-jax]
# =============================================================================
# [start-rnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, DeterministicMixin
# define the model
class RNN(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size + self.num_actions, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, 1))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# critic models are only used during training
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
sequence_index = 1 if role == "target_critic" else 0 # target networks act on the next state of the environment
hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(torch.cat([rnn_output, inputs["taken_actions"]], dim=1)), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
critic = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-sequential-torch]
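# A minimal sketch of the initial RNN states implied by get_specification() (assumption: at
# the start of a rollout the hidden states are zero-initialized, one tensor per reported size)
sizes = critic.get_specification()["rnn"]["sizes"]
initial_rnn_states = [torch.zeros(size, device=env.device) for size in sizes]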
# [start-rnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, DeterministicMixin
# define the model
class RNN(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size + self.num_actions, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 1)
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# critic models are only used during training
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
sequence_index = 1 if role == "target_critic" else 0 # target networks act on the next state of the environment
hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.fc3(x), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
critic = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-functional-torch]
# =============================================================================
# [start-gru-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, DeterministicMixin
# define the model
class GRU(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size + self.num_actions, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, 1))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# critic models are only used during training
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
sequence_index = 1 if role == "target_critic" else 0 # target networks act on the next state of the environment
hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(torch.cat([rnn_output, inputs["taken_actions"]], dim=1)), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
critic = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-sequential-torch]
# [start-gru-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, DeterministicMixin
# define the model
class GRU(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size + self.num_actions, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 1)
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# critic models are only used during training
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
sequence_index = 1 if role == "target_critic" else 0 # target networks act on the next state of the environment
hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.fc3(x), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
critic = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-functional-torch]
# =============================================================================
# [start-lstm-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, DeterministicMixin
# define the model
class LSTM(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size + self.num_actions, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, 1))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# critic models are only used during training
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
sequence_index = 1 if role == "target_critic" else 0 # target networks act on the next state of the environment
hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(torch.cat([rnn_output, inputs["taken_actions"]], dim=1)), {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
critic = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-sequential-torch]
# [start-lstm-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, DeterministicMixin
# define the model
class LSTM(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size + self.num_actions, 64)
self.fc2 = nn.Linear(64, 32)
self.fc3 = nn.Linear(32, 1)
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# critic models are only used during training
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
sequence_index = 1 if role == "target_critic" else 0 # target networks act on the next state of the environment
hidden_states = hidden_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,sequence_index,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(torch.cat([rnn_output, inputs["taken_actions"]], dim=1))
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.fc3(x), {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
critic = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
clip_actions=False,
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-functional-torch]
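# Hedged sketch (not part of the snippet above): zero-initialized RNN states for this
# critic can be built from its own specification. `critic` and `env` are the objects
# instantiated above; the sizes follow the dict returned by `get_specification()`.
import torch
rnn_spec = critic.get_specification()["rnn"]
initial_rnn_states = [torch.zeros(size, device=env.device) for size in rnn_spec["sizes"]]  # [hidden states, cell states]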
| 36,210 | Python | 40.669735 | 142 | 0.583458 |
Toni-SM/skrl/docs/source/snippets/loaders.py | # [start-omniverse-isaac-gym-envs-parameters-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
# load environment
env = load_omniverse_isaacgym_env(task_name="Cartpole")
# [end-omniverse-isaac-gym-envs-parameters-torch]
# [start-omniverse-isaac-gym-envs-parameters-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_omniverse_isaacgym_env
# load environment
env = load_omniverse_isaacgym_env(task_name="Cartpole")
# [end-omniverse-isaac-gym-envs-parameters-jax]
# [start-omniverse-isaac-gym-envs-cli-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
# load environment
env = load_omniverse_isaacgym_env()
# [end-omniverse-isaac-gym-envs-cli-torch]
# [start-omniverse-isaac-gym-envs-cli-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_omniverse_isaacgym_env
# load environment
env = load_omniverse_isaacgym_env()
# [end-omniverse-isaac-gym-envs-cli-jax]
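# Note (hedged): in the CLI variants above, the task name is not passed to the loader but
# read from the command line. For OmniIsaacGymEnvs this is typically a hydra-style
# argument, e.g.:  python script.py task=Cartpole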
# [start-omniverse-isaac-gym-envs-multi-threaded-parameters-torch]
import threading
# import the environment loader
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
# load environment
env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30)
# ...
# start training in a separate thread
threading.Thread(target=trainer.train).start()
# run the simulation in the main thread
env.run()
# [end-omniverse-isaac-gym-envs-multi-threaded-parameters-torch]
# [start-omniverse-isaac-gym-envs-multi-threaded-parameters-jax]
import threading
# import the environment loader
from skrl.envs.loaders.jax import load_omniverse_isaacgym_env
# load environment
env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30)
# ...
# start training in a separate thread
threading.Thread(target=trainer.train).start()
# run the simulation in the main thread
env.run()
# [end-omniverse-isaac-gym-envs-multi-threaded-parameters-jax]
# [start-omniverse-isaac-gym-envs-multi-threaded-cli-torch]
import threading
# import the environment loader
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
# load environment
env = load_omniverse_isaacgym_env(multi_threaded=True, timeout=30)
# ...
# start training in a separate thread
threading.Thread(target=trainer.train).start()
# run the simulation in the main thread
env.run()
# [end-omniverse-isaac-gym-envs-multi-threaded-cli-torch]
# [start-omniverse-isaac-gym-envs-multi-threaded-cli-jax]
import threading
# import the environment loader
from skrl.envs.loaders.jax import load_omniverse_isaacgym_env
# load environment
env = load_omniverse_isaacgym_env(multi_threaded=True, timeout=30)
# ...
# start training in a separate thread
threading.Thread(target=trainer.train).start()
# run the simulation in the main thread
env.run()
# [end-omniverse-isaac-gym-envs-multi-threaded-cli-jax]
# =============================================================================
# [start-isaac-orbit-envs-parameters-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_isaac_orbit_env
# load environment
env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0")
# [end-isaac-orbit-envs-parameters-torch]
# [start-isaac-orbit-envs-parameters-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_isaac_orbit_env
# load environment
env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0")
# [end-isaac-orbit-envs-parameters-jax]
# [start-isaac-orbit-envs-cli-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_isaac_orbit_env
# load environment
env = load_isaac_orbit_env()
# [end-isaac-orbit-envs-cli-torch]
# [start-isaac-orbit-envs-cli-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_isaac_orbit_env
# load environment
env = load_isaac_orbit_env()
# [end-isaac-orbit-envs-cli-jax]
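# Note (hedged): as above, the CLI variants read the task name from the command line;
# for Isaac Orbit this is typically an argparse-style flag, e.g.:
#   python script.py --task Isaac-Cartpole-v0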
# =============================================================================
# [start-isaac-gym-envs-preview-4-api]
import isaacgymenvs
env = isaacgymenvs.make(seed=0,
task="Cartpole",
num_envs=2000,
sim_device="cuda:0",
rl_device="cuda:0",
graphics_device_id=0,
headless=False)
# [end-isaac-gym-envs-preview-4-api]
# [start-isaac-gym-envs-preview-4-parameters-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_isaacgym_env_preview4
# load environment
env = load_isaacgym_env_preview4(task_name="Cartpole")
# [end-isaac-gym-envs-preview-4-parameters-torch]
# [start-isaac-gym-envs-preview-4-parameters-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_isaacgym_env_preview4
# load environment
env = load_isaacgym_env_preview4(task_name="Cartpole")
# [end-isaac-gym-envs-preview-4-parameters-jax]
# [start-isaac-gym-envs-preview-4-cli-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_isaacgym_env_preview4
# load environment
env = load_isaacgym_env_preview4()
# [end-isaac-gym-envs-preview-4-cli-torch]
# [start-isaac-gym-envs-preview-4-cli-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_isaacgym_env_preview4
# load environment
env = load_isaacgym_env_preview4()
# [end-isaac-gym-envs-preview-4-cli-jax]
# [start-isaac-gym-envs-preview-3-parameters-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_isaacgym_env_preview3
# load environment
env = load_isaacgym_env_preview3(task_name="Cartpole")
# [end-isaac-gym-envs-preview-3-parameters-torch]
# [start-isaac-gym-envs-preview-3-parameters-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_isaacgym_env_preview3
# load environment
env = load_isaacgym_env_preview3(task_name="Cartpole")
# [end-isaac-gym-envs-preview-3-parameters-jax]
# [start-isaac-gym-envs-preview-3-cli-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_isaacgym_env_preview3
# load environment
env = load_isaacgym_env_preview3()
# [end-isaac-gym-envs-preview-3-cli-torch]
# [start-isaac-gym-envs-preview-3-cli-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_isaacgym_env_preview3
# load environment
env = load_isaacgym_env_preview3()
# [end-isaac-gym-envs-preview-3-cli-jax]
# [start-isaac-gym-envs-preview-2-parameters-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_isaacgym_env_preview2
# load environment
env = load_isaacgym_env_preview2(task_name="Cartpole")
# [end-isaac-gym-envs-preview-2-parameters-torch]
# [start-isaac-gym-envs-preview-2-parameters-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_isaacgym_env_preview2
# load environment
env = load_isaacgym_env_preview2(task_name="Cartpole")
# [end-isaac-gym-envs-preview-2-parameters-jax]
# [start-isaac-gym-envs-preview-2-cli-torch]
# import the environment loader
from skrl.envs.loaders.torch import load_isaacgym_env_preview2
# load environment
env = load_isaacgym_env_preview2()
# [end-isaac-gym-envs-preview-2-cli-torch]
# [start-isaac-gym-envs-preview-2-cli-jax]
# import the environment loader
from skrl.envs.loaders.jax import load_isaacgym_env_preview2
# load environment
env = load_isaacgym_env_preview2()
# [end-isaac-gym-envs-preview-2-cli-jax]
| 7,458 | Python | 26.625926 | 88 | 0.739877 |
Toni-SM/skrl/docs/source/snippets/multicategorical_model.py | # [start-definition-torch]
class MultiCategoricalModel(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
# [end-definition-torch]
# [start-definition-jax]
class MultiCategoricalModel(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
# [end-definition-jax]
# =============================================================================
# [start-mlp-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class MLP(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def compute(self, inputs, role):
return self.net(inputs["states"]), {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum")
# [end-mlp-sequential-torch]
# [start-mlp-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class MLP(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.fc1 = nn.Linear(self.num_observations, 64)
self.fc2 = nn.Linear(64, 32)
self.logits = nn.Linear(32, self.num_actions)
def compute(self, inputs, role):
x = self.fc1(inputs["states"])
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.logits(x), {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum")
# [end-mlp-functional-torch]
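# Hedged usage sketch (not part of the snippets above): querying the policy directly.
# It assumes the torch MLP `policy` defined above, a wrapped environment `env`, and that
# `Model.act` returns an (actions, log_prob, outputs) tuple, sampling one sub-action per
# MultiDiscrete dimension.
import torch
dummy_states = torch.rand((env.num_envs, policy.num_observations), device=env.device)  # placeholder observations
actions, log_prob, outputs = policy.act({"states": dummy_states}, role="policy")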
# [start-mlp-setup-jax]
import flax.linen as nn
from skrl.models.jax import Model, MultiCategoricalMixin
# define the model
class MLP(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
def setup(self):
self.fc1 = nn.Dense(64)
self.fc2 = nn.Dense(32)
self.fc3 = nn.Dense(self.num_actions)
def __call__(self, inputs, role):
x = self.fc1(inputs["states"])
x = nn.relu(x)
x = self.fc2(x)
x = nn.relu(x)
x = self.fc3(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum")
# initialize model's state dict
policy.init_state_dict("policy")
# [end-mlp-setup-jax]
# [start-mlp-compact-jax]
import flax.linen as nn
from skrl.models.jax import Model, MultiCategoricalMixin
# define the model
class MLP(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = nn.Dense(64)(inputs["states"])
x = nn.relu(x)
x = nn.Dense(32)(x)
x = nn.relu(x)
x = nn.Dense(self.num_actions)(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = MLP(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum")
# initialize model's state dict
policy.init_state_dict("policy")
# [end-mlp-compact-jax]
# =============================================================================
# [start-cnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class CNN(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.net = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=4),
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1),
nn.ReLU(),
nn.Flatten(),
nn.Linear(1024, 512),
nn.ReLU(),
nn.Linear(512, 16),
nn.Tanh(),
nn.Linear(16, 64),
nn.Tanh(),
nn.Linear(64, 32),
nn.Tanh(),
nn.Linear(32, self.num_actions))
def compute(self, inputs, role):
# permute (samples, width * height * channels) -> (samples, channels, width, height)
return self.net(inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)), {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum")
# [end-cnn-sequential-torch]
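# Note (hedged): the nn.Linear(1024, 512) layer above assumes 64x64 RGB observations:
# the spatial size shrinks 64 -> 15 -> 6 -> 4 through the three convolutions, and
# 64 channels * 4 * 4 = 1024 flattened features. A minimal shape check under that assumption:
import torch
dummy = torch.zeros(1, 3, 64, 64)  # (samples, channels, height, width)
convs = torch.nn.Sequential(torch.nn.Conv2d(3, 32, kernel_size=8, stride=4),
                            torch.nn.Conv2d(32, 64, kernel_size=4, stride=2),
                            torch.nn.Conv2d(64, 64, kernel_size=3, stride=1))
assert convs(dummy).flatten(start_dim=1).shape[-1] == 1024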
# [start-cnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class CNN(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.conv1 = nn.Conv2d(3, 32, kernel_size=8, stride=4)
self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 16)
self.fc3 = nn.Linear(16, 64)
self.fc4 = nn.Linear(64, 32)
self.fc5 = nn.Linear(32, self.num_actions)
def compute(self, inputs, role):
# permute (samples, width * height * channels) -> (samples, channels, width, height)
x = inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = self.conv3(x)
x = F.relu(x)
x = torch.flatten(x, start_dim=1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = torch.tanh(x)
x = self.fc3(x)
x = torch.tanh(x)
x = self.fc4(x)
x = torch.tanh(x)
x = self.fc5(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum")
# [end-cnn-functional-torch]
# [start-cnn-setup-jax]
import flax.linen as nn
from skrl.models.jax import Model, MultiCategoricalMixin
# define the model
class CNN(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
def setup(self):
self.conv1 = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID")
self.conv2 = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID")
self.conv3 = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID")
self.fc1 = nn.Dense(512)
self.fc2 = nn.Dense(16)
self.fc3 = nn.Dense(64)
self.fc4 = nn.Dense(32)
self.fc5 = nn.Dense(self.num_actions)
def __call__(self, inputs, role):
x = inputs["states"].reshape((-1, *self.observation_space.shape))
x = self.conv1(x)
x = nn.relu(x)
x = self.conv2(x)
x = nn.relu(x)
x = self.conv3(x)
x = nn.relu(x)
x = x.reshape((x.shape[0], -1))
x = self.fc1(x)
x = nn.relu(x)
x = self.fc2(x)
x = nn.tanh(x)
x = self.fc3(x)
x = nn.tanh(x)
x = self.fc4(x)
x = nn.tanh(x)
x = self.fc5(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum")
# initialize model's state dict
policy.init_state_dict("policy")
# [end-cnn-setup-jax]
# [start-cnn-compact-jax]
import flax.linen as nn
from skrl.models.jax import Model, MultiCategoricalMixin
# define the model
class CNN(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
@nn.compact # marks the given module method allowing inlined submodules
def __call__(self, inputs, role):
x = inputs["states"].reshape((-1, *self.observation_space.shape))
x = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID")(x)
x = nn.relu(x)
x = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID")(x)
x = nn.relu(x)
x = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID")(x)
x = nn.relu(x)
x = x.reshape((x.shape[0], -1))
x = nn.Dense(512)(x)
x = nn.relu(x)
x = nn.Dense(16)(x)
x = nn.tanh(x)
x = nn.Dense(64)(x)
x = nn.tanh(x)
x = nn.Dense(32)(x)
x = nn.tanh(x)
x = nn.Dense(self.num_actions)(x)
return x, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = CNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum")
# initialize model's state dict
policy.init_state_dict("policy")
# [end-cnn-compact-jax]
# =============================================================================
# [start-rnn-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class RNN(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-sequential-torch]
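# Hedged walk-through of the sequence-splitting logic used above (illustrative only).
# With sequence_length=5 and terminations whose column-wise `any` is
# [False, False, True, False, False], the sub-sequences become [0:3] and [3:5], and the
# hidden states of the terminated environments are zeroed in between.
import torch
terminated = torch.tensor([[False, False, True, False, False]])  # (N=1, L=5)
sequence_length = terminated.shape[1]
indexes = [0] + (terminated[:, :-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [sequence_length]
assert indexes == [0, 3, 5]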
# [start-rnn-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class RNN(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.rnn = nn.RNN(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.logits = nn.Linear(32, self.num_actions)
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.logits(x), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = RNN(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-rnn-functional-torch]
# =============================================================================
# [start-gru-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class GRU(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-sequential-torch]
# [start-gru-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class GRU(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hout
self.sequence_length = sequence_length
self.gru = nn.GRU(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.logits = nn.Linear(32, self.num_actions)
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states = inputs["rnn"][0]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
# get the hidden states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states)
hidden_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, hidden_states = self.gru(rnn_input, hidden_states)
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.logits(x), {"rnn": [hidden_states]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = GRU(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-gru-functional-torch]
# =============================================================================
# [start-lstm-sequential-torch]
import torch
import torch.nn as nn
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class LSTM(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.net = nn.Sequential(nn.Linear(self.hidden_size, 64),
nn.ReLU(),
nn.Linear(64, 32),
nn.ReLU(),
nn.Linear(32, self.num_actions))
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
return self.net(rnn_output), {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-sequential-torch]
# [start-lstm-functional-torch]
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.models.torch import Model, MultiCategoricalMixin
# define the model
class LSTM(MultiCategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, reduction="sum",
num_envs=1, num_layers=1, hidden_size=64, sequence_length=10):
Model.__init__(self, observation_space, action_space, device)
MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
self.num_envs = num_envs
self.num_layers = num_layers
self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0)
self.sequence_length = sequence_length
self.lstm = nn.LSTM(input_size=self.num_observations,
hidden_size=self.hidden_size,
num_layers=self.num_layers,
batch_first=True) # batch_first -> (batch, sequence, features)
self.fc1 = nn.Linear(self.hidden_size, 64)
self.fc2 = nn.Linear(64, 32)
self.logits = nn.Linear(32, self.num_actions)
def get_specification(self):
# batch size (N) is the number of envs during rollout
return {"rnn": {"sequence_length": self.sequence_length,
"sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout)
(self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell)
def compute(self, inputs, role):
states = inputs["states"]
terminated = inputs.get("terminated", None)
hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1]
# training
if self.training:
rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length
hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout)
cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell)
# get the hidden/cell states corresponding to the initial sequence
hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout)
cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell)
# reset the RNN state in the middle of a sequence
if terminated is not None and torch.any(terminated):
rnn_outputs = []
terminated = terminated.view(-1, self.sequence_length)
indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length]
for i in range(len(indexes) - 1):
i0, i1 = indexes[i], indexes[i + 1]
rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states))
hidden_states[:, (terminated[:,i1-1]), :] = 0
cell_states[:, (terminated[:,i1-1]), :] = 0
rnn_outputs.append(rnn_output)
rnn_states = (hidden_states, cell_states)
rnn_output = torch.cat(rnn_outputs, dim=1)
# no need to reset the RNN state in the sequence
else:
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# rollout
else:
rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1
rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states))
# flatten the RNN output
rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout)
x = self.fc1(rnn_output)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return self.logits(x), {"rnn": [rnn_states[0], rnn_states[1]]}
# instantiate the model (assumes there is a wrapped environment: env)
policy = LSTM(observation_space=env.observation_space,
action_space=env.action_space,
device=env.device,
unnormalized_log_prob=True,
reduction="sum",
num_envs=env.num_envs,
num_layers=1,
hidden_size=64,
sequence_length=10)
# [end-lstm-functional-torch]
| 37,461 | Python | 40.950728 | 146 | 0.575452 |
Toni-SM/skrl/docs/source/snippets/agent.py | # [start-agent-base-class-torch]
from typing import Union, Tuple, Dict, Any, Optional
import gym, gymnasium
import copy
import torch
from skrl.memories.torch import Memory
from skrl.models.torch import Model
from skrl.agents.torch import Agent
CUSTOM_DEFAULT_CONFIG = {
# ...
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
class CUSTOM(Agent):
def __init__(self,
models: Dict[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None) -> None:
"""Custom agent
:param models: Models used by the agent
:type models: dictionary of skrl.models.torch.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None
:param observation_space: Observation/state space or shape (default: None)
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None)
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a torch tensor is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
_cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# =======================================================================
# - get and process models from `self.models`
# - populate `self.checkpoint_modules` dictionary for storing checkpoints
# - parse configurations from `self.cfg`
# - setup optimizers and learning rate scheduler
# - set up preprocessors
# =======================================================================
def init(self, trainer_cfg: Optional[Dict[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# =================================================================
# - create tensors in memory if required
        # - create temporary variables needed for storage and computation
# =================================================================
def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: torch.Tensor
"""
# ======================================
# - sample random actions if required or
# sample and return agent's actions
# ======================================
def record_transition(self,
states: torch.Tensor,
actions: torch.Tensor,
rewards: torch.Tensor,
next_states: torch.Tensor,
terminated: torch.Tensor,
truncated: torch.Tensor,
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: torch.Tensor
:param actions: Actions taken by the agent
:type actions: torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: torch.Tensor
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
# ========================================
# - record agent's specific data in memory
# ========================================
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# =====================================
# - call `self.update(...)` if required
# =====================================
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# =====================================
# - call `self.update(...)` if required
# =====================================
# call parent's method for checkpointing and TensorBoard writing
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# ===================================================
# - implement algorithm's update step
# - record tracking data using `self.track_data(...)`
# ===================================================
# [end-agent-base-class-torch]
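# Hedged instantiation sketch (illustrative only): wiring the custom torch agent with
# previously created models, an optional memory and a wrapped environment. `models`,
# `memory` and `env` are assumed to exist already.
agent = CUSTOM(models=models,
               memory=memory,
               observation_space=env.observation_space,
               action_space=env.action_space,
               device=env.device,
               cfg=CUSTOM_DEFAULT_CONFIG)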
# [start-agent-base-class-jax]
from typing import Union, Tuple, Dict, Any, Optional
import gym, gymnasium
import copy
import jaxlib
import jax.numpy as jnp
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
from skrl.agents.jax import Agent
CUSTOM_DEFAULT_CONFIG = {
# ...
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
class CUSTOM(Agent):
def __init__(self,
models: Dict[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jaxlib.xla_extension.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Custom agent
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None
:param observation_space: Observation/state space or shape (default: None)
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None)
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a jax array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"``
:type device: str or jaxlib.xla_extension.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
        _cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# =======================================================================
# - get and process models from `self.models`
# - populate `self.checkpoint_modules` dictionary for storing checkpoints
# - parse configurations from `self.cfg`
# - setup optimizers and learning rate scheduler
# - set up preprocessors
# =======================================================================
def init(self, trainer_cfg: Optional[Dict[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# =================================================================
# - create tensors in memory if required
        # - create temporary variables needed for storage and computation
# - set up models for just-in-time compilation with XLA
# =================================================================
def act(self, states: jnp.ndarray, timestep: int, timesteps: int) -> jnp.ndarray:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: jnp.ndarray
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: jnp.ndarray
"""
# ======================================
# - sample random actions if required or
# sample and return agent's actions
# ======================================
def record_transition(self,
states: jnp.ndarray,
actions: jnp.ndarray,
rewards: jnp.ndarray,
next_states: jnp.ndarray,
terminated: jnp.ndarray,
truncated: jnp.ndarray,
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: jnp.ndarray
:param actions: Actions taken by the agent
:type actions: jnp.ndarray
:param rewards: Instant rewards achieved by the current actions
:type rewards: jnp.ndarray
:param next_states: Next observations/states of the environment
:type next_states: jnp.ndarray
:param terminated: Signals to indicate that episodes have terminated
:type terminated: jnp.ndarray
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: jnp.ndarray
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
# ========================================
# - record agent's specific data in memory
# ========================================
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# =====================================
# - call `self.update(...)` if required
# =====================================
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# =====================================
# - call `self.update(...)` if required
# =====================================
# call parent's method for checkpointing and TensorBoard writing
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# ===================================================
# - implement algorithm's update step
# - record tracking data using `self.track_data(...)`
# ===================================================
# [end-agent-base-class-jax]
| 15,562 | Python | 42.472067 | 123 | 0.543182 |
Toni-SM/skrl/docs/source/snippets/tabular_model.py | # [start-definition-torch]
class TabularModel(TabularMixin, Model):
def __init__(self, observation_space, action_space, device=None, num_envs=1):
Model.__init__(self, observation_space, action_space, device)
TabularMixin.__init__(self, num_envs)
# [end-definition-torch]
# =============================================================================
# [start-epsilon-greedy-torch]
import torch
from skrl.models.torch import Model, TabularMixin
# define the model
class EpsilonGreedyPolicy(TabularMixin, Model):
def __init__(self, observation_space, action_space, device, num_envs=1, epsilon=0.1):
Model.__init__(self, observation_space, action_space, device)
TabularMixin.__init__(self, num_envs)
self.epsilon = epsilon
self.q_table = torch.ones((num_envs, self.num_observations, self.num_actions), dtype=torch.float32)
def compute(self, inputs, role):
states = inputs["states"]
actions = torch.argmax(self.q_table[torch.arange(self.num_envs).view(-1, 1), states],
dim=-1, keepdim=True).view(-1,1)
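        # epsilon-greedy exploration: with probability epsilon, replace the greedy action with a random one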
indexes = (torch.rand(states.shape[0], device=self.device) < self.epsilon).nonzero().view(-1)
if indexes.numel():
actions[indexes] = torch.randint(self.num_actions, (indexes.numel(), 1), device=self.device)
return actions, {}
# instantiate the model (assumes there is a wrapped environment: env)
policy = EpsilonGreedyPolicy(observation_space=env.observation_space,
                             action_space=env.action_space,
                             device=env.device,
                             num_envs=env.num_envs,
                             epsilon=0.15)
# [end-epsilon-greedy-torch]
| 1,744 | Python | 39.581394 | 107 | 0.601491 |
Toni-SM/skrl/docs/source/snippets/multi_agent.py | # [start-multi-agent-base-class-torch]
from typing import Union, Dict, Any, Optional, Sequence, Mapping
import gym, gymnasium
import copy
import torch
from skrl.memories.torch import Memory
from skrl.models.torch import Model
from skrl.multi_agents.torch import MultiAgent
CUSTOM_DEFAULT_CONFIG = {
# ...
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
class CUSTOM(MultiAgent):
def __init__(self,
possible_agents: Sequence[str],
models: Dict[str, Model],
memories: Optional[Mapping[str, Memory]] = None,
observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None) -> None:
"""Custom multi-agent
:param possible_agents: Name of all possible agents the environment could generate
:type possible_agents: list of str
:param models: Models used by the agents.
External keys are environment agents' names. Internal keys are the models required by the algorithm
:type models: nested dictionary of skrl.models.torch.Model
        :param memories: Memories to store the transitions.
        :type memories: dictionary of skrl.memories.torch.Memory, optional
:param observation_spaces: Observation/state spaces or shapes (default: ``None``)
:type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param action_spaces: Action spaces or shapes (default: ``None``)
:type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param device: Device on which a torch tensor is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
_cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(possible_agents=possible_agents,
models=models,
memories=memories,
observation_spaces=observation_spaces,
action_spaces=action_spaces,
device=device,
cfg=_cfg)
# =======================================================================
# - get and process models from `self.models`
# - populate `self.checkpoint_modules` dictionary for storing checkpoints
# - parse configurations from `self.cfg`
# - setup optimizers and learning rate scheduler
# - set up preprocessors
# =======================================================================
def init(self, trainer_cfg: Optional[Dict[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# =================================================================
# - create tensors in memory if required
        # - create temporary variables needed for storage and computation
# =================================================================
def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policies
:param states: Environment's states
:type states: dictionary of torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: torch.Tensor
"""
# ======================================
# - sample random actions if required or
# sample and return agent's actions
# ======================================
def record_transition(self,
states: Mapping[str, torch.Tensor],
actions: Mapping[str, torch.Tensor],
rewards: Mapping[str, torch.Tensor],
next_states: Mapping[str, torch.Tensor],
terminated: Mapping[str, torch.Tensor],
truncated: Mapping[str, torch.Tensor],
infos: Mapping[str, Any],
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: dictionary of torch.Tensor
:param actions: Actions taken by the agent
:type actions: dictionary of torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: dictionary of torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: dictionary of torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: dictionary of torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: dictionary of torch.Tensor
:param infos: Additional information about the environment
:type infos: dictionary of any supported type
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
# ========================================
# - record agent's specific data in memory
# ========================================
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# =====================================
# - call `self.update(...)` if required
# =====================================
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# =====================================
# - call `self.update(...)` if required
# =====================================
# call parent's method for checkpointing and TensorBoard writing
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# ===================================================
# - implement algorithm's update step
# - record tracking data using `self.track_data(...)`
# ===================================================
# [end-multi-agent-base-class-torch]
# [start-multi-agent-base-class-jax]
from typing import Union, Dict, Any, Optional, Sequence, Mapping
import gym, gymnasium
import copy
import jaxlib
import jax.numpy as jnp
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
from skrl.multi_agents.jax import MultiAgent
CUSTOM_DEFAULT_CONFIG = {
# ...
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
class CUSTOM(MultiAgent):
def __init__(self,
possible_agents: Sequence[str],
models: Dict[str, Model],
memories: Optional[Mapping[str, Memory]] = None,
observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
device: Optional[Union[str, jaxlib.xla_extension.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Custom multi-agent
:param possible_agents: Name of all possible agents the environment could generate
:type possible_agents: list of str
:param models: Models used by the agents.
External keys are environment agents' names. Internal keys are the models required by the algorithm
        :type models: nested dictionary of skrl.models.jax.Model
        :param memories: Memories to store the transitions.
        :type memories: dictionary of skrl.memories.jax.Memory, optional
:param observation_spaces: Observation/state spaces or shapes (default: ``None``)
:type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param action_spaces: Action spaces or shapes (default: ``None``)
:type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param device: Device on which a jax array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"``
:type device: str or jaxlib.xla_extension.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
_cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(possible_agents=possible_agents,
models=models,
memories=memories,
observation_spaces=observation_spaces,
action_spaces=action_spaces,
device=device,
cfg=_cfg)
# =======================================================================
# - get and process models from `self.models`
# - populate `self.checkpoint_modules` dictionary for storing checkpoints
# - parse configurations from `self.cfg`
# - setup optimizers and learning rate scheduler
# - set up preprocessors
# =======================================================================
def init(self, trainer_cfg: Optional[Dict[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# =================================================================
# - create tensors in memory if required
        # - create temporary variables needed for storage and computation
# =================================================================
def act(self, states: Mapping[str, jnp.ndarray], timestep: int, timesteps: int) -> jnp.ndarray:
"""Process the environment's states to make a decision (actions) using the main policies
:param states: Environment's states
:type states: dictionary of jnp.ndarray
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: jnp.ndarray
"""
# ======================================
# - sample random actions if required or
# sample and return agent's actions
# ======================================
def record_transition(self,
states: Mapping[str, jnp.ndarray],
actions: Mapping[str, jnp.ndarray],
rewards: Mapping[str, jnp.ndarray],
next_states: Mapping[str, jnp.ndarray],
terminated: Mapping[str, jnp.ndarray],
truncated: Mapping[str, jnp.ndarray],
infos: Mapping[str, Any],
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: dictionary of jnp.ndarray
:param actions: Actions taken by the agent
:type actions: dictionary of jnp.ndarray
:param rewards: Instant rewards achieved by the current actions
:type rewards: dictionary of jnp.ndarray
:param next_states: Next observations/states of the environment
:type next_states: dictionary of jnp.ndarray
:param terminated: Signals to indicate that episodes have terminated
:type terminated: dictionary of jnp.ndarray
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: dictionary of jnp.ndarray
:param infos: Additional information about the environment
:type infos: dictionary of any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
# ========================================
# - record agent's specific data in memory
# ========================================
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# =====================================
# - call `self.update(...)` if required
# =====================================
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# =====================================
# - call `self.update(...)` if required
# =====================================
# call parent's method for checkpointing and TensorBoard writing
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# ===================================================
# - implement algorithm's update step
# - record tracking data using `self.track_data(...)`
# ===================================================
# [end-multi-agent-base-class-jax]
| 16,574 | Python | 44.661157 | 135 | 0.556715 |
Toni-SM/skrl/docs/source/snippets/wrapping.py | # [pytorch-start-omniverse-isaacgym]
# import the environment wrapper and loader
from skrl.envs.wrappers.torch import wrap_env
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
# load the environment
env = load_omniverse_isaacgym_env(task_name="Cartpole")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="omniverse-isaacgym")'
# [pytorch-end-omniverse-isaacgym]
# [jax-start-omniverse-isaacgym]
# import the environment wrapper and loader
from skrl.envs.wrappers.jax import wrap_env
from skrl.envs.loaders.jax import load_omniverse_isaacgym_env
# load the environment
env = load_omniverse_isaacgym_env(task_name="Cartpole")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="omniverse-isaacgym")'
# [jax-end-omniverse-isaacgym]
# [pytorch-start-omniverse-isaacgym-mt]
# import the environment wrapper and loader
from skrl.envs.wrappers.torch import wrap_env
from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
# load the multi-threaded environment
env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30)
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="omniverse-isaacgym")'
# [pytorch-end-omniverse-isaacgym-mt]
# [jax-start-omniverse-isaacgym-mt]
# import the environment wrapper and loader
from skrl.envs.wrappers.jax import wrap_env
from skrl.envs.loaders.jax import load_omniverse_isaacgym_env
# load the multi-threaded environment
env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30)
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="omniverse-isaacgym")'
# [jax-end-omniverse-isaacgym-mt]
# [pytorch-start-isaac-orbit]
# import the environment wrapper and loader
from skrl.envs.wrappers.torch import wrap_env
from skrl.envs.loaders.torch import load_isaac_orbit_env
# load the environment
env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaac-orbit")'
# [pytorch-end-isaac-orbit]
# [jax-start-isaac-orbit]
# import the environment wrapper and loader
from skrl.envs.wrappers.jax import wrap_env
from skrl.envs.loaders.jax import load_isaac_orbit_env
# load the environment
env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaac-orbit")'
# [jax-end-isaac-orbit]
# [pytorch-start-isaacgym-preview4-make]
import isaacgymenvs
# import the environment wrapper
from skrl.envs.wrappers.torch import wrap_env
# create/load the environment using the easy-to-use API from NVIDIA
env = isaacgymenvs.make(seed=0,
task="Cartpole",
num_envs=512,
sim_device="cuda:0",
rl_device="cuda:0",
graphics_device_id=0,
headless=False)
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview4")'
# [pytorch-end-isaacgym-preview4-make]
# [jax-start-isaacgym-preview4-make]
import isaacgymenvs
# import the environment wrapper
from skrl.envs.wrappers.jax import wrap_env
# create/load the environment using the easy-to-use API from NVIDIA
env = isaacgymenvs.make(seed=0,
task="Cartpole",
num_envs=512,
sim_device="cuda:0",
rl_device="cuda:0",
graphics_device_id=0,
headless=False)
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview4")'
# [jax-end-isaacgym-preview4-make]
# [pytorch-start-isaacgym-preview4]
# import the environment wrapper and loader
from skrl.envs.wrappers.torch import wrap_env
from skrl.envs.loaders.torch import load_isaacgym_env_preview4
# load the environment
env = load_isaacgym_env_preview4(task_name="Cartpole")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview4")'
# [pytorch-end-isaacgym-preview4]
# [jax-start-isaacgym-preview4]
# import the environment wrapper and loader
from skrl.envs.wrappers.jax import wrap_env
from skrl.envs.loaders.jax import load_isaacgym_env_preview4
# load the environment
env = load_isaacgym_env_preview4(task_name="Cartpole")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview4")'
# [jax-end-isaacgym-preview4]
# [pytorch-start-isaacgym-preview3]
# import the environment wrapper and loader
from skrl.envs.wrappers.torch import wrap_env
from skrl.envs.loaders.torch import load_isaacgym_env_preview3
# load the environment
env = load_isaacgym_env_preview3(task_name="Cartpole")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview3")'
# [pytorch-end-isaacgym-preview3]
# [jax-start-isaacgym-preview3]
# import the environment wrapper and loader
from skrl.envs.wrappers.jax import wrap_env
from skrl.envs.loaders.jax import load_isaacgym_env_preview3
# load the environment
env = load_isaacgym_env_preview3(task_name="Cartpole")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview3")'
# [jax-end-isaacgym-preview3]
# [pytorch-start-isaacgym-preview2]
# import the environment wrapper and loader
from skrl.envs.wrappers.torch import wrap_env
from skrl.envs.loaders.torch import load_isaacgym_env_preview2
# load the environment
env = load_isaacgym_env_preview2(task_name="Cartpole")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview2")'
# [pytorch-end-isaacgym-preview2]
# [jax-start-isaacgym-preview2]
# import the environment wrapper and loader
from skrl.envs.wrappers.jax import wrap_env
from skrl.envs.loaders.jax import load_isaacgym_env_preview2
# load the environment
env = load_isaacgym_env_preview2(task_name="Cartpole")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview2")'
# [jax-end-isaacgym-preview2]
# [pytorch-start-gym]
# import the environment wrapper and gym
from skrl.envs.wrappers.torch import wrap_env
import gym
# load the environment
env = gym.make('Pendulum-v1')
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gym")'
# [pytorch-end-gym]
# [jax-start-gym]
# import the environment wrapper and gym
from skrl.envs.wrappers.jax import wrap_env
import gym
# load the environment
env = gym.make('Pendulum-v1')
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gym")'
# [jax-end-gym]
# [pytorch-start-gym-vectorized]
# import the environment wrapper and gym
from skrl.envs.wrappers.torch import wrap_env
import gym
# load a vectorized environment
env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False)
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gym")'
# [pytorch-end-gym-vectorized]
# [jax-start-gym-vectorized]
# import the environment wrapper and gym
from skrl.envs.wrappers.jax import wrap_env
import gym
# load a vectorized environment
env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False)
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gym")'
# [jax-end-gym-vectorized]
# [pytorch-start-gymnasium]
# import the environment wrapper and gymnasium
from skrl.envs.wrappers.torch import wrap_env
import gymnasium as gym
# load the environment
env = gym.make('Pendulum-v1')
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")'
# [pytorch-end-gymnasium]
# [jax-start-gymnasium]
# import the environment wrapper and gymnasium
from skrl.envs.wrappers.jax import wrap_env
import gymnasium as gym
# load the environment
env = gym.make('Pendulum-v1')
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")'
# [jax-end-gymnasium]
# [pytorch-start-gymnasium-vectorized]
# import the environment wrapper and gymnasium
from skrl.envs.wrappers.torch import wrap_env
import gymnasium as gym
# load a vectorized environment
env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False)
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")'
# [pytorch-end-gymnasium-vectorized]
# [jax-start-gymnasium-vectorized]
# import the environment wrapper and gymnasium
from skrl.envs.wrappers.jax import wrap_env
import gymnasium as gym
# load a vectorized environment
env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False)
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")'
# [jax-end-gymnasium-vectorized]
# [pytorch-start-shimmy]
# import the environment wrapper and gymnasium
from skrl.envs.wrappers.torch import wrap_env
import gymnasium as gym
# load the environment (API conversion)
env = gym.make("ALE/Pong-v5")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")'
# [pytorch-end-shimmy]
# [jax-start-shimmy]
# import the environment wrapper and gymnasium
from skrl.envs.wrappers.jax import wrap_env
import gymnasium as gym
# load the environment (API conversion)
env = gym.make("ALE/Pong-v5")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")'
# [jax-end-shimmy]
# [pytorch-start-deepmind]
# import the environment wrapper and the deepmind suite
from skrl.envs.wrappers.torch import wrap_env
from dm_control import suite
# load the environment
env = suite.load(domain_name="cartpole", task_name="swingup")
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="dm")'
# [pytorch-end-deepmind]
# [pytorch-start-robosuite]
# import the environment wrapper
from skrl.envs.wrappers.torch import wrap_env
# import the robosuite wrapper
import robosuite
from robosuite.controllers import load_controller_config
# load the environment
controller_config = load_controller_config(default_controller="OSC_POSE")
env = robosuite.make("TwoArmLift",
robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot
gripper_types="default", # use default grippers per robot arm
controller_configs=controller_config, # each arm is controlled using OSC
env_configuration="single-arm-opposed", # (two-arm envs only) arms face each other
has_renderer=True, # on-screen rendering
render_camera="frontview", # visualize the "frontview" camera
has_offscreen_renderer=False, # no off-screen rendering
control_freq=20, # 20 hz control for applied actions
horizon=200, # each episode terminates after 200 steps
use_object_obs=True, # provide object observations to agent
use_camera_obs=False, # don't provide image observations to agent
reward_shaping=True) # use a dense reward signal for learning
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="robosuite")'
# [pytorch-end-robosuite]
# [start-bidexhands-torch]
# import the environment wrapper and loader
from skrl.envs.wrappers.torch import wrap_env
from skrl.envs.loaders.torch import load_bidexhands_env
# load the environment
env = load_bidexhands_env(task_name="ShadowHandOver")
# wrap the environment
env = wrap_env(env, wrapper="bidexhands")
# [end-bidexhands-torch]
# [start-bidexhands-jax]
# import the environment wrapper and loader
from skrl.envs.wrappers.jax import wrap_env
from skrl.envs.loaders.jax import load_bidexhands_env
# load the environment
env = load_bidexhands_env(task_name="ShadowHandOver")
# wrap the environment
env = wrap_env(env, wrapper="bidexhands")
# [end-bidexhands-jax]
# [start-pettingzoo-torch]
# import the environment wrapper
from skrl.envs.wrappers.torch import wrap_env
# import a PettingZoo environment
from pettingzoo.sisl import multiwalker_v9
# load the environment
env = multiwalker_v9.parallel_env()
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="pettingzoo")'
# [end-pettingzoo-torch]
# [start-pettingzoo-jax]
# import the environment wrapper
from skrl.envs.wrappers.jax import wrap_env
# import a PettingZoo environment
from pettingzoo.sisl import multiwalker_v9
# load the environment
env = multiwalker_v9.parallel_env()
# wrap the environment
env = wrap_env(env) # or 'env = wrap_env(env, wrapper="pettingzoo")'
# [end-pettingzoo-jax]
| 12,845 | Python | 29.440758 | 104 | 0.712028 |
Toni-SM/skrl/docs/source/snippets/trainer.py | # [pytorch-start-base]
from typing import Union, List, Optional
import copy
from skrl.envs.wrappers.torch import Wrapper
from skrl.agents.torch import Agent
from skrl.trainers.torch import Trainer
CUSTOM_DEFAULT_CONFIG = {
"timesteps": 100000, # number of timesteps to train for
"headless": False, # whether to use headless mode (no rendering)
"disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY
"close_environment_at_exit": True, # whether to close the environment on normal program termination
}
class CustomTrainer(Trainer):
def __init__(self,
env: Wrapper,
agents: Union[Agent, List[Agent], List[List[Agent]]],
agents_scope: Optional[List[int]] = None,
cfg: Optional[dict] = None) -> None:
"""
:param env: Environment to train on
:type env: skrl.envs.wrappers.torch.Wrapper
:param agents: Agents to train
:type agents: Union[Agent, List[Agent]]
:param agents_scope: Number of environments for each agent to train on (default: [])
:type agents_scope: tuple or list of integers
:param cfg: Configuration dictionary
:type cfg: dict, optional
"""
_cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
agents_scope = agents_scope if agents_scope is not None else []
super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
# ================================
# - init agents
# ================================
def train(self) -> None:
"""Train the agents
"""
# ================================
# - run training loop
# + call agents.pre_interaction(...)
# + compute actions using agents.act(...)
# + step environment using env.step(...)
# + render scene using env.render(...)
# + record environment transition in memory using agents.record_transition(...)
# + call agents.post_interaction(...)
# + reset environment using env.reset(...)
# ================================
def eval(self) -> None:
"""Evaluate the agents
"""
# ================================
# - run evaluation loop
# + compute actions using agents.act(...)
# + step environment using env.step(...)
# + render scene using env.render(...)
# + call agents.post_interaction(...) parent method to write data to TensorBoard
# + reset environment using env.reset(...)
# ================================
# [pytorch-end-base]
# [jax-start-base]
from typing import Union, List, Optional
import copy
from skrl.envs.wrappers.jax import Wrapper
from skrl.agents.jax import Agent
from skrl.trainers.jax import Trainer
CUSTOM_DEFAULT_CONFIG = {
"timesteps": 100000, # number of timesteps to train for
"headless": False, # whether to use headless mode (no rendering)
"disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY
"close_environment_at_exit": True, # whether to close the environment on normal program termination
}
class CustomTrainer(Trainer):
def __init__(self,
env: Wrapper,
agents: Union[Agent, List[Agent], List[List[Agent]]],
agents_scope: Optional[List[int]] = None,
cfg: Optional[dict] = None) -> None:
"""
:param env: Environment to train on
:type env: skrl.envs.wrappers.jax.Wrapper
:param agents: Agents to train
:type agents: Union[Agent, List[Agent]]
:param agents_scope: Number of environments for each agent to train on (default: [])
:type agents_scope: tuple or list of integers
:param cfg: Configuration dictionary
:type cfg: dict, optional
"""
_cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
agents_scope = agents_scope if agents_scope is not None else []
super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
# ================================
# - init agents
# ================================
def train(self) -> None:
"""Train the agents
"""
# ================================
# - run training loop
# + call agents.pre_interaction(...)
# + compute actions using agents.act(...)
# + step environment using env.step(...)
# + render scene using env.render(...)
# + record environment transition in memory using agents.record_transition(...)
# + call agents.post_interaction(...)
# + reset environment using env.reset(...)
# ================================
def eval(self) -> None:
"""Evaluate the agents
"""
# ================================
# - run evaluation loop
# + compute actions using agents.act(...)
# + step environment using env.step(...)
# + render scene using env.render(...)
# + call agents.post_interaction(...) parent method to write data to TensorBoard
# + reset environment using env.reset(...)
# ================================
# [jax-end-base]
# =============================================================================
# [pytorch-start-sequential]
from skrl.trainers.torch import SequentialTrainer
# assuming there is an environment called 'env'
# and an agent or a list of agents called 'agents'
# create a sequential trainer
cfg = {"timesteps": 50000, "headless": False}
trainer = SequentialTrainer(env=env, agents=agents, cfg=cfg)
# train the agent(s)
trainer.train()
# evaluate the agent(s)
trainer.eval()
# [pytorch-end-sequential]
# [jax-start-sequential]
from skrl.trainers.jax import SequentialTrainer
# assuming there is an environment called 'env'
# and an agent or a list of agents called 'agents'
# create a sequential trainer
cfg = {"timesteps": 50000, "headless": False}
trainer = SequentialTrainer(env=env, agents=agents, cfg=cfg)
# train the agent(s)
trainer.train()
# evaluate the agent(s)
trainer.eval()
# [jax-end-sequential]
# =============================================================================
# [pytorch-start-parallel]
from skrl.trainers.torch import ParallelTrainer
# assuming there is an environment called 'env'
# and an agent or a list of agents called 'agents'
# create a parallel trainer
cfg = {"timesteps": 50000, "headless": False}
trainer = ParallelTrainer(env=env, agents=agents, cfg=cfg)
# train the agent(s)
trainer.train()
# evaluate the agent(s)
trainer.eval()
# [pytorch-end-parallel]
# =============================================================================
# [pytorch-start-step]
from skrl.trainers.torch import StepTrainer
# assuming there is an environment called 'env'
# and an agent or a list of agents called 'agents'
# create a step trainer
cfg = {"timesteps": 50000, "headless": False}
trainer = StepTrainer(env=env, agents=agents, cfg=cfg)
# train the agent(s)
for timestep in range(cfg["timesteps"]):
trainer.train(timestep=timestep)
# evaluate the agent(s)
for timestep in range(cfg["timesteps"]):
trainer.eval(timestep=timestep)
# [pytorch-end-step]
# [jax-start-step]
from skrl.trainers.jax import StepTrainer
# assuming there is an environment called 'env'
# and an agent or a list of agents called 'agents'
# create a step trainer
cfg = {"timesteps": 50000, "headless": False}
trainer = StepTrainer(env=env, agents=agents, cfg=cfg)
# train the agent(s)
for timestep in range(cfg["timesteps"]):
trainer.train(timestep=timestep)
# evaluate the agent(s)
for timestep in range(cfg["timesteps"]):
trainer.eval(timestep=timestep)
# [jax-end-step]
# =============================================================================
# [pytorch-start-manual-training]
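# a minimal manual training loop sketch (not tied to a specific agent):
# it assumes there is an environment named 'env' and an agent named 'agent',
# and mirrors the steps performed by the library's trainers
import torch

timesteps = 1000
states, infos = env.reset()
for timestep in range(timesteps):
    # pre-interaction
    agent.pre_interaction(timestep=timestep, timesteps=timesteps)
    # compute actions (take the actions from the agent's output)
    with torch.no_grad():
        actions = agent.act(states, timestep=timestep, timesteps=timesteps)[0]
    # step the environment
    next_states, rewards, terminated, truncated, infos = env.step(actions)
    # render the environment
    env.render()
    # record the environment transition in memory
    with torch.no_grad():
        agent.record_transition(states=states,
                                actions=actions,
                                rewards=rewards,
                                next_states=next_states,
                                terminated=terminated,
                                truncated=truncated,
                                infos=infos,
                                timestep=timestep,
                                timesteps=timesteps)
    # post-interaction (e.g. agent update, checkpointing and TensorBoard writing)
    agent.post_interaction(timestep=timestep, timesteps=timesteps)
    # check for termination/truncation
    if terminated.any() or truncated.any():
        states, infos = env.reset()
    else:
        states = next_states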
# [pytorch-end-manual-training]
# [pytorch-start-manual-evaluation]
# assuming there is an environment named 'env'
# and an agent named 'agent' (or a state-preprocessor and a policy)
import torch
states, infos = env.reset()
for i in range(1000):
# state-preprocessor + policy
with torch.no_grad():
states = state_preprocessor(states)
actions = policy.act({"states": states})[0]
# step the environment
next_states, rewards, terminated, truncated, infos = env.step(actions)
# render the environment
env.render()
# check for termination/truncation
if terminated.any() or truncated.any():
states, infos = env.reset()
else:
states = next_states
# [pytorch-end-manual-evaluation]
# [jax-start-manual-training]
# [jax-end-manual-training]
# [jax-start-manual-evaluation]
# [jax-end-manual-evaluation]
| 8,996 | Python | 31.132143 | 105 | 0.587372 |
Toni-SM/skrl/docs/source/api/resources.rst | Resources
=========
.. toctree::
:hidden:
Noises <resources/noises>
Preprocessors <resources/preprocessors>
Learning rate schedulers <resources/schedulers>
Optimizers <resources/optimizers>
This section groups a variety of components that may be used to improve the agents' performance.
.. raw:: html
<br><hr>
Available resources are :doc:`noises <resources/noises>`, input :doc:`preprocessors <resources/preprocessors>`, learning rate :doc:`schedulers <resources/schedulers>` and :doc:`optimizers <resources/optimizers>` (this last one only for JAX).
.. list-table::
:header-rows: 1
* - Noises
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Gaussian <resources/noises/gaussian>` noise
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Ornstein-Uhlenbeck <resources/noises/ornstein_uhlenbeck>` noise |_2|
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
.. list-table::
:header-rows: 1
* - Preprocessors
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Running standard scaler <resources/preprocessors/running_standard_scaler>` |_4|
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
.. list-table::
:header-rows: 1
* - Learning rate schedulers
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`KL Adaptive <resources/schedulers/kl_adaptive>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
.. list-table::
:header-rows: 1
* - Optimizers
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Adam <resources/optimizers/adam>`\ |_5| |_5| |_5| |_5| |_5| |_5| |_3|
- .. centered:: :math:`\scriptscriptstyle \texttt{PyTorch}`
- .. centered:: :math:`\blacksquare`
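For reference, the following is a minimal sketch (PyTorch-based, assuming a wrapped environment ``env`` and ``cfg`` as a copy of the DDPG default configuration) of how some of these resources are typically attached to an agent configuration. The KL Adaptive learning rate scheduler, in contrast, is commonly used with PPO-like agents.

.. code-block:: python

    from skrl.resources.noises.torch import GaussianNoise
    from skrl.resources.preprocessors.torch import RunningStandardScaler

    # exploration noise
    cfg["exploration"]["noise"] = GaussianNoise(mean=0, std=0.2, device=env.device)

    # input (state) preprocessor
    cfg["state_preprocessor"] = RunningStandardScaler
    cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": env.device}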
| 1,974 | reStructuredText | 30.854838 | 241 | 0.591692 |
Toni-SM/skrl/docs/source/api/multi_agents.rst | Multi-agents
============
.. toctree::
:hidden:
IPPO <multi_agents/ippo>
MAPPO <multi_agents/mappo>
Multi-agents are autonomous entities that interact with the environment to learn and improve their behavior. Multi-agents' goal is to learn optimal policies, which are mappings between states and actions that maximize the cumulative reward received from the environment over time.
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Multi-agents
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Independent Proximal Policy Optimization <multi_agents/ippo>` (**IPPO**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Multi-Agent Proximal Policy Optimization <multi_agents/mappo>` (**MAPPO**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
Base class
----------
.. note::
This is the base class for all multi-agents and provides only basic functionality that is not tied to any implementation of the optimization algorithms.
**It is not intended to be used directly**.
.. raw:: html
<br>
Basic inheritance usage
^^^^^^^^^^^^^^^^^^^^^^^
.. tabs::
.. tab:: Inheritance
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../snippets/multi_agent.py
:language: python
:start-after: [start-multi-agent-base-class-torch]
:end-before: [end-multi-agent-base-class-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../snippets/multi_agent.py
:language: python
:start-after: [start-multi-agent-base-class-jax]
:end-before: [end-multi-agent-base-class-jax]
.. raw:: html
<br>
API (PyTorch)
^^^^^^^^^^^^^
.. autoclass:: skrl.multi_agents.torch.base.MultiAgent
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _update, _empty_preprocessor, _get_internal_value, _as_dict
:members:
.. automethod:: __init__
.. automethod:: __str__
.. raw:: html
<br>
API (JAX)
^^^^^^^^^
.. autoclass:: skrl.multi_agents.jax.base.MultiAgent
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _update, _empty_preprocessor, _get_internal_value, _as_dict
:members:
.. automethod:: __init__
.. automethod:: __str__
| 2,510 | reStructuredText | 24.886598 | 286 | 0.586853 |
Toni-SM/skrl/docs/source/api/models.rst | Models
======
.. toctree::
:hidden:
Tabular <models/tabular>
Categorical <models/categorical>
Multi-Categorical <models/multicategorical>
Gaussian <models/gaussian>
Multivariate Gaussian <models/multivariate_gaussian>
Deterministic <models/deterministic>
Shared model <models/shared_model>
Models (or agent models) refer to a representation of the agent's policy, value function, etc. that the agent uses to make decisions. Agents can have one or more models, and their parameters are adjusted by the optimization algorithms.
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Models
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Tabular model <models/tabular>` (discrete domain)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - :doc:`Categorical model <models/categorical>` (discrete domain)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Multi-Categorical model <models/multicategorical>` (discrete domain)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Gaussian model <models/gaussian>` (continuous domain)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Multivariate Gaussian model <models/multivariate_gaussian>` (continuous domain)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - :doc:`Deterministic model <models/deterministic>` (continuous domain)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Shared model <models/shared_model>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
Base class
----------
.. note::
This is the base class for all models in this module and provides only basic functionality that is not tied to any specific implementation.
**It is not intended to be used directly**.
.. raw:: html
<br>
Mixin and inheritance
^^^^^^^^^^^^^^^^^^^^^
.. tabs::
.. tab:: Mixin
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../snippets/model_mixin.py
:language: python
:start-after: [start-mixin-torch]
:end-before: [end-mixin-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../snippets/model_mixin.py
:language: python
:start-after: [start-mixin-jax]
:end-before: [end-mixin-jax]
.. tab:: Model inheritance
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../snippets/model_mixin.py
:language: python
:start-after: [start-model-torch]
:end-before: [end-model-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../snippets/model_mixin.py
:language: python
:start-after: [start-model-jax]
:end-before: [end-model-jax]
.. raw:: html
<br>
.. _models_base_class:
API (PyTorch)
^^^^^^^^^^^^^
.. autoclass:: skrl.models.torch.base.Model
:undoc-members:
:show-inheritance:
:private-members: _get_space_size
:members:
.. automethod:: __init__
.. py:property:: device
Device to be used for the computations
.. py:property:: observation_space
Observation/state space. It is a replica of the class constructor parameter of the same name
.. py:property:: action_space
Action space. It is a replica of the class constructor parameter of the same name
.. py:property:: num_observations
Number of elements in the observation/state space
.. py:property:: num_actions
Number of elements in the action space
.. raw:: html
<br>
API (JAX)
^^^^^^^^^
.. autoclass:: skrl.models.jax.base.Model
:undoc-members:
:show-inheritance:
:private-members: _get_space_size
:members:
.. automethod:: __init__
.. py:property:: device
Device to be used for the computations
.. py:property:: observation_space
Observation/state space. It is a replica of the class constructor parameter of the same name
.. py:property:: action_space
Action space. It is a replica of the class constructor parameter of the same name
.. py:property:: num_observations
Number of elements in the observation/state space
.. py:property:: num_actions
Number of elements in the action space
| 4,733 | reStructuredText | 26.364162 | 235 | 0.587788 |
Toni-SM/skrl/docs/source/api/memories.rst | Memories
========
.. toctree::
:hidden:
Random <memories/random>
Memories are storage components that allow agents to collect and reuse current or past experiences of their interaction with the environment, as well as other types of information.
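As a quick orientation, the following is a minimal sketch (PyTorch-based, with illustrative observation/action sizes) of how a memory is typically created, filled and sampled:

.. code-block:: python

    import torch

    from skrl.memories.torch import RandomMemory

    # instantiate a memory (rollout buffer) for 4 parallel environments
    memory = RandomMemory(memory_size=1000, num_envs=4, device="cpu")

    # create the internal tensors
    memory.create_tensor(name="states", size=64, dtype=torch.float32)
    memory.create_tensor(name="actions", size=8, dtype=torch.float32)
    memory.create_tensor(name="rewards", size=1, dtype=torch.float32)

    # record a transition (one sample per environment)
    memory.add_samples(states=torch.rand(4, 64), actions=torch.rand(4, 8), rewards=torch.rand(4, 1))

    # sample a mini-batch of 32 transitions
    states, actions, rewards = memory.sample(names=["states", "actions", "rewards"], batch_size=32)[0]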
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Memories
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Random memory <memories/random>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
Base class
----------
.. note::
This is the base class for all the other classes in this module.
It provides the basic functionality for the other classes.
**It is not intended to be used directly**.
.. raw:: html
<br>
Basic inheritance usage
^^^^^^^^^^^^^^^^^^^^^^^
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../snippets/memories.py
:language: python
:start-after: [start-base-class-torch]
:end-before: [end-base-class-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../snippets/memories.py
:language: python
:start-after: [start-base-class-jax]
:end-before: [end-base-class-jax]
.. raw:: html
<br>
API (PyTorch)
^^^^^^^^^^^^^
.. autoclass:: skrl.memories.torch.base.Memory
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. automethod:: __len__
.. raw:: html
<br>
API (JAX)
^^^^^^^^^
.. autoclass:: skrl.memories.jax.base.Memory
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. automethod:: __len__
| 1,709 | reStructuredText | 18.883721 | 175 | 0.564658 |
Toni-SM/skrl/docs/source/api/trainers.rst | Trainers
========
.. toctree::
:hidden:
Sequential <trainers/sequential>
Parallel <trainers/parallel>
Step <trainers/step>
Manual training <trainers/manual>
Trainers are responsible for orchestrating and managing the training/evaluation of agents and their interactions with the environment.
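For example, a minimal sketch (assuming an already wrapped environment ``env`` and an instantiated agent ``agent``) showing how a trainer drives both training and evaluation:

.. code-block:: python

    from skrl.trainers.torch import SequentialTrainer

    # instantiate the trainer and its configuration
    cfg_trainer = {"timesteps": 100000, "headless": True}
    trainer = SequentialTrainer(env=env, agents=agent, cfg=cfg_trainer)

    # train, then evaluate, the agent
    trainer.train()
    trainer.eval()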
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Trainers
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Sequential trainer <trainers/sequential>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Parallel trainer <trainers/parallel>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - :doc:`Step trainer <trainers/step>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Manual training <trainers/manual>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
Base class
----------
.. note::
This is the base class for all the other classes in this module.
It provides the basic functionality for the other classes.
**It is not intended to be used directly**.
.. raw:: html
<br>
Basic inheritance usage
^^^^^^^^^^^^^^^^^^^^^^^
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../snippets/trainer.py
:language: python
:start-after: [pytorch-start-base]
:end-before: [pytorch-end-base]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../snippets/trainer.py
:language: python
:start-after: [jax-start-base]
:end-before: [jax-end-base]
.. raw:: html
<br>
API (PyTorch)
^^^^^^^^^^^^^
.. autoclass:: skrl.trainers.torch.base.Trainer
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _setup_agents
:members:
.. automethod:: __init__
.. automethod:: __str__
.. raw:: html
<br>
API (JAX)
^^^^^^^^^
.. autoclass:: skrl.trainers.jax.base.Trainer
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _setup_agents
:members:
.. automethod:: __init__
.. automethod:: __str__
| 2,279 | reStructuredText | 21.352941 | 134 | 0.573936 |
Toni-SM/skrl/docs/source/api/envs.rst | Environments
============
.. toctree::
:hidden:
Wrapping (single-agent) <envs/wrapping>
Wrapping (multi-agents) <envs/multi_agents_wrapping>
Isaac Gym environments <envs/isaac_gym>
Isaac Orbit environments <envs/isaac_orbit>
Omniverse Isaac Gym environments <envs/omniverse_isaac_gym>
The environment plays a fundamental and crucial role in defining the RL setup. It is the place where the agent interacts, and it is responsible for providing the agent with information about its current state, as well as the rewards/penalties associated with each action.
.. raw:: html
<br><hr>
This section describes how to load environments from NVIDIA Isaac Gym, Isaac Orbit and Omniverse Isaac Gym with a single function call.
In addition, it covers how to :doc:`wrap single-agent <envs/wrapping>` and :doc:`multi-agent <envs/multi_agents_wrapping>` RL environment interfaces.
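For example, a minimal sketch (PyTorch-based, using the Isaac Gym preview 4 loader) of loading and wrapping an environment:

.. code-block:: python

    from skrl.envs.loaders.torch import load_isaacgym_env_preview4
    from skrl.envs.wrappers.torch import wrap_env

    # load and wrap the environment (the wrapper type is inferred automatically)
    env = load_isaacgym_env_preview4(task_name="Cartpole")
    env = wrap_env(env)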
.. list-table::
:header-rows: 1
* - Loaders
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Isaac Gym environments <envs/isaac_gym>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Isaac Orbit environments <envs/isaac_orbit>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Omniverse Isaac Gym environments <envs/omniverse_isaac_gym>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
.. list-table::
:header-rows: 1
* - Wrappers
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Bi-DexHands
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - DeepMind
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - Gym
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Gymnasium
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Isaac Gym (previews)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Isaac Orbit
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Omniverse Isaac Gym |_5| |_5| |_5| |_5| |_2|
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - PettingZoo
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - robosuite
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - Shimmy
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
| 2,701 | reStructuredText | 35.026666 | 271 | 0.599408 |
Toni-SM/skrl/docs/source/api/utils.rst | Utils and configurations
========================
.. toctree::
:hidden:
ML frameworks configuration <config/frameworks>
Random seed <utils/seed>
Memory and Tensorboard file post-processing <utils/postprocessing>
Model instantiators <utils/model_instantiators>
Hugging Face integration <utils/huggingface>
Isaac Gym utils <utils/isaacgym_utils>
Omniverse Isaac Gym utils <utils/omniverse_isaacgym_utils>
A set of utilities and configurations for managing an RL setup is provided as part of the library.
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Configurations
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`ML frameworks <config/frameworks>` configuration |_5| |_5| |_5| |_5| |_5| |_2|
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
.. list-table::
:header-rows: 1
* - Utils
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Random seed <utils/seed>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Memory and Tensorboard :doc:`file post-processing <utils/postprocessing>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Model instantiators <utils/model_instantiators>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Hugging Face integration <utils/huggingface>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Isaac Gym utils <utils/isaacgym_utils>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Omniverse Isaac Gym utils <utils/omniverse_isaacgym_utils>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
| 1,902 | reStructuredText | 33.599999 | 98 | 0.599895 |
Toni-SM/skrl/docs/source/api/agents.rst | Agents
======
.. toctree::
:hidden:
A2C <agents/a2c>
AMP <agents/amp>
CEM <agents/cem>
DDPG <agents/ddpg>
DDQN <agents/ddqn>
DQN <agents/dqn>
PPO <agents/ppo>
Q-learning <agents/q_learning>
RPO <agents/rpo>
SAC <agents/sac>
SARSA <agents/sarsa>
TD3 <agents/td3>
TRPO <agents/trpo>
Agents are autonomous entities that interact with the environment to learn and improve their behavior. Agents' goal is to learn an optimal policy, which is a correspondence between states and actions that maximizes the cumulative reward received from the environment over time.
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Agents
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Advantage Actor Critic <agents/a2c>` (**A2C**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Adversarial Motion Priors <agents/amp>` (**AMP**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - :doc:`Cross-Entropy Method <agents/cem>` (**CEM**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Deep Deterministic Policy Gradient <agents/ddpg>` (**DDPG**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Double Deep Q-Network <agents/ddqn>` (**DDQN**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Deep Q-Network <agents/dqn>` (**DQN**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Proximal Policy Optimization <agents/ppo>` (**PPO**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Q-learning <agents/q_learning>` (**Q-learning**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - :doc:`Robust Policy Optimization <agents/rpo>` (**RPO**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Soft Actor-Critic <agents/sac>` (**SAC**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`State Action Reward State Action <agents/sarsa>` (**SARSA**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - :doc:`Twin-Delayed DDPG <agents/td3>` (**TD3**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Trust Region Policy Optimization <agents/trpo>` (**TRPO**)
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
Base class
----------
.. note::
This is the base class for all agents in this module and provides only basic functionality that is not tied to any implementation of the optimization algorithms.
**It is not intended to be used directly**.
.. raw:: html
<br>
Basic inheritance usage
^^^^^^^^^^^^^^^^^^^^^^^
.. tabs::
.. tab:: Inheritance
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../snippets/agent.py
:language: python
:start-after: [start-agent-base-class-torch]
:end-before: [end-agent-base-class-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../snippets/agent.py
:language: python
:start-after: [start-agent-base-class-jax]
:end-before: [end-agent-base-class-jax]
.. raw:: html
<br>
API (PyTorch)
^^^^^^^^^^^^^
.. autoclass:: skrl.agents.torch.base.Agent
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _update, _empty_preprocessor, _get_internal_value
:members:
.. automethod:: __init__
.. automethod:: __str__
.. raw:: html
<br>
API (JAX)
^^^^^^^^^
.. autoclass:: skrl.agents.jax.base.Agent
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _update, _empty_preprocessor, _get_internal_value
:members:
.. automethod:: __init__
.. automethod:: __str__
| 4,230 | reStructuredText | 29.007092 | 277 | 0.564775 |
Toni-SM/skrl/docs/source/api/envs/omniverse_isaac_gym.rst | Omniverse Isaac Gym environments
================================
.. image:: ../../_static/imgs/example_omniverse_isaacgym.png
:width: 100%
:align: center
:alt: Omniverse Isaac Gym environments
.. raw:: html
<br><br><hr>
Environments
------------
The repository https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs provides the example reinforcement learning environments for Omniverse Isaac Gym.
These environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments (see OmniIsaacGymEnvs's `configuration-and-command-line-arguments <https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs#configuration-and-command-line-arguments>`_) or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`).
Additionally, multi-threaded environments can be loaded. These are designed to isolate the RL policy in a new thread, separate from the main simulation and rendering thread. Read more about it in the OmniIsaacGymEnvs framework documentation: `Multi-Threaded Environment Wrapper <https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs/blob/220d34c6b68d3f7518c4aa008ae009d13cc60c03/docs/framework.md#multi-threaded-environment-wrapper>`_.
.. note::
The command line arguments have priority over the function parameters.
.. note::
Only the configuration related to the environment will be used. The configuration related to RL algorithms are discarded since they do not belong to this library.
.. note::
Omniverse Isaac Gym environments implement functionality to get their configuration from the command line. Because of this, setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`python script.py headless=True`.
.. raw:: html
<br>
Usage
^^^^^
.. raw:: html
<br>
Common environments
"""""""""""""""""""
In this approach, the RL algorithm maintains the main execution loop.
.. tabs::
.. group-tab:: Function parameters
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-omniverse-isaac-gym-envs-parameters-torch]
:end-before: [end-omniverse-isaac-gym-envs-parameters-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-omniverse-isaac-gym-envs-parameters-jax]
:end-before: [end-omniverse-isaac-gym-envs-parameters-jax]
.. group-tab:: Command line arguments (priority)
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-omniverse-isaac-gym-envs-cli-torch]
:end-before: [end-omniverse-isaac-gym-envs-cli-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-omniverse-isaac-gym-envs-cli-jax]
:end-before: [end-omniverse-isaac-gym-envs-cli-jax]
Run the main script passing the configuration as command line arguments. For example:
.. code-block::
python main.py task=Cartpole
.. raw:: html
<br>
Multi-threaded environments
"""""""""""""""""""""""""""
In this approach, the RL algorithm is executed on a secondary thread while the simulation and rendering is executed on the main thread.
.. tabs::
.. group-tab:: Function parameters
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 1, 4, 7, 12, 15
:start-after: [start-omniverse-isaac-gym-envs-multi-threaded-parameters-torch]
:end-before: [end-omniverse-isaac-gym-envs-multi-threaded-parameters-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 1, 4, 7, 12, 15
:start-after: [start-omniverse-isaac-gym-envs-multi-threaded-parameters-jax]
:end-before: [end-omniverse-isaac-gym-envs-multi-threaded-parameters-jax]
.. group-tab:: Command line arguments (priority)
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 1, 4, 7, 12, 15
:start-after: [start-omniverse-isaac-gym-envs-multi-threaded-cli-torch]
:end-before: [end-omniverse-isaac-gym-envs-multi-threaded-cli-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 1, 4, 7, 12, 15
:start-after: [start-omniverse-isaac-gym-envs-multi-threaded-cli-jax]
:end-before: [end-omniverse-isaac-gym-envs-multi-threaded-cli-jax]
Run the main script passing the configuration as command line arguments. For example:
.. code-block::
python main.py task=Cartpole
.. raw:: html
<br>
API
^^^
.. autofunction:: skrl.envs.loaders.torch.load_omniverse_isaacgym_env
| 6,034 | reStructuredText | 36.02454 | 488 | 0.606397 |
Toni-SM/skrl/docs/source/api/envs/isaac_orbit.rst | Isaac Orbit environments
========================
.. image:: ../../_static/imgs/example_isaac_orbit.png
:width: 100%
:align: center
:alt: Isaac Orbit environments
.. raw:: html
<br><br><hr>
Environments
------------
The repository https://github.com/NVIDIA-Omniverse/Orbit provides the example reinforcement learning environments for Isaac Orbit.
These environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments (see Isaac Orbit's `Running an RL environment <https://isaac-orbit.github.io/orbit/source/tutorials_envs/00_gym_env.html>`_) or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`).
.. note::
    The command line arguments have priority over the function parameters.
.. note::
    Isaac Orbit environments implement functionality to get their configuration from the command line. Setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`orbit -p script.py --headless`.
.. raw:: html
<br>
Usage
^^^^^
.. tabs::
.. tab:: Function parameters
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-orbit-envs-parameters-torch]
:end-before: [end-isaac-orbit-envs-parameters-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-orbit-envs-parameters-jax]
:end-before: [end-isaac-orbit-envs-parameters-jax]
.. tab:: Command line arguments (priority)
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-orbit-envs-cli-torch]
:end-before: [end-isaac-orbit-envs-cli-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-orbit-envs-cli-jax]
:end-before: [end-isaac-orbit-envs-cli-jax]
Run the main script passing the configuration as command line arguments. For example:
.. code-block::
orbit -p main.py --task Isaac-Cartpole-v0
.. raw:: html
<br>
API
^^^
.. autofunction:: skrl.envs.loaders.torch.load_isaac_orbit_env
| 3,041 | reStructuredText | 32.428571 | 448 | 0.591911 |
Toni-SM/skrl/docs/source/api/envs/multi_agents_wrapping.rst | :tocdepth: 3
Wrapping (multi-agents)
=======================
.. raw:: html
<br><hr>
This library works with a common API to interact with the following RL multi-agent environments:
* Farama `PettingZoo <https://pettingzoo.farama.org>`_ (parallel API)
* `Bi-DexHands <https://github.com/PKU-MARL/DexterousHands>`_
To operate with them and to support interoperability between these non-compatible interfaces, a **wrapping mechanism is provided** as shown in the diagram below
.. raw:: html
<br>
.. image:: ../../_static/imgs/multi_agent_wrapping-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Environment wrapping
.. image:: ../../_static/imgs/multi_agent_wrapping-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Environment wrapping
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: PettingZoo
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [start-pettingzoo-torch]
:end-before: [end-pettingzoo-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [start-pettingzoo-jax]
:end-before: [end-pettingzoo-jax]
.. tab:: Bi-DexHands
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [start-bidexhands-torch]
:end-before: [end-bidexhands-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [start-bidexhands-jax]
:end-before: [end-bidexhands-jax]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autofunction:: skrl.envs.wrappers.torch.wrap_env
.. raw:: html
<br>
API (JAX)
---------
.. autofunction:: skrl.envs.wrappers.jax.wrap_env
.. raw:: html
<br>
Internal API (PyTorch)
----------------------
.. autoclass:: skrl.envs.wrappers.torch.MultiAgentEnvWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. py:property:: device
The device used by the environment
If the wrapped environment does not have the ``device`` property, the value of this property will be ``"cuda:0"`` or ``"cpu"`` depending on the device availability
.. py:property:: possible_agents
A list of all possible_agents the environment could generate
.. autoclass:: skrl.envs.wrappers.torch.BiDexHandsWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.torch.PettingZooWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. raw:: html
<br>
Internal API (JAX)
------------------
.. autoclass:: skrl.envs.wrappers.jax.MultiAgentEnvWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. py:property:: device
The device used by the environment
If the wrapped environment does not have the ``device`` property, the value of this property will be ``"cuda:0"`` or ``"cpu"`` depending on the device availability
.. py:property:: possible_agents
A list of all possible_agents the environment could generate
.. autoclass:: skrl.envs.wrappers.jax.BiDexHandsWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.jax.PettingZooWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
| 3,917 | reStructuredText | 21.912281 | 171 | 0.581057 |
Toni-SM/skrl/docs/source/api/envs/isaac_gym.rst | Isaac Gym environments
======================
.. image:: ../../_static/imgs/example_isaacgym.png
:width: 100%
:align: center
   :alt: Isaac Gym environments
.. raw:: html
<br><br><hr>
Environments (preview 4)
------------------------
The repository https://github.com/NVIDIA-Omniverse/IsaacGymEnvs provides the example reinforcement learning environments for Isaac Gym (preview 4).
With the release of Isaac Gym (preview 4), NVIDIA developers provide an easy-to-use API for creating/loading preset vectorized environments (see IsaacGymEnvs's `creating-an-environment <https://github.com/NVIDIA-Omniverse/IsaacGymEnvs#creating-an-environment>`_).
.. tabs::
.. tab:: Easy-to-use API from NVIDIA
.. literalinclude:: ../../snippets/loaders.py
:language: python
:start-after: [start-isaac-gym-envs-preview-4-api]
:end-before: [end-isaac-gym-envs-preview-4-api]
Nevertheless, in order to maintain the loading style of previous versions, **skrl** provides its own implementation for loading such environments. The environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments (see IsaacGymEnvs's `configuration-and-command-line-arguments <https://github.com/NVIDIA-Omniverse/IsaacGymEnvs#configuration-and-command-line-arguments>`_) or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`).
.. note::
    Only the configuration related to the environment will be used. The configuration related to RL algorithms is discarded since it does not belong to this library.
.. note::
    Isaac Gym environments implement functionality to get their configuration from the command line. Setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`python script.py headless=True`.
.. raw:: html
<br>
Usage
^^^^^
.. tabs::
.. group-tab:: Function parameters
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-4-parameters-torch]
:end-before: [end-isaac-gym-envs-preview-4-parameters-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-4-parameters-jax]
:end-before: [end-isaac-gym-envs-preview-4-parameters-jax]
.. group-tab:: Command line arguments (priority)
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-4-cli-torch]
:end-before: [end-isaac-gym-envs-preview-4-cli-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-4-cli-jax]
:end-before: [end-isaac-gym-envs-preview-4-cli-jax]
Run the main script passing the configuration as command line arguments. For example:
.. code-block::
python main.py task=Cartpole
.. raw:: html
<br>
API
^^^
.. autofunction:: skrl.envs.loaders.torch.load_isaacgym_env_preview4
.. raw:: html
<br><hr>
Environments (preview 3)
------------------------
The repository https://github.com/NVIDIA-Omniverse/IsaacGymEnvs provides the example reinforcement learning environments for Isaac Gym (preview 3).
These environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments (see IsaacGymEnvs's `configuration-and-command-line-arguments <https://github.com/NVIDIA-Omniverse/IsaacGymEnvs#configuration-and-command-line-arguments>`_) or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`).
.. note::
    Only the configuration related to the environment will be used. The configuration related to RL algorithms is discarded since it does not belong to this library.
.. note::
    Isaac Gym environments implement functionality to get their configuration from the command line. Setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`python script.py headless=True`.
.. raw:: html
<br>
Usage
^^^^^
.. tabs::
.. group-tab:: Function parameters
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-3-parameters-torch]
:end-before: [end-isaac-gym-envs-preview-3-parameters-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-3-parameters-jax]
:end-before: [end-isaac-gym-envs-preview-3-parameters-jax]
.. group-tab:: Command line arguments (priority)
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-3-cli-torch]
:end-before: [end-isaac-gym-envs-preview-3-cli-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-3-cli-jax]
:end-before: [end-isaac-gym-envs-preview-3-cli-jax]
Run the main script passing the configuration as command line arguments. For example:
.. code-block::
python main.py task=Cartpole
.. raw:: html
<br>
API
^^^
.. autofunction:: skrl.envs.loaders.torch.load_isaacgym_env_preview3
.. raw:: html
<br><hr>
Environments (preview 2)
------------------------
The example reinforcement learning environments for Isaac Gym (preview 2) are located within the same package (in the :code:`python/rlgpu` directory).
These environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`).
.. note::
    Isaac Gym environments implement functionality to get their configuration from the command line. Setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`python script.py --headless`.
.. raw:: html
<br>
Usage
^^^^^
.. tabs::
.. group-tab:: Function parameters
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-2-parameters-torch]
:end-before: [end-isaac-gym-envs-preview-2-parameters-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-2-parameters-jax]
:end-before: [end-isaac-gym-envs-preview-2-parameters-jax]
.. group-tab:: Command line arguments (priority)
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-2-cli-torch]
:end-before: [end-isaac-gym-envs-preview-2-cli-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/loaders.py
:language: python
:emphasize-lines: 2, 5
:start-after: [start-isaac-gym-envs-preview-2-cli-jax]
:end-before: [end-isaac-gym-envs-preview-2-cli-jax]
Run the main script passing the configuration as command line arguments. For example:
.. code-block::
python main.py --task Cartpole
.. raw:: html
<br>
API
^^^
.. autofunction:: skrl.envs.loaders.torch.load_isaacgym_env_preview2
| 9,804 | reStructuredText | 36.140151 | 625 | 0.609037 |
Toni-SM/skrl/docs/source/api/envs/wrapping.rst | :tocdepth: 3
Wrapping (single-agent)
=======================
.. raw:: html
<br><hr>
This library works with a common API to interact with the following RL environments:
* OpenAI `Gym <https://www.gymlibrary.dev>`_ / Farama `Gymnasium <https://gymnasium.farama.org/>`_ (single and vectorized environments)
* `Farama Shimmy <https://shimmy.farama.org/>`_
* `DeepMind <https://github.com/deepmind/dm_env>`_
* `robosuite <https://robosuite.ai/>`_
* `NVIDIA Isaac Gym <https://developer.nvidia.com/isaac-gym>`_ (preview 2, 3 and 4)
* `NVIDIA Isaac Orbit <https://isaac-orbit.github.io/orbit/index.html>`_
* `NVIDIA Omniverse Isaac Gym <https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_gym_isaac_gym.html>`_
To operate with them and to support interoperability between these non-compatible interfaces, a **wrapping mechanism is provided** as shown in the diagram below
.. raw:: html
<br>
.. image:: ../../_static/imgs/wrapping-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Environment wrapping
.. image:: ../../_static/imgs/wrapping-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Environment wrapping
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: Omniverse Isaac Gym
.. tabs::
.. tab:: Common environment
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-omniverse-isaacgym]
:end-before: [pytorch-end-omniverse-isaacgym]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-omniverse-isaacgym]
:end-before: [jax-end-omniverse-isaacgym]
.. tab:: Multi-threaded environment
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-omniverse-isaacgym-mt]
:end-before: [pytorch-end-omniverse-isaacgym-mt]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-omniverse-isaacgym-mt]
:end-before: [jax-end-omniverse-isaacgym-mt]
.. tab:: Isaac Orbit
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-isaac-orbit]
:end-before: [pytorch-end-isaac-orbit]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-isaac-orbit]
:end-before: [jax-end-isaac-orbit]
.. tab:: Isaac Gym
.. tabs::
.. tab:: Preview 4 (isaacgymenvs.make)
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-isaacgym-preview4-make]
:end-before: [pytorch-end-isaacgym-preview4-make]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-isaacgym-preview4-make]
:end-before: [jax-end-isaacgym-preview4-make]
.. tab:: Preview 4
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-isaacgym-preview4]
:end-before: [pytorch-end-isaacgym-preview4]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-isaacgym-preview4]
:end-before: [jax-end-isaacgym-preview4]
.. tab:: Preview 3
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-isaacgym-preview3]
:end-before: [pytorch-end-isaacgym-preview3]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-isaacgym-preview3]
:end-before: [jax-end-isaacgym-preview3]
.. tab:: Preview 2
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-isaacgym-preview2]
:end-before: [pytorch-end-isaacgym-preview2]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-isaacgym-preview2]
:end-before: [jax-end-isaacgym-preview2]
.. tab:: Gym / Gymnasium
.. tabs::
.. tab:: Gym
.. tabs::
.. tab:: Single environment
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-gym]
:end-before: [pytorch-end-gym]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-gym]
:end-before: [jax-end-gym]
.. tab:: Vectorized environment
Visit the Gym documentation (`Vector <https://www.gymlibrary.dev/api/vector>`__) for more information about the creation and usage of vectorized environments
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-gym-vectorized]
:end-before: [pytorch-end-gym-vectorized]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-gym-vectorized]
:end-before: [jax-end-gym-vectorized]
.. tab:: Gymnasium
.. tabs::
.. tab:: Single environment
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-gymnasium]
:end-before: [pytorch-end-gymnasium]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-gymnasium]
:end-before: [jax-end-gymnasium]
.. tab:: Vectorized environment
Visit the Gymnasium documentation (`Vector <https://gymnasium.farama.org/api/vector>`__) for more information about the creation and usage of vectorized environments
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-gymnasium-vectorized]
:end-before: [pytorch-end-gymnasium-vectorized]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-gymnasium-vectorized]
:end-before: [jax-end-gymnasium-vectorized]
.. tab:: Shimmy
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-shimmy]
:end-before: [pytorch-end-shimmy]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [jax-start-shimmy]
:end-before: [jax-end-shimmy]
.. tab:: DeepMind
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-deepmind]
:end-before: [pytorch-end-deepmind]
.. .. group-tab:: |_4| |jax| |_4|
.. .. literalinclude:: ../../snippets/wrapping.py
.. :language: python
.. :start-after: [jax-start-deepmind]
.. :end-before: [jax-end-deepmind]
.. tab:: robosuite
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/wrapping.py
:language: python
:start-after: [pytorch-start-robosuite]
:end-before: [pytorch-end-robosuite]
.. .. group-tab:: |_4| |jax| |_4|
.. .. literalinclude:: ../../snippets/wrapping.py
.. :language: python
.. :start-after: [jax-start-robosuite]
.. :end-before: [jax-end-robosuite]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autofunction:: skrl.envs.wrappers.torch.wrap_env
.. raw:: html
<br>
API (JAX)
---------
.. autofunction:: skrl.envs.wrappers.jax.wrap_env
.. raw:: html
<br>
Internal API (PyTorch)
----------------------
.. autoclass:: skrl.envs.wrappers.torch.Wrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. py:property:: device
The device used by the environment
If the wrapped environment does not have the ``device`` property, the value of this property will be ``"cuda:0"`` or ``"cpu"`` depending on the device availability
.. autoclass:: skrl.envs.wrappers.torch.OmniverseIsaacGymWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.torch.IsaacOrbitWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.torch.IsaacGymPreview3Wrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.torch.IsaacGymPreview2Wrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.torch.GymWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.torch.GymnasiumWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.torch.DeepMindWrapper
:undoc-members:
:show-inheritance:
:private-members: _spec_to_space, _observation_to_tensor, _tensor_to_action
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.torch.RobosuiteWrapper
:undoc-members:
:show-inheritance:
:private-members: _spec_to_space, _observation_to_tensor, _tensor_to_action
:members:
.. automethod:: __init__
.. raw:: html
<br>
Internal API (JAX)
------------------
.. autoclass:: skrl.envs.wrappers.jax.Wrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. py:property:: device
The device used by the environment
If the wrapped environment does not have the ``device`` property, the value of this property will be ``"cuda"`` or ``"cpu"`` depending on the device availability
.. autoclass:: skrl.envs.wrappers.jax.OmniverseIsaacGymWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.jax.IsaacOrbitWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.jax.IsaacGymPreview3Wrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.jax.IsaacGymPreview2Wrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
.. autoclass:: skrl.envs.wrappers.jax.GymnasiumWrapper
:undoc-members:
:show-inheritance:
:members:
.. automethod:: __init__
| 14,676 | reStructuredText | 30.029598 | 189 | 0.474448 |
Toni-SM/skrl/docs/source/api/agents/sarsa.rst | State Action Reward State Action (SARSA)
========================================
SARSA is a **model-free** **on-policy** algorithm that uses a **tabular** Q-function to handle **discrete** observations and action spaces
Paper: `On-Line Q-Learning Using Connectionist Systems <https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.17.2539>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - action-value function (:math:`Q`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
.. raw:: html
<br>
Decision making
"""""""""""""""
|
| :literal:`act(...)`
| :math:`a \leftarrow \pi_{Q[s,a]}(s) \qquad` where :math:`\; a \leftarrow \begin{cases} a \in_R A & x < \epsilon \\ \underset{a}{\arg\max} \; Q[s] & x \geq \epsilon \end{cases} \qquad` for :math:`\; x \leftarrow U(0,1)`
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`_update(...)`
| :green:`# compute next actions`
| :math:`a' \leftarrow \pi_{Q[s,a]}(s') \qquad` :gray:`# the only difference with Q-learning`
| :green:`# update Q-table`
| :math:`Q[s,a] \leftarrow Q[s,a] \;+` :guilabel:`learning_rate` :math:`(r \;+` :guilabel:`discount_factor` :math:`\neg d \; Q[s',a'] - Q[s,a])`
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-sarsa]
:end-before: [torch-end-sarsa]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/sarsa/sarsa.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Dict
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 table. This table (model) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_{Q[s,a]}(s)`
- Policy (:math:`\epsilon`-greedy)
- :literal:`"policy"`
- observation
- action
- :ref:`Tabular <models_tabular>`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.sarsa.SARSA_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.sarsa.SARSA
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 3,534 | reStructuredText | 22.104575 | 220 | 0.552349 |
Toni-SM/skrl/docs/source/api/agents/cem.rst | Cross-Entropy Method (CEM)
==========================
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy function approximator (:math:`\pi_\theta`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - loss (:math:`L`)
.. raw:: html
<br>
Decision making
"""""""""""""""
|
| :literal:`act(...)`
| :math:`a \leftarrow \pi_\theta(s)`
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`_update(...)`
| :green:`# sample all memory`
| :math:`s, a, r, s', d \leftarrow` states, actions, rewards, next_states, dones
| :green:`# compute discounted return threshold`
| :math:`[G] \leftarrow \sum_{t=0}^{E-1}` :guilabel:`discount_factor`\ :math:`^{t} \, r_t` for each episode
| :math:`G_{_{bound}} \leftarrow q_{th_{quantile}}([G])` at the given :guilabel:`percentile`
| :green:`# get elite states and actions`
| :math:`s_{_{elite}} \leftarrow s[G \geq G_{_{bound}}]`
| :math:`a_{_{elite}} \leftarrow a[G \geq G_{_{bound}}]`
| :green:`# compute scores for the elite states`
| :math:`scores \leftarrow \theta(s_{_{elite}})`
| :green:`# compute policy loss`
| :math:`L_{\pi_\theta} \leftarrow -\sum_{i=1}^{N} a_{_{elite}} \log(scores)`
| :green:`# optimization step`
| reset :math:`\text{optimizer}_\theta`
| :math:`\nabla_{\theta} L_{\pi_\theta}`
| step :math:`\text{optimizer}_\theta`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_\theta (\text{optimizer}_\theta)`
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-cem]
:end-before: [torch-end-cem]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [jax-start-cem]
:end-before: [jax-end-cem]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/cem/cem.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 discrete function approximator. This function approximator (model) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi(s)`
- Policy
- :literal:`"policy"`
- observation
- action
- :ref:`Categorical <models_categorical>` /
|br| :ref:`Multi-Categorical <models_multicategorical>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - RNN support
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.cem.CEM_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.cem.CEM
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.agents.jax.cem.CEM_DEFAULT_CONFIG
.. autoclass:: skrl.agents.jax.cem.CEM
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 4,839 | reStructuredText | 21.830189 | 206 | 0.549907 |
Toni-SM/skrl/docs/source/api/agents/ddqn.rst | Double Deep Q-Network (DDQN)
============================
DDQN is a **model-free**, **off-policy** algorithm that relies on double Q-learning to avoid the overestimation of action-values introduced by DQN
Paper: `Deep Reinforcement Learning with Double Q-Learning <https://ojs.aaai.org/index.php/AAAI/article/view/10295>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
.. raw:: html
<br>
Decision making
"""""""""""""""
|
| :literal:`act(...)`
| :math:`\epsilon \leftarrow \epsilon_{_{final}} + (\epsilon_{_{initial}} - \epsilon_{_{final}}) \; e^{-1 \; \frac{\text{timestep}}{\epsilon_{_{timesteps}}}}`
| :math:`a \leftarrow \begin{cases} a \in_R A & x < \epsilon \\ \underset{a}{\arg\max} \; Q_\phi(s) & x \geq \epsilon \end{cases} \qquad` for :math:`\; x \leftarrow U(0,1)`
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`_update(...)`
| :green:`# sample a batch from memory`
| [:math:`s, a, r, s', d`] :math:`\leftarrow` states, actions, rewards, next_states, dones of size :guilabel:`batch_size`
| :green:`# gradient steps`
| **FOR** each gradient step up to :guilabel:`gradient_steps` **DO**
| :green:`# compute target values`
| :math:`Q' \leftarrow Q_{\phi_{target}}(s')`
| :math:`Q_{_{target}} \leftarrow Q'[\underset{a}{\arg\max} \; Q_\phi(s')] \qquad` :gray:`# the only difference with DQN`
| :math:`y \leftarrow r \;+` :guilabel:`discount_factor` :math:`\neg d \; Q_{_{target}}`
| :green:`# compute Q-network loss`
| :math:`Q \leftarrow Q_\phi(s)[a]`
| :math:`{Loss}_{Q_\phi} \leftarrow \frac{1}{N} \sum_{i=1}^N (Q - y)^2`
| :green:`# optimize Q-network`
| :math:`\nabla_{\phi} {Loss}_{Q_\phi}`
| :green:`# update target network`
| **IF** it's time to update target network **THEN**
| :math:`\phi_{target} \leftarrow` :guilabel:`polyak` :math:`\phi + (1 \;-` :guilabel:`polyak` :math:`) \phi_{target}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_\phi (\text{optimizer}_\phi)`
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-ddqn]
:end-before: [torch-end-ddqn]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [jax-start-ddqn]
:end-before: [jax-end-ddqn]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/dqn/ddqn.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 2 deterministic function approximators. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`Q_\phi(s, a)`
- Q-network
- :literal:`"q_network"`
- observation
- action
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{\phi_{target}}(s, a)`
- Target Q-network
- :literal:`"target_q_network"`
- observation
- action
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - RNN support
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.dqn.DDQN_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.dqn.DDQN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.agents.jax.dqn.DDQN_DEFAULT_CONFIG
.. autoclass:: skrl.agents.jax.dqn.DDQN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 5,607 | reStructuredText | 24.375566 | 215 | 0.551097 |
Toni-SM/skrl/docs/source/api/agents/a2c.rst | Advantage Actor Critic (A2C)
============================
A2C (synchronous version of A3C) is a **model-free**, **stochastic** **on-policy** **policy gradient** algorithm
Paper: `Asynchronous Methods for Deep Reinforcement Learning <https://arxiv.org/abs/1602.01783>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. note::
This algorithm implementation relies on the existence of parallel environments instead of parallel actor-learners
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy function approximator (:math:`\pi_\theta`), value function approximator (:math:`V_\phi`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - values (:math:`V`), advantages (:math:`A`), returns (:math:`R`)
| - log probabilities (:math:`logp`)
| - loss (:math:`L`)
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`compute_gae(...)`
| :blue:`def` :math:`\;f_{GAE} (r, d, V, V_{_{last}}') \;\rightarrow\; R, A:`
| :math:`adv \leftarrow 0`
| :math:`A \leftarrow \text{zeros}(r)`
| :green:`# advantages computation`
| **FOR** each reverse iteration :math:`i` up to the number of rows in :math:`r` **DO**
| **IF** :math:`i` is not the last row of :math:`r` **THEN**
| :math:`V_i' = V_{i+1}`
| **ELSE**
| :math:`V_i' \leftarrow V_{_{last}}'`
|         :math:`adv \leftarrow r_i - V_i \, +` :guilabel:`discount_factor` :math:`\neg d_i \; (V_i' \, +` :guilabel:`lambda` :math:`adv)`
| :math:`A_i \leftarrow adv`
| :green:`# returns computation`
| :math:`R \leftarrow A + V`
| :green:`# normalize advantages`
| :math:`A \leftarrow \dfrac{A - \bar{A}}{A_\sigma + 10^{-8}}`
|
| :literal:`_update(...)`
| :green:`# compute returns and advantages`
| :math:`V_{_{last}}' \leftarrow V_\phi(s')`
| :math:`R, A \leftarrow f_{GAE}(r, d, V, V_{_{last}}')`
| :green:`# sample mini-batches from memory`
| [[:math:`s, a, logp, V, R, A`]] :math:`\leftarrow` states, actions, log_prob, values, returns, advantages
| :green:`# mini-batches loop`
| **FOR** each mini-batch [:math:`s, a, logp, V, R, A`] up to :guilabel:`mini_batches` **DO**
| :math:`logp' \leftarrow \pi_\theta(s, a)`
| :green:`# compute entropy loss`
| **IF** entropy computation is enabled **THEN**
| :math:`{L}_{entropy} \leftarrow \, -` :guilabel:`entropy_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N \pi_{\theta_{entropy}}`
| **ELSE**
| :math:`{L}_{entropy} \leftarrow 0`
| :green:`# compute policy loss`
|         :math:`L_{\pi_\theta} \leftarrow -\frac{1}{N} \sum_{i=1}^N A \; logp'`
| :green:`# compute value loss`
| :math:`V_{_{predicted}} \leftarrow V_\phi(s)`
| :math:`L_{V_\phi} \leftarrow \frac{1}{N} \sum_{i=1}^N (R - V_{_{predicted}})^2`
| :green:`# optimization step`
| reset :math:`\text{optimizer}_{\theta, \phi}`
| :math:`\nabla_{\theta, \, \phi} (L_{\pi_\theta} + {L}_{entropy} + L_{V_\phi})`
| :math:`\text{clip}(\lVert \nabla_{\theta, \, \phi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_{\theta, \phi}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_{\theta} (\text{optimizer}_{\theta})`
| step :math:`\text{scheduler}_{\phi} (\text{optimizer}_{\phi})`
.. raw:: html
<br>
Usage
-----
.. note::
Support for recurrent neural networks (RNN, LSTM, GRU and any other variant) is implemented in a separate file (:literal:`a2c_rnn.py`) to maintain the readability of the standard implementation (:literal:`a2c.py`)
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-a2c]
:end-before: [torch-end-a2c]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [jax-start-a2c]
:end-before: [jax-end-a2c]
.. tab:: RNN implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. note::
When using recursive models it is necessary to override their :literal:`.get_specification()` method. Visit each model's documentation for more details
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-a2c-rnn]
:end-before: [torch-end-a2c-rnn]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/a2c/a2c.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 stochastic (discrete or continuous) and 1 deterministic function approximator. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_\theta(s)`
- Policy
- :literal:`"policy"`
- observation
- action
- :ref:`Categorical <models_categorical>` /
|br| :ref:`Multi-Categorical <models_multicategorical>` /
|br| :ref:`Gaussian <models_gaussian>` /
|br| :ref:`MultivariateGaussian <models_multivariate_gaussian>`
* - :math:`V_\phi(s)`
- Value
- :literal:`"value"`
- observation
- 1
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- for Policy and Value
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - RNN support
- RNN, LSTM, GRU and any other variant
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.a2c.A2C_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.a2c.A2C
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. autoclass:: skrl.agents.torch.a2c.A2C_RNN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.agents.jax.a2c.A2C_DEFAULT_CONFIG
.. autoclass:: skrl.agents.jax.a2c.A2C
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 8,087 | reStructuredText | 28.198556 | 256 | 0.558427 |
Toni-SM/skrl/docs/source/api/agents/q_learning.rst | Q-learning
==========
Q-learning is a **model-free** **off-policy** algorithm that uses a **tabular** Q-function to handle **discrete** observations and action spaces
Paper: `Learning from delayed rewards <https://www.academia.edu/3294050/Learning_from_delayed_rewards>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - action-value function (:math:`Q`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
.. raw:: html
<br>
Decision making
"""""""""""""""
|
| :literal:`act(...)`
| :math:`a \leftarrow \pi_{Q[s,a]}(s) \qquad` where :math:`\; a \leftarrow \begin{cases} a \in_R A & x < \epsilon \\ \underset{a}{\arg\max} \; Q[s] & x \geq \epsilon \end{cases} \qquad` for :math:`\; x \leftarrow U(0,1)`
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`_update(...)`
| :green:`# compute next actions`
| :math:`a' \leftarrow \underset{a}{\arg\max} \; Q[s'] \qquad` :gray:`# the only difference with SARSA`
| :green:`# update Q-table`
| :math:`Q[s,a] \leftarrow Q[s,a] \;+` :guilabel:`learning_rate` :math:`(r \;+` :guilabel:`discount_factor` :math:`\neg d \; Q[s',a'] - Q[s,a])`
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-q-learning]
:end-before: [torch-end-q-learning]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/q_learning/q_learning.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Dict
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 table. This table (model) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_{Q[s,a]}(s)`
- Policy (:math:`\epsilon`-greedy)
- :literal:`"policy"`
- observation
- action
- :ref:`Tabular <models_tabular>`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.q_learning.Q_LEARNING_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.q_learning.Q_LEARNING
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 3,511 | reStructuredText | 21.954248 | 220 | 0.558246 |
Toni-SM/skrl/docs/source/api/agents/sac.rst | Soft Actor-Critic (SAC)
=======================
SAC is a **model-free**, **stochastic** **off-policy** **actor-critic** algorithm that uses double Q-learning (like TD3) and **entropy** regularization to maximize a trade-off between exploration and exploitation
Paper: `Soft Actor-Critic: Off-Policy Maximum Entropy Deep Reinforcement Learning with a Stochastic Actor <https://arxiv.org/abs/1801.01290>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy function approximator (:math:`\pi_\theta`), critic function approximator (:math:`Q_\phi`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - log probabilities (:math:`logp`), entropy coefficient (:math:`\alpha`)
| - loss (:math:`L`)
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`_update(...)`
| :green:`# sample a batch from memory`
| [:math:`s, a, r, s', d`] :math:`\leftarrow` states, actions, rewards, next_states, dones of size :guilabel:`batch_size`
| :green:`# gradient steps`
| **FOR** each gradient step up to :guilabel:`gradient_steps` **DO**
| :green:`# compute target values`
| :math:`a',\; logp' \leftarrow \pi_\theta(s')`
| :math:`Q_{1_{target}} \leftarrow Q_{{\phi 1}_{target}}(s', a')`
| :math:`Q_{2_{target}} \leftarrow Q_{{\phi 2}_{target}}(s', a')`
| :math:`Q_{_{target}} \leftarrow \text{min}(Q_{1_{target}}, Q_{2_{target}}) - \alpha \; logp'`
| :math:`y \leftarrow r \;+` :guilabel:`discount_factor` :math:`\neg d \; Q_{_{target}}`
| :green:`# compute critic loss`
| :math:`Q_1 \leftarrow Q_{\phi 1}(s, a)`
| :math:`Q_2 \leftarrow Q_{\phi 2}(s, a)`
| :math:`L_{Q_\phi} \leftarrow 0.5 \; (\frac{1}{N} \sum_{i=1}^N (Q_1 - y)^2 + \frac{1}{N} \sum_{i=1}^N (Q_2 - y)^2)`
| :green:`# optimization step (critic)`
| reset :math:`\text{optimizer}_\phi`
| :math:`\nabla_{\phi} L_{Q_\phi}`
| :math:`\text{clip}(\lVert \nabla_{\phi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_\phi`
| :green:`# compute policy (actor) loss`
| :math:`a,\; logp \leftarrow \pi_\theta(s)`
| :math:`Q_1 \leftarrow Q_{\phi 1}(s, a)`
| :math:`Q_2 \leftarrow Q_{\phi 2}(s, a)`
| :math:`L_{\pi_\theta} \leftarrow \frac{1}{N} \sum_{i=1}^N (\alpha \; logp - \text{min}(Q_1, Q_2))`
| :green:`# optimization step (policy)`
| reset :math:`\text{optimizer}_\theta`
| :math:`\nabla_{\theta} L_{\pi_\theta}`
| :math:`\text{clip}(\lVert \nabla_{\theta} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_\theta`
| :green:`# entropy learning`
| **IF** :guilabel:`learn_entropy` is enabled **THEN**
| :green:`# compute entropy loss`
| :math:`{L}_{entropy} \leftarrow - \frac{1}{N} \sum_{i=1}^N (log(\alpha) \; (logp + \alpha_{Target}))`
| :green:`# optimization step (entropy)`
| reset :math:`\text{optimizer}_\alpha`
| :math:`\nabla_{\alpha} {L}_{entropy}`
| step :math:`\text{optimizer}_\alpha`
| :green:`# compute entropy coefficient`
| :math:`\alpha \leftarrow e^{log(\alpha)}`
| :green:`# update target networks`
| :math:`{\phi 1}_{target} \leftarrow` :guilabel:`polyak` :math:`{\phi 1} + (1 \;-` :guilabel:`polyak` :math:`) {\phi 1}_{target}`
| :math:`{\phi 2}_{target} \leftarrow` :guilabel:`polyak` :math:`{\phi 2} + (1 \;-` :guilabel:`polyak` :math:`) {\phi 2}_{target}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_\theta (\text{optimizer}_\theta)`
| step :math:`\text{scheduler}_\phi (\text{optimizer}_\phi)`
.. raw:: html
<br>
Usage
-----
.. note::
Support for recurrent neural networks (RNN, LSTM, GRU and any other variant) is implemented in a separate file (:literal:`sac_rnn.py`) to maintain the readability of the standard implementation (:literal:`sac.py`)
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-sac]
:end-before: [torch-end-sac]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [jax-start-sac]
:end-before: [jax-end-sac]
.. tab:: RNN implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. note::
When using recursive models it is necessary to override their :literal:`.get_specification()` method. Visit each model's documentation for more details
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-sac-rnn]
:end-before: [torch-end-sac-rnn]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/sac/sac.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 stochastic and 4 deterministic function approximators. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_\theta(s)`
- Policy (actor)
- :literal:`"policy"`
- observation
- action
- :ref:`Gaussian <models_gaussian>` /
|br| :ref:`MultivariateGaussian <models_multivariate_gaussian>`
* - :math:`Q_{\phi 1}(s, a)`
- Q1-network (critic 1)
- :literal:`"critic_1"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{\phi 2}(s, a)`
- Q2-network (critic 2)
- :literal:`"critic_2"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{{\phi 1}_{target}}(s, a)`
- Target Q1-network
- :literal:`"target_critic_1"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{{\phi 2}_{target}}(s, a)`
- Target Q2-network
- :literal:`"target_critic_2"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - RNN support
- RNN, LSTM, GRU and any other variant
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.sac.SAC_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.sac.SAC
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. autoclass:: skrl.agents.torch.sac.SAC_RNN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.agents.jax.sac.SAC_DEFAULT_CONFIG
.. autoclass:: skrl.agents.jax.sac.SAC
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 8,896 | reStructuredText | 29.67931 | 232 | 0.556767 |
Toni-SM/skrl/docs/source/api/agents/trpo.rst | Trust Region Policy Optimization (TRPO)
=======================================
TRPO is a **model-free**, **stochastic** **on-policy** **policy gradient** algorithm that deploys an iterative procedure to optimize the policy, with guaranteed monotonic improvement
Paper: `Trust Region Policy Optimization <https://arxiv.org/abs/1502.05477>`_
.. raw:: html
<br><hr>
Algorithm
---------
| For each iteration do
| :math:`\bullet \;` Collect, in a rollout memory, a set of states :math:`s`, actions :math:`a`, rewards :math:`r`, dones :math:`d`, log probabilities :math:`logp` and values :math:`V` on policy using :math:`\pi_\theta` and :math:`V_\phi`
| :math:`\bullet \;` Estimate returns :math:`R` and advantages :math:`A` using Generalized Advantage Estimation (GAE(:math:`\lambda`)) from the collected data [:math:`r, d, V`]
| :math:`\bullet \;` Compute the surrogate objective (policy loss) gradient :math:`g` and the Hessian :math:`H` of :math:`KL` divergence with respect to the policy parameters :math:`\theta`
| :math:`\bullet \;` Compute the search direction :math:`\; x \approx H^{-1}g \;` using the conjugate gradient method
| :math:`\bullet \;` Compute the maximal (full) step length :math:`\; \beta = \sqrt{\dfrac{2 \delta}{x^T H x}} x \;` where :math:`\delta` is the desired (maximum) :math:`KL` divergence and :math:`\; \sqrt{\frac{2 \delta}{x^T H x}} \;` is the step size
| :math:`\bullet \;` Perform a backtracking line search with exponential decay to find the final policy update :math:`\; \theta_{new} = \theta + \alpha \; \beta \;` ensuring improvement of the surrogate objective and satisfaction of the :math:`KL` divergence constraint
| :math:`\bullet \;` Update the value function :math:`V_\phi` using the computed returns :math:`R`
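A simplified, hedged sketch of the backtracking line search step described above; :literal:`evaluate` is a hypothetical helper that returns the surrogate improvement and the KL divergence for a candidate parameter vector (the actual agent additionally checks an acceptance ratio on the expected improvement):

.. code-block:: python

    import torch


    def backtracking_line_search(evaluate, theta, full_step,
                                 max_kl_divergence=0.01, step_fraction=1.0, max_backtrack_steps=10):
        """Exponentially decayed backtracking line search (simplified sketch)."""
        for i in range(max_backtrack_steps):
            alpha = (0.5 * step_fraction) ** i          # exponential decay of the step
            theta_candidate = theta + alpha * full_step
            improvement, kl = evaluate(theta_candidate)
            # accept the first candidate that improves the surrogate objective
            # while keeping the KL divergence below the trust-region threshold
            if improvement > 0 and kl < max_kl_divergence:
                return theta_candidate
        return theta  # no candidate accepted: restore the previous parameters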
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`compute_gae(...)`
| :blue:`def` :math:`\;f_{GAE} (r, d, V, V_{_{last}}') \;\rightarrow\; R, A:`
| :math:`adv \leftarrow 0`
| :math:`A \leftarrow \text{zeros}(r)`
| :green:`# advantages computation`
| **FOR** each reverse iteration :math:`i` up to the number of rows in :math:`r` **DO**
| **IF** :math:`i` is not the last row of :math:`r` **THEN**
| :math:`V_i' = V_{i+1}`
| **ELSE**
| :math:`V_i' \leftarrow V_{_{last}}'`
| :math:`adv \leftarrow r_i - V_i \, +` :guilabel:`discount_factor` :math:`\neg d_i \; (V_i' \, -` :guilabel:`lambda` :math:`adv)`
| :math:`A_i \leftarrow adv`
| :green:`# returns computation`
| :math:`R \leftarrow A + V`
| :green:`# normalize advantages`
| :math:`A \leftarrow \dfrac{A - \bar{A}}{A_\sigma + 10^{-8}}`
|
| :literal:`surrogate_loss(...)`
| :blue:`def` :math:`\;f_{Loss} (\pi_\theta, s, a, logp, A) \;\rightarrow\; L_{\pi_\theta}:`
| :math:`logp' \leftarrow \pi_\theta(s, a)`
| :math:`L_{\pi_\theta} \leftarrow \frac{1}{N} \sum_{i=1}^N A \; e^{(logp' - logp)}`
|
| :literal:`conjugate_gradient(...)` (See `conjugate gradient method <https://en.wikipedia.org/wiki/Conjugate_gradient_method#As_an_iterative_method>`_)
| :blue:`def` :math:`\;f_{CG} (\pi_\theta, s, b) \;\rightarrow\; x:`
| :math:`x \leftarrow \text{zeros}(b)`
| :math:`r \leftarrow b`
| :math:`p \leftarrow b`
| :math:`rr_{old} \leftarrow r \cdot r`
| **FOR** each iteration up to :guilabel:`conjugate_gradient_steps` **DO**
| :math:`\alpha \leftarrow \dfrac{rr_{old}}{p \cdot f_{Ax}(\pi_\theta, s, p)}`
| :math:`x \leftarrow x + \alpha \; p`
| :math:`r \leftarrow r - \alpha \; f_{Ax}(\pi_\theta, s, p)`
| :math:`rr_{new} \leftarrow r \cdot r`
| **IF** :math:`rr_{new} <` residual tolerance **THEN**
| **BREAK LOOP**
| :math:`p \leftarrow r + \dfrac{rr_{new}}{rr_{old}} \; p`
| :math:`rr_{old} \leftarrow rr_{new}`
|
| :literal:`fisher_vector_product(...)` (See `fisher vector product in TRPO <https://www.telesens.co/2018/06/09/efficiently-computing-the-fisher-vector-product-in-trpo/>`_)
| :blue:`def` :math:`\;f_{Ax} (\pi_\theta, s, v) \;\rightarrow\; hv:`
| :math:`kl \leftarrow f_{KL}(\pi_\theta, \pi_\theta, s)`
| :math:`g_{kl} \leftarrow \nabla_\theta kl`
| :math:`g_{kl_{flat}} \leftarrow \text{flatten}(g_{kl})`
| :math:`g_{hv} \leftarrow \nabla_\theta (g_{kl_{flat}} \; v)`
| :math:`g_{hv_{flat}} \leftarrow \text{flatten}(g_{hv})`
| :math:`hv \leftarrow g_{hv_{flat}} +` :guilabel:`damping` :math:`v`
|
| :literal:`kl_divergence(...)` (See `Kullback–Leibler divergence for normal distribution <https://en.wikipedia.org/wiki/Normal_distribution#Other_properties>`_)
| :blue:`def` :math:`\;f_{KL} (\pi_{\theta 1}, \pi_{\theta 2}, s) \;\rightarrow\; kl:`
| :math:`\mu_1, \log\sigma_1 \leftarrow \pi_{\theta 1}(s)`
| :math:`\mu_2, \log\sigma_2 \leftarrow \pi_{\theta 2}(s)`
| :math:`kl \leftarrow \log\sigma_1 - \log\sigma_2 + \frac{1}{2} \dfrac{(e^{\log\sigma_1})^2 + (\mu_1 - \mu_2)^2}{(e^{\log\sigma_2})^2} - \frac{1}{2}`
| :math:`kl \leftarrow \frac{1}{N} \sum_{i=1}^N \, (\sum_{dim} kl)`
|
| :literal:`_update(...)`
| :green:`# compute returns and advantages`
| :math:`V_{_{last}}' \leftarrow V_\phi(s')`
| :math:`R, A \leftarrow f_{GAE}(r, d, V, V_{_{last}}')`
| :green:`# sample all from memory`
| [[:math:`s, a, logp, A`]] :math:`\leftarrow` states, actions, log_prob, advantages
| :green:`# compute policy loss gradient`
| :math:`L_{\pi_\theta} \leftarrow f_{Loss}(\pi_\theta, s, a, logp, A)`
| :math:`g \leftarrow \nabla_{\theta} L_{\pi_\theta}`
| :math:`g_{_{flat}} \leftarrow \text{flatten}(g)`
| :green:`# compute the search direction using the conjugate gradient algorithm`
| :math:`search_{direction} \leftarrow f_{CG}(\pi_\theta, s, g_{_{flat}})`
| :green:`# compute step size and full step`
| :math:`xHx \leftarrow search_{direction} \; f_{Ax}(\pi_\theta, s, search_{direction})`
| :math:`step_{size} \leftarrow \sqrt{\dfrac{2 \, \delta}{xHx}} \qquad` with :math:`\; \delta` as :guilabel:`max_kl_divergence`
| :math:`\beta \leftarrow step_{size} \; search_{direction}`
| :green:`# backtracking line search`
| :math:`flag_{restore} \leftarrow \text{True}`
| :math:`\pi_{\theta_{backup}} \leftarrow \pi_\theta`
| :math:`\theta \leftarrow \text{get_parameters}(\pi_\theta)`
| :math:`I_{expected} \leftarrow g_{_{flat}} \; \beta`
| **FOR** :math:`\alpha \leftarrow (0.5` :guilabel:`step_fraction` :math:`)^i \;` with :math:`i = 0, 1, 2, ...` up to :guilabel:`max_backtrack_steps` **DO**
| :math:`\theta_{new} \leftarrow \theta + \alpha \; \beta`
| :math:`\pi_\theta \leftarrow \text{set_parameters}(\theta_{new})`
| :math:`I_{expected} \leftarrow \alpha \; I_{expected}`
| :math:`kl \leftarrow f_{KL}(\pi_{\theta_{backup}}, \pi_\theta, s)`
| :math:`L \leftarrow f_{Loss}(\pi_\theta, s, a, logp, A)`
| **IF** :math:`kl < \delta` **AND** :math:`\dfrac{L - L_{\pi_\theta}}{I_{expected}} >` :guilabel:`accept_ratio` **THEN**
| :math:`flag_{restore} \leftarrow \text{False}`
| **BREAK LOOP**
| **IF** :math:`flag_{restore}` **THEN**
| :math:`\pi_\theta \leftarrow \pi_{\theta_{backup}}`
| :green:`# sample mini-batches from memory`
| [[:math:`s, R`]] :math:`\leftarrow` states, returns
| :green:`# learning epochs`
| **FOR** each learning epoch up to :guilabel:`learning_epochs` **DO**
| :green:`# mini-batches loop`
| **FOR** each mini-batch [:math:`s, R`] up to :guilabel:`mini_batches` **DO**
| :green:`# compute value loss`
| :math:`V' \leftarrow V_\phi(s)`
| :math:`L_{V_\phi} \leftarrow` :guilabel:`value_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N (R - V')^2`
| :green:`# optimization step (value)`
| reset :math:`\text{optimizer}_\phi`
| :math:`\nabla_{\phi} L_{V_\phi}`
| :math:`\text{clip}(\lVert \nabla_{\phi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_\phi`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_\phi(\text{optimizer}_\phi)`
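A standalone, hedged sketch of the conjugate gradient routine :math:`f_{CG}` above; here :literal:`Ax` stands in for the Fisher-vector product :math:`f_{Ax}` (in the agent it is a closure over the policy and the sampled states), and :literal:`num_iterations` plays the role of :guilabel:`conjugate_gradient_steps`:

.. code-block:: python

    import torch


    def conjugate_gradient(Ax, b, num_iterations=10, residual_tolerance=1e-10):
        """Approximately solve A x = b given only the matrix-vector product Ax(v) = A v."""
        x = torch.zeros_like(b)
        r = b.clone()            # residual
        p = b.clone()            # search direction
        rr_old = torch.dot(r, r)
        for _ in range(num_iterations):
            hv = Ax(p)                           # Fisher-vector (Hessian-vector) product A p
            alpha = rr_old / torch.dot(p, hv)
            x += alpha * p
            r -= alpha * hv
            rr_new = torch.dot(r, r)
            if rr_new < residual_tolerance:
                break
            p = r + (rr_new / rr_old) * p
            rr_old = rr_new
        return x

The returned direction :math:`x` is then scaled by :math:`\sqrt{2\delta / x^T H x}` to obtain the full step that the backtracking line search evaluates.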
.. raw:: html
<br>
Usage
-----
.. note::
Support for recurrent neural networks (RNN, LSTM, GRU and any other variant) is implemented in a separate file (:literal:`trpo_rnn.py`) to maintain the readability of the standard implementation (:literal:`trpo.py`)
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-trpo]
:end-before: [torch-end-trpo]
.. tab:: RNN implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. note::
When using recurrent models it is necessary to override their :literal:`.get_specification()` method. Visit each model's documentation for more details
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-trpo-rnn]
:end-before: [torch-end-trpo-rnn]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/trpo/trpo.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 stochastic and 1 deterministic function approximator. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_\theta(s)`
- Policy
- :literal:`"policy"`
- observation
- action
- :ref:`Gaussian <models_gaussian>` /
|br| :ref:`MultivariateGaussian <models_multivariate_gaussian>`
* - :math:`V_\phi(s)`
- Value
- :literal:`"value"`
- observation
- 1
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - RNN support
- RNN, LSTM, GRU and any other variant
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.trpo.TRPO_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.trpo.TRPO
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. autoclass:: skrl.agents.torch.trpo.TRPO_RNN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 12,114 | reStructuredText | 38.080645 | 273 | 0.577431 |
Toni-SM/skrl/docs/source/api/agents/rpo.rst | Robust Policy Optimization (RPO)
================================
RPO is a **model-free**, **stochastic** **on-policy** **policy gradient** algorithm that adds a uniform random perturbation to a base parameterized distribution to help the agent maintain a certain level of stochasticity throughout the training process
Paper: `Robust Policy Optimization in Deep Reinforcement Learning <https://arxiv.org/abs/2212.07536>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. note::
This algorithm is built on top of the PPO algorithm and simply adds the :literal:`alpha` hyperparameter to the policy input dictionary. It is the responsibility of the user to make use of this hyperparameter to modify the parameterized distribution (a minimal sketch is shown after the snippets below).
.. tabs::
.. tab:: Within the RPO agent
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 9-11
:start-after: [torch-start-rpo-with-rpo]
:end-before: [torch-end-rpo-with-rpo]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 10-12
:start-after: [jax-start-rpo-with-rpo]
:end-before: [jax-end-rpo-with-rpo]
.. tab:: With other agents (e.g. PPO, A2C, TRPO)
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 9-11
:start-after: [torch-start-rpo-without-rpo]
:end-before: [torch-end-rpo-without-rpo]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 10-12
:start-after: [jax-start-rpo-without-rpo]
:end-before: [jax-end-rpo-without-rpo]
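As a rough, hedged sketch of the idea shown in the snippets above (the :literal:`"alpha"` key and the network sizes are illustrative assumptions): a Gaussian policy whose mean is perturbed with a uniform sample in :math:`[-\alpha, \alpha)` before the distribution is built.

.. code-block:: python

    import torch
    import torch.nn as nn

    from skrl.models.torch import GaussianMixin, Model


    class Policy(GaussianMixin, Model):
        def __init__(self, observation_space, action_space, device):
            Model.__init__(self, observation_space, action_space, device)
            GaussianMixin.__init__(self)
            self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ELU(),
                                     nn.Linear(64, self.num_actions))
            self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

        def compute(self, inputs, role):
            mean_actions = self.net(inputs["states"])
            # RPO: perturb the mean with a uniform random sample in [-alpha, alpha)
            alpha = inputs["alpha"]  # assumed to be provided in the input dictionary (see note above)
            perturbation = torch.zeros_like(mean_actions).uniform_(-alpha, alpha)
            return mean_actions + perturbation, self.log_std_parameter, {}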
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy function approximator (:math:`\pi_\theta`), value function approximator (:math:`V_\phi`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - values (:math:`V`), advantages (:math:`A`), returns (:math:`R`)
| - log probabilities (:math:`logp`)
| - loss (:math:`L`)
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`compute_gae(...)`
| :blue:`def` :math:`\;f_{GAE} (r, d, V, V_{_{last}}') \;\rightarrow\; R, A:`
| :math:`adv \leftarrow 0`
| :math:`A \leftarrow \text{zeros}(r)`
| :green:`# advantages computation`
| **FOR** each reverse iteration :math:`i` up to the number of rows in :math:`r` **DO**
| **IF** :math:`i` is not the last row of :math:`r` **THEN**
| :math:`V_i' = V_{i+1}`
| **ELSE**
| :math:`V_i' \leftarrow V_{_{last}}'`
| :math:`adv \leftarrow r_i - V_i \, +` :guilabel:`discount_factor` :math:`\neg d_i \; (V_i' \, -` :guilabel:`lambda` :math:`adv)`
| :math:`A_i \leftarrow adv`
| :green:`# returns computation`
| :math:`R \leftarrow A + V`
| :green:`# normalize advantages`
| :math:`A \leftarrow \dfrac{A - \bar{A}}{A_\sigma + 10^{-8}}`
|
| :literal:`_update(...)`
| :green:`# compute returns and advantages`
| :math:`V_{_{last}}' \leftarrow V_\phi(s')`
| :math:`R, A \leftarrow f_{GAE}(r, d, V, V_{_{last}}')`
| :green:`# sample mini-batches from memory`
| [[:math:`s, a, logp, V, R, A`]] :math:`\leftarrow` states, actions, log_prob, values, returns, advantages
| :green:`# learning epochs`
| **FOR** each learning epoch up to :guilabel:`learning_epochs` **DO**
| :green:`# mini-batches loop`
| **FOR** each mini-batch [:math:`s, a, logp, V, R, A`] up to :guilabel:`mini_batches` **DO**
| :math:`logp' \leftarrow \pi_\theta(s, a)`
| :green:`# compute approximate KL divergence`
| :math:`ratio \leftarrow logp' - logp`
| :math:`KL_{_{divergence}} \leftarrow \frac{1}{N} \sum_{i=1}^N ((e^{ratio} - 1) - ratio)`
| :green:`# early stopping with KL divergence`
| **IF** :math:`KL_{_{divergence}} >` :guilabel:`kl_threshold` **THEN**
| **BREAK LOOP**
| :green:`# compute entropy loss`
| **IF** entropy computation is enabled **THEN**
| :math:`{L}_{entropy} \leftarrow \, -` :guilabel:`entropy_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N \pi_{\theta_{entropy}}`
| **ELSE**
| :math:`{L}_{entropy} \leftarrow 0`
| :green:`# compute policy loss`
| :math:`ratio \leftarrow e^{logp' - logp}`
| :math:`L_{_{surrogate}} \leftarrow A \; ratio`
| :math:`L_{_{clipped\,surrogate}} \leftarrow A \; \text{clip}(ratio, 1 - c, 1 + c) \qquad` with :math:`c` as :guilabel:`ratio_clip`
| :math:`L^{clip}_{\pi_\theta} \leftarrow - \frac{1}{N} \sum_{i=1}^N \min(L_{_{surrogate}}, L_{_{clipped\,surrogate}})`
| :green:`# compute value loss`
| :math:`V_{_{predicted}} \leftarrow V_\phi(s)`
| **IF** :guilabel:`clip_predicted_values` is enabled **THEN**
| :math:`V_{_{predicted}} \leftarrow V + \text{clip}(V_{_{predicted}} - V, -c, c) \qquad` with :math:`c` as :guilabel:`value_clip`
| :math:`L_{V_\phi} \leftarrow` :guilabel:`value_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N (R - V_{_{predicted}})^2`
| :green:`# optimization step`
| reset :math:`\text{optimizer}_{\theta, \phi}`
| :math:`\nabla_{\theta, \, \phi} (L^{clip}_{\pi_\theta} + {L}_{entropy} + L_{V_\phi})`
| :math:`\text{clip}(\lVert \nabla_{\theta, \, \phi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_{\theta, \phi}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_{\theta, \phi} (\text{optimizer}_{\theta, \phi})`
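The approximate KL divergence used above for early stopping can be written, as a small self-contained sketch, as:

.. code-block:: python

    import torch


    def approximate_kl_divergence(new_log_prob: torch.Tensor, old_log_prob: torch.Tensor) -> torch.Tensor:
        """Estimate the KL divergence from log-probability ratios: mean((ratio - 1) - log(ratio))."""
        log_ratio = new_log_prob - old_log_prob
        return ((torch.exp(log_ratio) - 1) - log_ratio).mean()

Inside the epoch loop, the update is interrupted (**BREAK LOOP** above) as soon as this estimate exceeds :guilabel:`kl_threshold`.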
.. raw:: html
<br>
Usage
-----
.. note::
Support for recurrent neural networks (RNN, LSTM, GRU and any other variant) is implemented in a separate file (:literal:`rpo_rnn.py`) to maintain the readability of the standard implementation (:literal:`rpo.py`)
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-rpo]
:end-before: [torch-end-rpo]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [jax-start-rpo]
:end-before: [jax-end-rpo]
.. tab:: RNN implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. note::
When using recurrent models it is necessary to override their :literal:`.get_specification()` method. Visit each model's documentation for more details
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-rpo-rnn]
:end-before: [torch-end-rpo-rnn]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/rpo/rpo.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 continuous stochastic and 1 deterministic function approximator. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_\theta(s)`
- Policy
- :literal:`"policy"`
- observation
- action
- :ref:`Gaussian <models_gaussian>` /
|br| :ref:`MultivariateGaussian <models_multivariate_gaussian>`
* - :math:`V_\phi(s)`
- Value
- :literal:`"value"`
- observation
- 1
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- for Policy and Value
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - RNN support
- RNN, LSTM, GRU and any other variant
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.rpo.RPO_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.rpo.RPO
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. autoclass:: skrl.agents.torch.rpo.RPO_RNN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.agents.jax.rpo.RPO_DEFAULT_CONFIG
.. autoclass:: skrl.agents.jax.rpo.RPO
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 10,821 | reStructuredText | 31.793939 | 254 | 0.539414 |
Toni-SM/skrl/docs/source/api/agents/dqn.rst | Deep Q-Network (DQN)
====================
DQN is a **model-free**, **off-policy** algorithm that trains control policies directly from high-dimensional sensory inputs using a deep function approximator to represent the Q-value function
Paper: `Playing Atari with Deep Reinforcement Learning <https://arxiv.org/abs/1312.5602>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
.. raw:: html
<br>
Decision making
"""""""""""""""
|
| :literal:`act(...)`
| :math:`\epsilon \leftarrow \epsilon_{_{final}} + (\epsilon_{_{initial}} - \epsilon_{_{final}}) \; e^{-1 \; \frac{\text{timestep}}{\epsilon_{_{timesteps}}}}`
| :math:`a \leftarrow \begin{cases} a \in_R A & x < \epsilon \\ \underset{a}{\arg\max} \; Q_\phi(s) & x \geq \epsilon \end{cases} \qquad` for :math:`\; x \leftarrow U(0,1)`
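A plain-Python sketch of the epsilon-greedy rule above (the default values are illustrative, not the agent's configuration defaults):

.. code-block:: python

    import math
    import random

    import torch


    def epsilon_greedy(q_values: torch.Tensor, timestep: int,
                       initial_epsilon: float = 1.0, final_epsilon: float = 0.04,
                       exploration_timesteps: int = 1000) -> int:
        """Select an action index from a vector of Q-values with exponentially decayed epsilon."""
        epsilon = final_epsilon + (initial_epsilon - final_epsilon) * math.exp(-timestep / exploration_timesteps)
        if random.random() < epsilon:
            return random.randrange(q_values.shape[-1])    # exploratory (random) action
        return int(torch.argmax(q_values, dim=-1).item())  # greedy action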
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`_update(...)`
| :green:`# sample a batch from memory`
| [:math:`s, a, r, s', d`] :math:`\leftarrow` states, actions, rewards, next_states, dones of size :guilabel:`batch_size`
| :green:`# gradient steps`
| **FOR** each gradient step up to :guilabel:`gradient_steps` **DO**
| :green:`# compute target values`
| :math:`Q' \leftarrow Q_{\phi_{target}}(s')`
| :math:`Q_{_{target}} \leftarrow \underset{a}{\max} \; Q' \qquad` :gray:`# the only difference with DDQN`
| :math:`y \leftarrow r \;+` :guilabel:`discount_factor` :math:`\neg d \; Q_{_{target}}`
| :green:`# compute Q-network loss`
| :math:`Q \leftarrow Q_\phi(s)[a]`
| :math:`{Loss}_{Q_\phi} \leftarrow \frac{1}{N} \sum_{i=1}^N (Q - y)^2`
| :green:`# optimize Q-network`
| :math:`\nabla_{\phi} {Loss}_{Q_\phi}`
| :green:`# update target network`
| **IF** it's time to update target network **THEN**
| :math:`\phi_{target} \leftarrow` :guilabel:`polyak` :math:`\phi + (1 \;-` :guilabel:`polyak` :math:`) \phi_{target}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_\phi (\text{optimizer}_\phi)`
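A plain-PyTorch sketch of the target and loss computation above (not the agent's internal code); :literal:`q_network` and :literal:`target_q_network` are assumed to be modules mapping observations to per-action Q-values, with the batch tensors coming from the replay memory:

.. code-block:: python

    import torch
    import torch.nn.functional as F


    def dqn_loss(q_network, target_q_network, states, actions, rewards, next_states, dones,
                 discount_factor=0.99):
        """TD target and mean squared error loss for one sampled batch."""
        with torch.no_grad():
            # y = r + gamma * (1 - d) * max_a' Q_target(s', a')
            next_q = target_q_network(next_states).max(dim=-1, keepdim=True)[0]
            target = rewards + discount_factor * dones.logical_not() * next_q
        # Q-value of the taken action (actions is expected with shape [batch, 1])
        q = q_network(states).gather(1, actions.long())
        return F.mse_loss(q, target)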
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-dqn]
:end-before: [torch-end-dqn]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [jax-start-dqn]
:end-before: [jax-end-dqn]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/dqn/dqn.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 2 deterministic function approximators. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`Q_\phi(s, a)`
- Q-network
- :literal:`"q_network"`
- observation
- action
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{\phi_{target}}(s, a)`
- Target Q-network
- :literal:`"target_q_network"`
- observation
- action
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - RNN support
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.dqn.DQN_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.dqn.DQN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.agents.jax.dqn.DQN_DEFAULT_CONFIG
.. autoclass:: skrl.agents.jax.dqn.DQN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 5,582 | reStructuredText | 24.262443 | 215 | 0.552132 |
Toni-SM/skrl/docs/source/api/agents/amp.rst | Adversarial Motion Priors (AMP)
===============================
AMP is a **model-free**, **stochastic** **on-policy** **policy gradient** algorithm (trained using a combination of GAIL and PPO) for adversarial learning of physics-based character animation. It enables characters to imitate diverse behaviors from large unstructured datasets, without the need for motion planners or other mechanisms for clip selection
Paper: `AMP: Adversarial Motion Priors for Stylized Physics-Based Character Control <https://arxiv.org/abs/2104.02180>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy (:math:`\pi_\theta`), value (:math:`V_\phi`) and discriminator (:math:`D_\psi`) function approximators
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - values (:math:`V`), next values (:math:`V'`), advantages (:math:`A`), returns (:math:`R`)
| - log probabilities (:math:`logp`)
| - loss (:math:`L`)
| - reference motion dataset (:math:`M`), AMP replay buffer (:math:`B`)
| - AMP states (:math:`s_{_{AMP}}`), reference motion states (:math:`s_{_{AMP}}^{^M}`), AMP states from replay buffer (:math:`s_{_{AMP}}^{^B}`)
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`compute_gae(...)`
| :blue:`def` :math:`\;f_{GAE} (r, d, V, V') \;\rightarrow\; R, A:`
| :math:`adv \leftarrow 0`
| :math:`A \leftarrow \text{zeros}(r)`
| :green:`# advantages computation`
| **FOR** each reverse iteration :math:`i` up to the number of rows in :math:`r` **DO**
| :math:`adv \leftarrow r_i - V_i \, +` :guilabel:`discount_factor` :math:`(V' \, +` :guilabel:`lambda` :math:`\neg d_i \; adv)`
| :math:`A_i \leftarrow adv`
| :green:`# returns computation`
| :math:`R \leftarrow A + V`
| :green:`# normalize advantages`
| :math:`A \leftarrow \dfrac{A - \bar{A}}{A_\sigma + 10^{-8}}`
|
| :literal:`_update(...)`
| :green:`# update dataset of reference motions`
| collect reference motions of size :guilabel:`amp_batch_size` :math:`\rightarrow\;` :math:`\text{append}(M)`
| :green:`# compute combined rewards`
| :math:`r_D \leftarrow -log(\text{max}( 1 - \hat{y}(D_\psi(s_{_{AMP}})), \, 10^{-4})) \qquad` with :math:`\; \hat{y}(x) = \dfrac{1}{1 + e^{-x}}`
| :math:`r' \leftarrow` :guilabel:`task_reward_weight` :math:`r \, +` :guilabel:`style_reward_weight` :guilabel:`discriminator_reward_scale` :math:`r_D`
| :green:`# compute returns and advantages`
| :math:`R, A \leftarrow f_{GAE}(r', d, V, V')`
| :green:`# sample mini-batches from memory`
| [[:math:`s, a, logp, V, R, A, s_{_{AMP}}`]] :math:`\leftarrow` states, actions, log_prob, values, returns, advantages, AMP states
| [[:math:`s_{_{AMP}}^{^M}`]] :math:`\leftarrow` AMP states from :math:`M`
| **IF** :math:`B` is not empty **THEN**
| [[:math:`s_{_{AMP}}^{^B}`]] :math:`\leftarrow` AMP states from :math:`B`
| **ELSE**
| [[:math:`s_{_{AMP}}^{^B}`]] :math:`\leftarrow` [[:math:`s_{_{AMP}}`]]
| :green:`# learning epochs`
| **FOR** each learning epoch up to :guilabel:`learning_epochs` **DO**
| :green:`# mini-batches loop`
| **FOR** each mini-batch [:math:`s, a, logp, V, R, A, s_{_{AMP}}, s_{_{AMP}}^{^B}, s_{_{AMP}}^{^M}`] up to :guilabel:`mini_batches` **DO**
| :math:`logp' \leftarrow \pi_\theta(s, a)`
| :green:`# compute entropy loss`
| **IF** entropy computation is enabled **THEN**
| :math:`{L}_{entropy} \leftarrow \, -` :guilabel:`entropy_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N \pi_{\theta_{entropy}}`
| **ELSE**
| :math:`{L}_{entropy} \leftarrow 0`
| :green:`# compute policy loss`
| :math:`ratio \leftarrow e^{logp' - logp}`
| :math:`L_{_{surrogate}} \leftarrow A \; ratio`
| :math:`L_{_{clipped\,surrogate}} \leftarrow A \; \text{clip}(ratio, 1 - c, 1 + c) \qquad` with :math:`c` as :guilabel:`ratio_clip`
| :math:`L^{clip}_{\pi_\theta} \leftarrow - \frac{1}{N} \sum_{i=1}^N \min(L_{_{surrogate}}, L_{_{clipped\,surrogate}})`
| :green:`# compute value loss`
| :math:`V_{_{predicted}} \leftarrow V_\phi(s)`
| **IF** :guilabel:`clip_predicted_values` is enabled **THEN**
| :math:`V_{_{predicted}} \leftarrow V + \text{clip}(V_{_{predicted}} - V, -c, c) \qquad` with :math:`c` as :guilabel:`value_clip`
| :math:`L_{V_\phi} \leftarrow` :guilabel:`value_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N (R - V_{_{predicted}})^2`
| :green:`# compute discriminator loss`
| :math:`{logit}_{_{AMP}} \leftarrow D_\psi(s_{_{AMP}}) \qquad` with :math:`s_{_{AMP}}` of size :guilabel:`discriminator_batch_size`
| :math:`{logit}_{_{AMP}}^{^B} \leftarrow D_\psi(s_{_{AMP}}^{^B}) \qquad` with :math:`s_{_{AMP}}^{^B}` of size :guilabel:`discriminator_batch_size`
| :math:`{logit}_{_{AMP}}^{^M} \leftarrow D_\psi(s_{_{AMP}}^{^M}) \qquad` with :math:`s_{_{AMP}}^{^M}` of size :guilabel:`discriminator_batch_size`
| :green:`# discriminator prediction loss`
| :math:`L_{D_\psi} \leftarrow \dfrac{1}{2}(BCE({logit}_{_{AMP}}` ++ :math:`{logit}_{_{AMP}}^{^B}, \, 0) + BCE({logit}_{_{AMP}}^{^M}, \, 1))`
| with :math:`\; BCE(x,y)=-\frac{1}{N} \sum_{i=1}^N [y \; log(\hat{y}) + (1-y) \, log(1-\hat{y})] \;` and :math:`\; \hat{y} = \dfrac{1}{1 + e^{-x}}`
| :green:`# discriminator logit regularization`
| :math:`L_{D_\psi} \leftarrow L_{D_\psi} +` :guilabel:`discriminator_logit_regularization_scale` :math:`\sum_{i=1}^N \text{flatten}(\psi_w[-1])^2`
| :green:`# discriminator gradient penalty`
| :math:`L_{D_\psi} \leftarrow L_{D_\psi} +` :guilabel:`discriminator_gradient_penalty_scale` :math:`\frac{1}{N} \sum_{i=1}^N \sum (\nabla_\psi {logit}_{_{AMP}}^{^M})^2`
| :green:`# discriminator weight decay`
| :math:`L_{D_\psi} \leftarrow L_{D_\psi} +` :guilabel:`discriminator_weight_decay_scale` :math:`\sum_{i=1}^N \text{flatten}(\psi_w)^2`
| :green:`# optimization step`
| reset :math:`\text{optimizer}_{\theta, \phi, \psi}`
| :math:`\nabla_{\theta, \, \phi, \, \psi} (L^{clip}_{\pi_\theta} + {L}_{entropy} + L_{V_\phi} + L_{D_\psi})`
| :math:`\text{clip}(\lVert \nabla_{\theta, \, \phi, \, \psi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_{\theta, \phi, \psi}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_{\theta, \phi, \psi} (\text{optimizer}_{\theta, \phi, \psi})`
| :green:`# update AMP replay buffer`
| :math:`s_{_{AMP}} \rightarrow\;` :math:`\text{append}(B)`
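A hedged sketch of the combined-reward computation above (the default weights are illustrative; see the configuration for the actual values):

.. code-block:: python

    import torch


    def combined_rewards(task_rewards: torch.Tensor, discriminator_logits: torch.Tensor,
                         task_reward_weight: float = 0.0, style_reward_weight: float = 1.0,
                         discriminator_reward_scale: float = 2.0) -> torch.Tensor:
        """Blend the task reward with the discriminator-based style reward."""
        # r_D = -log(max(1 - sigmoid(D(s_AMP)), 1e-4))
        style_rewards = -torch.log(torch.clamp(1.0 - torch.sigmoid(discriminator_logits), min=1e-4))
        return task_reward_weight * task_rewards \
            + style_reward_weight * discriminator_reward_scale * style_rewards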
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-amp]
:end-before: [torch-end-amp]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/amp/amp.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: AMP observation
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 stochastic (continuous) and 2 deterministic function approximators. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_\theta(s)`
- Policy
- :literal:`"policy"`
- observation
- action
- :ref:`Gaussian <models_gaussian>` /
|br| :ref:`MultivariateGaussian <models_multivariate_gaussian>`
* - :math:`V_\phi(s)`
- Value
- :literal:`"value"`
- observation
- 1
- :ref:`Deterministic <models_deterministic>`
* - :math:`D_\psi(s_{_{AMP}})`
- Discriminator
- :literal:`"discriminator"`
- AMP observation
- 1
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - RNN support
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.amp.AMP_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.amp.AMP
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 10,085 | reStructuredText | 38.245136 | 353 | 0.557561 |
Toni-SM/skrl/docs/source/api/agents/ppo.rst | Proximal Policy Optimization (PPO)
==================================
PPO is a **model-free**, **stochastic** **on-policy** **policy gradient** algorithm that alternates between sampling data through interaction with the environment, and optimizing a *surrogate* objective function while preventing the new policy from moving too far away from the old one
Paper: `Proximal Policy Optimization Algorithms <https://arxiv.org/abs/1707.06347>`_
.. raw:: html
<br><hr>
Algorithm
---------
| For each iteration do:
| :math:`\bullet \;` Collect, in a rollout memory, a set of states :math:`s`, actions :math:`a`, rewards :math:`r`, dones :math:`d`, log probabilities :math:`logp` and values :math:`V` on policy using :math:`\pi_\theta` and :math:`V_\phi`
| :math:`\bullet \;` Estimate returns :math:`R` and advantages :math:`A` using Generalized Advantage Estimation (GAE(:math:`\lambda`)) from the collected data [:math:`r, d, V`]
| :math:`\bullet \;` Compute the entropy loss :math:`{L}_{entropy}`
| :math:`\bullet \;` Compute the clipped surrogate objective (policy loss) with :math:`ratio` as the probability ratio between the action under the current policy and the action under the previous policy: :math:`L^{clip}_{\pi_\theta} = \mathbb{E}[\min(A \; ratio, A \; \text{clip}(ratio, 1-c, 1+c))]`
| :math:`\bullet \;` Compute the value loss :math:`L_{V_\phi}` as the mean squared error (MSE) between the predicted values :math:`V_{_{predicted}}` and the estimated returns :math:`R`
| :math:`\bullet \;` Optimize the total loss :math:`L = L^{clip}_{\pi_\theta} - c_1 \, L_{V_\phi} + c_2 \, {L}_{entropy}`
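The clipped surrogate objective can be written compactly as follows (a minimal sketch in its minimization form, matching the learning algorithm below):

.. code-block:: python

    import torch


    def clipped_surrogate_loss(new_log_prob, old_log_prob, advantages, ratio_clip=0.2):
        """L^clip = -E[min(A * ratio, A * clip(ratio, 1 - c, 1 + c))]."""
        ratio = torch.exp(new_log_prob - old_log_prob)
        surrogate = advantages * ratio
        clipped_surrogate = advantages * torch.clamp(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
        return -torch.min(surrogate, clipped_surrogate).mean()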
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy function approximator (:math:`\pi_\theta`), value function approximator (:math:`V_\phi`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - values (:math:`V`), advantages (:math:`A`), returns (:math:`R`)
| - log probabilities (:math:`logp`)
| - loss (:math:`L`)
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`compute_gae(...)`
| :blue:`def` :math:`\;f_{GAE} (r, d, V, V_{_{last}}') \;\rightarrow\; R, A:`
| :math:`adv \leftarrow 0`
| :math:`A \leftarrow \text{zeros}(r)`
| :green:`# advantages computation`
| **FOR** each reverse iteration :math:`i` up to the number of rows in :math:`r` **DO**
| **IF** :math:`i` is not the last row of :math:`r` **THEN**
| :math:`V_i' = V_{i+1}`
| **ELSE**
| :math:`V_i' \leftarrow V_{_{last}}'`
| :math:`adv \leftarrow r_i - V_i \, +` :guilabel:`discount_factor` :math:`\neg d_i \; (V_i' \, -` :guilabel:`lambda` :math:`adv)`
| :math:`A_i \leftarrow adv`
| :green:`# returns computation`
| :math:`R \leftarrow A + V`
| :green:`# normalize advantages`
| :math:`A \leftarrow \dfrac{A - \bar{A}}{A_\sigma + 10^{-8}}`
|
| :literal:`_update(...)`
| :green:`# compute returns and advantages`
| :math:`V_{_{last}}' \leftarrow V_\phi(s')`
| :math:`R, A \leftarrow f_{GAE}(r, d, V, V_{_{last}}')`
| :green:`# sample mini-batches from memory`
| [[:math:`s, a, logp, V, R, A`]] :math:`\leftarrow` states, actions, log_prob, values, returns, advantages
| :green:`# learning epochs`
| **FOR** each learning epoch up to :guilabel:`learning_epochs` **DO**
| :green:`# mini-batches loop`
| **FOR** each mini-batch [:math:`s, a, logp, V, R, A`] up to :guilabel:`mini_batches` **DO**
| :math:`logp' \leftarrow \pi_\theta(s, a)`
| :green:`# compute approximate KL divergence`
| :math:`ratio \leftarrow logp' - logp`
| :math:`KL_{_{divergence}} \leftarrow \frac{1}{N} \sum_{i=1}^N ((e^{ratio} - 1) - ratio)`
| :green:`# early stopping with KL divergence`
| **IF** :math:`KL_{_{divergence}} >` :guilabel:`kl_threshold` **THEN**
| **BREAK LOOP**
| :green:`# compute entropy loss`
| **IF** entropy computation is enabled **THEN**
| :math:`{L}_{entropy} \leftarrow \, -` :guilabel:`entropy_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N \pi_{\theta_{entropy}}`
| **ELSE**
| :math:`{L}_{entropy} \leftarrow 0`
| :green:`# compute policy loss`
| :math:`ratio \leftarrow e^{logp' - logp}`
| :math:`L_{_{surrogate}} \leftarrow A \; ratio`
| :math:`L_{_{clipped\,surrogate}} \leftarrow A \; \text{clip}(ratio, 1 - c, 1 + c) \qquad` with :math:`c` as :guilabel:`ratio_clip`
| :math:`L^{clip}_{\pi_\theta} \leftarrow - \frac{1}{N} \sum_{i=1}^N \min(L_{_{surrogate}}, L_{_{clipped\,surrogate}})`
| :green:`# compute value loss`
| :math:`V_{_{predicted}} \leftarrow V_\phi(s)`
| **IF** :guilabel:`clip_predicted_values` is enabled **THEN**
| :math:`V_{_{predicted}} \leftarrow V + \text{clip}(V_{_{predicted}} - V, -c, c) \qquad` with :math:`c` as :guilabel:`value_clip`
| :math:`L_{V_\phi} \leftarrow` :guilabel:`value_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N (R - V_{_{predicted}})^2`
| :green:`# optimization step`
| reset :math:`\text{optimizer}_{\theta, \phi}`
| :math:`\nabla_{\theta, \, \phi} (L^{clip}_{\pi_\theta} + {L}_{entropy} + L_{V_\phi})`
| :math:`\text{clip}(\lVert \nabla_{\theta, \, \phi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_{\theta, \phi}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_{\theta, \phi} (\text{optimizer}_{\theta, \phi})`
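A small sketch of the (optionally clipped) value loss above; :literal:`values` are the old value predictions stored in memory and :literal:`predicted_values` the new ones:

.. code-block:: python

    import torch
    import torch.nn.functional as F


    def value_loss(returns, values, predicted_values,
                   clip_predicted_values=False, value_clip=0.2, value_loss_scale=1.0):
        """Mean squared error between returns and (optionally clipped) new value predictions."""
        if clip_predicted_values:
            # keep the new predictions within +/- value_clip of the old ones
            predicted_values = values + torch.clamp(predicted_values - values, -value_clip, value_clip)
        return value_loss_scale * F.mse_loss(returns, predicted_values)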
.. raw:: html
<br>
Usage
-----
.. note::
Support for recurrent neural networks (RNN, LSTM, GRU and any other variant) is implemented in a separate file (:literal:`ppo_rnn.py`) to maintain the readability of the standard implementation (:literal:`ppo.py`)
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-ppo]
:end-before: [torch-end-ppo]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [jax-start-ppo]
:end-before: [jax-end-ppo]
.. tab:: RNN implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. note::
When using recurrent models it is necessary to override their :literal:`.get_specification()` method. Visit each model's documentation for more details
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-ppo-rnn]
:end-before: [torch-end-ppo-rnn]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/ppo/ppo.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 stochastic (discrete or continuous) and 1 deterministic function approximator. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_\theta(s)`
- Policy
- :literal:`"policy"`
- observation
- action
- :ref:`Categorical <models_categorical>` /
|br| :ref:`Multi-Categorical <models_multicategorical>` /
|br| :ref:`Gaussian <models_gaussian>` /
|br| :ref:`MultivariateGaussian <models_multivariate_gaussian>`
* - :math:`V_\phi(s)`
- Value
- :literal:`"value"`
- observation
- 1
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- for Policy and Value
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - RNN support
- RNN, LSTM, GRU and any other variant
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.ppo.PPO_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.ppo.PPO
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. autoclass:: skrl.agents.torch.ppo.PPO_RNN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.agents.jax.ppo.PPO_DEFAULT_CONFIG
.. autoclass:: skrl.agents.jax.ppo.PPO
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 10,324 | reStructuredText | 34.238908 | 304 | 0.565866 |
Toni-SM/skrl/docs/source/api/agents/td3.rst | Twin-Delayed DDPG (TD3)
=======================
TD3 is a **model-free**, **deterministic** **off-policy** **actor-critic** algorithm (based on DDPG) that relies on double Q-learning, target policy smoothing and delayed policy updates to address the problems introduced by overestimation bias in actor-critic algorithms
Paper: `Addressing Function Approximation Error in Actor-Critic Methods <https://arxiv.org/abs/1802.09477>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy function approximator (:math:`\mu_\theta`), critic function approximator (:math:`Q_\phi`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - loss (:math:`L`)
.. raw:: html
<br>
Decision making
"""""""""""""""
|
| :literal:`act(...)`
| :math:`a \leftarrow \mu_\theta(s)`
| :math:`noise \leftarrow` sample :guilabel:`noise`
| :math:`scale \leftarrow (1 - \text{timestep} \;/` :guilabel:`timesteps` :math:`) \; (` :guilabel:`initial_scale` :math:`-` :guilabel:`final_scale` :math:`) \;+` :guilabel:`final_scale`
| :math:`a \leftarrow \text{clip}(a + noise * scale, {a}_{Low}, {a}_{High})`
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`_update(...)`
| :green:`# sample a batch from memory`
| [:math:`s, a, r, s', d`] :math:`\leftarrow` states, actions, rewards, next_states, dones of size :guilabel:`batch_size`
| :green:`# gradient steps`
| **FOR** each gradient step up to :guilabel:`gradient_steps` **DO**
| :green:`# target policy smoothing`
| :math:`a' \leftarrow \mu_{\theta_{target}}(s')`
| :math:`noise \leftarrow \text{clip}(` :guilabel:`smooth_regularization_noise` :math:`, -c, c) \qquad` with :math:`c` as :guilabel:`smooth_regularization_clip`
| :math:`a' \leftarrow a' + noise`
| :math:`a' \leftarrow \text{clip}(a', {a'}_{Low}, {a'}_{High})`
| :green:`# compute target values`
| :math:`Q_{1_{target}} \leftarrow Q_{{\phi 1}_{target}}(s', a')`
| :math:`Q_{2_{target}} \leftarrow Q_{{\phi 2}_{target}}(s', a')`
| :math:`Q_{_{target}} \leftarrow \text{min}(Q_{1_{target}}, Q_{2_{target}})`
| :math:`y \leftarrow r \;+` :guilabel:`discount_factor` :math:`\neg d \; Q_{_{target}}`
| :green:`# compute critic loss`
| :math:`Q_1 \leftarrow Q_{\phi 1}(s, a)`
| :math:`Q_2 \leftarrow Q_{\phi 2}(s, a)`
| :math:`L_{Q_\phi} \leftarrow \frac{1}{N} \sum_{i=1}^N (Q_1 - y)^2 + \frac{1}{N} \sum_{i=1}^N (Q_2 - y)^2`
| :green:`# optimization step (critic)`
| reset :math:`\text{optimizer}_\phi`
| :math:`\nabla_{\phi} L_{Q_\phi}`
| :math:`\text{clip}(\lVert \nabla_{\phi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_\phi`
| :green:`# delayed update`
| **IF** it's time for the :guilabel:`policy_delay` update **THEN**
| :green:`# compute policy (actor) loss`
| :math:`a \leftarrow \mu_\theta(s)`
| :math:`Q_1 \leftarrow Q_{\phi 1}(s, a)`
| :math:`L_{\mu_\theta} \leftarrow - \frac{1}{N} \sum_{i=1}^N Q_1`
| :green:`# optimization step (policy)`
| reset :math:`\text{optimizer}_\theta`
| :math:`\nabla_{\theta} L_{\mu_\theta}`
| :math:`\text{clip}(\lVert \nabla_{\theta} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_\theta`
| :green:`# update target networks`
| :math:`\theta_{target} \leftarrow` :guilabel:`polyak` :math:`\theta + (1 \;-` :guilabel:`polyak` :math:`) \theta_{target}`
| :math:`{\phi 1}_{target} \leftarrow` :guilabel:`polyak` :math:`{\phi 1} + (1 \;-` :guilabel:`polyak` :math:`) {\phi 1}_{target}`
| :math:`{\phi 2}_{target} \leftarrow` :guilabel:`polyak` :math:`{\phi 2} + (1 \;-` :guilabel:`polyak` :math:`) {\phi 2}_{target}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_\theta (\text{optimizer}_\theta)`
| step :math:`\text{scheduler}_\phi (\text{optimizer}_\phi)`
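A plain-PyTorch sketch of the target computation above (target policy smoothing plus clipped double-Q learning); the networks are assumed to be ordinary callables and the Gaussian noise stands in for the configured :guilabel:`smooth_regularization_noise`:

.. code-block:: python

    import torch


    def td3_target(target_policy, target_critic_1, target_critic_2,
                   rewards, next_states, dones, discount_factor=0.99,
                   noise_std=0.2, noise_clip=0.5, action_low=-1.0, action_high=1.0):
        """Smoothed, clipped double-Q Bellman target for one sampled batch."""
        with torch.no_grad():
            next_actions = target_policy(next_states)
            # target policy smoothing: add clipped noise to the target action
            noise = torch.clamp(noise_std * torch.randn_like(next_actions), -noise_clip, noise_clip)
            next_actions = torch.clamp(next_actions + noise, action_low, action_high)
            # clipped double-Q learning: take the minimum of both target critics
            q1 = target_critic_1(next_states, next_actions)
            q2 = target_critic_2(next_states, next_actions)
            return rewards + discount_factor * dones.logical_not() * torch.min(q1, q2)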
.. raw:: html
<br>
Usage
-----
.. note::
Support for recurrent neural networks (RNN, LSTM, GRU and any other variant) is implemented in a separate file (:literal:`td3_rnn.py`) to maintain the readability of the standard implementation (:literal:`td3.py`)
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-td3]
:end-before: [torch-end-td3]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [jax-start-td3]
:end-before: [jax-end-td3]
.. tab:: RNN implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. note::
When using recurrent models it is necessary to override their :literal:`.get_specification()` method. Visit each model's documentation for more details
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-td3-rnn]
:end-before: [torch-end-td3-rnn]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/td3/td3.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 6 deterministic function approximators. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\mu_\theta(s)`
- Policy (actor)
- :literal:`"policy"`
- observation
- action
- :ref:`Deterministic <models_deterministic>`
* - :math:`\mu_{\theta_{target}}(s)`
- Target policy
- :literal:`"target_policy"`
- observation
- action
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{\phi 1}(s, a)`
- Q1-network (critic 1)
- :literal:`"critic_1"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{\phi 2}(s, a)`
- Q2-network (critic 2)
- :literal:`"critic_2"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{{\phi 1}_{target}}(s, a)`
- Target Q1-network
- :literal:`"target_critic_1"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{{\phi 2}_{target}}(s, a)`
- Target Q2-network
- :literal:`"target_critic_2"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - RNN support
- RNN, LSTM, GRU and any other variant
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.td3.TD3_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.td3.TD3
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. autoclass:: skrl.agents.torch.td3.TD3_RNN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.agents.jax.td3.TD3_DEFAULT_CONFIG
.. autoclass:: skrl.agents.jax.td3.TD3
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 9,328 | reStructuredText | 29.788779 | 270 | 0.558105 |
Toni-SM/skrl/docs/source/api/agents/ddpg.rst | Deep Deterministic Policy Gradient (DDPG)
=========================================
DDPG is a **model-free**, **deterministic** **off-policy** **actor-critic** algorithm that uses deep function approximators to learn a policy (and to estimate the action-value function) in high-dimensional, **continuous** action spaces
Paper: `Continuous control with deep reinforcement learning <https://arxiv.org/abs/1509.02971>`_
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy function approximator (:math:`\mu_\theta`), critic function approximator (:math:`Q_\phi`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - loss (:math:`L`)
.. raw:: html
<br>
Decision making
"""""""""""""""
|
| :literal:`act(...)`
| :math:`a \leftarrow \mu_\theta(s)`
| :math:`noise \leftarrow` sample :guilabel:`noise`
| :math:`scale \leftarrow (1 - \text{timestep} \;/` :guilabel:`timesteps` :math:`) \; (` :guilabel:`initial_scale` :math:`-` :guilabel:`final_scale` :math:`) \;+` :guilabel:`final_scale`
| :math:`a \leftarrow \text{clip}(a + noise * scale, {a}_{Low}, {a}_{High})`
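A minimal sketch of the noise-scaling rule above (the scale is annealed linearly from :guilabel:`initial_scale` to :guilabel:`final_scale` over the exploration timesteps; the default values here are illustrative):

.. code-block:: python

    import torch


    def explore(action, noise_sample, timestep, timesteps,
                initial_scale=1.0, final_scale=1e-3, action_low=-1.0, action_high=1.0):
        """Add linearly annealed exploration noise to the deterministic action and clip it."""
        scale = (1.0 - timestep / timesteps) * (initial_scale - final_scale) + final_scale
        return torch.clamp(action + noise_sample * scale, action_low, action_high)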
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`_update(...)`
| :green:`# sample a batch from memory`
| [:math:`s, a, r, s', d`] :math:`\leftarrow` states, actions, rewards, next_states, dones of size :guilabel:`batch_size`
| :green:`# gradient steps`
| **FOR** each gradient step up to :guilabel:`gradient_steps` **DO**
| :green:`# compute target values`
| :math:`a' \leftarrow \mu_{\theta_{target}}(s')`
| :math:`Q_{_{target}} \leftarrow Q_{\phi_{target}}(s', a')`
| :math:`y \leftarrow r \;+` :guilabel:`discount_factor` :math:`\neg d \; Q_{_{target}}`
| :green:`# compute critic loss`
| :math:`Q \leftarrow Q_\phi(s, a)`
| :math:`L_{Q_\phi} \leftarrow \frac{1}{N} \sum_{i=1}^N (Q - y)^2`
| :green:`# optimization step (critic)`
| reset :math:`\text{optimizer}_\phi`
| :math:`\nabla_{\phi} L_{Q_\phi}`
| :math:`\text{clip}(\lVert \nabla_{\phi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_\phi`
| :green:`# compute policy (actor) loss`
| :math:`a \leftarrow \mu_\theta(s)`
| :math:`Q \leftarrow Q_\phi(s, a)`
| :math:`L_{\mu_\theta} \leftarrow - \frac{1}{N} \sum_{i=1}^N Q`
| :green:`# optimization step (policy)`
| reset :math:`\text{optimizer}_\theta`
| :math:`\nabla_{\theta} L_{\mu_\theta}`
| :math:`\text{clip}(\lVert \nabla_{\theta} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_\theta`
| :green:`# update target networks`
| :math:`\theta_{target} \leftarrow` :guilabel:`polyak` :math:`\theta + (1 \;-` :guilabel:`polyak` :math:`) \theta_{target}`
| :math:`\phi_{target} \leftarrow` :guilabel:`polyak` :math:`\phi + (1 \;-` :guilabel:`polyak` :math:`) \phi_{target}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_\theta (\text{optimizer}_\theta)`
| step :math:`\text{scheduler}_\phi (\text{optimizer}_\phi)`
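A small sketch of the soft (polyak) target-network update performed at the end of each gradient step:

.. code-block:: python

    import torch


    def soft_update(network: torch.nn.Module, target_network: torch.nn.Module, polyak: float = 0.005) -> None:
        """theta_target <- polyak * theta + (1 - polyak) * theta_target."""
        with torch.no_grad():
            for param, target_param in zip(network.parameters(), target_network.parameters()):
                target_param.data.mul_(1.0 - polyak)
                target_param.data.add_(polyak * param.data)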
.. raw:: html
<br>
Usage
-----
.. note::
Support for recurrent neural networks (RNN, LSTM, GRU and any other variant) is implemented in a separate file (:literal:`ddpg_rnn.py`) to maintain the readability of the standard implementation (:literal:`ddpg.py`)
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-ddpg]
:end-before: [torch-end-ddpg]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [jax-start-ddpg]
:end-before: [jax-end-ddpg]
.. tab:: RNN implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. note::
When using recurrent models it is necessary to override their :literal:`.get_specification()` method. Visit each model's documentation for more details
.. literalinclude:: ../../snippets/agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [torch-start-ddpg-rnn]
:end-before: [torch-end-ddpg-rnn]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. literalinclude:: ../../../../skrl/agents/torch/ddpg/ddpg.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 4 deterministic function approximators. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\mu_\theta(s)`
- Policy (actor)
- :literal:`"policy"`
- observation
- action
- :ref:`Deterministic <models_deterministic>`
* - :math:`\mu_{\theta_{target}}(s)`
- Target policy
- :literal:`"target_policy"`
- observation
- action
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_\phi(s, a)`
- Q-network (critic)
- :literal:`"critic"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
* - :math:`Q_{\phi_{target}}(s, a)`
- Target Q-network
- :literal:`"target_critic"`
- observation + action
- 1
- :ref:`Deterministic <models_deterministic>`
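For instance, a minimal sketch of how these models could be collected and passed to the agent (here :literal:`Actor` and :literal:`Critic` stand for user-defined deterministic models, and :literal:`memory` and :literal:`cfg` are assumed to be created as in the usage snippet above):
.. code-block:: python
    from skrl.agents.torch.ddpg import DDPG
    # gather the four function approximators under the expected dictionary keys
    models = {}
    models["policy"] = Actor(env.observation_space, env.action_space, device)
    models["target_policy"] = Actor(env.observation_space, env.action_space, device)
    models["critic"] = Critic(env.observation_space, env.action_space, device)
    models["target_critic"] = Critic(env.observation_space, env.action_space, device)
    # the dictionary is passed to the agent constructor under the `models` argument
    agent = DDPG(models=models,
                 memory=memory,
                 cfg=cfg,
                 observation_space=env.observation_space,
                 action_space=env.action_space,
                 device=device)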
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
* - RNN support
- RNN, LSTM, GRU and any other variant
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.agents.torch.ddpg.DDPG_DEFAULT_CONFIG
.. autoclass:: skrl.agents.torch.ddpg.DDPG
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. autoclass:: skrl.agents.torch.ddpg.DDPG_RNN
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.agents.jax.ddpg.DDPG_DEFAULT_CONFIG
.. autoclass:: skrl.agents.jax.ddpg.DDPG
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 8,064 | reStructuredText | 27.701068 | 235 | 0.56312 |
Toni-SM/skrl/docs/source/api/resources/noises.rst | Noises
======
.. toctree::
:hidden:
Gaussian noise <noises/gaussian>
    Ornstein-Uhlenbeck noise <noises/ornstein_uhlenbeck>
Definition of the noises used by the agents during the exploration stage. All noises inherit from a base class that defines a uniform interface.
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Noises
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Gaussian <noises/gaussian>` noise
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - :doc:`Ornstein-Uhlenbeck <noises/ornstein_uhlenbeck>` noise |_2|
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
Base class
----------
.. note::
This is the base class for all the other classes in this module.
It provides the basic functionality for the other classes.
**It is not intended to be used directly**.
.. raw:: html
<br>
Basic inheritance usage
^^^^^^^^^^^^^^^^^^^^^^^
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/noises.py
:language: python
:start-after: [start-base-class-torch]
:end-before: [end-base-class-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/noises.py
:language: python
:start-after: [start-base-class-jax]
:end-before: [end-base-class-jax]
.. raw:: html
<br>
API (PyTorch)
^^^^^^^^^^^^^
.. autoclass:: skrl.resources.noises.torch.base.Noise
:undoc-members:
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
^^^^^^^^^
.. autoclass:: skrl.resources.noises.jax.base.Noise
:undoc-members:
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __init__
| 1,899 | reStructuredText | 20.111111 | 144 | 0.57346 |
Toni-SM/skrl/docs/source/api/resources/schedulers.rst | Learning rate schedulers
========================
.. toctree::
:hidden:
KL Adaptive <schedulers/kl_adaptive>
Learning rate schedulers are techniques that adjust the learning rate over time to improve the performance of the agent.
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Learning rate schedulers
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`KL Adaptive <schedulers/kl_adaptive>`
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
|
**Implementation according to the ML framework:**
- **PyTorch**: The implemented schedulers inherit from the PyTorch :literal:`_LRScheduler` class. Visit `How to adjust learning rate <https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate>`_ in the PyTorch documentation for more details.
- **JAX**: The implemented schedulers must parameterize and return a function that maps step counts to values. Visit `Schedules <https://optax.readthedocs.io/en/latest/api.html#schedules>`_ in the Optax documentation for more details.
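To illustrate the expected JAX interface, a scheduler is simply a factory that returns such a mapping. The hand-written sketch below (shown only as an illustration; Optax's built-in schedules already cover this case) could be used following the same pattern as the Optax example in the usage section below:
.. code-block:: python
    def linear_schedule(initial_value: float, final_value: float, transition_steps: int):
        """Return a function mapping a step count to a learning rate (illustrative sketch)"""
        def schedule(count: int) -> float:
            # linear interpolation, clamped after `transition_steps` steps
            fraction = min(count / transition_steps, 1.0)
            return initial_value + fraction * (final_value - initial_value)
        return schedule
    # e.g.: cfg["learning_rate_scheduler"] = linear_schedule
    #       cfg["learning_rate_scheduler_kwargs"] = {"initial_value": 1e-3, "final_value": 1e-5, "transition_steps": 10000}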
.. raw:: html
<br>
Usage
-----
The learning rate scheduler usage is defined in each agent's configuration dictionary. The scheduler class is set under the :literal:`"learning_rate_scheduler"` key and its arguments are set under the :literal:`"learning_rate_scheduler_kwargs"` key as a keyword argument dictionary, without specifying the optimizer (first argument).
The following examples show how to set the scheduler for an agent:
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. tab:: PyTorch scheduler
.. code-block:: python
:emphasize-lines: 2, 5-6
# import the scheduler class
from torch.optim.lr_scheduler import StepLR
cfg = DEFAULT_CONFIG.copy()
cfg["learning_rate_scheduler"] = StepLR
cfg["learning_rate_scheduler_kwargs"] = {"step_size": 1, "gamma": 0.9}
.. tab:: skrl scheduler
.. code-block:: python
:emphasize-lines: 2, 5-6
# import the scheduler class
from skrl.resources.schedulers.torch import KLAdaptiveLR
cfg = DEFAULT_CONFIG.copy()
cfg["learning_rate_scheduler"] = KLAdaptiveLR
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01}
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. tab:: JAX (Optax) scheduler
.. code-block:: python
:emphasize-lines: 2, 5-6
# import the scheduler function
from optax import constant_schedule
cfg = DEFAULT_CONFIG.copy()
cfg["learning_rate_scheduler"] = constant_schedule
cfg["learning_rate_scheduler_kwargs"] = {"value": 1e-4}
.. tab:: skrl scheduler
.. code-block:: python
:emphasize-lines: 2, 5-6
# import the scheduler class
from skrl.resources.schedulers.jax import KLAdaptiveLR # or kl_adaptive (Optax style)
cfg = DEFAULT_CONFIG.copy()
cfg["learning_rate_scheduler"] = KLAdaptiveLR
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01}
| 3,468 | reStructuredText | 33.346534 | 333 | 0.57872 |
Toni-SM/skrl/docs/source/api/resources/preprocessors.rst | Preprocessors
=============
.. toctree::
:hidden:
Running standard scaler <preprocessors/running_standard_scaler>
Preprocessors are functions used to transform or encode raw input data into a form more suitable for the learning algorithm.
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Preprocessors
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Running standard scaler <preprocessors/running_standard_scaler>` |_4|
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
| 589 | reStructuredText | 23.583332 | 124 | 0.617997 |
Toni-SM/skrl/docs/source/api/resources/optimizers.rst | Optimizers
==========
.. toctree::
:hidden:
Adam <optimizers/adam>
Optimizers are algorithms that adjust the parameters of artificial neural networks to minimize the error or loss function during the training process.
.. raw:: html
<br><hr>
.. list-table::
:header-rows: 1
* - Optimizers
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - :doc:`Adam <optimizers/adam>`\ |_5| |_5| |_5| |_5| |_5| |_5| |_3|
- .. centered:: :math:`\scriptscriptstyle \texttt{PyTorch}`
- .. centered:: :math:`\blacksquare`
| 578 | reStructuredText | 23.124999 | 150 | 0.576125 |
Toni-SM/skrl/docs/source/api/resources/schedulers/kl_adaptive.rst | KL Adaptive
===========
Adjust the learning rate according to the value of the Kullback-Leibler (KL) divergence.
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
The learning rate (:math:`\eta`) at each step is modified as follows:
| **IF** :math:`\; KL >` :guilabel:`kl_factor` :guilabel:`kl_threshold` **THEN**
| :math:`\eta_{t + 1} = \max(\eta_t \,/` :guilabel:`lr_factor` :math:`,` :guilabel:`min_lr` :math:`)`
| **IF** :math:`\; KL <` :guilabel:`kl_threshold` :math:`/` :guilabel:`kl_factor` **THEN**
| :math:`\eta_{t + 1} = \min(` :guilabel:`lr_factor` :math:`\eta_t,` :guilabel:`max_lr` :math:`)`
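The rule above amounts to only a few lines of Python. The following sketch illustrates the logic (the keyword values shown here are illustrative assumptions, not necessarily the class defaults):
.. code-block:: python
    def kl_adaptive_lr(lr, kl, kl_threshold=0.008, kl_factor=2.0, lr_factor=1.5,
                       min_lr=1e-6, max_lr=1e-2):
        """Illustrative sketch of the KL-adaptive learning rate rule"""
        if kl > kl_threshold * kl_factor:
            # KL divergence too large: decrease the learning rate (not below min_lr)
            return max(lr / lr_factor, min_lr)
        if kl < kl_threshold / kl_factor:
            # KL divergence small enough: increase the learning rate (not above max_lr)
            return min(lr * lr_factor, max_lr)
        return lr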
.. raw:: html
<br>
Usage
-----
The learning rate scheduler usage is defined in each agent's configuration dictionary. The scheduler class is set under the :literal:`"learning_rate_scheduler"` key and its arguments are set under the :literal:`"learning_rate_scheduler_kwargs"` key as a keyword argument dictionary, without specifying the optimizer (first argument). The following examples show how to set the scheduler for an agent:
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. code-block:: python
:emphasize-lines: 2, 5-6
# import the scheduler class
from skrl.resources.schedulers.torch import KLAdaptiveLR
cfg = DEFAULT_CONFIG.copy()
cfg["learning_rate_scheduler"] = KLAdaptiveLR
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01}
.. group-tab:: |_4| |jax| |_4|
.. code-block:: python
:emphasize-lines: 2, 5-6
# import the scheduler class
from skrl.resources.schedulers.jax import KLAdaptiveLR # or kl_adaptive (Optax style)
cfg = DEFAULT_CONFIG.copy()
cfg["learning_rate_scheduler"] = KLAdaptiveLR
cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.01}
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.resources.schedulers.torch.kl_adaptive.KLAdaptiveLR
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.resources.schedulers.jax.kl_adaptive.KLAdaptiveLR
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __init__
| 2,383 | reStructuredText | 25.786517 | 400 | 0.606379 |
Toni-SM/skrl/docs/source/api/resources/preprocessors/running_standard_scaler.rst | Running standard scaler
=======================
Standardize input features by removing the mean and scaling to unit variance.
.. raw:: html
<br><hr>
Algorithm
---------
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - mean (:math:`\bar{x}`), standard deviation (:math:`\sigma`), variance (:math:`\sigma^2`)
| - running mean (:math:`\bar{x}_t`), running variance (:math:`\sigma^2_t`)
|
**Standardization by centering and scaling**
| :math:`\text{clip}((x - \bar{x}_t) / (\sqrt{\sigma^2_t} \;+` :guilabel:`epsilon` :math:`), -c, c) \qquad` with :math:`c` as :guilabel:`clip_threshold`
|
**Scale back the data to the original representation (inverse transform)**
| :math:`\sqrt{\sigma^2_t} \; \text{clip}(x, -c, c) + \bar{x}_t \qquad` with :math:`c` as :guilabel:`clip_threshold`
|
**Update the running mean and variance** (See `parallel algorithm <https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm>`_)
| :math:`\delta \leftarrow x - \bar{x}_t`
| :math:`n_T \leftarrow n_t + n`
| :math:`M2 \leftarrow (\sigma^2_t n_t) + (\sigma^2 n) + \delta^2 \dfrac{n_t n}{n_T}`
| :green:`# update internal variables`
| :math:`\bar{x}_t \leftarrow \bar{x}_t + \delta \dfrac{n}{n_T}`
| :math:`\sigma^2_t \leftarrow \dfrac{M2}{n_T}`
| :math:`n_t \leftarrow n_T`
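For clarity, the parallel update of the running statistics can be written as the following NumPy sketch, which operates on a batch ``x`` of shape ``(n, size)`` and is not the library implementation:
.. code-block:: python
    import numpy as np
    def update_running_stats(x, running_mean, running_variance, current_count):
        """Illustrative sketch of the parallel mean/variance update"""
        batch_mean, batch_variance, batch_count = x.mean(axis=0), x.var(axis=0), x.shape[0]
        delta = batch_mean - running_mean
        total_count = current_count + batch_count
        # combine both sets of statistics (parallel algorithm)
        M2 = (running_variance * current_count) + (batch_variance * batch_count) \
            + delta ** 2 * current_count * batch_count / total_count
        # update the internal variables
        running_mean = running_mean + delta * batch_count / total_count
        running_variance = M2 / total_count
        return running_mean, running_variance, total_count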
.. raw:: html
<br>
Usage
-----
The preprocessor usage is defined in each agent's configuration dictionary.
The preprocessor class is set under the :literal:`"<variable>_preprocessor"` key and its arguments are set under the :literal:`"<variable>_preprocessor_kwargs"` key as a keyword argument dictionary. The following examples show how to set the preprocessors for an agent:
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. code-block:: python
:emphasize-lines: 2, 5-8
# import the preprocessor class
from skrl.resources.preprocessors.torch import RunningStandardScaler
cfg = DEFAULT_CONFIG.copy()
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
.. group-tab:: |_4| |jax| |_4|
.. code-block:: python
:emphasize-lines: 2, 5-8
# import the preprocessor class
from skrl.resources.preprocessors.jax import RunningStandardScaler
cfg = DEFAULT_CONFIG.copy()
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1}
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.resources.preprocessors.torch.running_standard_scaler.RunningStandardScaler
:private-members: _parallel_variance, _get_space_size
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.resources.preprocessors.jax.running_standard_scaler.RunningStandardScaler
:undoc-members:
:private-members: _parallel_variance, _get_space_size
:members:
.. automethod:: __init__
.. automethod:: __call__
| 3,399 | reStructuredText | 29.088495 | 269 | 0.626949 |
Toni-SM/skrl/docs/source/api/resources/noises/ornstein_uhlenbeck.rst | .. _ornstein-uhlenbeck-noise:
Ornstein-Uhlenbeck noise
========================
Noise generated by a stochastic process that is characterized by its mean-reverting behavior.
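In discrete time the process can be sketched by the following update, where :math:`\theta` controls the strength of the mean reversion towards :math:`\mu`, :math:`\sigma` scales the random term and :math:`\Delta t` is the integration step (the notation is generic and does not necessarily map one-to-one to the class arguments):
.. math::
    x_{t+1} = x_t + \theta \, (\mu - x_t) \, \Delta t + \sigma \, \sqrt{\Delta t} \; \mathcal{N}(0, 1)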
.. raw:: html
<br><hr>
Usage
-----
The noise usage is defined in each agent's configuration dictionary. A noise instance is set under the :literal:`"noise"` sub-key. The following examples show how to set the noise for an agent:
|
.. image:: ../../../_static/imgs/noise_ornstein_uhlenbeck.png
:width: 75%
:align: center
:alt: Ornstein-Uhlenbeck noise
.. raw:: html
<br><br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../../snippets/noises.py
:language: python
:emphasize-lines: 1, 4
:start-after: [torch-start-ornstein-uhlenbeck]
:end-before: [torch-end-ornstein-uhlenbeck]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../../snippets/noises.py
:language: python
:emphasize-lines: 1, 4
:start-after: [jax-start-ornstein-uhlenbeck]
:end-before: [jax-end-ornstein-uhlenbeck]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.resources.noises.torch.ornstein_uhlenbeck.OrnsteinUhlenbeckNoise
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.resources.noises.jax.ornstein_uhlenbeck.OrnsteinUhlenbeckNoise
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _update
:members:
.. automethod:: __init__
| 1,687 | reStructuredText | 20.922078 | 193 | 0.605216 |
Toni-SM/skrl/docs/source/api/resources/noises/gaussian.rst | .. _gaussian-noise:
Gaussian noise
==============
Noise generated by a normal distribution.
.. raw:: html
<br><hr>
Usage
-----
The noise usage is defined in each agent's configuration dictionary. A noise instance is set under the :literal:`"noise"` sub-key. The following examples show how to set the noise for an agent:
|
.. image:: ../../../_static/imgs/noise_gaussian.png
:width: 75%
:align: center
:alt: Gaussian noise
.. raw:: html
<br><br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../../snippets/noises.py
:language: python
:emphasize-lines: 1, 4
:start-after: [torch-start-gaussian]
:end-before: [torch-end-gaussian]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../../snippets/noises.py
:language: python
:emphasize-lines: 1, 4
:start-after: [jax-start-gaussian]
:end-before: [jax-end-gaussian]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.resources.noises.torch.gaussian.GaussianNoise
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.resources.noises.jax.gaussian.GaussianNoise
:undoc-members:
:show-inheritance:
:inherited-members:
:private-members: _update
:members:
.. automethod:: __init__
| 1,505 | reStructuredText | 18.558441 | 193 | 0.576744 |
Toni-SM/skrl/docs/source/api/resources/optimizers/adam.rst | Adam
====
An extension of the stochastic gradient descent algorithm that adaptively changes the learning rate for each neural network parameter.
.. raw:: html
<br><hr>
Usage
-----
.. note::
This class is the result of isolating the Optax optimizer that is mixed with the model parameters, as defined in the `Flax's TrainState <https://flax.readthedocs.io/en/latest/api_reference/flax.training.html#train-state>`_ class. It is not intended to be used directly by the user, but by agent implementations.
.. tabs::
.. group-tab:: |_4| |jax| |_4|
.. code-block:: python
:emphasize-lines: 2, 5, 8
# import the optimizer class
from skrl.resources.optimizers.jax import Adam
# instantiate the optimizer
optimizer = Adam(model=model, lr=1e-3)
# step the optimizer
optimizer = optimizer.step(grad, model)
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.resources.optimizers.jax.adam.Adam
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __new__
| 1,104 | reStructuredText | 23.021739 | 315 | 0.639493 |
Toni-SM/skrl/docs/source/api/trainers/step.rst | Step trainer
============
Train agents by controlling the training/evaluation loop step by step.
.. raw:: html
<br><hr>
Concept
-------
.. image:: ../../_static/imgs/manual_trainer-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Step-by-step trainer
.. image:: ../../_static/imgs/manual_trainer-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Step-by-step trainer
.. raw:: html
<br>
Usage
-----
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/trainer.py
:language: python
:start-after: [pytorch-start-step]
:end-before: [pytorch-end-step]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/trainer.py
:language: python
:start-after: [jax-start-step]
:end-before: [jax-end-step]
.. raw:: html
<br>
Configuration
-------------
.. literalinclude:: ../../../../skrl/trainers/torch/step.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.trainers.torch.step.STEP_TRAINER_DEFAULT_CONFIG
.. autoclass:: skrl.trainers.torch.step.StepTrainer
:undoc-members:
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.trainers.jax.step.STEP_TRAINER_DEFAULT_CONFIG
.. autoclass:: skrl.trainers.jax.step.StepTrainer
:undoc-members:
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __init__
| 1,684 | reStructuredText | 17.118279 | 67 | 0.574822 |
Toni-SM/skrl/docs/source/api/trainers/sequential.rst | Sequential trainer
==================
Train agents sequentially (i.e., one after the other in each interaction with the environment).
.. raw:: html
<br><hr>
Concept
-------
.. image:: ../../_static/imgs/sequential_trainer-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Sequential trainer
.. image:: ../../_static/imgs/sequential_trainer-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Sequential trainer
.. raw:: html
<br>
Usage
-----
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/trainer.py
:language: python
:start-after: [pytorch-start-sequential]
:end-before: [pytorch-end-sequential]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/trainer.py
:language: python
:start-after: [jax-start-sequential]
:end-before: [jax-end-sequential]
.. raw:: html
<br>
Configuration
-------------
.. literalinclude:: ../../../../skrl/trainers/torch/sequential.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.trainers.torch.sequential.SEQUENTIAL_TRAINER_DEFAULT_CONFIG
.. autoclass:: skrl.trainers.torch.sequential.SequentialTrainer
:undoc-members:
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.trainers.jax.sequential.SEQUENTIAL_TRAINER_DEFAULT_CONFIG
.. autoclass:: skrl.trainers.jax.sequential.SequentialTrainer
:undoc-members:
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __init__
| 1,806 | reStructuredText | 18.430107 | 95 | 0.597453 |
Toni-SM/skrl/docs/source/api/trainers/manual.rst | Manual training
===============
Train agents by manually controlling the training/evaluation loop.
.. raw:: html
<br><hr>
Concept
-------
.. image:: ../../_static/imgs/manual_trainer-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Manual trainer
.. image:: ../../_static/imgs/manual_trainer-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Manual trainer
.. raw:: html
<br>
Usage
-----
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: Training
.. literalinclude:: ../../snippets/trainer.py
:language: python
:start-after: [pytorch-start-manual-training]
:end-before: [pytorch-end-manual-training]
.. group-tab:: Evaluation
.. literalinclude:: ../../snippets/trainer.py
:language: python
:start-after: [pytorch-start-manual-evaluation]
:end-before: [pytorch-end-manual-evaluation]
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. group-tab:: Training
.. literalinclude:: ../../snippets/trainer.py
:language: python
:start-after: [jax-start-manual-training]
:end-before: [jax-end-manual-training]
.. group-tab:: Evaluation
.. literalinclude:: ../../snippets/trainer.py
:language: python
:start-after: [jax-start-manual-evaluation]
:end-before: [jax-end-manual-evaluation]
.. raw:: html
<br>
| 1,676 | reStructuredText | 21.972602 | 67 | 0.5 |
Toni-SM/skrl/docs/source/api/trainers/parallel.rst | Parallel trainer
================
Train agents in parallel using multiple processes.
.. raw:: html
<br><hr>
Concept
-------
.. image:: ../../_static/imgs/parallel_trainer-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Parallel trainer
.. image:: ../../_static/imgs/parallel_trainer-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Parallel trainer
.. raw:: html
<br>
Usage
-----
.. note::
Each process adds a GPU memory overhead (~1GB, although it can be much higher) due to PyTorch's CUDA kernels. See PyTorch `Issue #12873 <https://github.com/pytorch/pytorch/issues/12873>`_ for more details
.. note::
At the moment, only simultaneous training and evaluation of agents with local memory (no memory sharing) is implemented
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/trainer.py
:language: python
:start-after: [pytorch-start-parallel]
:end-before: [pytorch-end-parallel]
.. raw:: html
<br>
Configuration
-------------
.. literalinclude:: ../../../../skrl/trainers/torch/parallel.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.trainers.torch.parallel.PARALLEL_TRAINER_DEFAULT_CONFIG
.. autoclass:: skrl.trainers.torch.parallel.ParallelTrainer
:undoc-members:
:show-inheritance:
:inherited-members:
:members:
.. automethod:: __init__
| 1,577 | reStructuredText | 19.493506 | 208 | 0.622701 |
Toni-SM/skrl/docs/source/api/models/categorical.rst | .. _models_categorical:
Categorical model
=================
Categorical models run **discrete-domain stochastic** policies.
.. raw:: html
<br><hr>
skrl provides a Python mixin (:literal:`CategoricalMixin`) to assist in the creation of these types of models, allowing users to have full control over the function approximator definitions and architectures. Note that the use of this mixin must comply with the following rules:
* The definition of multiple inheritance must always include the :ref:`Model <models_base_class>` base class at the end.
* The :ref:`Model <models_base_class>` base class constructor must be invoked before the mixins constructor.
.. warning::
For models in JAX/Flax it is imperative to define all parameters (except ``observation_space``, ``action_space`` and ``device``) with default values to avoid errors (``TypeError: __init__() missing N required positional argument``) during initialization.
In addition, it is necessary to initialize the model's ``state_dict`` (via the ``init_state_dict`` method) after its instantiation to avoid errors (``AttributeError: object has no attribute "state_dict". If "state_dict" is defined in '.setup()', remember these fields are only accessible from inside 'init' or 'apply'``) during its use.
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:emphasize-lines: 1, 3-4
:start-after: [start-definition-torch]
:end-before: [end-definition-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:emphasize-lines: 1, 3-4
:start-after: [start-definition-jax]
:end-before: [end-definition-jax]
.. raw:: html
<br>
Concept
-------
.. image:: ../../_static/imgs/model_categorical-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Categorical model
.. image:: ../../_static/imgs/model_categorical-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Categorical model
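Conceptually, the mixin turns the network output (unnormalized log probabilities) into a categorical distribution from which the actions are drawn. A minimal plain-PyTorch sketch of that idea (not the mixin code itself):
.. code-block:: python
    import torch
    logits = torch.randn(4096, 3)                      # network output: one logit per discrete action
    distribution = torch.distributions.Categorical(logits=logits)
    actions = distribution.sample()                    # sampled discrete actions, shape (4096,)
    log_prob = distribution.log_prob(actions)          # log probability of the sampled actions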
.. raw:: html
<br>
Usage
-----
* Multi-Layer Perceptron (**MLP**)
* Convolutional Neural Network (**CNN**)
* Recurrent Neural Network (**RNN**)
* Gated Recurrent Unit RNN (**GRU**)
* Long Short-Term Memory RNN (**LSTM**)
.. tabs::
.. tab:: MLP
.. image:: ../../_static/imgs/model_categorical_mlp-light.svg
:width: 40%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_mlp-dark.svg
:width: 40%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-mlp-sequential-torch]
:end-before: [end-mlp-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-mlp-functional-torch]
:end-before: [end-mlp-functional-torch]
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. group-tab:: setup-style
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-mlp-setup-jax]
:end-before: [end-mlp-setup-jax]
.. group-tab:: compact-style
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-mlp-compact-jax]
:end-before: [end-mlp-compact-jax]
.. tab:: CNN
.. image:: ../../_static/imgs/model_categorical_cnn-light.svg
:width: 100%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_cnn-dark.svg
:width: 100%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-cnn-sequential-torch]
:end-before: [end-cnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-cnn-functional-torch]
:end-before: [end-cnn-functional-torch]
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. group-tab:: setup-style
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-cnn-setup-jax]
:end-before: [end-cnn-setup-jax]
.. group-tab:: compact-style
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-cnn-compact-jax]
:end-before: [end-cnn-compact-jax]
.. tab:: RNN
.. image:: ../../_static/imgs/model_categorical_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overwritten to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-rnn-sequential-torch]
:end-before: [end-rnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-rnn-functional-torch]
:end-before: [end-rnn-functional-torch]
.. tab:: GRU
.. image:: ../../_static/imgs/model_categorical_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overwritten to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-gru-sequential-torch]
:end-before: [end-gru-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-gru-functional-torch]
:end-before: [end-gru-functional-torch]
.. tab:: LSTM
.. image:: ../../_static/imgs/model_categorical_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{cell} ={} & \text{hidden_size} \\
H_{out} ={} & \text{proj_size if } \text{proj_size}>0 \text{ otherwise hidden_size} \\
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overwritten to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden/cell states
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden/cell states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden/cell states
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-lstm-sequential-torch]
:end-before: [end-lstm-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/categorical_model.py
:language: python
:start-after: [start-lstm-functional-torch]
:end-before: [end-lstm-functional-torch]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.models.torch.categorical.CategoricalMixin
:show-inheritance:
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.models.jax.categorical.CategoricalMixin
:show-inheritance:
:members:
.. automethod:: __init__
| 14,050 | reStructuredText | 33.952736 | 340 | 0.519288 |
Toni-SM/skrl/docs/source/api/models/multivariate_gaussian.rst | .. _models_multivariate_gaussian:
Multivariate Gaussian model
===========================
Multivariate Gaussian models run **continuous-domain stochastic** policies.
.. raw:: html
<br><hr>
skrl provides a Python mixin (:literal:`MultivariateGaussianMixin`) to assist in the creation of these types of models, allowing users to have full control over the function approximator definitions and architectures. Note that the use of this mixin must comply with the following rules:
* The definition of multiple inheritance must always include the :ref:`Model <models_base_class>` base class at the end.
* The :ref:`Model <models_base_class>` base class constructor must be invoked before the mixins constructor.
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:emphasize-lines: 1, 4-5
:start-after: [start-definition-torch]
:end-before: [end-definition-torch]
.. raw:: html
<br>
Concept
-------
.. image:: ../../_static/imgs/model_multivariate_gaussian-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Multivariate Gaussian model
.. image:: ../../_static/imgs/model_multivariate_gaussian-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Multivariate Gaussian model
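Conceptually, the mixin uses the network output as the mean of a multivariate normal distribution whose diagonal covariance is built from the learned log standard deviation, and samples the actions from it. A minimal plain-PyTorch sketch of that idea (not the mixin code itself):
.. code-block:: python
    import torch
    mean = torch.zeros(4096, 8)                             # network output: mean of the actions
    log_std = torch.full((8,), -0.5)                        # learned log standard deviation parameter
    covariance = torch.diag(log_std.exp() * log_std.exp())  # diagonal covariance matrix
    distribution = torch.distributions.MultivariateNormal(mean, covariance_matrix=covariance)
    actions = distribution.sample()                         # sampled continuous actions, shape (4096, 8)
    log_prob = distribution.log_prob(actions)               # log probability of the sampled actions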
.. raw:: html
<br>
Usage
-----
* Multi-Layer Perceptron (**MLP**)
* Convolutional Neural Network (**CNN**)
* Recurrent Neural Network (**RNN**)
* Gated Recurrent Unit RNN (**GRU**)
* Long Short-Term Memory RNN (**LSTM**)
.. tabs::
.. tab:: MLP
.. image:: ../../_static/imgs/model_gaussian_mlp-light.svg
:width: 42%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_mlp-dark.svg
:width: 42%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-mlp-sequential-torch]
:end-before: [end-mlp-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-mlp-functional-torch]
:end-before: [end-mlp-functional-torch]
.. tab:: CNN
.. image:: ../../_static/imgs/model_gaussian_cnn-light.svg
:width: 100%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_cnn-dark.svg
:width: 100%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-cnn-sequential-torch]
:end-before: [end-cnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-cnn-functional-torch]
:end-before: [end-cnn-functional-torch]
.. tab:: RNN
.. image:: ../../_static/imgs/model_gaussian_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overwritten to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-rnn-sequential-torch]
:end-before: [end-rnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-rnn-functional-torch]
:end-before: [end-rnn-functional-torch]
.. tab:: GRU
.. image:: ../../_static/imgs/model_gaussian_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overwritten to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-gru-sequential-torch]
:end-before: [end-gru-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-gru-functional-torch]
:end-before: [end-gru-functional-torch]
.. tab:: LSTM
.. image:: ../../_static/imgs/model_gaussian_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{cell} ={} & \text{hidden_size} \\
H_{out} ={} & \text{proj_size if } \text{proj_size}>0 \text{ otherwise hidden_size} \\
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overwritten to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden/cell states
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden/cell states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden/cell states
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-lstm-sequential-torch]
:end-before: [end-lstm-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multivariate_gaussian_model.py
:language: python
:start-after: [start-lstm-functional-torch]
:end-before: [end-lstm-functional-torch]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.models.torch.multivariate_gaussian.MultivariateGaussianMixin
:show-inheritance:
:members:
.. automethod:: __init__
| 11,833 | reStructuredText | 33.908554 | 295 | 0.527339 |
Toni-SM/skrl/docs/source/api/models/multicategorical.rst | .. _models_multicategorical:
Multi-Categorical model
=======================
Multi-Categorical models run **discrete-domain stochastic** policies.
.. raw:: html
<br><hr>
skrl provides a Python mixin (:literal:`MultiCategoricalMixin`) to assist in the creation of these types of models, allowing users to have full control over the function approximator definitions and architectures. Note that the use of this mixin must comply with the following rules:
* The definition of multiple inheritance must always include the :ref:`Model <models_base_class>` base class at the end.
* The :ref:`Model <models_base_class>` base class constructor must be invoked before the mixins constructor.
.. warning::
For models in JAX/Flax it is imperative to define all parameters (except ``observation_space``, ``action_space`` and ``device``) with default values to avoid errors (``TypeError: __init__() missing N required positional argument``) during initialization.
In addition, it is necessary to initialize the model's ``state_dict`` (via the ``init_state_dict`` method) after its instantiation to avoid errors (``AttributeError: object has no attribute "state_dict". If "state_dict" is defined in '.setup()', remember these fields are only accessible from inside 'init' or 'apply'``) during its use.
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:emphasize-lines: 1, 3-4
:start-after: [start-definition-torch]
:end-before: [end-definition-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:emphasize-lines: 1, 3-4
:start-after: [start-definition-jax]
:end-before: [end-definition-jax]
.. raw:: html
<br>
Concept
-------
.. image:: ../../_static/imgs/model_multicategorical-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Multi-Categorical model
.. image:: ../../_static/imgs/model_multicategorical-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Multi-Categorical model
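Conceptually, the mixin splits the network output into one group of unnormalized log probabilities per action dimension and builds an independent categorical distribution for each group. A minimal plain-PyTorch sketch of that idea (not the mixin code itself), for an action space with 2 and 3 choices per dimension:
.. code-block:: python
    import torch
    logits = torch.randn(4096, 5)                       # network output: 2 + 3 logits
    distributions = [torch.distributions.Categorical(logits=chunk)
                     for chunk in torch.split(logits, [2, 3], dim=-1)]
    actions = torch.stack([d.sample() for d in distributions], dim=-1)   # shape (4096, 2)
    log_prob = sum(d.log_prob(a) for d, a in zip(distributions, actions.unbind(-1)))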
.. raw:: html
<br>
Usage
-----
* Multi-Layer Perceptron (**MLP**)
* Convolutional Neural Network (**CNN**)
* Recurrent Neural Network (**RNN**)
* Gated Recurrent Unit RNN (**GRU**)
* Long Short-Term Memory RNN (**LSTM**)
.. tabs::
.. tab:: MLP
.. image:: ../../_static/imgs/model_categorical_mlp-light.svg
:width: 40%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_mlp-dark.svg
:width: 40%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-mlp-sequential-torch]
:end-before: [end-mlp-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-mlp-functional-torch]
:end-before: [end-mlp-functional-torch]
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. group-tab:: setup-style
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-mlp-setup-jax]
:end-before: [end-mlp-setup-jax]
.. group-tab:: compact-style
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-mlp-compact-jax]
:end-before: [end-mlp-compact-jax]
.. tab:: CNN
.. image:: ../../_static/imgs/model_categorical_cnn-light.svg
:width: 100%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_cnn-dark.svg
:width: 100%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-cnn-sequential-torch]
:end-before: [end-cnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-cnn-functional-torch]
:end-before: [end-cnn-functional-torch]
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. group-tab:: setup-style
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-cnn-setup-jax]
:end-before: [end-cnn-setup-jax]
.. group-tab:: compact-style
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-cnn-compact-jax]
:end-before: [end-cnn-compact-jax]
.. tab:: RNN
.. image:: ../../_static/imgs/model_categorical_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overwritten to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-rnn-sequential-torch]
:end-before: [end-rnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-rnn-functional-torch]
:end-before: [end-rnn-functional-torch]
.. tab:: GRU
.. image:: ../../_static/imgs/model_categorical_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overwritten to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-gru-sequential-torch]
:end-before: [end-gru-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-gru-functional-torch]
:end-before: [end-gru-functional-torch]
.. tab:: LSTM
.. image:: ../../_static/imgs/model_categorical_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_categorical_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{cell} ={} & \text{hidden_size} \\
H_{out} ={} & \text{proj_size if } \text{proj_size}>0 \text{ otherwise hidden_size} \\
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overwritten to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden/cell states
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden/cell states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden/cell states
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-lstm-sequential-torch]
:end-before: [end-lstm-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/multicategorical_model.py
:language: python
:start-after: [start-lstm-functional-torch]
:end-before: [end-lstm-functional-torch]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.models.torch.multicategorical.MultiCategoricalMixin
:show-inheritance:
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.models.jax.multicategorical.MultiCategoricalMixin
:show-inheritance:
:members:
.. automethod:: __init__
| 14,200 | reStructuredText | 34.325871 | 340 | 0.523662 |
Toni-SM/skrl/docs/source/api/models/tabular.rst | .. _models_tabular:
Tabular model
=============
Tabular models run **discrete-domain deterministic/stochastic** policies.
.. raw:: html
<br><hr>
skrl provides a Python mixin (:literal:`TabularMixin`) to assist in the creation of these types of models, allowing users to have full control over the table definitions. Note that the use of this mixin must comply with the following rules:
* The definition of multiple inheritance must always include the :ref:`Model <models_base_class>` base class at the end.
* The :ref:`Model <models_base_class>` base class constructor must be invoked before the mixins constructor.
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/tabular_model.py
:language: python
:emphasize-lines: 1, 3-4
:start-after: [start-definition-torch]
:end-before: [end-definition-torch]
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: :math:`\epsilon`-greedy policy
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/tabular_model.py
:language: python
:start-after: [start-epsilon-greedy-torch]
:end-before: [end-epsilon-greedy-torch]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.models.torch.tabular.TabularMixin
:show-inheritance:
:exclude-members: to, state_dict, load_state_dict, load, save
:members:
.. automethod:: __init__
| 1,538 | reStructuredText | 24.229508 | 240 | 0.608583 |
Toni-SM/skrl/docs/source/api/models/deterministic.rst | .. _models_deterministic:
Deterministic model
===================
Deterministic models run **continuous-domain deterministic** policies.
.. raw:: html
<br><hr>
skrl provides a Python mixin (:literal:`DeterministicMixin`) to assist in the creation of these types of models, allowing users to have full control over the function approximator definitions and architectures. Note that the use of this mixin must comply with the following rules:
* The definition of multiple inheritance must always include the :ref:`Model <models_base_class>` base class at the end.
* The :ref:`Model <models_base_class>` base class constructor must be invoked before the mixins constructor.
.. warning::
For models in JAX/Flax it is imperative to define all parameters (except ``observation_space``, ``action_space`` and ``device``) with default values to avoid errors (``TypeError: __init__() missing N required positional argument``) during initialization.
In addition, it is necessary to initialize the model's ``state_dict`` (via the ``init_state_dict`` method) after its instantiation to avoid errors (``AttributeError: object has no attribute "state_dict". If "state_dict" is defined in '.setup()', remember these fields are only accessible from inside 'init' or 'apply'``) during its use.
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:emphasize-lines: 1, 3-4
:start-after: [start-definition-torch]
:end-before: [end-definition-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:emphasize-lines: 1, 3-4
:start-after: [start-definition-jax]
:end-before: [end-definition-jax]
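
For instance, complementing the skeleton above, a deterministic value function (critic) following these rules could be sketched as shown below; the network architecture is an illustrative assumption:

.. code-block:: python

    import torch.nn as nn

    from skrl.models.torch import Model, DeterministicMixin


    class Value(DeterministicMixin, Model):                                # Model base class at the end
        def __init__(self, observation_space, action_space, device=None, clip_actions=False):
            Model.__init__(self, observation_space, action_space, device)  # base class constructor first
            DeterministicMixin.__init__(self, clip_actions)                # then the mixin constructor

            self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
                                     nn.ELU(),
                                     nn.Linear(64, 1))

        def compute(self, inputs, role):
            # deterministic output (here, a state-value estimate)
            return self.net(inputs["states"]), {}
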
.. raw:: html
<br>
Concept
-------
.. image:: ../../_static/imgs/model_deterministic-light.svg
:width: 65%
:align: center
:class: only-light
:alt: Deterministic model
.. image:: ../../_static/imgs/model_deterministic-dark.svg
:width: 65%
:align: center
:class: only-dark
:alt: Deterministic model
.. raw:: html
<br>
Usage
-----
* Multi-Layer Perceptron (**MLP**)
* Convolutional Neural Network (**CNN**)
* Recurrent Neural Network (**RNN**)
* Gated Recurrent Unit RNN (**GRU**)
* Long Short-Term Memory RNN (**LSTM**)
.. tabs::
.. tab:: MLP
.. image:: ../../_static/imgs/model_deterministic_mlp-light.svg
:width: 35%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_deterministic_mlp-dark.svg
:width: 35%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-mlp-sequential-torch]
:end-before: [end-mlp-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-mlp-functional-torch]
:end-before: [end-mlp-functional-torch]
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. group-tab:: setup-style
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-mlp-setup-jax]
:end-before: [end-mlp-setup-jax]
.. group-tab:: compact-style
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-mlp-compact-jax]
:end-before: [end-mlp-compact-jax]
.. tab:: CNN
.. image:: ../../_static/imgs/model_deterministic_cnn-light.svg
:width: 100%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_deterministic_cnn-dark.svg
:width: 100%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-cnn-sequential-torch]
:end-before: [end-cnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-cnn-functional-torch]
:end-before: [end-cnn-functional-torch]
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. group-tab:: setup-style
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-cnn-setup-jax]
:end-before: [end-cnn-setup-jax]
.. group-tab:: compact-style
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-cnn-compact-jax]
:end-before: [end-cnn-compact-jax]
.. tab:: RNN
.. image:: ../../_static/imgs/model_deterministic_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_deterministic_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overridden to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-rnn-sequential-torch]
:end-before: [end-rnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-rnn-functional-torch]
:end-before: [end-rnn-functional-torch]
.. tab:: GRU
.. image:: ../../_static/imgs/model_deterministic_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_deterministic_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overridden to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-gru-sequential-torch]
:end-before: [end-gru-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-gru-functional-torch]
:end-before: [end-gru-functional-torch]
.. tab:: LSTM
.. image:: ../../_static/imgs/model_deterministic_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_deterministic_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{cell} ={} & \text{hidden_size} \\
H_{out} ={} & \text{proj_size if } \text{proj_size}>0 \text{ otherwise hidden_size} \\
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overridden to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden/cell state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden/cell states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden/cell state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-lstm-sequential-torch]
:end-before: [end-lstm-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/deterministic_model.py
:language: python
:start-after: [start-lstm-functional-torch]
:end-before: [end-lstm-functional-torch]
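
Putting the points listed in the RNN/GRU/LSTM tabs together, a simplified GRU-based sketch of ``.get_specification()`` and the RNN bookkeeping in ``.compute()`` is shown below. The layer sizes are illustrative, and the training-time sequence reshaping and resets at episode boundaries (driven by ``"terminated"``) are intentionally omitted; refer to the snippets above for the complete handling:

.. code-block:: python

    import torch.nn as nn

    from skrl.models.torch import Model, DeterministicMixin


    class RNNValue(DeterministicMixin, Model):
        def __init__(self, observation_space, action_space, device=None, clip_actions=False,
                     num_envs=1, num_layers=1, hidden_size=64, sequence_length=16):
            Model.__init__(self, observation_space, action_space, device)
            DeterministicMixin.__init__(self, clip_actions)

            self.num_envs = num_envs
            self.num_layers = num_layers
            self.hidden_size = hidden_size          # Hout
            self.sequence_length = sequence_length  # L

            self.rnn = nn.GRU(input_size=self.num_observations, hidden_size=hidden_size,
                              num_layers=num_layers, batch_first=True)
            self.net = nn.Sequential(nn.Linear(hidden_size, 64), nn.ELU(), nn.Linear(64, 1))

        def get_specification(self):
            # sequence length and size (D * num_layers, N, Hout) of the initial hidden state
            return {"rnn": {"sequence_length": self.sequence_length,
                            "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}}

        def compute(self, inputs, role):
            states = inputs["states"]
            hidden_states = inputs["rnn"][0]
            # rollout case only: one time-step per environment
            rnn_input = states.view(-1, 1, states.shape[-1])                # (N, L=1, Hin)
            rnn_output, hidden_states = self.rnn(rnn_input, hidden_states)  # final hidden state is returned
            return self.net(rnn_output.squeeze(1)), {"rnn": [hidden_states]}
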
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.models.torch.deterministic.DeterministicMixin
:show-inheritance:
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.models.jax.deterministic.DeterministicMixin
:show-inheritance:
:members:
.. automethod:: __init__
| 14,131 | reStructuredText | 34.154229 | 340 | 0.521902 |
Toni-SM/skrl/docs/source/api/models/shared_model.rst | Shared model
============
Sometimes it is desirable to define models that use shared layers or networks to represent multiple function approximators. This practice, known among other names as *shared parameters* (or *parameter sharing*), *shared layers*, *shared model*, *shared networks* or *joint architecture*, is typically justified by the following criteria:
* Learning the same characteristics, especially when processing large inputs (such as images).
* Reducing the number of parameters in the whole system.
* Making the computation more efficient.
.. raw:: html
<br><hr>
Implementation
--------------
By combining the implemented mixins, it is possible to define shared models with skrl. In these cases, the use of the :literal:`role` argument (a Python string) is relevant. The agents will call the models by setting the :literal:`role` argument according to their requirements. Visit each agent's documentation (*Key* column of the table in the *Spaces and models* section) to see the possible values this parameter can take.
The code snippet below shows how to define a shared model. The following practices for building shared models can be identified:
* The definition of multiple inheritance must always include the :ref:`Model <models_base_class>` base class at the end.
* The :ref:`Model <models_base_class>` base class constructor must be invoked before the mixins constructor.
* All mixin constructors must be invoked.
* Specifying the :literal:`role` argument is optional if all constructors belong to different mixins.
* If multiple models of the same mixin type are required, the same constructor must be invoked as many times as needed. To do so, it is mandatory to specify the :literal:`role` argument.
* The :literal:`.act(...)` method needs to be overridden to disambiguate its call.
* The same instance of the shared model must be passed to all keys involved.
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/shared_model.py
:language: python
:start-after: [start-mlp-torch]
:end-before: [end-mlp-torch]
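
The snippet above combines different mixins (one constructor each). For the case in which multiple models of the same mixin type are required, and the :literal:`role` argument therefore becomes mandatory, a hypothetical sketch (twin critics sharing a feature extractor; whether such sharing is desirable is algorithm-dependent) could be:

.. code-block:: python

    import torch
    import torch.nn as nn

    from skrl.models.torch import Model, DeterministicMixin


    class TwinCritics(DeterministicMixin, Model):
        def __init__(self, observation_space, action_space, device=None, clip_actions=False):
            Model.__init__(self, observation_space, action_space, device)
            # same mixin invoked twice: the role argument is mandatory
            DeterministicMixin.__init__(self, clip_actions, role="critic_1")
            DeterministicMixin.__init__(self, clip_actions, role="critic_2")

            self.features = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 64),
                                          nn.ELU())
            self.output_1 = nn.Linear(64, 1)
            self.output_2 = nn.Linear(64, 1)

        def compute(self, inputs, role):
            # select the output head according to the role set by the agent
            x = self.features(torch.cat([inputs["states"], inputs["taken_actions"]], dim=-1))
            return (self.output_1(x) if role == "critic_1" else self.output_2(x)), {}


    # the same instance must be passed to all involved keys, e.g.:
    # critic = TwinCritics(env.observation_space, env.action_space, device)
    # models = {"critic_1": critic, "critic_2": critic, ...}
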
| 2,156 | reStructuredText | 43.020407 | 431 | 0.731447 |
Toni-SM/skrl/docs/source/api/models/gaussian.rst | .. _models_gaussian:
Gaussian model
==============
Gaussian models run **continuous-domain stochastic** policies.
.. raw:: html
<br><hr>
skrl provides a Python mixin (:literal:`GaussianMixin`) to assist in the creation of these types of models, allowing users to have full control over the function approximator definitions and architectures. Note that the use of this mixin must comply with the following rules:
* The definition of multiple inheritance must always include the :ref:`Model <models_base_class>` base class at the end.
* The :ref:`Model <models_base_class>` base class constructor must be invoked before the mixins constructor.
.. warning::
For models in JAX/Flax it is imperative to define all parameters (except ``observation_space``, ``action_space`` and ``device``) with default values to avoid errors (``TypeError: __init__() missing N required positional argument``) during initialization.
In addition, it is necessary to initialize the model's ``state_dict`` (via the ``init_state_dict`` method) after its instantiation to avoid errors (``AttributeError: object has no attribute "state_dict". If "state_dict" is defined in '.setup()', remember these fields are only accessible from inside 'init' or 'apply'``) during its use.
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:emphasize-lines: 1, 4-5
:start-after: [start-definition-torch]
:end-before: [end-definition-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:emphasize-lines: 1, 4-5
:start-after: [start-definition-jax]
:end-before: [end-definition-jax]
.. raw:: html
<br>
Concept
-------
.. image:: ../../_static/imgs/model_gaussian-light.svg
:width: 100%
:align: center
:class: only-light
:alt: Gaussian model
.. image:: ../../_static/imgs/model_gaussian-dark.svg
:width: 100%
:align: center
:class: only-dark
:alt: Gaussian model
.. raw:: html
<br>
Usage
-----
* Multi-Layer Perceptron (**MLP**)
* Convolutional Neural Network (**CNN**)
* Recurrent Neural Network (**RNN**)
* Gated Recurrent Unit RNN (**GRU**)
* Long Short-Term Memory RNN (**LSTM**)
.. tabs::
.. tab:: MLP
.. image:: ../../_static/imgs/model_gaussian_mlp-light.svg
:width: 42%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_mlp-dark.svg
:width: 42%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-mlp-sequential-torch]
:end-before: [end-mlp-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-mlp-functional-torch]
:end-before: [end-mlp-functional-torch]
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. group-tab:: setup-style
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-mlp-setup-jax]
:end-before: [end-mlp-setup-jax]
.. group-tab:: compact-style
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-mlp-compact-jax]
:end-before: [end-mlp-compact-jax]
.. tab:: CNN
.. image:: ../../_static/imgs/model_gaussian_cnn-light.svg
:width: 100%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_cnn-dark.svg
:width: 100%
:align: center
:class: only-dark
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-cnn-sequential-torch]
:end-before: [end-cnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-cnn-functional-torch]
:end-before: [end-cnn-functional-torch]
.. group-tab:: |_4| |jax| |_4|
.. tabs::
.. group-tab:: setup-style
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-cnn-setup-jax]
:end-before: [end-cnn-setup-jax]
.. group-tab:: compact-style
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-cnn-compact-jax]
:end-before: [end-cnn-compact-jax]
.. tab:: RNN
.. image:: ../../_static/imgs/model_gaussian_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overridden to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-rnn-sequential-torch]
:end-before: [end-rnn-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-rnn-functional-torch]
:end-before: [end-rnn-functional-torch]
.. tab:: GRU
.. image:: ../../_static/imgs/model_gaussian_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{out} ={} & \text{hidden_size}
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overridden to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-gru-sequential-torch]
:end-before: [end-gru-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-gru-functional-torch]
:end-before: [end-gru-functional-torch]
.. tab:: LSTM
.. image:: ../../_static/imgs/model_gaussian_rnn-light.svg
:width: 90%
:align: center
:class: only-light
.. image:: ../../_static/imgs/model_gaussian_rnn-dark.svg
:width: 90%
:align: center
:class: only-dark
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input_size} \\
H_{cell} ={} & \text{hidden_size} \\
H_{out} ={} & \text{proj_size if } \text{proj_size}>0 \text{ otherwise hidden_size} \\
\end{aligned}
.. raw:: html
<hr>
The following points are relevant in the definition of recurrent models:
* The ``.get_specification()`` method must be overridden to return, under a dictionary key ``"rnn"``, a sub-dictionary that includes the sequence length (under key ``"sequence_length"``) as a number and a list of the dimensions (under key ``"sizes"``) of each initial hidden/cell state
* The ``.compute()`` method's ``inputs`` parameter will have, at least, the following items in the dictionary:
* ``"states"``: state of the environment used to make the decision
* ``"taken_actions"``: actions taken by the policy for the given states, if applicable
* ``"terminated"``: episode termination status for sampled environment transitions. This key is only defined during the training process
* ``"rnn"``: list of initial hidden/cell states ordered according to the model specification
* The ``.compute()`` method must include, under the ``"rnn"`` key of the returned dictionary, a list of each final hidden/cell state
.. raw:: html
<br>
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. tabs::
.. group-tab:: nn.Sequential
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-lstm-sequential-torch]
:end-before: [end-lstm-sequential-torch]
.. group-tab:: nn.functional
.. literalinclude:: ../../snippets/gaussian_model.py
:language: python
:start-after: [start-lstm-functional-torch]
:end-before: [end-lstm-functional-torch]
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.models.torch.gaussian.GaussianMixin
:show-inheritance:
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.models.jax.gaussian.GaussianMixin
:show-inheritance:
:members:
.. automethod:: __init__
| 13,935 | reStructuredText | 33.666667 | 340 | 0.515536 |
Toni-SM/skrl/docs/source/api/multi_agents/mappo.rst | Multi-Agent Proximal Policy Optimization (MAPPO)
================================================
MAPPO is a **model-free**, **stochastic**, **on-policy**, **policy gradient** CTDE (centralized training, decentralized execution) **multi-agent** algorithm that uses a centralized value function to estimate a single (joint) value that guides the policy updates of all agents, improving coordination and cooperation among them.
Paper: `The Surprising Effectiveness of PPO in Cooperative, Multi-Agent Games <https://arxiv.org/abs/2103.01955>`_
.. raw:: html
<br><hr>
Algorithm
---------
| For each iteration do:
| For each agent do:
| :math:`\bullet \;` Collect, in a rollout memory, a set of states :math:`s`, actions :math:`a`, rewards :math:`r`, dones :math:`d`, log probabilities :math:`logp` and values :math:`V` on policy using :math:`\pi_\theta` and :math:`V_\phi`
| :math:`\bullet \;` Estimate returns :math:`R` and advantages :math:`A` using Generalized Advantage Estimation (GAE(:math:`\lambda`)) from the collected data [:math:`r, d, V`]
| :math:`\bullet \;` Compute the entropy loss :math:`{L}_{entropy}`
| :math:`\bullet \;` Compute the clipped surrogate objective (policy loss) with :math:`ratio` as the probability ratio between the action under the current policy and the action under the previous policy: :math:`L^{clip}_{\pi_\theta} = \mathbb{E}[\min(A \; ratio, A \; \text{clip}(ratio, 1-c, 1+c))]`
| :math:`\bullet \;` Compute the value loss :math:`L_{V_\phi}` as the mean squared error (MSE) between the predicted values :math:`V_{_{predicted}}` and the estimated returns :math:`R`
| :math:`\bullet \;` Optimize the total loss :math:`L = L^{clip}_{\pi_\theta} - c_1 \, L_{V_\phi} + c_2 \, {L}_{entropy}`
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy function approximator (:math:`\pi_\theta`), value function approximator (:math:`V_\phi`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - shared states (:math:`s_{_{shared}}`), shared next states (:math:`s'_{_{shared}}`)
| - values (:math:`V`), advantages (:math:`A`), returns (:math:`R`)
| - log probabilities (:math:`logp`)
| - loss (:math:`L`)
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`compute_gae(...)`
| :blue:`def` :math:`\;f_{GAE} (r, d, V, V_{_{last}}') \;\rightarrow\; R, A:`
| :math:`adv \leftarrow 0`
| :math:`A \leftarrow \text{zeros}(r)`
| :green:`# advantages computation`
| **FOR** each reverse iteration :math:`i` up to the number of rows in :math:`r` **DO**
| **IF** :math:`i` is not the last row of :math:`r` **THEN**
| :math:`V_i' = V_{i+1}`
| **ELSE**
| :math:`V_i' \leftarrow V_{_{last}}'`
| :math:`adv \leftarrow r_i - V_i \, +` :guilabel:`discount_factor` :math:`\neg d_i \; (V_i' \, +` :guilabel:`lambda` :math:`adv)`
| :math:`A_i \leftarrow adv`
| :green:`# returns computation`
| :math:`R \leftarrow A + V`
| :green:`# normalize advantages`
| :math:`A \leftarrow \dfrac{A - \bar{A}}{A_\sigma + 10^{-8}}`
|
| :literal:`_update(...)`
| **FOR** each agent **DO**
| :green:`# compute returns and advantages`
| :math:`V_{_{last}}' \leftarrow V_\phi(s'_{_{shared}})`
| :math:`R, A \leftarrow f_{GAE}(r, d, V, V_{_{last}}')`
| :green:`# sample mini-batches from memory`
| [[:math:`s, a, logp, V, R, A`]] :math:`\leftarrow` states, actions, log_prob, values, returns, advantages
| :green:`# learning epochs`
| **FOR** each learning epoch up to :guilabel:`learning_epochs` **DO**
| :green:`# mini-batches loop`
| **FOR** each mini-batch [:math:`s, a, logp, V, R, A`] up to :guilabel:`mini_batches` **DO**
| :math:`logp' \leftarrow \pi_\theta(s, a)`
| :green:`# compute approximate KL divergence`
| :math:`ratio \leftarrow logp' - logp`
| :math:`KL_{_{divergence}} \leftarrow \frac{1}{N} \sum_{i=1}^N ((e^{ratio} - 1) - ratio)`
| :green:`# early stopping with KL divergence`
| **IF** :math:`KL_{_{divergence}} >` :guilabel:`kl_threshold` **THEN**
| **BREAK LOOP**
| :green:`# compute entropy loss`
| **IF** entropy computation is enabled **THEN**
| :math:`{L}_{entropy} \leftarrow \, -` :guilabel:`entropy_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N \pi_{\theta_{entropy}}`
| **ELSE**
| :math:`{L}_{entropy} \leftarrow 0`
| :green:`# compute policy loss`
| :math:`ratio \leftarrow e^{logp' - logp}`
| :math:`L_{_{surrogate}} \leftarrow A \; ratio`
| :math:`L_{_{clipped\,surrogate}} \leftarrow A \; \text{clip}(ratio, 1 - c, 1 + c) \qquad` with :math:`c` as :guilabel:`ratio_clip`
| :math:`L^{clip}_{\pi_\theta} \leftarrow - \frac{1}{N} \sum_{i=1}^N \min(L_{_{surrogate}}, L_{_{clipped\,surrogate}})`
| :green:`# compute value loss`
| :math:`V_{_{predicted}} \leftarrow V_\phi(s_{_{shared}})`
| **IF** :guilabel:`clip_predicted_values` is enabled **THEN**
| :math:`V_{_{predicted}} \leftarrow V + \text{clip}(V_{_{predicted}} - V, -c, c) \qquad` with :math:`c` as :guilabel:`value_clip`
| :math:`L_{V_\phi} \leftarrow` :guilabel:`value_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N (R - V_{_{predicted}})^2`
| :green:`# optimization step`
| reset :math:`\text{optimizer}_{\theta, \phi}`
| :math:`\nabla_{\theta, \, \phi} (L^{clip}_{\pi_\theta} + {L}_{entropy} + L_{V_\phi})`
| :math:`\text{clip}(\lVert \nabla_{\theta, \, \phi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_{\theta, \phi}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_{\theta, \phi} (\text{optimizer}_{\theta, \phi})`
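
As a reference, the GAE computation above can be sketched in plain PyTorch as follows. Tensor shapes, default coefficients and function names are illustrative; the actual computation is part of the agent's :literal:`_update` method:

.. code-block:: python

    import torch


    def compute_gae(rewards, dones, values, next_values, discount_factor=0.99, lambda_coefficient=0.95):
        # rewards, dones, values: tensors of shape (memory_size, num_envs, 1)
        # next_values: value estimates for the last next (shared) states, shape (num_envs, 1)
        advantage = 0
        advantages = torch.zeros_like(rewards)
        not_dones = dones.logical_not()
        memory_size = rewards.shape[0]

        # advantages computation (backward pass over the rollout)
        for i in reversed(range(memory_size)):
            next_value = values[i + 1] if i < memory_size - 1 else next_values
            advantage = rewards[i] - values[i] \
                + discount_factor * not_dones[i] * (next_value + lambda_coefficient * advantage)
            advantages[i] = advantage
        # returns computation
        returns = advantages + values
        # normalize advantages
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
        return returns, advantages
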
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/multi_agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [start-mappo-torch]
:end-before: [end-mappo-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/multi_agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [start-mappo-jax]
:end-before: [end-mappo-jax]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
The specification of a single value is automatically extended to all involved agents, unless the configuration of each individual agent is specified using a dictionary. For example:
.. code-block:: python
# specify a configuration value for each agent (agent names depend on environment)
cfg["discount_factor"] = {"agent_0": 0.99, "agent_1": 0.995, "agent_2": 0.985}
.. literalinclude:: ../../../../skrl/multi_agents/torch/mappo/mappo.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 stochastic (discrete or continuous) and 1 deterministic function approximator. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_\theta(s)`
- Policy
- :literal:`"policy"`
- observation
- action
- :ref:`Categorical <models_categorical>` /
|br| :ref:`Multi-Categorical <models_multicategorical>` /
|br| :ref:`Gaussian <models_gaussian>` /
|br| :ref:`MultivariateGaussian <models_multivariate_gaussian>`
* - :math:`V_\phi(s)`
- Value
- :literal:`"value"`
- observation
- 1
- :ref:`Deterministic <models_deterministic>`
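
For example, assuming a PettingZoo-style environment API (``env.possible_agents``, ``env.observation_space(name)`` and ``env.action_space(name)``), the dictionary could be built per agent as sketched below. ``Policy``, ``Value`` and ``shared_observation_space`` are placeholders for user-defined model classes and for the shared (global) observation space exposed by the environment wrapper:

.. code-block:: python

    models = {}
    for agent_name in env.possible_agents:
        models[agent_name] = {
            # decentralized policy: local observations
            "policy": Policy(env.observation_space(agent_name), env.action_space(agent_name), device),
            # centralized value function: shared (global) observations
            "value": Value(shared_observation_space, env.action_space(agent_name), device),
        }
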
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- for Policy and Value
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - RNN support
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.multi_agents.torch.mappo.MAPPO_DEFAULT_CONFIG
.. autoclass:: skrl.multi_agents.torch.mappo.MAPPO
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.multi_agents.jax.mappo.MAPPO_DEFAULT_CONFIG
.. autoclass:: skrl.multi_agents.jax.mappo.MAPPO
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 10,248 | reStructuredText | 36 | 328 | 0.561378 |
Toni-SM/skrl/docs/source/api/multi_agents/ippo.rst | Independent Proximal Policy Optimization (IPPO)
===============================================
IPPO is a **model-free**, **stochastic**, **on-policy**, **policy gradient** DTDE (decentralized training, decentralized execution) **multi-agent** algorithm in which each agent learns independently, using only its own local observations of the environment, and has its own independent critic network to estimate the value function.
Paper: `Is Independent Learning All You Need in the StarCraft Multi-Agent Challenge? <https://arxiv.org/abs/2011.09533>`_
.. raw:: html
<br><hr>
Algorithm
---------
| For each iteration do:
| For each agent do:
| :math:`\bullet \;` Collect, in a rollout memory, a set of states :math:`s`, actions :math:`a`, rewards :math:`r`, dones :math:`d`, log probabilities :math:`logp` and values :math:`V` on policy using :math:`\pi_\theta` and :math:`V_\phi`
| :math:`\bullet \;` Estimate returns :math:`R` and advantages :math:`A` using Generalized Advantage Estimation (GAE(:math:`\lambda`)) from the collected data [:math:`r, d, V`]
| :math:`\bullet \;` Compute the entropy loss :math:`{L}_{entropy}`
| :math:`\bullet \;` Compute the clipped surrogate objective (policy loss) with :math:`ratio` as the probability ratio between the action under the current policy and the action under the previous policy: :math:`L^{clip}_{\pi_\theta} = \mathbb{E}[\min(A \; ratio, A \; \text{clip}(ratio, 1-c, 1+c))]`
| :math:`\bullet \;` Compute the value loss :math:`L_{V_\phi}` as the mean squared error (MSE) between the predicted values :math:`V_{_{predicted}}` and the estimated returns :math:`R`
| :math:`\bullet \;` Optimize the total loss :math:`L = L^{clip}_{\pi_\theta} - c_1 \, L_{V_\phi} + c_2 \, {L}_{entropy}`
.. raw:: html
<br>
Algorithm implementation
^^^^^^^^^^^^^^^^^^^^^^^^
| Main notation/symbols:
| - policy function approximator (:math:`\pi_\theta`), value function approximator (:math:`V_\phi`)
| - states (:math:`s`), actions (:math:`a`), rewards (:math:`r`), next states (:math:`s'`), dones (:math:`d`)
| - values (:math:`V`), advantages (:math:`A`), returns (:math:`R`)
| - log probabilities (:math:`logp`)
| - loss (:math:`L`)
.. raw:: html
<br>
Learning algorithm
""""""""""""""""""
|
| :literal:`compute_gae(...)`
| :blue:`def` :math:`\;f_{GAE} (r, d, V, V_{_{last}}') \;\rightarrow\; R, A:`
| :math:`adv \leftarrow 0`
| :math:`A \leftarrow \text{zeros}(r)`
| :green:`# advantages computation`
| **FOR** each reverse iteration :math:`i` up to the number of rows in :math:`r` **DO**
| **IF** :math:`i` is not the last row of :math:`r` **THEN**
| :math:`V_i' = V_{i+1}`
| **ELSE**
| :math:`V_i' \leftarrow V_{_{last}}'`
| :math:`adv \leftarrow r_i - V_i \, +` :guilabel:`discount_factor` :math:`\neg d_i \; (V_i' \, +` :guilabel:`lambda` :math:`adv)`
| :math:`A_i \leftarrow adv`
| :green:`# returns computation`
| :math:`R \leftarrow A + V`
| :green:`# normalize advantages`
| :math:`A \leftarrow \dfrac{A - \bar{A}}{A_\sigma + 10^{-8}}`
|
| :literal:`_update(...)`
| **FOR** each agent **DO**
| :green:`# compute returns and advantages`
| :math:`V_{_{last}}' \leftarrow V_\phi(s')`
| :math:`R, A \leftarrow f_{GAE}(r, d, V, V_{_{last}}')`
| :green:`# sample mini-batches from memory`
| [[:math:`s, a, logp, V, R, A`]] :math:`\leftarrow` states, actions, log_prob, values, returns, advantages
| :green:`# learning epochs`
| **FOR** each learning epoch up to :guilabel:`learning_epochs` **DO**
| :green:`# mini-batches loop`
| **FOR** each mini-batch [:math:`s, a, logp, V, R, A`] up to :guilabel:`mini_batches` **DO**
| :math:`logp' \leftarrow \pi_\theta(s, a)`
| :green:`# compute approximate KL divergence`
| :math:`ratio \leftarrow logp' - logp`
| :math:`KL_{_{divergence}} \leftarrow \frac{1}{N} \sum_{i=1}^N ((e^{ratio} - 1) - ratio)`
| :green:`# early stopping with KL divergence`
| **IF** :math:`KL_{_{divergence}} >` :guilabel:`kl_threshold` **THEN**
| **BREAK LOOP**
| :green:`# compute entropy loss`
| **IF** entropy computation is enabled **THEN**
| :math:`{L}_{entropy} \leftarrow \, -` :guilabel:`entropy_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N \pi_{\theta_{entropy}}`
| **ELSE**
| :math:`{L}_{entropy} \leftarrow 0`
| :green:`# compute policy loss`
| :math:`ratio \leftarrow e^{logp' - logp}`
| :math:`L_{_{surrogate}} \leftarrow A \; ratio`
| :math:`L_{_{clipped\,surrogate}} \leftarrow A \; \text{clip}(ratio, 1 - c, 1 + c) \qquad` with :math:`c` as :guilabel:`ratio_clip`
| :math:`L^{clip}_{\pi_\theta} \leftarrow - \frac{1}{N} \sum_{i=1}^N \min(L_{_{surrogate}}, L_{_{clipped\,surrogate}})`
| :green:`# compute value loss`
| :math:`V_{_{predicted}} \leftarrow V_\phi(s)`
| **IF** :guilabel:`clip_predicted_values` is enabled **THEN**
| :math:`V_{_{predicted}} \leftarrow V + \text{clip}(V_{_{predicted}} - V, -c, c) \qquad` with :math:`c` as :guilabel:`value_clip`
| :math:`L_{V_\phi} \leftarrow` :guilabel:`value_loss_scale` :math:`\frac{1}{N} \sum_{i=1}^N (R - V_{_{predicted}})^2`
| :green:`# optimization step`
| reset :math:`\text{optimizer}_{\theta, \phi}`
| :math:`\nabla_{\theta, \, \phi} (L^{clip}_{\pi_\theta} + {L}_{entropy} + L_{V_\phi})`
| :math:`\text{clip}(\lVert \nabla_{\theta, \, \phi} \rVert)` with :guilabel:`grad_norm_clip`
| step :math:`\text{optimizer}_{\theta, \phi}`
| :green:`# update learning rate`
| **IF** there is a :guilabel:`learning_rate_scheduler` **THEN**
| step :math:`\text{scheduler}_{\theta, \phi} (\text{optimizer}_{\theta, \phi})`
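
As a reference, the clipped surrogate objective computed above for each mini-batch can be sketched in plain PyTorch as follows. Variable names are illustrative; the actual computation is part of the agent's :literal:`_update` method:

.. code-block:: python

    import torch


    def surrogate_loss(new_log_prob, old_log_prob, advantages, ratio_clip=0.2):
        # probability ratio between the current and the previous policy
        ratio = torch.exp(new_log_prob - old_log_prob)
        surrogate = advantages * ratio
        clipped_surrogate = advantages * torch.clamp(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
        # negative sign: the objective is maximized, the loss is minimized
        return -torch.min(surrogate, clipped_surrogate).mean()
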
.. raw:: html
<br>
Usage
-----
.. tabs::
.. tab:: Standard implementation
.. tabs::
.. group-tab:: |_4| |pytorch| |_4|
.. literalinclude:: ../../snippets/multi_agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [start-ippo-torch]
:end-before: [end-ippo-torch]
.. group-tab:: |_4| |jax| |_4|
.. literalinclude:: ../../snippets/multi_agents_basic_usage.py
:language: python
:emphasize-lines: 2
:start-after: [start-ippo-jax]
:end-before: [end-ippo-jax]
.. raw:: html
<br>
Configuration and hyperparameters
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. note::
The specification of a single value is automatically extended to all involved agents, unless the configuration of each individual agent is specified using a dictionary. For example:
.. code-block:: python
# specify a configuration value for each agent (agent names depend on environment)
cfg["discount_factor"] = {"agent_0": 0.99, "agent_1": 0.995, "agent_2": 0.985}
.. literalinclude:: ../../../../skrl/multi_agents/torch/ippo/ippo.py
:language: python
:start-after: [start-config-dict-torch]
:end-before: [end-config-dict-torch]
.. raw:: html
<br>
Spaces
^^^^^^
The implementation supports the following `Gym spaces <https://www.gymlibrary.dev/api/spaces>`_ / `Gymnasium spaces <https://gymnasium.farama.org/api/spaces>`_
.. list-table::
:header-rows: 1
* - Gym/Gymnasium spaces
- .. centered:: Observation
- .. centered:: Action
* - Discrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - MultiDiscrete
- .. centered:: :math:`\square`
- .. centered:: :math:`\blacksquare`
* - Box
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\blacksquare`
* - Dict
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
.. raw:: html
<br>
Models
^^^^^^
The implementation uses 1 stochastic (discrete or continuous) and 1 deterministic function approximator. These function approximators (models) must be collected in a dictionary and passed to the constructor of the class under the argument :literal:`models`
.. list-table::
:header-rows: 1
* - Notation
- Concept
- Key
- Input shape
- Output shape
- Type
* - :math:`\pi_\theta(s)`
- Policy
- :literal:`"policy"`
- observation
- action
- :ref:`Categorical <models_categorical>` /
|br| :ref:`Multi-Categorical <models_multicategorical>` /
|br| :ref:`Gaussian <models_gaussian>` /
|br| :ref:`MultivariateGaussian <models_multivariate_gaussian>`
* - :math:`V_\phi(s)`
- Value
- :literal:`"value"`
- observation
- 1
- :ref:`Deterministic <models_deterministic>`
.. raw:: html
<br>
Features
^^^^^^^^
Support for advanced features is described in the next table
.. list-table::
:header-rows: 1
* - Feature
- Support and remarks
- .. centered:: |_4| |pytorch| |_4|
- .. centered:: |_4| |jax| |_4|
* - Shared model
- for Policy and Value
- .. centered:: :math:`\blacksquare`
- .. centered:: :math:`\square`
* - RNN support
- \-
- .. centered:: :math:`\square`
- .. centered:: :math:`\square`
.. raw:: html
<br>
API (PyTorch)
-------------
.. autoclass:: skrl.multi_agents.torch.ippo.IPPO_DEFAULT_CONFIG
.. autoclass:: skrl.multi_agents.torch.ippo.IPPO
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
.. raw:: html
<br>
API (JAX)
---------
.. autoclass:: skrl.multi_agents.jax.ippo.IPPO_DEFAULT_CONFIG
.. autoclass:: skrl.multi_agents.jax.ippo.IPPO
:undoc-members:
:show-inheritance:
:private-members: _update
:members:
.. automethod:: __init__
| 10,120 | reStructuredText | 35.67029 | 322 | 0.561166 |
Toni-SM/skrl/docs/source/api/config/frameworks.rst | ML frameworks configuration
===========================
Configuration options for modifying the behavior of the Machine Learning (ML) frameworks.
.. raw:: html
<br><hr>
JAX
---
JAX-specific configuration.
.. raw:: html
<br>
API
^^^
.. py:data:: skrl.config.jax.backend
:type: str
:value: "numpy"
Backend used by the different components to operate on and generate arrays.
This configuration excludes models and optimizers.
Supported backends are: ``"numpy"`` and ``"jax"``.
.. py:data:: skrl.config.jax.key
:type: jnp.ndarray
:value: [0, 0]
Pseudo-random number generator (PRNG) key
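
A minimal usage sketch (assuming the attributes are set by direct assignment before instantiating the skrl components) could be:

.. code-block:: python

    from skrl import config

    # backend used by skrl components to operate on and generate arrays
    # (models and optimizers are not affected)
    config.jax.backend = "jax"  # or "numpy" (default)

    # inspect the PRNG key used by the library
    print(config.jax.key)
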
| 617 | reStructuredText | 16.166666 | 77 | 0.641815 |