Toni-SM/skrl/skrl/agents/torch/ppo/__init__.py

from skrl.agents.torch.ppo.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.agents.torch.ppo.ppo_rnn import PPO_RNN
Toni-SM/skrl/skrl/agents/torch/ppo/ppo.py

from typing import Any, Mapping, Optional, Tuple, Union
import copy
import itertools
import gym
import gymnasium
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.agents.torch import Agent
from skrl.memories.torch import Memory
from skrl.models.torch import Model
from skrl.resources.schedulers.torch import KLAdaptiveLR
# [start-config-dict-torch]
PPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 1.0, # value loss scaling factor
"kl_threshold": 0, # KL divergence threshold for early stopping
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-torch]
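# Illustrative note (not part of the original file): a common pattern is to copy these defaults
# and override only what is needed before instantiating the agent, e.g.
#   cfg = PPO_DEFAULT_CONFIG.copy()
#   cfg["learning_rate_scheduler"] = KLAdaptiveLR  # imported above
#   cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}  # keyword name assumed for KLAdaptiveLR
#   agent = PPO(models=models, memory=memory, cfg=cfg, ...)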
class PPO(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None) -> None:
"""Proximal Policy Optimization (PPO)
https://arxiv.org/abs/1707.06347
:param models: Models used by the agent
:type models: dictionary of skrl.models.torch.Model
:param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memories.torch.Memory, list of skrl.memories.torch.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
_cfg = copy.deepcopy(PPO_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.value = self.models.get("value", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["value"] = self.value
# configuration
self._learning_epochs = self.cfg["learning_epochs"]
self._mini_batches = self.cfg["mini_batches"]
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._ratio_clip = self.cfg["ratio_clip"]
self._value_clip = self.cfg["value_clip"]
self._clip_predicted_values = self.cfg["clip_predicted_values"]
self._value_loss_scale = self.cfg["value_loss_scale"]
self._entropy_loss_scale = self.cfg["entropy_loss_scale"]
self._kl_threshold = self.cfg["kl_threshold"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._value_preprocessor = self.cfg["value_preprocessor"]
self._discount_factor = self.cfg["discount_factor"]
self._lambda = self.cfg["lambda"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"]
# set up optimizer and learning rate scheduler
if self.policy is not None and self.value is not None:
if self.policy is self.value:
self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate)
else:
self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()),
lr=self._learning_rate)
if self._learning_rate_scheduler is not None:
self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["optimizer"] = self.optimizer
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
if self._value_preprocessor:
self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"])
self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor
else:
self._value_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool)
self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32)
self.memory.create_tensor(name="values", size=1, dtype=torch.float32)
self.memory.create_tensor(name="returns", size=1, dtype=torch.float32)
self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32)
# tensors sampled during training
self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"]
# create temporary variables needed for storage and computation
self._current_log_prob = None
self._current_next_states = None
def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: torch.Tensor
"""
# sample random actions
# TODO, check for stochasticity
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample stochastic actions
actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy")
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: torch.Tensor,
actions: torch.Tensor,
rewards: torch.Tensor,
next_states: torch.Tensor,
terminated: torch.Tensor,
truncated: torch.Tensor,
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: torch.Tensor
:param actions: Actions taken by the agent
:type actions: torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: torch.Tensor
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
self._current_next_states = next_states
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
# compute values
values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value")
values = self._value_preprocessor(values, inverse=True)
# time-limit (truncation) bootstrapping
if self._time_limit_bootstrap:
rewards += self._discount_factor * values * truncated
# store transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
def compute_gae(rewards: torch.Tensor,
dones: torch.Tensor,
values: torch.Tensor,
next_values: torch.Tensor,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: torch.Tensor
:param dones: Signals to indicate that episodes have ended
:type dones: torch.Tensor
:param values: Values obtained by the agent
:type values: torch.Tensor
:param next_values: Next values obtained by the agent
:type next_values: torch.Tensor
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
:return: Returns and Generalized Advantage Estimator
:rtype: tuple of torch.Tensor
"""
advantage = 0
advantages = torch.zeros_like(rewards)
not_dones = dones.logical_not()
memory_size = rewards.shape[0]
# advantages computation
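# note: this implements the GAE recursion A_i = delta_i + discount * lambda * not_done_i * A_{i+1},
# with delta_i = r_i + discount * not_done_i * V_{i+1} - V_i (the common discount * not_done_i
# factor is pulled out of both terms in the expression below)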
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
# compute returns and advantages
with torch.no_grad():
self.value.train(False)
last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float())}, role="value")
self.value.train(True)
last_values = self._value_preprocessor(last_values, inverse=True)
values = self.memory.get_tensor_by_name("values")
returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True))
self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True))
self.memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches)
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs):
kl_divergences = []
# mini-batches loop
for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches:
sampled_states = self._state_preprocessor(sampled_states, train=not epoch)
_, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy")
# compute approximate KL divergence
with torch.no_grad():
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean()
kl_divergences.append(kl_divergence)
# early stopping with KL divergence
if self._kl_threshold and kl_divergence > self._kl_threshold:
break
# compute entropy loss
if self._entropy_loss_scale:
entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean()
else:
entropy_loss = 0
# compute policy loss
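# note: this is the PPO clipped surrogate objective
# L = -E[min(r_t * A_t, clip(r_t, 1 - ratio_clip, 1 + ratio_clip) * A_t)],
# with r_t = exp(log pi_new(a_t|s_t) - log pi_old(a_t|s_t))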
ratio = torch.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip)
policy_loss = -torch.min(surrogate, surrogate_clipped).mean()
# compute value loss
predicted_values, _, _ = self.value.act({"states": sampled_states}, role="value")
if self._clip_predicted_values:
predicted_values = sampled_values + torch.clip(predicted_values - sampled_values,
min=-self._value_clip,
max=self._value_clip)
value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values)
# optimization step
self.optimizer.zero_grad()
(policy_loss + entropy_loss + value_loss).backward()
if self._grad_norm_clip > 0:
if self.policy is self.value:
nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip)
else:
nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip)
self.optimizer.step()
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
if self._learning_rate_scheduler:
if isinstance(self.scheduler, KLAdaptiveLR):
self.scheduler.step(torch.tensor(kl_divergences).mean())
else:
self.scheduler.step()
# record data
self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches))
if self._entropy_loss_scale:
self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
Toni-SM/skrl/skrl/agents/torch/amp/amp.py

from typing import Any, Callable, Mapping, Optional, Tuple, Union
import copy
import itertools
import math
import gym
import gymnasium
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.agents.torch import Agent
from skrl.memories.torch import Memory
from skrl.models.torch import Model
# [start-config-dict-torch]
AMP_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 6, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 5e-5, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"amp_state_preprocessor": None, # AMP state preprocessor class (see skrl.resources.preprocessors)
"amp_state_preprocessor_kwargs": {}, # AMP state preprocessor's kwargs (e.g. {"size": env.amp_observation_space})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.0, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 2.5, # value loss scaling factor
"discriminator_loss_scale": 5.0, # discriminator loss scaling factor
"amp_batch_size": 512, # batch size for updating the reference motion dataset
"task_reward_weight": 0.0, # task-reward weight (wG)
"style_reward_weight": 1.0, # style-reward weight (wS)
"discriminator_batch_size": 0, # batch size for computing the discriminator loss (all samples if 0)
"discriminator_reward_scale": 2, # discriminator reward scaling factor
"discriminator_logit_regularization_scale": 0.05, # logit regularization scale factor for the discriminator loss
"discriminator_gradient_penalty_scale": 5, # gradient penalty scaling factor for the discriminator loss
"discriminator_weight_decay_scale": 0.0001, # weight decay scaling factor for the discriminator loss
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-torch]
class AMP(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None,
amp_observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
motion_dataset: Optional[Memory] = None,
reply_buffer: Optional[Memory] = None,
collect_reference_motions: Optional[Callable[[int], torch.Tensor]] = None,
collect_observation: Optional[Callable[[], torch.Tensor]] = None) -> None:
"""Adversarial Motion Priors (AMP)
https://arxiv.org/abs/2104.02180
The implementation is adapted from the NVIDIA IsaacGymEnvs
(https://github.com/NVIDIA-Omniverse/IsaacGymEnvs/blob/main/isaacgymenvs/learning/amp_continuous.py)
:param models: Models used by the agent
:type models: dictionary of skrl.models.torch.Model
:param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memories.torch.Memory, list of skrl.memories.torch.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:param amp_observation_space: AMP observation/state space or shape (default: ``None``)
:type amp_observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None
:param motion_dataset: Reference motion dataset: M (default: ``None``)
:type motion_dataset: skrl.memories.torch.Memory or None
:param reply_buffer: Replay buffer for preventing discriminator overfitting: B (default: ``None``)
:type reply_buffer: skrl.memories.torch.Memory or None
:param collect_reference_motions: Callable to collect reference motions (default: ``None``)
:type collect_reference_motions: Callable[[int], torch.Tensor] or None
:param collect_observation: Callable to collect observation (default: ``None``)
:type collect_observation: Callable[[], torch.Tensor] or None
:raises KeyError: If the models dictionary is missing a required key
"""
_cfg = copy.deepcopy(AMP_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
self.amp_observation_space = amp_observation_space
self.motion_dataset = motion_dataset
self.reply_buffer = reply_buffer
self.collect_reference_motions = collect_reference_motions
self.collect_observation = collect_observation
# models
self.policy = self.models.get("policy", None)
self.value = self.models.get("value", None)
self.discriminator = self.models.get("discriminator", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["value"] = self.value
self.checkpoint_modules["discriminator"] = self.discriminator
# configuration
self._learning_epochs = self.cfg["learning_epochs"]
self._mini_batches = self.cfg["mini_batches"]
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._ratio_clip = self.cfg["ratio_clip"]
self._value_clip = self.cfg["value_clip"]
self._clip_predicted_values = self.cfg["clip_predicted_values"]
self._value_loss_scale = self.cfg["value_loss_scale"]
self._entropy_loss_scale = self.cfg["entropy_loss_scale"]
self._discriminator_loss_scale = self.cfg["discriminator_loss_scale"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._value_preprocessor = self.cfg["value_preprocessor"]
self._amp_state_preprocessor = self.cfg["amp_state_preprocessor"]
self._discount_factor = self.cfg["discount_factor"]
self._lambda = self.cfg["lambda"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._amp_batch_size = self.cfg["amp_batch_size"]
self._task_reward_weight = self.cfg["task_reward_weight"]
self._style_reward_weight = self.cfg["style_reward_weight"]
self._discriminator_batch_size = self.cfg["discriminator_batch_size"]
self._discriminator_reward_scale = self.cfg["discriminator_reward_scale"]
self._discriminator_logit_regularization_scale = self.cfg["discriminator_logit_regularization_scale"]
self._discriminator_gradient_penalty_scale = self.cfg["discriminator_gradient_penalty_scale"]
self._discriminator_weight_decay_scale = self.cfg["discriminator_weight_decay_scale"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"]
# set up optimizer and learning rate scheduler
if self.policy is not None and self.value is not None and self.discriminator is not None:
self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(),
self.value.parameters(),
self.discriminator.parameters()),
lr=self._learning_rate)
if self._learning_rate_scheduler is not None:
self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["optimizer"] = self.optimizer
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
if self._value_preprocessor:
self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"])
self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor
else:
self._value_preprocessor = self._empty_preprocessor
if self._amp_state_preprocessor:
self._amp_state_preprocessor = self._amp_state_preprocessor(**self.cfg["amp_state_preprocessor_kwargs"])
self.checkpoint_modules["amp_state_preprocessor"] = self._amp_state_preprocessor
else:
self._amp_state_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32)
self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool)
self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32)
self.memory.create_tensor(name="values", size=1, dtype=torch.float32)
self.memory.create_tensor(name="returns", size=1, dtype=torch.float32)
self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32)
self.memory.create_tensor(name="amp_states", size=self.amp_observation_space, dtype=torch.float32)
self.memory.create_tensor(name="next_values", size=1, dtype=torch.float32)
self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated", \
"log_prob", "values", "returns", "advantages", "amp_states", "next_values"]
# create tensors for motion dataset and reply buffer
if self.motion_dataset is not None:
self.motion_dataset.create_tensor(name="states", size=self.amp_observation_space, dtype=torch.float32)
self.reply_buffer.create_tensor(name="states", size=self.amp_observation_space, dtype=torch.float32)
# initialize motion dataset
for _ in range(math.ceil(self.motion_dataset.memory_size / self._amp_batch_size)):
self.motion_dataset.add_samples(states=self.collect_reference_motions(self._amp_batch_size))
# create temporary variables needed for storage and computation
self._current_log_prob = None
self._current_states = None
def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: torch.Tensor
"""
# use collected states
if self._current_states is not None:
states = self._current_states
states = self._state_preprocessor(states)
# sample random actions
# TODO, check for stochasticity
if timestep < self._random_timesteps:
return self.policy.random_act({"states": states}, role="policy")
# sample stochastic actions
actions, log_prob, outputs = self.policy.act({"states": states}, role="policy")
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: torch.Tensor,
actions: torch.Tensor,
rewards: torch.Tensor,
next_states: torch.Tensor,
terminated: torch.Tensor,
truncated: torch.Tensor,
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: torch.Tensor
:param actions: Actions taken by the agent
:type actions: torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: torch.Tensor
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# use collected states
if self._current_states is not None:
states = self._current_states
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
amp_states = infos["amp_obs"]
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
with torch.no_grad():
values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value")
values = self._value_preprocessor(values, inverse=True)
# time-limit (truncation) bootstrapping
if self._time_limit_bootstrap:
rewards += self._discount_factor * values * truncated
with torch.no_grad():
next_values, _, _ = self.value.act({"states": self._state_preprocessor(next_states)}, role="value")
next_values = self._value_preprocessor(next_values, inverse=True)
next_values *= infos['terminate'].view(-1, 1).logical_not()
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated,
log_prob=self._current_log_prob, values=values, amp_states=amp_states, next_values=next_values)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated,
log_prob=self._current_log_prob, values=values, amp_states=amp_states, next_values=next_values)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
if self.collect_observation is not None:
self._current_states = self.collect_observation()
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
def compute_gae(rewards: torch.Tensor,
dones: torch.Tensor,
values: torch.Tensor,
next_values: torch.Tensor,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: torch.Tensor
:param dones: Signals to indicate that episodes have ended
:type dones: torch.Tensor
:param values: Values obtained by the agent
:type values: torch.Tensor
:param next_values: Next values obtained by the agent
:type next_values: torch.Tensor
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
:return: Returns and Generalized Advantage Estimator
:rtype: tuple of torch.Tensor
"""
advantage = 0
advantages = torch.zeros_like(rewards)
not_dones = dones.logical_not()
memory_size = rewards.shape[0]
# advantages computation
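# note: unlike the PPO variant above, this recursion reads the stored per-step next_values[i]
# (zeroed at true terminations when the transition was recorded) and applies not_dones only to
# the propagated advantage term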
for i in reversed(range(memory_size)):
advantage = rewards[i] - values[i] + discount_factor * (next_values[i] + lambda_coefficient * not_dones[i] * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
# update dataset of reference motions
self.motion_dataset.add_samples(states=self.collect_reference_motions(self._amp_batch_size))
# compute combined rewards
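# note: the style reward is r_style = -log(max(1 - sigmoid(D(amp_state)), 1e-4)), scaled by
# discriminator_reward_scale; the combined reward is
# task_reward_weight * r_task + style_reward_weight * r_style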
rewards = self.memory.get_tensor_by_name("rewards")
amp_states = self.memory.get_tensor_by_name("amp_states")
with torch.no_grad():
amp_logits, _, _ = self.discriminator.act({"states": self._amp_state_preprocessor(amp_states)}, role="discriminator")
style_reward = -torch.log(torch.maximum(1 - 1 / (1 + torch.exp(-amp_logits)), torch.tensor(0.0001, device=self.device)))
style_reward *= self._discriminator_reward_scale
combined_rewards = self._task_reward_weight * rewards + self._style_reward_weight * style_reward
# compute returns and advantages
values = self.memory.get_tensor_by_name("values")
next_values=self.memory.get_tensor_by_name("next_values")
returns, advantages = compute_gae(rewards=combined_rewards,
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=next_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True))
self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True))
self.memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = self.memory.sample_all(names=self.tensors_names, mini_batches=self._mini_batches)
sampled_motion_batches = self.motion_dataset.sample(names=["states"],
batch_size=self.memory.memory_size * self.memory.num_envs,
mini_batches=self._mini_batches)
if len(self.reply_buffer):
sampled_replay_batches = self.reply_buffer.sample(names=["states"],
batch_size=self.memory.memory_size * self.memory.num_envs,
mini_batches=self._mini_batches)
else:
sampled_replay_batches = [[batches[self.tensors_names.index("amp_states")]] for batches in sampled_batches]
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
cumulative_discriminator_loss = 0
# learning epochs
for epoch in range(self._learning_epochs):
# mini-batches loop
for batch_index, (sampled_states, sampled_actions, _, _, _, \
sampled_log_prob, sampled_values, sampled_returns, sampled_advantages, \
sampled_amp_states, _) in enumerate(sampled_batches):
sampled_states = self._state_preprocessor(sampled_states, train=True)
_, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy")
# compute entropy loss
if self._entropy_loss_scale:
entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean()
else:
entropy_loss = 0
# compute policy loss
ratio = torch.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip)
policy_loss = -torch.min(surrogate, surrogate_clipped).mean()
# compute value loss
predicted_values, _, _ = self.value.act({"states": sampled_states}, role="value")
if self._clip_predicted_values:
predicted_values = sampled_values + torch.clip(predicted_values - sampled_values,
min=-self._value_clip,
max=self._value_clip)
value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values)
# compute discriminator loss
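# note: the discriminator is trained with binary cross-entropy to output 1 for reference-motion
# samples and 0 for agent and replay-buffer samples, plus optional logit-regularization,
# gradient-penalty and weight-decay terms added below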
if self._discriminator_batch_size:
sampled_amp_states = self._amp_state_preprocessor(sampled_amp_states[0:self._discriminator_batch_size], train=True)
sampled_amp_replay_states = self._amp_state_preprocessor(
sampled_replay_batches[batch_index][0][0:self._discriminator_batch_size], train=True)
sampled_amp_motion_states = self._amp_state_preprocessor(
sampled_motion_batches[batch_index][0][0:self._discriminator_batch_size], train=True)
else:
sampled_amp_states = self._amp_state_preprocessor(sampled_amp_states, train=True)
sampled_amp_replay_states = self._amp_state_preprocessor(sampled_replay_batches[batch_index][0], train=True)
sampled_amp_motion_states = self._amp_state_preprocessor(sampled_motion_batches[batch_index][0], train=True)
sampled_amp_motion_states.requires_grad_(True)
amp_logits, _, _ = self.discriminator.act({"states": sampled_amp_states}, role="discriminator")
amp_replay_logits, _, _ = self.discriminator.act({"states": sampled_amp_replay_states}, role="discriminator")
amp_motion_logits, _, _ = self.discriminator.act({"states": sampled_amp_motion_states}, role="discriminator")
amp_cat_logits = torch.cat([amp_logits, amp_replay_logits], dim=0)
# discriminator prediction loss
discriminator_loss = 0.5 * (nn.BCEWithLogitsLoss()(amp_cat_logits, torch.zeros_like(amp_cat_logits)) \
+ nn.BCEWithLogitsLoss()(amp_motion_logits, torch.ones_like(amp_motion_logits)))
# discriminator logit regularization
if self._discriminator_logit_regularization_scale:
logit_weights = torch.flatten(list(self.discriminator.modules())[-1].weight)
discriminator_loss += self._discriminator_logit_regularization_scale * torch.sum(torch.square(logit_weights))
# discriminator gradient penalty
if self._discriminator_gradient_penalty_scale:
amp_motion_gradient = torch.autograd.grad(amp_motion_logits,
sampled_amp_motion_states,
grad_outputs=torch.ones_like(amp_motion_logits),
create_graph=True,
retain_graph=True,
only_inputs=True)
gradient_penalty = torch.sum(torch.square(amp_motion_gradient[0]), dim=-1).mean()
discriminator_loss += self._discriminator_gradient_penalty_scale * gradient_penalty
# discriminator weight decay
if self._discriminator_weight_decay_scale:
weights = [torch.flatten(module.weight) for module in self.discriminator.modules() \
if isinstance(module, torch.nn.Linear)]
weight_decay = torch.sum(torch.square(torch.cat(weights, dim=-1)))
discriminator_loss += self._discriminator_weight_decay_scale * weight_decay
discriminator_loss *= self._discriminator_loss_scale
# optimization step
self.optimizer.zero_grad()
(policy_loss + entropy_loss + value_loss + discriminator_loss).backward()
if self._grad_norm_clip > 0:
nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(),
self.value.parameters(),
self.discriminator.parameters()), self._grad_norm_clip)
self.optimizer.step()
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale:
cumulative_entropy_loss += entropy_loss.item()
cumulative_discriminator_loss += discriminator_loss.item()
# update learning rate
if self._learning_rate_scheduler:
self.scheduler.step()
# update AMP replay buffer
self.reply_buffer.add_samples(states=amp_states.view(-1, amp_states.shape[-1]))
# record data
self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches))
if self._entropy_loss_scale:
self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Loss / Discriminator loss", cumulative_discriminator_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
Toni-SM/skrl/skrl/agents/torch/amp/__init__.py

from skrl.agents.torch.amp.amp import AMP, AMP_DEFAULT_CONFIG
Toni-SM/skrl/skrl/agents/torch/rpo/rpo.py

from typing import Any, Mapping, Optional, Tuple, Union
import copy
import itertools
import gym
import gymnasium
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.agents.torch import Agent
from skrl.memories.torch import Memory
from skrl.models.torch import Model
from skrl.resources.schedulers.torch import KLAdaptiveLR
# [start-config-dict-torch]
RPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"alpha": 0.5, # amount of uniform random perturbation on the mean actions: U(-alpha, alpha)
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 1.0, # value loss scaling factor
"kl_threshold": 0, # KL divergence threshold for early stopping
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-torch]
class RPO(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None) -> None:
"""Robust Policy Optimization (RPO)
https://arxiv.org/abs/2212.07536
:param models: Models used by the agent
:type models: dictionary of skrl.models.torch.Model
:param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memories.torch.Memory, list of skrl.memories.torch.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
_cfg = copy.deepcopy(RPO_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.value = self.models.get("value", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["value"] = self.value
# configuration
self._learning_epochs = self.cfg["learning_epochs"]
self._mini_batches = self.cfg["mini_batches"]
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._ratio_clip = self.cfg["ratio_clip"]
self._value_clip = self.cfg["value_clip"]
self._clip_predicted_values = self.cfg["clip_predicted_values"]
self._value_loss_scale = self.cfg["value_loss_scale"]
self._entropy_loss_scale = self.cfg["entropy_loss_scale"]
self._kl_threshold = self.cfg["kl_threshold"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._value_preprocessor = self.cfg["value_preprocessor"]
self._alpha = self.cfg["alpha"]
self._discount_factor = self.cfg["discount_factor"]
self._lambda = self.cfg["lambda"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"]
# set up optimizer and learning rate scheduler
if self.policy is not None and self.value is not None:
if self.policy is self.value:
self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate)
else:
self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()),
lr=self._learning_rate)
if self._learning_rate_scheduler is not None:
self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["optimizer"] = self.optimizer
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
if self._value_preprocessor:
self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"])
self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor
else:
self._value_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool)
self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32)
self.memory.create_tensor(name="values", size=1, dtype=torch.float32)
self.memory.create_tensor(name="returns", size=1, dtype=torch.float32)
self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32)
# tensors sampled during training
self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"]
# create temporary variables needed for storage and computation
self._current_log_prob = None
self._current_next_states = None
def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: torch.Tensor
"""
# sample random actions
# TODO, check for stochasticity
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample stochastic actions
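# note: "alpha" is forwarded to the policy model, which (per the RPO formulation) adds uniform
# U(-alpha, alpha) noise to the predicted mean action before sampling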
actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), "alpha": self._alpha}, role="policy")
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: torch.Tensor,
actions: torch.Tensor,
rewards: torch.Tensor,
next_states: torch.Tensor,
terminated: torch.Tensor,
truncated: torch.Tensor,
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: torch.Tensor
:param actions: Actions taken by the agent
:type actions: torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: torch.Tensor
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
self._current_next_states = next_states
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
# compute values
values, _, _ = self.value.act({"states": self._state_preprocessor(states), "alpha": self._alpha}, role="value")
values = self._value_preprocessor(values, inverse=True)
# time-limit (truncation) bootstrapping
if self._time_limit_bootstrap:
rewards += self._discount_factor * values * truncated
# store transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
def compute_gae(rewards: torch.Tensor,
dones: torch.Tensor,
values: torch.Tensor,
next_values: torch.Tensor,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: torch.Tensor
:param dones: Signals to indicate that episodes have ended
:type dones: torch.Tensor
:param values: Values obtained by the agent
:type values: torch.Tensor
:param next_values: Next values obtained by the agent
:type next_values: torch.Tensor
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
:return: Returns and Generalized Advantage Estimator
:rtype: tuple of torch.Tensor
"""
advantage = 0
advantages = torch.zeros_like(rewards)
not_dones = dones.logical_not()
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
# compute returns and advantages
with torch.no_grad():
self.value.train(False)
last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), "alpha": self._alpha}, role="value")
self.value.train(True)
last_values = self._value_preprocessor(last_values, inverse=True)
values = self.memory.get_tensor_by_name("values")
returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True))
self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True))
self.memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches)
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs):
kl_divergences = []
# mini-batches loop
for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches:
sampled_states = self._state_preprocessor(sampled_states, train=not epoch)
_, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions, "alpha": self._alpha}, role="policy")
# compute approximate KL divergence
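                # estimator: E[(r - 1) - log(r)], with r = pi_new(a|s) / pi_old(a|s)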
with torch.no_grad():
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean()
kl_divergences.append(kl_divergence)
# early stopping with KL divergence
if self._kl_threshold and kl_divergence > self._kl_threshold:
break
# compute entropy loss
if self._entropy_loss_scale:
entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean()
else:
entropy_loss = 0
# compute policy loss
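                # clipped surrogate objective: -E[min(r * A, clip(r, 1 - eps, 1 + eps) * A)]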
ratio = torch.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip)
policy_loss = -torch.min(surrogate, surrogate_clipped).mean()
# compute value loss
predicted_values, _, _ = self.value.act({"states": sampled_states, "alpha": self._alpha}, role="value")
if self._clip_predicted_values:
predicted_values = sampled_values + torch.clip(predicted_values - sampled_values,
min=-self._value_clip,
max=self._value_clip)
value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values)
# optimization step
self.optimizer.zero_grad()
(policy_loss + entropy_loss + value_loss).backward()
if self._grad_norm_clip > 0:
if self.policy is self.value:
nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip)
else:
nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip)
self.optimizer.step()
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
if self._learning_rate_scheduler:
if isinstance(self.scheduler, KLAdaptiveLR):
self.scheduler.step(torch.tensor(kl_divergences).mean())
else:
self.scheduler.step()
# record data
self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches))
if self._entropy_loss_scale:
self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
| 21,677 | Python | 46.854305 | 155 | 0.598422 |
Toni-SM/skrl/skrl/agents/torch/rpo/__init__.py | from skrl.agents.torch.rpo.rpo import RPO, RPO_DEFAULT_CONFIG
from skrl.agents.torch.rpo.rpo_rnn import RPO_RNN
| 112 | Python | 36.666654 | 61 | 0.803571 |
Toni-SM/skrl/skrl/agents/torch/rpo/rpo_rnn.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import itertools
import gym
import gymnasium
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.agents.torch import Agent
from skrl.memories.torch import Memory
from skrl.models.torch import Model
from skrl.resources.schedulers.torch import KLAdaptiveLR
# [start-config-dict-torch]
RPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"alpha": 0.5, # amount of uniform random perturbation on the mean actions: U(-alpha, alpha)
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 1.0, # value loss scaling factor
"kl_threshold": 0, # KL divergence threshold for early stopping
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-torch]
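# A partial configuration can be passed to the agent; unspecified keys keep the default
# values above. A minimal usage sketch (it assumes `models`, `memory` and a wrapped `env`
# are already defined elsewhere):
#
#   agent = RPO_RNN(models=models, memory=memory,
#                   observation_space=env.observation_space, action_space=env.action_space,
#                   device=env.device, cfg={"rollouts": 32, "alpha": 0.1})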
class RPO_RNN(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None) -> None:
"""Robust Policy Optimization (RPO) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.)
https://arxiv.org/abs/2212.07536
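        The ``alpha`` hyperparameter controls the amount of uniform random perturbation,
        U(-alpha, alpha), applied to the mean of the action distribution (see the configuration dictionary)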
:param models: Models used by the agent
:type models: dictionary of skrl.models.torch.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
_cfg = copy.deepcopy(RPO_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.value = self.models.get("value", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["value"] = self.value
# configuration
self._learning_epochs = self.cfg["learning_epochs"]
self._mini_batches = self.cfg["mini_batches"]
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._ratio_clip = self.cfg["ratio_clip"]
self._value_clip = self.cfg["value_clip"]
self._clip_predicted_values = self.cfg["clip_predicted_values"]
self._value_loss_scale = self.cfg["value_loss_scale"]
self._entropy_loss_scale = self.cfg["entropy_loss_scale"]
self._kl_threshold = self.cfg["kl_threshold"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._value_preprocessor = self.cfg["value_preprocessor"]
self._alpha = self.cfg["alpha"]
self._discount_factor = self.cfg["discount_factor"]
self._lambda = self.cfg["lambda"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"]
# set up optimizer and learning rate scheduler
if self.policy is not None and self.value is not None:
if self.policy is self.value:
self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate)
else:
self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()),
lr=self._learning_rate)
if self._learning_rate_scheduler is not None:
self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["optimizer"] = self.optimizer
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
if self._value_preprocessor:
self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"])
self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor
else:
self._value_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool)
self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32)
self.memory.create_tensor(name="values", size=1, dtype=torch.float32)
self.memory.create_tensor(name="returns", size=1, dtype=torch.float32)
self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32)
# tensors sampled during training
self._tensors_names = ["states", "actions", "terminated", "log_prob", "values", "returns", "advantages"]
# RNN specifications
self._rnn = False # flag to indicate whether RNN is available
self._rnn_tensors_names = [] # used for sampling during training
self._rnn_final_states = {"policy": [], "value": []}
self._rnn_initial_states = {"policy": [], "value": []}
self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1)
# policy
for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])):
self._rnn = True
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True)
self._rnn_tensors_names.append(f"rnn_policy_{i}")
# default RNN states
self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device))
# value
if self.value is not None:
if self.policy is self.value:
self._rnn_initial_states["value"] = self._rnn_initial_states["policy"]
else:
for i, size in enumerate(self.value.get_specification().get("rnn", {}).get("sizes", [])):
self._rnn = True
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name=f"rnn_value_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True)
self._rnn_tensors_names.append(f"rnn_value_{i}")
# default RNN states
self._rnn_initial_states["value"].append(torch.zeros(size, dtype=torch.float32, device=self.device))
# create temporary variables needed for storage and computation
self._current_log_prob = None
self._current_next_states = None
def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: torch.Tensor
"""
rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {}
# sample random actions
# TODO: fix for stochasticity, rnn and log_prob
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy")
# sample stochastic actions
actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), "alpha": self._alpha, **rnn}, role="policy")
self._current_log_prob = log_prob
if self._rnn:
self._rnn_final_states["policy"] = outputs.get("rnn", [])
return actions, log_prob, outputs
def record_transition(self,
states: torch.Tensor,
actions: torch.Tensor,
rewards: torch.Tensor,
next_states: torch.Tensor,
terminated: torch.Tensor,
truncated: torch.Tensor,
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: torch.Tensor
:param actions: Actions taken by the agent
:type actions: torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: torch.Tensor
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
self._current_next_states = next_states
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
# compute values
rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {}
values, _, outputs = self.value.act({"states": self._state_preprocessor(states), "alpha": self._alpha, **rnn}, role="value")
values = self._value_preprocessor(values, inverse=True)
            # time-limit (truncation) bootstrapping
if self._time_limit_bootstrap:
rewards += self._discount_factor * values * truncated
# package RNN states
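            # hidden states have shape (num_layers, num_envs, hidden_size); they are transposed
            # so that memory stores them per environment as (num_envs, num_layers, hidden_size)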
rnn_states = {}
if self._rnn:
rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])})
if self.policy is not self.value:
rnn_states.update({f"rnn_value_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["value"])})
            # store the transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states)
# update RNN states
if self._rnn:
self._rnn_final_states["value"] = self._rnn_final_states["policy"] if self.policy is self.value else outputs.get("rnn", [])
# reset states if the episodes have ended
finished_episodes = terminated.nonzero(as_tuple=False)
if finished_episodes.numel():
for rnn_state in self._rnn_final_states["policy"]:
rnn_state[:, finished_episodes[:, 0]] = 0
if self.policy is not self.value:
for rnn_state in self._rnn_final_states["value"]:
rnn_state[:, finished_episodes[:, 0]] = 0
self._rnn_initial_states = self._rnn_final_states
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
def compute_gae(rewards: torch.Tensor,
dones: torch.Tensor,
values: torch.Tensor,
next_values: torch.Tensor,
discount_factor: float = 0.99,
                        lambda_coefficient: float = 0.95) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: torch.Tensor
:param dones: Signals to indicate that episodes have ended
:type dones: torch.Tensor
:param values: Values obtained by the agent
:type values: torch.Tensor
:param next_values: Next values obtained by the agent
:type next_values: torch.Tensor
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
            :return: Estimated returns and Generalized Advantage Estimator
            :rtype: tuple of torch.Tensor
"""
advantage = 0
advantages = torch.zeros_like(rewards)
not_dones = dones.logical_not()
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else last_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
# compute returns and advantages
with torch.no_grad():
self.value.train(False)
rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {}
last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), "alpha": self._alpha, **rnn}, role="value")
self.value.train(True)
last_values = self._value_preprocessor(last_values, inverse=True)
values = self.memory.get_tensor_by_name("values")
returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True))
self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True))
self.memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length)
rnn_policy, rnn_value = {}, {}
if self._rnn:
sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length)
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs):
kl_divergences = []
# mini-batches loop
for i, (sampled_states, sampled_actions, sampled_dones, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages) in enumerate(sampled_batches):
if self._rnn:
if self.policy is self.value:
rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches[i]], "terminated": sampled_dones}
rnn_value = rnn_policy
else:
rnn_policy = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "policy" in n], "terminated": sampled_dones}
rnn_value = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "value" in n], "terminated": sampled_dones}
sampled_states = self._state_preprocessor(sampled_states, train=not epoch)
_, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions, "alpha": self._alpha, **rnn_policy}, role="policy")
# compute approximate KL divergence
with torch.no_grad():
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean()
kl_divergences.append(kl_divergence)
# early stopping with KL divergence
if self._kl_threshold and kl_divergence > self._kl_threshold:
break
# compute entropy loss
if self._entropy_loss_scale:
entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean()
else:
entropy_loss = 0
# compute policy loss
ratio = torch.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip)
policy_loss = -torch.min(surrogate, surrogate_clipped).mean()
# compute value loss
predicted_values, _, _ = self.value.act({"states": sampled_states, "alpha": self._alpha, **rnn_value}, role="value")
if self._clip_predicted_values:
predicted_values = sampled_values + torch.clip(predicted_values - sampled_values,
min=-self._value_clip,
max=self._value_clip)
value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values)
# optimization step
self.optimizer.zero_grad()
(policy_loss + entropy_loss + value_loss).backward()
if self._grad_norm_clip > 0:
if self.policy is self.value:
nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip)
else:
nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip)
self.optimizer.step()
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
if self._learning_rate_scheduler:
if isinstance(self.scheduler, KLAdaptiveLR):
self.scheduler.step(torch.tensor(kl_divergences).mean())
else:
self.scheduler.step()
# record data
self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches))
if self._entropy_loss_scale:
self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
| 26,062 | Python | 48.643809 | 176 | 0.592318 |
Toni-SM/skrl/skrl/agents/jax/base.py | from typing import Any, Mapping, Optional, Tuple, Union
import collections
import copy
import datetime
import os
import pickle
import gym
import gymnasium
import flax
import jax
import numpy as np
from skrl import config, logger
from skrl.memories.jax import Memory
from skrl.models.jax import Model
class Agent:
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Base class that represent a RL agent
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
self._jax = config.jax.backend == "jax"
self.models = models
self.observation_space = observation_space
self.action_space = action_space
self.cfg = cfg if cfg is not None else {}
if device is None:
self.device = jax.devices()[0]
else:
self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0]
if type(memory) is list:
self.memory = memory[0]
self.secondary_memories = memory[1:]
else:
self.memory = memory
self.secondary_memories = []
# convert the models to their respective device
for model in self.models.values():
if model is not None:
pass
self.tracking_data = collections.defaultdict(list)
self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000)
self._track_rewards = collections.deque(maxlen=100)
self._track_timesteps = collections.deque(maxlen=100)
self._cumulative_rewards = None
self._cumulative_timesteps = None
self.training = True
# checkpoint
self.checkpoint_modules = {}
self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000)
self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False)
self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": False, "modules": {}}
# experiment directory
directory = self.cfg.get("experiment", {}).get("directory", "")
experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "")
if not directory:
directory = os.path.join(os.getcwd(), "runs")
if not experiment_name:
experiment_name = "{}_{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), self.__class__.__name__)
self.experiment_dir = os.path.join(directory, experiment_name)
def __str__(self) -> str:
"""Generate a representation of the agent as string
:return: Representation of the agent as string
:rtype: str
"""
string = f"Agent: {repr(self)}"
for k, v in self.cfg.items():
if type(v) is dict:
string += f"\n |-- {k}"
for k1, v1 in v.items():
string += f"\n | |-- {k1}: {v1}"
else:
string += f"\n |-- {k}: {v}"
return string
def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any:
"""Empty preprocess method
This method is defined because PyTorch multiprocessing can't pickle lambdas
:param _input: Input to preprocess
:type _input: Any
:return: Preprocessed input
:rtype: Any
"""
return _input
def _get_internal_value(self, _module: Any) -> Any:
"""Get internal module/variable state/value
:param _module: Module or variable
:type _module: Any
:return: Module/variable state/value
:rtype: Any
"""
return _module.state_dict.params if hasattr(_module, "state_dict") else _module
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
This method should be called before the agent is used.
        It will initialize the TensorBoard writer (and optionally Weights & Biases) and create the checkpoints directory
:param trainer_cfg: Trainer configuration
:type trainer_cfg: dict, optional
"""
# setup Weights & Biases
if self.cfg.get("experiment", {}).get("wandb", False):
# save experiment config
trainer_cfg = trainer_cfg if trainer_cfg is not None else {}
try:
models_cfg = {k: v.net._modules for (k, v) in self.models.items()}
except AttributeError:
models_cfg = {k: v._modules for (k, v) in self.models.items()}
            config = {**self.cfg, **trainer_cfg, **models_cfg}
# set default values
wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {}))
wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1])
wandb_kwargs.setdefault("sync_tensorboard", True)
wandb_kwargs.setdefault("config", {})
wandb_kwargs["config"].update(config)
# init Weights & Biases
import wandb
wandb.init(**wandb_kwargs)
# main entry to log data for consumption and visualization by TensorBoard
if self.write_interval > 0:
self.writer = None
# tensorboard via torch SummaryWriter
try:
from torch.utils.tensorboard import SummaryWriter
self.writer = SummaryWriter(log_dir=self.experiment_dir)
except ImportError as e:
pass
# tensorboard via tensorflow
if self.writer is None:
try:
import tensorflow
class _SummaryWriter:
def __init__(self, log_dir):
self.writer = tensorflow.summary.create_file_writer(logdir=log_dir)
def add_scalar(self, tag, value, step):
with self.writer.as_default():
tensorflow.summary.scalar(tag, value, step=step)
self.writer = _SummaryWriter(log_dir=self.experiment_dir)
except ImportError as e:
pass
# tensorboard via tensorboardX
if self.writer is None:
try:
import tensorboardX
self.writer = tensorboardX.SummaryWriter(log_dir=self.experiment_dir)
except ImportError as e:
pass
# show warnings and exit
if self.writer is None:
logger.warning("No package found to write events to Tensorboard.")
logger.warning("Set agent's `write_interval` setting to 0 to disable writing")
logger.warning("or install one of the following packages:")
logger.warning(" - PyTorch: https://pytorch.org/get-started/locally")
logger.warning(" - TensorFlow: https://www.tensorflow.org/install")
logger.warning(" - TensorboardX: https://github.com/lanpa/tensorboardX#install")
logger.warning("The current running process will be terminated.")
exit()
if self.checkpoint_interval > 0:
os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True)
def track_data(self, tag: str, value: float) -> None:
"""Track data to TensorBoard
Currently only scalar data are supported
:param tag: Data identifier (e.g. 'Loss / policy loss')
:type tag: str
:param value: Value to track
:type value: float
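        Example: ``agent.track_data("Loss / Custom loss", 0.1)``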
"""
self.tracking_data[tag].append(value)
def write_tracking_data(self, timestep: int, timesteps: int) -> None:
"""Write tracking data to TensorBoard
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
for k, v in self.tracking_data.items():
if k.endswith("(min)"):
self.writer.add_scalar(k, np.min(v), timestep)
elif k.endswith("(max)"):
self.writer.add_scalar(k, np.max(v), timestep)
else:
self.writer.add_scalar(k, np.mean(v), timestep)
# reset data containers for next iteration
self._track_rewards.clear()
self._track_timesteps.clear()
self.tracking_data.clear()
def write_checkpoint(self, timestep: int, timesteps: int) -> None:
"""Write checkpoint (modules) to disk
The checkpoints are saved in the directory 'checkpoints' in the experiment directory.
The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time.
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"))
# separated modules
if self.checkpoint_store_separately:
for name, module in self.checkpoint_modules.items():
with open(os.path.join(self.experiment_dir, "checkpoints", f"{name}_{tag}.pickle"), "wb") as file:
pickle.dump(flax.serialization.to_bytes(self._get_internal_value(module)), file, protocol=4)
# whole agent
else:
modules = {}
for name, module in self.checkpoint_modules.items():
modules[name] = flax.serialization.to_bytes(self._get_internal_value(module))
with open(os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pickle"), "wb") as file:
pickle.dump(modules, file, protocol=4)
# best modules
if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]:
# separated modules
if self.checkpoint_store_separately:
for name, module in self.checkpoint_modules.items():
with open(os.path.join(self.experiment_dir, "checkpoints", f"best_{name}.pickle"), "wb") as file:
pickle.dump(flax.serialization.to_bytes(self.checkpoint_best_modules["modules"][name]), file, protocol=4)
# whole agent
else:
modules = {}
for name, module in self.checkpoint_modules.items():
modules[name] = flax.serialization.to_bytes(self.checkpoint_best_modules["modules"][name])
with open(os.path.join(self.experiment_dir, "checkpoints", "best_agent.pickle"), "wb") as file:
pickle.dump(modules, file, protocol=4)
self.checkpoint_best_modules["saved"] = True
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:raises NotImplementedError: The method is not implemented by the inheriting classes
:return: Actions
:rtype: np.ndarray or jax.Array
"""
raise NotImplementedError
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory (to be implemented by the inheriting classes)
Inheriting classes must call this method to record episode information (rewards, timesteps, etc.).
In addition to recording environment transition (such as states, rewards, etc.), agent information can be recorded.
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
if self.write_interval > 0:
# compute the cumulative sum of the rewards and timesteps
if self._cumulative_rewards is None:
self._cumulative_rewards = np.zeros_like(rewards, dtype=np.float32)
self._cumulative_timesteps = np.zeros_like(rewards, dtype=np.int32)
# TODO: find a better way to avoid https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError
if self._jax:
rewards = jax.device_get(rewards)
terminated = jax.device_get(terminated)
truncated = jax.device_get(truncated)
self._cumulative_rewards += rewards
self._cumulative_timesteps += 1
# check ended episodes
finished_episodes = (terminated + truncated).nonzero()[0]
if finished_episodes.size:
                # store the cumulative rewards and timesteps
self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist())
self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist())
# reset the cumulative rewards and timesteps
self._cumulative_rewards[finished_episodes] = 0
self._cumulative_timesteps[finished_episodes] = 0
# record data
self.tracking_data["Reward / Instantaneous reward (max)"].append(np.max(rewards).item())
self.tracking_data["Reward / Instantaneous reward (min)"].append(np.min(rewards).item())
self.tracking_data["Reward / Instantaneous reward (mean)"].append(np.mean(rewards).item())
if len(self._track_rewards):
track_rewards = np.array(self._track_rewards)
track_timesteps = np.array(self._track_timesteps)
self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards))
self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards))
self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards))
self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps))
self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps))
self.tracking_data["Episode / Total timesteps (mean)"].append(np.mean(track_timesteps))
def set_mode(self, mode: str) -> None:
"""Set the model mode (training or evaluation)
:param mode: Mode: 'train' for training or 'eval' for evaluation
:type mode: str
"""
for model in self.models.values():
if model is not None:
model.set_mode(mode)
def set_running_mode(self, mode: str) -> None:
"""Set the current running mode (training or evaluation)
This method sets the value of the ``training`` property (boolean).
This property can be used to know if the agent is running in training or evaluation mode.
:param mode: Mode: 'train' for training or 'eval' for evaluation
:type mode: str
"""
self.training = mode == "train"
def save(self, path: str) -> None:
"""Save the agent to the specified path
:param path: Path to save the model to
:type path: str
"""
modules = {}
for name, module in self.checkpoint_modules.items():
modules[name] = flax.serialization.to_bytes(self._get_internal_value(module))
# HACK: Does it make sense to use https://github.com/google/orbax
# file.write(flax.serialization.to_bytes(modules))
with open(path, "wb") as file:
pickle.dump(modules, file, protocol=4)
def load(self, path: str) -> None:
"""Load the model from the specified path
:param path: Path to load the model from
:type path: str
"""
with open(path, "rb") as file:
modules = pickle.load(file)
if type(modules) is dict:
for name, data in modules.items():
module = self.checkpoint_modules.get(name, None)
if module is not None:
if hasattr(module, "state_dict"):
params = flax.serialization.from_bytes(module.state_dict.params, data)
module.state_dict = module.state_dict.replace(params=params)
else:
pass # TODO: raise NotImplementedError
else:
logger.warning(f"Cannot load the {name} module. The agent doesn't have such an instance")
def migrate(self,
path: str,
name_map: Mapping[str, Mapping[str, str]] = {},
auto_mapping: bool = True,
verbose: bool = False) -> bool:
"""Migrate the specified extrernal checkpoint to the current agent
:raises NotImplementedError: Not yet implemented
"""
raise NotImplementedError
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
timestep += 1
# update best models and write checkpoints
if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval:
# update best models
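            # the mean of the total rewards tracked since the last write is compared
            # against the best value recorded so far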
reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31))
if reward > self.checkpoint_best_modules["reward"]:
self.checkpoint_best_modules["timestep"] = timestep
self.checkpoint_best_modules["reward"] = reward
self.checkpoint_best_modules["saved"] = False
self.checkpoint_best_modules["modules"] = {k: copy.deepcopy(self._get_internal_value(v)) for k, v in self.checkpoint_modules.items()}
# write checkpoints
self.write_checkpoint(timestep, timesteps)
# write to tensorboard
if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval:
self.write_tracking_data(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:raises NotImplementedError: The method is not implemented by the inheriting classes
"""
raise NotImplementedError
| 21,860 | Python | 43.432927 | 149 | 0.595288 |
Toni-SM/skrl/skrl/agents/jax/__init__.py | from skrl.agents.jax.base import Agent
| 39 | Python | 18.999991 | 38 | 0.820513 |
Toni-SM/skrl/skrl/agents/jax/cem/cem.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
import optax
from skrl import logger
from skrl.agents.jax import Agent
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
# [start-config-dict-jax]
CEM_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"percentile": 0.70, # percentile to compute the reward bound [0, 1]
"discount_factor": 0.99, # discount factor (gamma)
"learning_rate": 1e-2, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
class CEM(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Cross-Entropy Method (CEM)
https://ieeexplore.ieee.org/abstract/document/6796865/
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
# _cfg = copy.deepcopy(CEM_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = CEM_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
# configuration
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._percentile = self.cfg["percentile"]
self._discount_factor = self.cfg["discount_factor"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._episode_tracking = []
# set up optimizer and learning rate scheduler
if self.policy is not None:
self.optimizer = Adam(model=self.policy, lr=self._learning_rate)
if self._learning_rate_scheduler is not None:
self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["optimizer"] = self.optimizer
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.int32)
self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8)
self.tensors_names = ["states", "actions", "rewards"]
# set up models for just-in-time compilation with XLA
self.policy.apply = jax.jit(self.policy.apply, static_argnums=2)
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
# sample random actions
# TODO, check for stochasticity
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample stochastic actions
actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy")
if not self._jax: # numpy backend
actions = jax.device_get(actions)
return actions, None, outputs
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
            # store the transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
# track episodes internally
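            # for each environment, record the rollout index at which an episode ended;
            # these indices are used later in _update() to slice per-episode transitions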
if self._rollout:
indexes = (terminated + truncated).nonzero()[0]
if indexes.size:
for i in indexes:
self._episode_tracking[i.item()].append(self._rollout + 1)
else:
self._episode_tracking = [[0] for _ in range(rewards.shape[-1])]
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self._rollout = 0
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# sample all memory
sampled_states, sampled_actions, sampled_rewards = self.memory.sample_all(names=self.tensors_names)[0]
sampled_states = self._state_preprocessor(sampled_states, train=True)
if self._jax: # move to numpy backend
sampled_states = jax.device_get(sampled_states)
sampled_actions = jax.device_get(sampled_actions)
sampled_rewards = jax.device_get(sampled_rewards)
# compute discounted return threshold
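        # per-episode discounted returns are computed from the tracked episode boundaries;
        # the configured percentile of these returns defines the elite-selection threshold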
limits = []
returns = []
for e in range(sampled_rewards.shape[-1]):
for i, j in zip(self._episode_tracking[e][:-1], self._episode_tracking[e][1:]):
limits.append([e + i, e + j])
rewards = sampled_rewards[e + i: e + j]
returns.append(np.sum(rewards * self._discount_factor ** \
np.flip(np.arange(rewards.shape[0]), axis=-1).reshape(rewards.shape)))
if not len(returns):
logger.warning("No returns to update. Consider increasing the number of rollouts")
return
returns = np.array(returns)
return_threshold = np.quantile(returns, self._percentile, axis=-1)
# get elite states and actions
indexes = (returns >= return_threshold).nonzero()[0]
elite_states = np.concatenate([sampled_states[limits[i][0]:limits[i][1]] for i in indexes], axis=0)
elite_actions = np.concatenate([sampled_actions[limits[i][0]:limits[i][1]] for i in indexes], axis=0).reshape(-1)
# compute policy loss
def _policy_loss(params):
# compute scores for the elite states
_, _, outputs = self.policy.act({"states": elite_states}, "policy", params)
scores = outputs["net_output"]
# HACK: return optax.softmax_cross_entropy_with_integer_labels(scores, elite_actions).mean()
labels = jax.nn.one_hot(elite_actions, self.action_space.n)
return optax.softmax_cross_entropy(scores, labels).mean()
policy_loss, grad = jax.value_and_grad(_policy_loss, has_aux=False)(self.policy.state_dict.params)
# optimization step (policy)
self.optimizer = self.optimizer.step(grad, self.policy)
# update learning rate
if self._learning_rate_scheduler:
self.scheduler.step()
# record data
self.track_data("Loss / Policy loss", policy_loss.item())
self.track_data("Coefficient / Return threshold", return_threshold.item())
self.track_data("Coefficient / Mean discounted returns", returns.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
| 14,421 | Python | 43.239264 | 124 | 0.615145 |
Toni-SM/skrl/skrl/agents/jax/cem/__init__.py | from skrl.agents.jax.cem.cem import CEM, CEM_DEFAULT_CONFIG
| 60 | Python | 29.499985 | 59 | 0.8 |
Toni-SM/skrl/skrl/agents/jax/sac/__init__.py | from skrl.agents.jax.sac.sac import SAC, SAC_DEFAULT_CONFIG
| 60 | Python | 29.499985 | 59 | 0.8 |
Toni-SM/skrl/skrl/agents/jax/sac/sac.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import functools
import gym
import gymnasium
import flax
import jax
import jax.numpy as jnp
import numpy as np
from skrl.agents.jax import Agent
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
# [start-config-dict-jax]
SAC_DEFAULT_CONFIG = {
"gradient_steps": 1, # gradient steps
"batch_size": 64, # training batch size
"discount_factor": 0.99, # discount factor (gamma)
"polyak": 0.005, # soft update hyperparameter (tau)
"actor_learning_rate": 1e-3, # actor learning rate
"critic_learning_rate": 1e-3, # critic learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0, # clipping coefficient for the norm of the gradients
"learn_entropy": True, # learn entropy
"entropy_learning_rate": 1e-3, # entropy learning rate
"initial_entropy_value": 0.2, # initial entropy value
"target_entropy": None, # target entropy
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@functools.partial(jax.jit, static_argnames=("critic_1_act", "critic_2_act"))
def _update_critic(critic_1_act,
critic_1_state_dict,
critic_2_act,
critic_2_state_dict,
target_q1_values: jax.Array,
target_q2_values: jax.Array,
entropy_coefficient,
next_log_prob,
sampled_states: Union[np.ndarray, jax.Array],
sampled_actions: Union[np.ndarray, jax.Array],
sampled_rewards: Union[np.ndarray, jax.Array],
sampled_dones: Union[np.ndarray, jax.Array],
discount_factor: float):
# compute target values
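    # soft Bellman target: y = r + gamma * (1 - done) * (min(Q1', Q2') - alpha * log pi(a'|s'))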
target_q_values = jnp.minimum(target_q1_values, target_q2_values) - entropy_coefficient * next_log_prob
target_values = sampled_rewards + discount_factor * jnp.logical_not(sampled_dones) * target_q_values
# compute critic loss
def _critic_loss(params, critic_act, role):
critic_values, _, _ = critic_act({"states": sampled_states, "taken_actions": sampled_actions}, role, params)
critic_loss = ((critic_values - target_values) ** 2).mean()
return critic_loss, critic_values
(critic_1_loss, critic_1_values), grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_1_state_dict.params, critic_1_act, "critic_1")
(critic_2_loss, critic_2_values), grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_2_state_dict.params, critic_2_act, "critic_2")
return grad, (critic_1_loss + critic_2_loss) / 2, critic_1_values, critic_2_values, target_values
@functools.partial(jax.jit, static_argnames=("policy_act", "critic_1_act", "critic_2_act"))
def _update_policy(policy_act,
critic_1_act,
critic_2_act,
policy_state_dict,
critic_1_state_dict,
critic_2_state_dict,
entropy_coefficient,
sampled_states):
# compute policy (actor) loss
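    # actor loss: E[alpha * log pi(a|s) - min(Q1(s, a), Q2(s, a))]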
def _policy_loss(policy_params, critic_1_params, critic_2_params):
actions, log_prob, _ = policy_act({"states": sampled_states}, "policy", policy_params)
critic_1_values, _, _ = critic_1_act({"states": sampled_states, "taken_actions": actions}, "critic_1", critic_1_params)
critic_2_values, _, _ = critic_2_act({"states": sampled_states, "taken_actions": actions}, "critic_2", critic_2_params)
return (entropy_coefficient * log_prob - jnp.minimum(critic_1_values, critic_2_values)).mean(), log_prob
(policy_loss, log_prob), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params, critic_1_state_dict.params, critic_2_state_dict.params)
return grad, policy_loss, log_prob
@jax.jit
def _update_entropy(log_entropy_coefficient_state_dict, target_entropy, log_prob):
# compute entropy loss
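    # temperature objective: -E[log_alpha * (log pi(a|s) + target_entropy)], where params["params"] holds log_alpha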
def _entropy_loss(params):
return -(params["params"] * (log_prob + target_entropy)).mean()
entropy_loss, grad = jax.value_and_grad(_entropy_loss, has_aux=False)(log_entropy_coefficient_state_dict.params)
return grad, entropy_loss
class SAC(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Soft Actor-Critic (SAC)
https://arxiv.org/abs/1801.01290
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
# _cfg = copy.deepcopy(SAC_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = SAC_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.critic_1 = self.models.get("critic_1", None)
self.critic_2 = self.models.get("critic_2", None)
self.target_critic_1 = self.models.get("target_critic_1", None)
self.target_critic_2 = self.models.get("target_critic_2", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["critic_1"] = self.critic_1
self.checkpoint_modules["critic_2"] = self.critic_2
self.checkpoint_modules["target_critic_1"] = self.target_critic_1
self.checkpoint_modules["target_critic_2"] = self.target_critic_2
# configuration
self._gradient_steps = self.cfg["gradient_steps"]
self._batch_size = self.cfg["batch_size"]
self._discount_factor = self.cfg["discount_factor"]
self._polyak = self.cfg["polyak"]
self._actor_learning_rate = self.cfg["actor_learning_rate"]
self._critic_learning_rate = self.cfg["critic_learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._entropy_learning_rate = self.cfg["entropy_learning_rate"]
self._learn_entropy = self.cfg["learn_entropy"]
self._entropy_coefficient = self.cfg["initial_entropy_value"]
self._rewards_shaper = self.cfg["rewards_shaper"]
# entropy
if self._learn_entropy:
self._target_entropy = self.cfg["target_entropy"]
if self._target_entropy is None:
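                # default heuristic: -prod(action_shape) for continuous (Box) spaces, -n for discrete spaces, 0 otherwise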
if issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box):
self._target_entropy = -np.prod(self.action_space.shape).astype(np.float32)
elif issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete):
self._target_entropy = -self.action_space.n
else:
self._target_entropy = 0
class _LogEntropyCoefficient:
def __init__(self, entropy_coefficient: float) -> None:
class StateDict(flax.struct.PyTreeNode):
params: flax.core.FrozenDict[str, Any] = flax.struct.field(pytree_node=True)
self.state_dict = StateDict(flax.core.FrozenDict({"params": jnp.array([jnp.log(entropy_coefficient)])}))
@property
def value(self):
return self.state_dict.params["params"]
self.log_entropy_coefficient = _LogEntropyCoefficient(self._entropy_coefficient)
self.entropy_optimizer = Adam(model=self.log_entropy_coefficient, lr=self._entropy_learning_rate)
self.checkpoint_modules["entropy_optimizer"] = self.entropy_optimizer
# set up optimizers and learning rate schedulers
if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None:
self.policy_optimizer = Adam(model=self.policy, lr=self._actor_learning_rate, grad_norm_clip=self._grad_norm_clip)
self.critic_1_optimizer = Adam(model=self.critic_1, lr=self._critic_learning_rate, grad_norm_clip=self._grad_norm_clip)
self.critic_2_optimizer = Adam(model=self.critic_2, lr=self._critic_learning_rate, grad_norm_clip=self._grad_norm_clip)
if self._learning_rate_scheduler is not None:
self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.critic_1_scheduler = self._learning_rate_scheduler(self.critic_1_optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.critic_2_scheduler = self._learning_rate_scheduler(self.critic_2_optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer
self.checkpoint_modules["critic_1_optimizer"] = self.critic_1_optimizer
self.checkpoint_modules["critic_2_optimizer"] = self.critic_2_optimizer
# set up target networks
if self.target_critic_1 is not None and self.target_critic_2 is not None:
# freeze target networks with respect to optimizers (update via .update_parameters())
self.target_critic_1.freeze_parameters(True)
self.target_critic_2.freeze_parameters(True)
# update target networks (hard update)
self.target_critic_1.update_parameters(self.critic_1, polyak=1)
self.target_critic_2.update_parameters(self.critic_2, polyak=1)
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8)
self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"]
# set up models for just-in-time compilation with XLA
self.policy.apply = jax.jit(self.policy.apply, static_argnums=2)
if self.critic_1 is not None and self.critic_2 is not None:
self.critic_1.apply = jax.jit(self.critic_1.apply, static_argnums=2)
self.critic_2.apply = jax.jit(self.critic_2.apply, static_argnums=2)
if self.target_critic_1 is not None and self.target_critic_2 is not None:
self.target_critic_1.apply = jax.jit(self.target_critic_1.apply, static_argnums=2)
self.target_critic_2.apply = jax.jit(self.target_critic_2.apply, static_argnums=2)
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
# sample random actions
# TODO, check for stochasticity
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample stochastic actions
actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy")
if not self._jax: # numpy backend
actions = jax.device_get(actions)
return actions, None, outputs
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
            # store the transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
if timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# sample a batch from memory
sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \
self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0]
# gradient steps
for gradient_step in range(self._gradient_steps):
sampled_states = self._state_preprocessor(sampled_states, train=True)
sampled_next_states = self._state_preprocessor(sampled_next_states, train=True)
next_actions, next_log_prob, _ = self.policy.act({"states": sampled_next_states}, role="policy")
# compute target values
target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_1")
target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_2")
# compute critic loss
grad, critic_loss, critic_1_values, critic_2_values, target_values = _update_critic(self.critic_1.act,
self.critic_1.state_dict,
self.critic_2.act,
self.critic_2.state_dict,
target_q1_values,
target_q2_values,
self._entropy_coefficient,
next_log_prob,
sampled_states,
sampled_actions,
sampled_rewards,
sampled_dones,
self._discount_factor)
            # optimization step (critic)
            critic_1_grad, critic_2_grad = grad  # one gradient pytree per critic
            self.critic_1_optimizer = self.critic_1_optimizer.step(critic_1_grad, self.critic_1)
            self.critic_2_optimizer = self.critic_2_optimizer.step(critic_2_grad, self.critic_2)
# compute policy (actor) loss
grad, policy_loss, log_prob = _update_policy(self.policy.act,
self.critic_1.act,
self.critic_2.act,
self.policy.state_dict,
self.critic_1.state_dict,
self.critic_2.state_dict,
self._entropy_coefficient,
sampled_states)
# optimization step (policy)
self.policy_optimizer = self.policy_optimizer.step(grad, self.policy)
# entropy learning
if self._learn_entropy:
# compute entropy loss
grad, entropy_loss = _update_entropy(self.log_entropy_coefficient.state_dict,
self._target_entropy,
log_prob)
# optimization step (entropy)
self.entropy_optimizer = self.entropy_optimizer.step(grad, self.log_entropy_coefficient)
# compute entropy coefficient
self._entropy_coefficient = jnp.exp(self.log_entropy_coefficient.value)
# update target networks
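            # soft (polyak) update: params_target <- polyak * params + (1 - polyak) * params_target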
self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak)
self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak)
# update learning rate
if self._learning_rate_scheduler:
self.policy_scheduler.step()
self.critic_1_scheduler.step()
self.critic_2_scheduler.step()
# record data
if self.write_interval > 0:
self.track_data("Loss / Policy loss", policy_loss.item())
self.track_data("Loss / Critic loss", critic_loss.item())
self.track_data("Q-network / Q1 (max)", critic_1_values.max().item())
self.track_data("Q-network / Q1 (min)", critic_1_values.min().item())
self.track_data("Q-network / Q1 (mean)", critic_1_values.mean().item())
self.track_data("Q-network / Q2 (max)", critic_2_values.max().item())
self.track_data("Q-network / Q2 (min)", critic_2_values.min().item())
self.track_data("Q-network / Q2 (mean)", critic_2_values.mean().item())
self.track_data("Target / Target (max)", target_values.max().item())
self.track_data("Target / Target (min)", target_values.min().item())
self.track_data("Target / Target (mean)", target_values.mean().item())
if self._learn_entropy:
self.track_data("Loss / Entropy loss", entropy_loss.item())
self.track_data("Coefficient / Entropy coefficient", self._entropy_coefficient.item())
if self._learning_rate_scheduler:
self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0])
self.track_data("Learning / Critic 1 learning rate", self.critic_1_scheduler.get_last_lr()[0])
self.track_data("Learning / Critic 2 learning rate", self.critic_2_scheduler.get_last_lr()[0])
| 25,277 | Python | 51.335404 | 164 | 0.582862 |
Toni-SM/skrl/skrl/agents/jax/td3/__init__.py | from skrl.agents.jax.td3.td3 import TD3, TD3_DEFAULT_CONFIG
| 60 | Python | 29.499985 | 59 | 0.8 |
Toni-SM/skrl/skrl/agents/jax/td3/td3.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import functools
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl import logger
from skrl.agents.jax import Agent
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
# [start-config-dict-jax]
TD3_DEFAULT_CONFIG = {
"gradient_steps": 1, # gradient steps
"batch_size": 64, # training batch size
"discount_factor": 0.99, # discount factor (gamma)
"polyak": 0.005, # soft update hyperparameter (tau)
"actor_learning_rate": 1e-3, # actor learning rate
"critic_learning_rate": 1e-3, # critic learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0, # clipping coefficient for the norm of the gradients
"exploration": {
"noise": None, # exploration noise
"initial_scale": 1.0, # initial scale for the noise
"final_scale": 1e-3, # final scale for the noise
"timesteps": None, # timesteps for the noise decay
},
"policy_delay": 2, # policy delay update with respect to critic update
"smooth_regularization_noise": None, # smooth noise for regularization
"smooth_regularization_clip": 0.5, # clip for smooth regularization
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@jax.jit
def _apply_exploration_noise(actions: jax.Array,
noises: jax.Array,
clip_actions_min: jax.Array,
clip_actions_max: jax.Array,
scale: float) -> jax.Array:
noises = noises.at[:].multiply(scale)
return jnp.clip(actions + noises, a_min=clip_actions_min, a_max=clip_actions_max), noises
@jax.jit
def _apply_smooth_regularization_noise(actions: jax.Array,
noises: jax.Array,
clip_actions_min: jax.Array,
clip_actions_max: jax.Array,
smooth_regularization_clip: float) -> jax.Array:
noises = jnp.clip(noises, a_min=-smooth_regularization_clip, a_max=smooth_regularization_clip)
return jnp.clip(actions + noises, a_min=clip_actions_min, a_max=clip_actions_max)
@functools.partial(jax.jit, static_argnames=("critic_1_act", "critic_2_act"))
def _update_critic(critic_1_act,
critic_1_state_dict,
critic_2_act,
critic_2_state_dict,
target_q1_values: jax.Array,
target_q2_values: jax.Array,
sampled_states: Union[np.ndarray, jax.Array],
sampled_actions: Union[np.ndarray, jax.Array],
sampled_rewards: Union[np.ndarray, jax.Array],
sampled_dones: Union[np.ndarray, jax.Array],
discount_factor: float):
# compute target values
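    # clipped double-Q target: y = r + discount * (1 - done) * min(Q1'(s', a'), Q2'(s', a'))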
target_q_values = jnp.minimum(target_q1_values, target_q2_values)
target_values = sampled_rewards + discount_factor * jnp.logical_not(sampled_dones) * target_q_values
# compute critic loss
def _critic_loss(params, critic_act, role):
critic_values, _, _ = critic_act({"states": sampled_states, "taken_actions": sampled_actions}, role, params)
critic_loss = ((critic_values - target_values) ** 2).mean()
return critic_loss, critic_values
    (critic_1_loss, critic_1_values), critic_1_grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_1_state_dict.params, critic_1_act, "critic_1")
    (critic_2_loss, critic_2_values), critic_2_grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_2_state_dict.params, critic_2_act, "critic_2")
    return (critic_1_grad, critic_2_grad), critic_1_loss + critic_2_loss, critic_1_values, critic_2_values, target_values
@functools.partial(jax.jit, static_argnames=("policy_act", "critic_1_act"))
def _update_policy(policy_act,
critic_1_act,
policy_state_dict,
critic_1_state_dict,
sampled_states):
# compute policy (actor) loss
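    # deterministic policy gradient objective: maximize Q1(s, pi(s)) by minimizing its negative mean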
def _policy_loss(policy_params, critic_1_params):
actions, _, _ = policy_act({"states": sampled_states}, "policy", policy_params)
critic_values, _, _ = critic_1_act({"states": sampled_states, "taken_actions": actions}, "critic_1", critic_1_params)
return -critic_values.mean()
policy_loss, grad = jax.value_and_grad(_policy_loss, has_aux=False)(policy_state_dict.params, critic_1_state_dict.params)
return grad, policy_loss
class TD3(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Twin Delayed DDPG (TD3)
https://arxiv.org/abs/1802.09477
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
# _cfg = copy.deepcopy(TD3_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = TD3_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.target_policy = self.models.get("target_policy", None)
self.critic_1 = self.models.get("critic_1", None)
self.critic_2 = self.models.get("critic_2", None)
self.target_critic_1 = self.models.get("target_critic_1", None)
self.target_critic_2 = self.models.get("target_critic_2", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["target_policy"] = self.target_policy
self.checkpoint_modules["critic_1"] = self.critic_1
self.checkpoint_modules["critic_2"] = self.critic_2
self.checkpoint_modules["target_critic_1"] = self.target_critic_1
self.checkpoint_modules["target_critic_2"] = self.target_critic_2
# configuration
self._gradient_steps = self.cfg["gradient_steps"]
self._batch_size = self.cfg["batch_size"]
self._discount_factor = self.cfg["discount_factor"]
self._polyak = self.cfg["polyak"]
self._actor_learning_rate = self.cfg["actor_learning_rate"]
self._critic_learning_rate = self.cfg["critic_learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._exploration_noise = self.cfg["exploration"]["noise"]
self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"]
self._exploration_final_scale = self.cfg["exploration"]["final_scale"]
self._exploration_timesteps = self.cfg["exploration"]["timesteps"]
self._policy_delay = self.cfg["policy_delay"]
self._critic_update_counter = 0
self._smooth_regularization_noise = self.cfg["smooth_regularization_noise"]
self._smooth_regularization_clip = self.cfg["smooth_regularization_clip"]
if self._smooth_regularization_noise is None:
logger.warning("agents:TD3: No smooth regularization noise specified to reduce variance during training")
self._rewards_shaper = self.cfg["rewards_shaper"]
# set up optimizers and learning rate schedulers
if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None:
self.policy_optimizer = Adam(model=self.policy, lr=self._actor_learning_rate, grad_norm_clip=self._grad_norm_clip)
self.critic_1_optimizer = Adam(model=self.critic_1, lr=self._critic_learning_rate, grad_norm_clip=self._grad_norm_clip)
self.critic_2_optimizer = Adam(model=self.critic_2, lr=self._critic_learning_rate, grad_norm_clip=self._grad_norm_clip)
if self._learning_rate_scheduler is not None:
self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.critic_1_scheduler = self._learning_rate_scheduler(self.critic_1_optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.critic_2_scheduler = self._learning_rate_scheduler(self.critic_2_optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer
self.checkpoint_modules["critic_1_optimizer"] = self.critic_1_optimizer
self.checkpoint_modules["critic_2_optimizer"] = self.critic_2_optimizer
# set up target networks
if self.target_policy is not None and self.target_critic_1 is not None and self.target_critic_2 is not None:
# freeze target networks with respect to optimizers (update via .update_parameters())
self.target_policy.freeze_parameters(True)
self.target_critic_1.freeze_parameters(True)
self.target_critic_2.freeze_parameters(True)
# update target networks (hard update)
self.target_policy.update_parameters(self.policy, polyak=1)
self.target_critic_1.update_parameters(self.critic_1, polyak=1)
self.target_critic_2.update_parameters(self.critic_2, polyak=1)
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8)
self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"]
# clip noise bounds
if self.action_space is not None:
if self._jax:
self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32)
self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32)
else:
self.clip_actions_min = np.array(self.action_space.low, dtype=np.float32)
self.clip_actions_max = np.array(self.action_space.high, dtype=np.float32)
# set up models for just-in-time compilation with XLA
self.policy.apply = jax.jit(self.policy.apply, static_argnums=2)
if self.critic_1 is not None and self.critic_2 is not None:
self.critic_1.apply = jax.jit(self.critic_1.apply, static_argnums=2)
self.critic_2.apply = jax.jit(self.critic_2.apply, static_argnums=2)
if self.target_policy is not None and self.target_critic_1 is not None and self.target_critic_2 is not None:
self.target_policy.apply = jax.jit(self.target_policy.apply, static_argnums=2)
self.target_critic_1.apply = jax.jit(self.target_critic_1.apply, static_argnums=2)
self.target_critic_2.apply = jax.jit(self.target_critic_2.apply, static_argnums=2)
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
# sample random actions
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample deterministic actions
actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy")
if not self._jax: # numpy backend
actions = jax.device_get(actions)
        # add exploration noise
if self._exploration_noise is not None:
# sample noises
noises = self._exploration_noise.sample(actions.shape)
# define exploration timesteps
scale = self._exploration_final_scale
if self._exploration_timesteps is None:
self._exploration_timesteps = timesteps
# apply exploration noise
if timestep <= self._exploration_timesteps:
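                # linearly anneal the noise scale from initial_scale to final_scale over the exploration timesteps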
scale = (1 - timestep / self._exploration_timesteps) \
* (self._exploration_initial_scale - self._exploration_final_scale) \
+ self._exploration_final_scale
# modify actions
if self._jax:
actions, noises = _apply_exploration_noise(actions, noises, self.clip_actions_min, self.clip_actions_max, scale)
else:
noises *= scale
actions = np.clip(actions + noises, a_min=self.clip_actions_min, a_max=self.clip_actions_max)
# record noises
self.track_data("Exploration / Exploration noise (max)", noises.max().item())
self.track_data("Exploration / Exploration noise (min)", noises.min().item())
self.track_data("Exploration / Exploration noise (mean)", noises.mean().item())
else:
# record noises
self.track_data("Exploration / Exploration noise (max)", 0)
self.track_data("Exploration / Exploration noise (min)", 0)
self.track_data("Exploration / Exploration noise (mean)", 0)
return actions, None, outputs
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
            # store the transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
if timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# sample a batch from memory
sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \
self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0]
# gradient steps
for gradient_step in range(self._gradient_steps):
sampled_states = self._state_preprocessor(sampled_states, train=True)
sampled_next_states = self._state_preprocessor(sampled_next_states, train=True)
# target policy smoothing
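            # perturb the target policy's actions with clipped noise so the value target is smoothed over nearby actions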
next_actions, _, _ = self.target_policy.act({"states": sampled_next_states}, role="target_policy")
if self._smooth_regularization_noise is not None:
noises = self._smooth_regularization_noise.sample(next_actions.shape)
if self._jax:
next_actions = _apply_smooth_regularization_noise(next_actions, noises, self.clip_actions_min, self.clip_actions_max, self._smooth_regularization_clip)
else:
noises = np.clip(noises, a_min=-self._smooth_regularization_clip, a_max=self._smooth_regularization_clip)
next_actions = np.clip(next_actions + noises, a_min=self.clip_actions_min, a_max=self.clip_actions_max)
# compute target values
target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_1")
target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_2")
# compute critic loss
grad, critic_loss, critic_1_values, critic_2_values, target_values = _update_critic(self.critic_1.act,
self.critic_1.state_dict,
self.critic_2.act,
self.critic_2.state_dict,
target_q1_values,
target_q2_values,
sampled_states,
sampled_actions,
sampled_rewards,
sampled_dones,
self._discount_factor)
            # optimization step (critic)
            critic_1_grad, critic_2_grad = grad  # one gradient pytree per critic
            self.critic_1_optimizer = self.critic_1_optimizer.step(critic_1_grad, self.critic_1)
            self.critic_2_optimizer = self.critic_2_optimizer.step(critic_2_grad, self.critic_2)
# delayed update
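            # update the policy and the target networks only once every `policy_delay` critic updates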
self._critic_update_counter += 1
if not self._critic_update_counter % self._policy_delay:
# compute policy (actor) loss
grad, policy_loss = _update_policy(self.policy.act,
self.critic_1.act,
self.policy.state_dict,
self.critic_1.state_dict,
sampled_states)
# optimization step (policy)
self.policy_optimizer = self.policy_optimizer.step(grad, self.policy)
# update target networks
self.target_policy.update_parameters(self.policy, polyak=self._polyak)
self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak)
self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak)
# update learning rate
if self._learning_rate_scheduler:
self.policy_scheduler.step()
self.critic_1_scheduler.step()
self.critic_2_scheduler.step()
# record data
if not self._critic_update_counter % self._policy_delay:
self.track_data("Loss / Policy loss", policy_loss.item())
self.track_data("Loss / Critic loss", critic_loss.item())
self.track_data("Q-network / Q1 (max)", critic_1_values.max().item())
self.track_data("Q-network / Q1 (min)", critic_1_values.min().item())
self.track_data("Q-network / Q1 (mean)", critic_1_values.mean().item())
self.track_data("Q-network / Q2 (max)", critic_2_values.max().item())
self.track_data("Q-network / Q2 (min)", critic_2_values.min().item())
self.track_data("Q-network / Q2 (mean)", critic_2_values.mean().item())
self.track_data("Target / Target (max)", target_values.max().item())
self.track_data("Target / Target (min)", target_values.min().item())
self.track_data("Target / Target (mean)", target_values.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0])
self.track_data("Learning / Critic 1 learning rate", self.critic_1_scheduler.get_last_lr()[0])
self.track_data("Learning / Critic 2 learning rate", self.critic_2_scheduler.get_last_lr()[0])
| 26,774 | Python | 51.294922 | 171 | 0.592179 |
Toni-SM/skrl/skrl/agents/jax/ddpg/ddpg.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import functools
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl.agents.jax import Agent
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
# [start-config-dict-jax]
DDPG_DEFAULT_CONFIG = {
"gradient_steps": 1, # gradient steps
"batch_size": 64, # training batch size
"discount_factor": 0.99, # discount factor (gamma)
"polyak": 0.005, # soft update hyperparameter (tau)
"actor_learning_rate": 1e-3, # actor learning rate
"critic_learning_rate": 1e-3, # critic learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0, # clipping coefficient for the norm of the gradients
"exploration": {
"noise": None, # exploration noise
"initial_scale": 1.0, # initial scale for the noise
"final_scale": 1e-3, # final scale for the noise
"timesteps": None, # timesteps for the noise decay
},
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@jax.jit
def _apply_exploration_noise(actions: jax.Array,
noises: jax.Array,
clip_actions_min: jax.Array,
clip_actions_max: jax.Array,
scale: float) -> jax.Array:
noises = noises.at[:].multiply(scale)
return jnp.clip(actions + noises, a_min=clip_actions_min, a_max=clip_actions_max), noises
@functools.partial(jax.jit, static_argnames=("critic_act"))
def _update_critic(critic_act,
critic_state_dict,
target_q_values: jax.Array,
sampled_states: Union[np.ndarray, jax.Array],
sampled_actions: Union[np.ndarray, jax.Array],
sampled_rewards: Union[np.ndarray, jax.Array],
sampled_dones: Union[np.ndarray, jax.Array],
discount_factor: float):
# compute target values
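    # one-step TD target: y = r + discount * (1 - done) * Q'(s', mu'(s')), with the target Q-values computed by the caller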
target_values = sampled_rewards + discount_factor * jnp.logical_not(sampled_dones) * target_q_values
# compute critic loss
def _critic_loss(params):
critic_values, _, _ = critic_act({"states": sampled_states, "taken_actions": sampled_actions}, "critic", params)
critic_loss = ((critic_values - target_values) ** 2).mean()
return critic_loss, critic_values
(critic_loss, critic_values), grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_state_dict.params)
return grad, critic_loss, critic_values, target_values
@functools.partial(jax.jit, static_argnames=("policy_act", "critic_act"))
def _update_policy(policy_act,
critic_act,
policy_state_dict,
critic_state_dict,
sampled_states):
# compute policy (actor) loss
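    # deterministic policy gradient objective: maximize Q(s, mu(s)) by minimizing its negative mean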
def _policy_loss(policy_params, critic_params):
actions, _, _ = policy_act({"states": sampled_states}, "policy", policy_params)
critic_values, _, _ = critic_act({"states": sampled_states, "taken_actions": actions}, "critic", critic_params)
return -critic_values.mean()
policy_loss, grad = jax.value_and_grad(_policy_loss, has_aux=False)(policy_state_dict.params, critic_state_dict.params)
return grad, policy_loss
class DDPG(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Deep Deterministic Policy Gradient (DDPG)
https://arxiv.org/abs/1509.02971
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
# _cfg = copy.deepcopy(DDPG_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = DDPG_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.target_policy = self.models.get("target_policy", None)
self.critic = self.models.get("critic", None)
self.target_critic = self.models.get("target_critic", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["target_policy"] = self.target_policy
self.checkpoint_modules["critic"] = self.critic
self.checkpoint_modules["target_critic"] = self.target_critic
# configuration
self._gradient_steps = self.cfg["gradient_steps"]
self._batch_size = self.cfg["batch_size"]
self._discount_factor = self.cfg["discount_factor"]
self._polyak = self.cfg["polyak"]
self._actor_learning_rate = self.cfg["actor_learning_rate"]
self._critic_learning_rate = self.cfg["critic_learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._exploration_noise = self.cfg["exploration"]["noise"]
self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"]
self._exploration_final_scale = self.cfg["exploration"]["final_scale"]
self._exploration_timesteps = self.cfg["exploration"]["timesteps"]
self._rewards_shaper = self.cfg["rewards_shaper"]
# set up optimizers and learning rate schedulers
if self.policy is not None and self.critic is not None:
self.policy_optimizer = Adam(model=self.policy, lr=self._actor_learning_rate, grad_norm_clip=self._grad_norm_clip)
self.critic_optimizer = Adam(model=self.critic, lr=self._critic_learning_rate, grad_norm_clip=self._grad_norm_clip)
if self._learning_rate_scheduler is not None:
self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer
self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer
# set up target networks
if self.target_policy is not None and self.target_critic is not None:
# freeze target networks with respect to optimizers (update via .update_parameters())
self.target_policy.freeze_parameters(True)
self.target_critic.freeze_parameters(True)
# update target networks (hard update)
self.target_policy.update_parameters(self.policy, polyak=1)
self.target_critic.update_parameters(self.critic, polyak=1)
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8)
self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"]
# clip noise bounds
if self.action_space is not None:
if self._jax:
self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32)
self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32)
else:
self.clip_actions_min = np.array(self.action_space.low, dtype=np.float32)
self.clip_actions_max = np.array(self.action_space.high, dtype=np.float32)
# set up models for just-in-time compilation with XLA
self.policy.apply = jax.jit(self.policy.apply, static_argnums=2)
if self.critic is not None:
self.critic.apply = jax.jit(self.critic.apply, static_argnums=2)
if self.target_policy is not None and self.target_critic is not None:
self.target_policy.apply = jax.jit(self.target_policy.apply, static_argnums=2)
self.target_critic.apply = jax.jit(self.target_critic.apply, static_argnums=2)
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
# sample random actions
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample deterministic actions
actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy")
if not self._jax: # numpy backend
actions = jax.device_get(actions)
        # add exploration noise
if self._exploration_noise is not None:
# sample noises
noises = self._exploration_noise.sample(actions.shape)
# define exploration timesteps
scale = self._exploration_final_scale
if self._exploration_timesteps is None:
self._exploration_timesteps = timesteps
# apply exploration noise
if timestep <= self._exploration_timesteps:
scale = (1 - timestep / self._exploration_timesteps) \
* (self._exploration_initial_scale - self._exploration_final_scale) \
+ self._exploration_final_scale
# modify actions
if self._jax:
actions, noises = _apply_exploration_noise(actions, noises, self.clip_actions_min, self.clip_actions_max, scale)
else:
noises *= scale
actions = np.clip(actions + noises, a_min=self.clip_actions_min, a_max=self.clip_actions_max)
# record noises
self.track_data("Exploration / Exploration noise (max)", noises.max().item())
self.track_data("Exploration / Exploration noise (min)", noises.min().item())
self.track_data("Exploration / Exploration noise (mean)", noises.mean().item())
else:
# record noises
self.track_data("Exploration / Exploration noise (max)", 0)
self.track_data("Exploration / Exploration noise (min)", 0)
self.track_data("Exploration / Exploration noise (mean)", 0)
return actions, None, outputs
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
            # store the transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
if timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# sample a batch from memory
sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \
self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0]
# gradient steps
for gradient_step in range(self._gradient_steps):
sampled_states = self._state_preprocessor(sampled_states, train=True)
sampled_next_states = self._state_preprocessor(sampled_next_states, train=True)
# compute target values
next_actions, _, _ = self.target_policy.act({"states": sampled_next_states}, role="target_policy")
target_q_values, _, _ = self.target_critic.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic")
# compute critic loss
grad, critic_loss, critic_values, target_values = _update_critic(self.critic.act,
self.critic.state_dict,
target_q_values,
sampled_states,
sampled_actions,
sampled_rewards,
sampled_dones,
self._discount_factor)
# optimization step (critic)
self.critic_optimizer = self.critic_optimizer.step(grad, self.critic)
# compute policy (actor) loss
grad, policy_loss = _update_policy(self.policy.act,
self.critic.act,
self.policy.state_dict,
self.critic.state_dict,
sampled_states)
# optimization step (policy)
self.policy_optimizer = self.policy_optimizer.step(grad, self.policy)
# update target networks
self.target_policy.update_parameters(self.policy, polyak=self._polyak)
self.target_critic.update_parameters(self.critic, polyak=self._polyak)
# update learning rate
if self._learning_rate_scheduler:
self.policy_scheduler.step()
self.critic_scheduler.step()
# record data
self.track_data("Loss / Policy loss", policy_loss.item())
self.track_data("Loss / Critic loss", critic_loss.item())
self.track_data("Q-network / Q1 (max)", critic_values.max().item())
self.track_data("Q-network / Q1 (min)", critic_values.min().item())
self.track_data("Q-network / Q1 (mean)", critic_values.mean().item())
self.track_data("Target / Target (max)", target_values.max().item())
self.track_data("Target / Target (min)", target_values.min().item())
self.track_data("Target / Target (mean)", target_values.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0])
self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
| 21,779 | Python | 47.507795 | 144 | 0.597089 |
Toni-SM/skrl/skrl/agents/jax/ddpg/__init__.py | from skrl.agents.jax.ddpg.ddpg import DDPG, DDPG_DEFAULT_CONFIG
| 64 | Python | 31.499984 | 63 | 0.8125 |
Toni-SM/skrl/skrl/agents/jax/dqn/dqn.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import functools
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl.agents.jax import Agent
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
# [start-config-dict-jax]
DQN_DEFAULT_CONFIG = {
"gradient_steps": 1, # gradient steps
"batch_size": 64, # training batch size
"discount_factor": 0.99, # discount factor (gamma)
"polyak": 0.005, # soft update hyperparameter (tau)
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"update_interval": 1, # agent update interval
"target_update_interval": 10, # target network update interval
"exploration": {
"initial_epsilon": 1.0, # initial epsilon for epsilon-greedy exploration
"final_epsilon": 0.05, # final epsilon for epsilon-greedy exploration
"timesteps": 1000, # timesteps for epsilon-greedy decay
},
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@functools.partial(jax.jit, static_argnames=("q_network_act"))
def _update_q_network(q_network_act,
q_network_state_dict,
next_q_values,
sampled_states,
sampled_actions,
sampled_rewards,
sampled_dones,
discount_factor):
# compute target values
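    # greedy bootstrap: y = r + discount * (1 - done) * max_a' Q(s', a') over the provided next Q-values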
target_q_values = jnp.max(next_q_values, axis=-1, keepdims=True)
target_values = sampled_rewards + discount_factor * jnp.logical_not(sampled_dones) * target_q_values
# compute Q-network loss
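    # mean-squared TD error on the Q-values of the actions stored in the sampled batch (gathered row-wise)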
def _q_network_loss(params):
q_values = q_network_act({"states": sampled_states}, "q_network", params)[0]
q_values = q_values[jnp.arange(q_values.shape[0]), sampled_actions.reshape(-1)]
return ((q_values - target_values.reshape(-1)) ** 2).mean()
q_network_loss, grad = jax.value_and_grad(_q_network_loss, has_aux=False)(q_network_state_dict.params)
return grad, q_network_loss, target_values
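# Illustrative sketch (hand-computed, not part of the original file) of the 1-step TD target
# built above for a single transition, assuming reward = 1.0, done = False,
# discount_factor = 0.99 and target-network Q-values [0.5, 2.0] for the next state:
#   max_a Q_target(s', a) = 2.0
#   target = 1.0 + 0.99 * (1 - 0) * 2.0 = 2.98
# The Q-network loss above is then the mean squared error between Q(s, a_taken) and this target.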
class DQN(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Deep Q-Network (DQN)
https://arxiv.org/abs/1312.5602
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
                       If it is a tuple, the first element will be used for training and
                       only the environment transitions will be added to the rest
        :type memory: skrl.memories.jax.Memory, list of skrl.memories.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
# _cfg = copy.deepcopy(DQN_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = DQN_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.q_network = self.models.get("q_network", None)
self.target_q_network = self.models.get("target_q_network", None)
# checkpoint models
self.checkpoint_modules["q_network"] = self.q_network
self.checkpoint_modules["target_q_network"] = self.target_q_network
# configuration
self._gradient_steps = self.cfg["gradient_steps"]
self._batch_size = self.cfg["batch_size"]
self._discount_factor = self.cfg["discount_factor"]
self._polyak = self.cfg["polyak"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._update_interval = self.cfg["update_interval"]
self._target_update_interval = self.cfg["target_update_interval"]
self._exploration_initial_epsilon = self.cfg["exploration"]["initial_epsilon"]
self._exploration_final_epsilon = self.cfg["exploration"]["final_epsilon"]
self._exploration_timesteps = self.cfg["exploration"]["timesteps"]
self._rewards_shaper = self.cfg["rewards_shaper"]
# set up optimizer and learning rate scheduler
if self.q_network is not None:
self.optimizer = Adam(model=self.q_network, lr=self._learning_rate)
if self._learning_rate_scheduler is not None:
self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["optimizer"] = self.optimizer
# set up target networks
if self.target_q_network is not None:
# freeze target networks with respect to optimizers (update via .update_parameters())
self.target_q_network.freeze_parameters(True)
# update target networks (hard update)
self.target_q_network.update_parameters(self.q_network, polyak=1)
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.int32)
self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8)
self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"]
# set up models for just-in-time compilation with XLA
self.q_network.apply = jax.jit(self.q_network.apply, static_argnums=2)
if self.target_q_network is not None:
self.target_q_network.apply = jax.jit(self.target_q_network.apply, static_argnums=2)
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
states = self._state_preprocessor(states)
if not self._exploration_timesteps:
q_values, _, outputs = self.q_network.act({"states": states}, role="q_network")
actions = jnp.argmax(q_values, axis=1, keepdims=True)
if not self._jax: # numpy backend
actions = jax.device_get(actions)
return actions, None, outputs
# sample random actions
actions, _, outputs = self.q_network.random_act({"states": states}, role="q_network")
if timestep < self._random_timesteps:
raise NotImplementedError
# if not self._jax: # numpy backend
# actions = jax.device_get(actions)
return actions, None, outputs
# sample actions with epsilon-greedy policy
epsilon = self._exploration_final_epsilon + (self._exploration_initial_epsilon - self._exploration_final_epsilon) \
* np.exp(-1.0 * timestep / self._exploration_timesteps)
indexes = (np.random.random(states.shape[0]) >= epsilon).nonzero()[0]
if indexes.size:
q_values, _, outputs = self.q_network.act({"states": states[indexes]}, role="q_network")
if self._jax:
raise NotImplementedError
actions[indexes] = jnp.argmax(q_values, axis=1, keepdims=True)
else:
q_values = jax.device_get(q_values)
actions[indexes] = np.argmax(q_values, axis=1, keepdims=True)
# record epsilon
self.track_data("Exploration / Exploration epsilon", epsilon)
return actions, None, outputs
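    # Hand-worked sketch (illustrative only) of the exponential epsilon decay used above,
    # assuming the default exploration settings (initial 1.0, final 0.05, timesteps 1000):
    #   epsilon(0)    = 0.05 + 0.95 * exp(0)  = 1.00
    #   epsilon(1000) = 0.05 + 0.95 * exp(-1) ~= 0.40
    #   epsilon(5000) = 0.05 + 0.95 * exp(-5) ~= 0.056
    # i.e. exploration decays smoothly towards, but never exactly reaches, the final epsilon.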
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
if timestep >= self._learning_starts and not timestep % self._update_interval:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# sample a batch from memory
sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \
self.memory.sample(names=self.tensors_names, batch_size=self._batch_size)[0]
# gradient steps
for gradient_step in range(self._gradient_steps):
sampled_states = self._state_preprocessor(sampled_states, train=True)
sampled_next_states = self._state_preprocessor(sampled_next_states, train=True)
# compute target values
next_q_values, _, _ = self.target_q_network.act({"states": sampled_next_states}, role="target_q_network")
grad, q_network_loss, target_values = _update_q_network(self.q_network.act,
self.q_network.state_dict,
next_q_values,
sampled_states,
sampled_actions,
sampled_rewards,
sampled_dones,
self._discount_factor)
# optimization step (Q-network)
self.optimizer = self.optimizer.step(grad, self.q_network)
# update target network
if not timestep % self._target_update_interval:
self.target_q_network.update_parameters(self.q_network, polyak=self._polyak)
# update learning rate
if self._learning_rate_scheduler:
self.scheduler.step()
# record data
self.track_data("Loss / Q-network loss", q_network_loss.item())
self.track_data("Target / Target (max)", target_values.max().item())
self.track_data("Target / Target (min)", target_values.min().item())
self.track_data("Target / Target (mean)", target_values.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler._lr)
| 17,159 | Python | 45.253369 | 124 | 0.600793 |
Toni-SM/skrl/skrl/agents/jax/dqn/__init__.py | from skrl.agents.jax.dqn.ddqn import DDQN, DDQN_DEFAULT_CONFIG
from skrl.agents.jax.dqn.dqn import DQN, DQN_DEFAULT_CONFIG
| 123 | Python | 40.33332 | 62 | 0.804878 |
Toni-SM/skrl/skrl/agents/jax/dqn/ddqn.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import functools
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl.agents.jax import Agent
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
# [start-config-dict-jax]
DDQN_DEFAULT_CONFIG = {
"gradient_steps": 1, # gradient steps
"batch_size": 64, # training batch size
"discount_factor": 0.99, # discount factor (gamma)
"polyak": 0.005, # soft update hyperparameter (tau)
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"update_interval": 1, # agent update interval
"target_update_interval": 10, # target network update interval
"exploration": {
"initial_epsilon": 1.0, # initial epsilon for epsilon-greedy exploration
"final_epsilon": 0.05, # final epsilon for epsilon-greedy exploration
"timesteps": 1000, # timesteps for epsilon-greedy decay
},
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@functools.partial(jax.jit, static_argnames=("q_network_act"))
def _update_q_network(q_network_act,
q_network_state_dict,
next_q_values,
sampled_states,
sampled_next_states,
sampled_actions,
sampled_rewards,
sampled_dones,
discount_factor):
# compute target values
q_values = q_network_act({"states": sampled_next_states}, "q_network")[0]
actions = jnp.argmax(q_values, axis=-1, keepdims=True)
target_q_values = next_q_values[jnp.arange(q_values.shape[0]), actions.reshape(-1)].reshape(-1, 1)
target_values = sampled_rewards + discount_factor * jnp.logical_not(sampled_dones) * target_q_values
# compute Q-network loss
def _q_network_loss(params):
q_values = q_network_act({"states": sampled_states}, "q_network", params)[0]
q_values = q_values[jnp.arange(q_values.shape[0]), sampled_actions.reshape(-1)]
return ((q_values - target_values.reshape(-1)) ** 2).mean()
q_network_loss, grad = jax.value_and_grad(_q_network_loss, has_aux=False)(q_network_state_dict.params)
return grad, q_network_loss, target_values
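# Illustrative sketch (hand-picked numbers, not part of the original file) of the double-Q
# decoupling implemented above: the online Q-network selects the action, the target network
# evaluates it. Assuming online Q-values [1.0, 3.0] and target Q-values [2.5, 0.5] for the
# next state:
#   selected action = argmax([1.0, 3.0]) = 1
#   target_q_value  = target_Q[1] = 0.5     (vanilla DQN would instead use max(target_Q) = 2.5)
#   target          = reward + discount_factor * (1 - done) * 0.5
# which is what reduces the over-estimation bias of the plain DQN target.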
class DDQN(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Double Deep Q-Network (DDQN)
https://ojs.aaai.org/index.php/AAAI/article/view/10295
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
                       If it is a tuple, the first element will be used for training and
                       only the environment transitions will be added to the rest
        :type memory: skrl.memories.jax.Memory, list of skrl.memories.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
# _cfg = copy.deepcopy(DDQN_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = DDQN_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.q_network = self.models.get("q_network", None)
self.target_q_network = self.models.get("target_q_network", None)
# checkpoint models
self.checkpoint_modules["q_network"] = self.q_network
self.checkpoint_modules["target_q_network"] = self.target_q_network
# configuration
self._gradient_steps = self.cfg["gradient_steps"]
self._batch_size = self.cfg["batch_size"]
self._discount_factor = self.cfg["discount_factor"]
self._polyak = self.cfg["polyak"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._update_interval = self.cfg["update_interval"]
self._target_update_interval = self.cfg["target_update_interval"]
self._exploration_initial_epsilon = self.cfg["exploration"]["initial_epsilon"]
self._exploration_final_epsilon = self.cfg["exploration"]["final_epsilon"]
self._exploration_timesteps = self.cfg["exploration"]["timesteps"]
self._rewards_shaper = self.cfg["rewards_shaper"]
# set up optimizer and learning rate scheduler
if self.q_network is not None:
self.optimizer = Adam(model=self.q_network, lr=self._learning_rate)
if self._learning_rate_scheduler is not None:
self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"])
self.checkpoint_modules["optimizer"] = self.optimizer
# set up target networks
if self.target_q_network is not None:
# freeze target networks with respect to optimizers (update via .update_parameters())
self.target_q_network.freeze_parameters(True)
# update target networks (hard update)
self.target_q_network.update_parameters(self.q_network, polyak=1)
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.int32)
self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8)
self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"]
# set up models for just-in-time compilation with XLA
self.q_network.apply = jax.jit(self.q_network.apply, static_argnums=2)
if self.target_q_network is not None:
self.target_q_network.apply = jax.jit(self.target_q_network.apply, static_argnums=2)
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
states = self._state_preprocessor(states)
if not self._exploration_timesteps:
q_values, _, outputs = self.q_network.act({"states": states}, role="q_network")
actions = jnp.argmax(q_values, axis=1, keepdims=True)
if not self._jax: # numpy backend
actions = jax.device_get(actions)
return actions, None, outputs
# sample random actions
actions, _, outputs = self.q_network.random_act({"states": states}, role="q_network")
if timestep < self._random_timesteps:
raise NotImplementedError
# if not self._jax: # numpy backend
# actions = jax.device_get(actions)
return actions, None, outputs
# sample actions with epsilon-greedy policy
epsilon = self._exploration_final_epsilon + (self._exploration_initial_epsilon - self._exploration_final_epsilon) \
* np.exp(-1.0 * timestep / self._exploration_timesteps)
indexes = (np.random.random(states.shape[0]) >= epsilon).nonzero()[0]
if indexes.size:
q_values, _, outputs = self.q_network.act({"states": states[indexes]}, role="q_network")
if self._jax:
raise NotImplementedError
actions[indexes] = jnp.argmax(q_values, axis=1, keepdims=True)
else:
q_values = jax.device_get(q_values)
actions[indexes] = np.argmax(q_values, axis=1, keepdims=True)
# record epsilon
self.track_data("Exploration / Exploration epsilon", epsilon)
return actions, None, outputs
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
if timestep >= self._learning_starts and not timestep % self._update_interval:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# sample a batch from memory
sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \
self.memory.sample(names=self.tensors_names, batch_size=self._batch_size)[0]
# gradient steps
for gradient_step in range(self._gradient_steps):
sampled_states = self._state_preprocessor(sampled_states, train=True)
sampled_next_states = self._state_preprocessor(sampled_next_states, train=True)
# compute target values
next_q_values, _, _ = self.target_q_network.act({"states": sampled_next_states}, role="target_q_network")
grad, q_network_loss, target_values = _update_q_network(self.q_network.act,
self.q_network.state_dict,
next_q_values,
sampled_states,
sampled_next_states,
sampled_actions,
sampled_rewards,
sampled_dones,
self._discount_factor)
# optimization step (Q-network)
self.optimizer = self.optimizer.step(grad, self.q_network)
# update target network
if not timestep % self._target_update_interval:
self.target_q_network.update_parameters(self.q_network, polyak=self._polyak)
# update learning rate
if self._learning_rate_scheduler:
self.scheduler.step()
# record data
self.track_data("Loss / Q-network loss", q_network_loss.item())
self.track_data("Target / Target (max)", target_values.max().item())
self.track_data("Target / Target (min)", target_values.min().item())
self.track_data("Target / Target (mean)", target_values.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler._lr)
| 17,497 | Python | 45.661333 | 124 | 0.599303 |
Toni-SM/skrl/skrl/agents/jax/a2c/a2c.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import functools
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl.agents.jax import Agent
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
from skrl.resources.schedulers.jax import KLAdaptiveLR
# [start-config-dict-jax]
A2C_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"mini_batches": 1, # number of mini batches to use for updating
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
def compute_gae(rewards: np.ndarray,
dones: np.ndarray,
values: np.ndarray,
next_values: np.ndarray,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> np.ndarray:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: np.ndarray
:param dones: Signals to indicate that episodes have ended
:type dones: np.ndarray
:param values: Values obtained by the agent
:type values: np.ndarray
:param next_values: Next values obtained by the agent
:type next_values: np.ndarray
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
:return: Generalized Advantage Estimator
:rtype: np.ndarray
"""
advantage = 0
advantages = np.zeros_like(rewards)
not_dones = np.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
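# Hand-worked sketch (illustrative values, not part of the original file) of the recursion above
# for two timesteps without terminations, assuming discount_factor = 0.99, lambda = 0.95,
# rewards = [1.0, 1.0], values = [0.5, 0.6] and a bootstrap next value of 0.7:
#   A[1] = 1.0 - 0.6 + 0.99 * (0.7 + 0.95 * 0.0)   ~= 1.093
#   A[0] = 1.0 - 0.5 + 0.99 * (0.6 + 0.95 * 1.093) ~= 2.122
#   returns = A + values ~= [2.622, 1.693]
# after which the advantages are standardized (here to roughly [+1.0, -1.0]).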
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@jax.jit
def _compute_gae(rewards: jax.Array,
dones: jax.Array,
values: jax.Array,
next_values: jax.Array,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> jax.Array:
advantage = 0
advantages = jnp.zeros_like(rewards)
not_dones = jnp.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages = advantages.at[i].set(advantage)
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
@functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale"))
def _update_policy(policy_act,
policy_state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
get_entropy,
entropy_loss_scale):
# compute policy loss
def _policy_loss(params):
_, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions}, "policy", params)
# compute approximate KL divergence
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean()
# compute entropy loss
entropy_loss = 0
if entropy_loss_scale:
entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean()
return -(sampled_advantages * next_log_prob).mean(), (entropy_loss, kl_divergence, outputs["stddev"])
(policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params)
return grad, policy_loss, entropy_loss, kl_divergence, stddev
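# Illustrative note (not part of the original file): with a sampled advantage of +2.0 and a
# log-probability of -0.5 for the taken action, the per-sample objective above contributes
# -(2.0 * -0.5) = 1.0 to the loss, so minimizing it pushes the log-probability of
# positive-advantage actions up and of negative-advantage actions down.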
@functools.partial(jax.jit, static_argnames=("value_act"))
def _update_value(value_act,
value_state_dict,
sampled_states,
sampled_returns):
# compute value loss
def _value_loss(params):
predicted_values, _, _ = value_act({"states": sampled_states}, "value", params)
return ((sampled_returns - predicted_values) ** 2).mean()
value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params)
return grad, value_loss
class A2C(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Advantage Actor Critic (A2C)
https://arxiv.org/abs/1602.01783
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
                       If it is a tuple, the first element will be used for training and
                       only the environment transitions will be added to the rest
        :type memory: skrl.memories.jax.Memory, list of skrl.memories.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
# _cfg = copy.deepcopy(A2C_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = A2C_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.value = self.models.get("value", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["value"] = self.value
# configuration
self._mini_batches = self.cfg["mini_batches"]
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._entropy_loss_scale = self.cfg["entropy_loss_scale"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._value_preprocessor = self.cfg["value_preprocessor"]
self._discount_factor = self.cfg["discount_factor"]
self._lambda = self.cfg["lambda"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"]
# set up optimizer and learning rate scheduler
if self.policy is not None and self.value is not None:
# scheduler
scale = True
self.scheduler = None
if self._learning_rate_scheduler is not None:
if self._learning_rate_scheduler == KLAdaptiveLR:
scale = False
self.scheduler = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"])
else:
self._learning_rate = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"])
# optimizer
self.policy_optimizer = Adam(model=self.policy, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale)
self.value_optimizer = Adam(model=self.value, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale)
self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer
self.checkpoint_modules["value_optimizer"] = self.value_optimizer
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
if self._value_preprocessor:
self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"])
self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor
else:
self._value_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8)
self.memory.create_tensor(name="log_prob", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="values", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="returns", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="advantages", size=1, dtype=jnp.float32)
# tensors sampled during training
self._tensors_names = ["states", "actions", "log_prob", "returns", "advantages"]
# create temporary variables needed for storage and computation
self._current_log_prob = None
self._current_next_states = None
# set up models for just-in-time compilation with XLA
self.policy.apply = jax.jit(self.policy.apply, static_argnums=2)
if self.value is not None:
self.value.apply = jax.jit(self.value.apply, static_argnums=2)
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
# sample random actions
# TODO, check for stochasticity
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample stochastic actions
actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy")
if not self._jax: # numpy backend
actions = jax.device_get(actions)
log_prob = jax.device_get(log_prob)
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
self._current_next_states = next_states
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
# compute values
values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value")
if not self._jax: # numpy backend
values = jax.device_get(values)
values = self._value_preprocessor(values, inverse=True)
            # time-limit (truncation) bootstrapping
if self._time_limit_bootstrap:
rewards += self._discount_factor * values * truncated
            # store the transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# compute returns and advantages
self.value.training = False
last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states)}, role="value") # TODO: .float()
self.value.training = True
if not self._jax: # numpy backend
last_values = jax.device_get(last_values)
last_values = self._value_preprocessor(last_values, inverse=True)
values = self.memory.get_tensor_by_name("values")
if self._jax:
returns, advantages = _compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
else:
returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True))
self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True))
self.memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches)
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
kl_divergences = []
# mini-batches loop
for sampled_states, sampled_actions, sampled_log_prob, sampled_returns, sampled_advantages in sampled_batches:
sampled_states = self._state_preprocessor(sampled_states, train=True)
# compute policy loss
grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(self.policy.act,
self.policy.state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
self.policy.get_entropy,
self._entropy_loss_scale)
kl_divergences.append(kl_divergence.item())
# optimization step (policy)
self.policy_optimizer = self.policy_optimizer.step(grad, self.policy, self.scheduler._lr if self.scheduler else None)
# compute value loss
grad, value_loss = _update_value(self.value.act,
self.value.state_dict,
sampled_states,
sampled_returns)
# optimization step (value)
self.value_optimizer = self.value_optimizer.step(grad, self.value, self.scheduler._lr if self.scheduler else None)
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
if self._learning_rate_scheduler:
if isinstance(self.scheduler, KLAdaptiveLR):
self.scheduler.step(np.mean(kl_divergences))
# record data
self.track_data("Loss / Policy loss", cumulative_policy_loss / len(sampled_batches))
self.track_data("Loss / Value loss", cumulative_value_loss / len(sampled_batches))
if self._entropy_loss_scale:
self.track_data("Loss / Entropy loss", cumulative_entropy_loss / len(sampled_batches))
self.track_data("Policy / Standard deviation", stddev.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler._lr)
| 23,654 | Python | 45.382353 | 139 | 0.596432 |
Toni-SM/skrl/skrl/agents/jax/a2c/__init__.py | from skrl.agents.jax.a2c.a2c import A2C, A2C_DEFAULT_CONFIG
| 60 | Python | 29.499985 | 59 | 0.8 |
Toni-SM/skrl/skrl/agents/jax/ppo/__init__.py | from skrl.agents.jax.ppo.ppo import PPO, PPO_DEFAULT_CONFIG
| 60 | Python | 29.499985 | 59 | 0.8 |
Toni-SM/skrl/skrl/agents/jax/ppo/ppo.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import functools
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl.agents.jax import Agent
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
from skrl.resources.schedulers.jax import KLAdaptiveLR
# [start-config-dict-jax]
PPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 1.0, # value loss scaling factor
"kl_threshold": 0, # KL divergence threshold for early stopping
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
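# A minimal usage sketch (not part of the original file; names such as env, models and memory as
# well as the values are illustrative assumptions) of how these defaults are typically customized,
# e.g. enabling the KL-adaptive learning-rate scheduler imported above:
#
#   import copy
#   cfg = copy.deepcopy(PPO_DEFAULT_CONFIG)
#   cfg["rollouts"] = 32
#   cfg["learning_rate_scheduler"] = KLAdaptiveLR
#   cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}
#   # agent = PPO(models=models, memory=memory, observation_space=env.observation_space,
#   #             action_space=env.action_space, device=env.device, cfg=cfg)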
def compute_gae(rewards: np.ndarray,
dones: np.ndarray,
values: np.ndarray,
next_values: np.ndarray,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> np.ndarray:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: np.ndarray
:param dones: Signals to indicate that episodes have ended
:type dones: np.ndarray
:param values: Values obtained by the agent
:type values: np.ndarray
:param next_values: Next values obtained by the agent
:type next_values: np.ndarray
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
:return: Generalized Advantage Estimator
:rtype: np.ndarray
"""
advantage = 0
advantages = np.zeros_like(rewards)
not_dones = np.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@jax.jit
def _compute_gae(rewards: jax.Array,
dones: jax.Array,
values: jax.Array,
next_values: jax.Array,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> jax.Array:
advantage = 0
advantages = jnp.zeros_like(rewards)
not_dones = jnp.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages = advantages.at[i].set(advantage)
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
@functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale"))
def _update_policy(policy_act,
policy_state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
ratio_clip,
get_entropy,
entropy_loss_scale):
# compute policy loss
def _policy_loss(params):
_, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions}, "policy", params)
# compute approximate KL divergence
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean()
# compute policy loss
ratio = jnp.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
# compute entropy loss
entropy_loss = 0
if entropy_loss_scale:
entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean()
return -jnp.minimum(surrogate, surrogate_clipped).mean(), (entropy_loss, kl_divergence, outputs["stddev"])
(policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params)
return grad, policy_loss, entropy_loss, kl_divergence, stddev
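# Hand-worked sketch (illustrative numbers, not part of the original file) of the clipped
# surrogate objective above, assuming ratio_clip = 0.2:
#   advantage = +1.0, ratio = 1.5: surrogate = 1.5, clipped = 1.0 * clip(1.5, 0.8, 1.2) = 1.2,
#                                  loss contribution = -min(1.5, 1.2) = -1.2
#   advantage = -1.0, ratio = 0.5: surrogate = -0.5, clipped = -1.0 * clip(0.5, 0.8, 1.2) = -0.8,
#                                  loss contribution = -min(-0.5, -0.8) = 0.8
# i.e. the probability ratio only affects the loss inside [1 - ratio_clip, 1 + ratio_clip],
# which keeps each update close to the policy that collected the rollout.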
@functools.partial(jax.jit, static_argnames=("value_act", "clip_predicted_values"))
def _update_value(value_act,
value_state_dict,
sampled_states,
sampled_values,
sampled_returns,
value_loss_scale,
clip_predicted_values,
value_clip):
# compute value loss
def _value_loss(params):
predicted_values, _, _ = value_act({"states": sampled_states}, "value", params)
if clip_predicted_values:
predicted_values = sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip)
return value_loss_scale * ((sampled_returns - predicted_values) ** 2).mean()
value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params)
return grad, value_loss
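# Illustrative sketch (hand-picked numbers, not part of the original file) of the optional value
# clipping above, assuming clip_predicted_values = True and value_clip = 0.2: with a stored
# rollout value of 1.0 and a new prediction of 1.5, the prediction used in the loss becomes
# 1.0 + clip(0.5, -0.2, 0.2) = 1.2, so the value function may move at most value_clip away from
# the estimate collected during the rollout; the squared error against the sampled return is
# then scaled by value_loss_scale.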
class PPO(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Proximal Policy Optimization (PPO)
https://arxiv.org/abs/1707.06347
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
                       If it is a tuple, the first element will be used for training and
                       only the environment transitions will be added to the rest
        :type memory: skrl.memories.jax.Memory, list of skrl.memories.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
"""
# _cfg = copy.deepcopy(PPO_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = PPO_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.value = self.models.get("value", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["value"] = self.value
# configuration
self._learning_epochs = self.cfg["learning_epochs"]
self._mini_batches = self.cfg["mini_batches"]
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._ratio_clip = self.cfg["ratio_clip"]
self._value_clip = self.cfg["value_clip"]
self._clip_predicted_values = self.cfg["clip_predicted_values"]
self._value_loss_scale = self.cfg["value_loss_scale"]
self._entropy_loss_scale = self.cfg["entropy_loss_scale"]
self._kl_threshold = self.cfg["kl_threshold"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._value_preprocessor = self.cfg["value_preprocessor"]
self._discount_factor = self.cfg["discount_factor"]
self._lambda = self.cfg["lambda"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"]
# set up optimizer and learning rate scheduler
if self.policy is not None and self.value is not None:
# scheduler
scale = True
self.scheduler = None
if self._learning_rate_scheduler is not None:
if self._learning_rate_scheduler == KLAdaptiveLR:
scale = False
self.scheduler = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"])
else:
self._learning_rate = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"])
# optimizer
self.policy_optimizer = Adam(model=self.policy, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale)
self.value_optimizer = Adam(model=self.value, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale)
self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer
self.checkpoint_modules["value_optimizer"] = self.value_optimizer
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
if self._value_preprocessor:
self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"])
self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor
else:
self._value_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8)
self.memory.create_tensor(name="log_prob", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="values", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="returns", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="advantages", size=1, dtype=jnp.float32)
# tensors sampled during training
self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"]
# create temporary variables needed for storage and computation
self._current_log_prob = None
self._current_next_states = None
# set up models for just-in-time compilation with XLA
self.policy.apply = jax.jit(self.policy.apply, static_argnums=2)
if self.value is not None:
self.value.apply = jax.jit(self.value.apply, static_argnums=2)
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
# sample random actions
# TODO, check for stochasticity
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample stochastic actions
actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy")
if not self._jax: # numpy backend
actions = jax.device_get(actions)
log_prob = jax.device_get(log_prob)
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
self._current_next_states = next_states
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
# compute values
values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value")
if not self._jax: # numpy backend
values = jax.device_get(values)
values = self._value_preprocessor(values, inverse=True)
            # time-limit (truncation) bootstrapping
if self._time_limit_bootstrap:
rewards += self._discount_factor * values * truncated
            # store the transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# compute returns and advantages
self.value.training = False
last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states)}, role="value") # TODO: .float()
self.value.training = True
if not self._jax: # numpy backend
last_values = jax.device_get(last_values)
last_values = self._value_preprocessor(last_values, inverse=True)
values = self.memory.get_tensor_by_name("values")
if self._jax:
returns, advantages = _compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
else:
returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True))
self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True))
self.memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches)
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs):
kl_divergences = []
# mini-batches loop
for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches:
sampled_states = self._state_preprocessor(sampled_states, train=not epoch)
# compute policy loss
grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(self.policy.act,
self.policy.state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
self._ratio_clip,
self.policy.get_entropy,
self._entropy_loss_scale)
kl_divergences.append(kl_divergence.item())
# early stopping with KL divergence
if self._kl_threshold and kl_divergence > self._kl_threshold:
break
# optimization step (policy)
self.policy_optimizer = self.policy_optimizer.step(grad, self.policy, self.scheduler._lr if self.scheduler else None)
# compute value loss
grad, value_loss = _update_value(self.value.act,
self.value.state_dict,
sampled_states,
sampled_values,
sampled_returns,
self._value_loss_scale,
self._clip_predicted_values,
self._value_clip)
# optimization step (value)
self.value_optimizer = self.value_optimizer.step(grad, self.value, self.scheduler._lr if self.scheduler else None)
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
if self._learning_rate_scheduler:
if isinstance(self.scheduler, KLAdaptiveLR):
self.scheduler.step(np.mean(kl_divergences))
# record data
self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches))
if self._entropy_loss_scale:
self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Policy / Standard deviation", stddev.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler._lr)
| 26,032 | Python | 46.679487 | 139 | 0.589083 |
Toni-SM/skrl/skrl/agents/jax/rpo/rpo.py | from typing import Any, Mapping, Optional, Tuple, Union
import copy
import functools
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl.agents.jax import Agent
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.resources.optimizers.jax import Adam
from skrl.resources.schedulers.jax import KLAdaptiveLR
# [start-config-dict-jax]
RPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"alpha": 0.5, # amount of uniform random perturbation on the mean actions: U(-alpha, alpha)
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 1.0, # value loss scaling factor
"kl_threshold": 0, # KL divergence threshold for early stopping
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
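# Illustrative sketch of overriding the defaults (values and variable names are arbitrary):
#
#   cfg = RPO_DEFAULT_CONFIG.copy()
#   cfg["rollouts"] = 1024
#   cfg["learning_epochs"] = 10
#   cfg["alpha"] = 0.5
#   cfg["learning_rate_scheduler"] = KLAdaptiveLR
#   cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}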
def compute_gae(rewards: np.ndarray,
dones: np.ndarray,
values: np.ndarray,
next_values: np.ndarray,
discount_factor: float = 0.99,
                lambda_coefficient: float = 0.95) -> Tuple[np.ndarray, np.ndarray]:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: np.ndarray
:param dones: Signals to indicate that episodes have ended
:type dones: np.ndarray
:param values: Values obtained by the agent
:type values: np.ndarray
:param next_values: Next values obtained by the agent
:type next_values: np.ndarray
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
    :return: Returns and advantages (Generalized Advantage Estimator)
    :rtype: tuple of np.ndarray
"""
advantage = 0
advantages = np.zeros_like(rewards)
not_dones = np.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
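# Worked sketch of compute_gae (arbitrary numbers, single environment):
#
#   rewards = np.array([[1.0], [1.0]])          # shape (memory_size, 1)
#   dones = np.array([[0.0], [0.0]])
#   values = np.array([[0.5], [0.5]])
#   next_values = np.array([0.5])               # bootstrap value for the last step
#   returns, advantages = compute_gae(rewards, dones, values, next_values)
#   # advantages are standardized (zero mean, unit variance) before being returned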
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@jax.jit
def _compute_gae(rewards: jax.Array,
dones: jax.Array,
values: jax.Array,
next_values: jax.Array,
discount_factor: float = 0.99,
                 lambda_coefficient: float = 0.95) -> Tuple[jax.Array, jax.Array]:
advantage = 0
advantages = jnp.zeros_like(rewards)
not_dones = jnp.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages = advantages.at[i].set(advantage)
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
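# Note: under jax.jit the reversed Python loop above is unrolled at trace time, so the
# compiled computation (and compilation time) grows with the rollout length (memory_size).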
@functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale"))
def _update_policy(policy_act,
policy_state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
ratio_clip,
get_entropy,
entropy_loss_scale,
alpha):
# compute policy loss
def _policy_loss(params):
_, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions, "alpha": alpha}, "policy", params)
# compute approximate KL divergence
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean()
# compute policy loss
ratio = jnp.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
# compute entropy loss
entropy_loss = 0
if entropy_loss_scale:
entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean()
return -jnp.minimum(surrogate, surrogate_clipped).mean(), (entropy_loss, kl_divergence, outputs["stddev"])
(policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params)
return grad, policy_loss, entropy_loss, kl_divergence, stddev
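# Illustration of the clipped surrogate above (hypothetical numbers): with ratio_clip = 0.2,
# a probability ratio of 1.5 is clipped to 1.2, so for a positive advantage A the objective
# uses min(1.5 * A, 1.2 * A) = 1.2 * A, bounding the size of the policy update. The extra
# `alpha` argument is forwarded to the policy, which RPO uses to perturb the mean of the
# action distribution with uniform noise U(-alpha, alpha) (see RPO_DEFAULT_CONFIG above).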
@functools.partial(jax.jit, static_argnames=("value_act", "clip_predicted_values"))
def _update_value(value_act,
value_state_dict,
sampled_states,
sampled_values,
sampled_returns,
value_loss_scale,
clip_predicted_values,
value_clip,
alpha):
# compute value loss
def _value_loss(params):
predicted_values, _, _ = value_act({"states": sampled_states, "alpha": alpha}, "value", params)
if clip_predicted_values:
predicted_values = sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip)
return value_loss_scale * ((sampled_returns - predicted_values) ** 2).mean()
value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params)
return grad, value_loss
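# Illustration of the optional value clipping above (hypothetical numbers): with
# value_clip = 0.2, a new prediction of 1.5 against a stored value of 1.0 is clipped to 1.2
# before the squared error w.r.t. the return is computed, limiting how far the value
# function can drift from the estimates used when the rollout was collected.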
class RPO(Agent):
def __init__(self,
models: Mapping[str, Model],
memory: Optional[Union[Memory, Tuple[Memory]]] = None,
observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Robust Policy Optimization (RPO)
https://arxiv.org/abs/2212.07536
:param models: Models used by the agent
:type models: dictionary of skrl.models.jax.Model
        :param memory: Memory to store the transitions.
If it is a tuple, the first element will be used for training and
for the rest only the environment transitions will be added
:type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None
:param observation_space: Observation/state space or shape (default: ``None``)
:type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: ``None``)
:type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:raises KeyError: If the models dictionary is missing a required key
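        Example (illustrative sketch; ``models``, ``memory``, ``cfg`` and ``env`` are assumed
        to be already defined)::
            >>> from skrl.agents.jax.rpo import RPO, RPO_DEFAULT_CONFIG
            >>> cfg = RPO_DEFAULT_CONFIG.copy()
            >>> agent = RPO(models=models,
            ...             memory=memory,
            ...             cfg=cfg,
            ...             observation_space=env.observation_space,
            ...             action_space=env.action_space,
            ...             device=env.device)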
"""
        # _cfg = copy.deepcopy(RPO_DEFAULT_CONFIG)  # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = RPO_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(models=models,
memory=memory,
observation_space=observation_space,
action_space=action_space,
device=device,
cfg=_cfg)
# models
self.policy = self.models.get("policy", None)
self.value = self.models.get("value", None)
# checkpoint models
self.checkpoint_modules["policy"] = self.policy
self.checkpoint_modules["value"] = self.value
# configuration
self._learning_epochs = self.cfg["learning_epochs"]
self._mini_batches = self.cfg["mini_batches"]
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self.cfg["grad_norm_clip"]
self._ratio_clip = self.cfg["ratio_clip"]
self._value_clip = self.cfg["value_clip"]
self._clip_predicted_values = self.cfg["clip_predicted_values"]
self._value_loss_scale = self.cfg["value_loss_scale"]
self._entropy_loss_scale = self.cfg["entropy_loss_scale"]
self._kl_threshold = self.cfg["kl_threshold"]
self._learning_rate = self.cfg["learning_rate"]
self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"]
self._state_preprocessor = self.cfg["state_preprocessor"]
self._value_preprocessor = self.cfg["value_preprocessor"]
self._alpha = self.cfg["alpha"]
self._discount_factor = self.cfg["discount_factor"]
self._lambda = self.cfg["lambda"]
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"]
# set up optimizer and learning rate scheduler
if self.policy is not None and self.value is not None:
# scheduler
scale = True
self.scheduler = None
if self._learning_rate_scheduler is not None:
if self._learning_rate_scheduler == KLAdaptiveLR:
scale = False
self.scheduler = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"])
else:
self._learning_rate = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"])
# optimizer
self.policy_optimizer = Adam(model=self.policy, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale)
self.value_optimizer = Adam(model=self.value, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale)
self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer
self.checkpoint_modules["value_optimizer"] = self.value_optimizer
# set up preprocessors
if self._state_preprocessor:
self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"])
self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor
else:
self._state_preprocessor = self._empty_preprocessor
if self._value_preprocessor:
self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"])
self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor
else:
self._value_preprocessor = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memory
if self.memory is not None:
self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32)
self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32)
self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8)
self.memory.create_tensor(name="log_prob", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="values", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="returns", size=1, dtype=jnp.float32)
self.memory.create_tensor(name="advantages", size=1, dtype=jnp.float32)
# tensors sampled during training
self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"]
# create temporary variables needed for storage and computation
self._current_log_prob = None
self._current_next_states = None
# set up models for just-in-time compilation with XLA
self.policy.apply = jax.jit(self.policy.apply, static_argnums=2)
if self.value is not None:
self.value.apply = jax.jit(self.value.apply, static_argnums=2)
def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
# sample random actions
# TODO, check for stochasticity
if timestep < self._random_timesteps:
return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy")
# sample stochastic actions
actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), "alpha": self._alpha}, role="policy")
if not self._jax: # numpy backend
actions = jax.device_get(actions)
log_prob = jax.device_get(log_prob)
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: Union[np.ndarray, jax.Array],
actions: Union[np.ndarray, jax.Array],
rewards: Union[np.ndarray, jax.Array],
next_states: Union[np.ndarray, jax.Array],
terminated: Union[np.ndarray, jax.Array],
truncated: Union[np.ndarray, jax.Array],
infos: Any,
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: Any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memory is not None:
self._current_next_states = next_states
# reward shaping
if self._rewards_shaper is not None:
rewards = self._rewards_shaper(rewards, timestep, timesteps)
# compute values
values, _, _ = self.value.act({"states": self._state_preprocessor(states), "alpha": self._alpha}, role="value")
if not self._jax: # numpy backend
values = jax.device_get(values)
values = self._value_preprocessor(values, inverse=True)
            # time-limit (truncation) bootstrapping
if self._time_limit_bootstrap:
rewards += self._discount_factor * values * truncated
            # store the transition in memory
self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
for memory in self.secondary_memories:
memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states,
terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
# compute returns and advantages
self.value.training = False
last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states), "alpha": self._alpha}, role="value") # TODO: .float()
self.value.training = True
if not self._jax: # numpy backend
last_values = jax.device_get(last_values)
last_values = self._value_preprocessor(last_values, inverse=True)
values = self.memory.get_tensor_by_name("values")
if self._jax:
returns, advantages = _compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
else:
returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"),
dones=self.memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor,
lambda_coefficient=self._lambda)
self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True))
self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True))
self.memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches)
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs):
kl_divergences = []
# mini-batches loop
for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches:
sampled_states = self._state_preprocessor(sampled_states, train=not epoch)
# compute policy loss
grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(self.policy.act,
self.policy.state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
self._ratio_clip,
self.policy.get_entropy,
self._entropy_loss_scale,
self._alpha)
kl_divergences.append(kl_divergence.item())
# early stopping with KL divergence
if self._kl_threshold and kl_divergence > self._kl_threshold:
break
# optimization step (policy)
self.policy_optimizer = self.policy_optimizer.step(grad, self.policy, self.scheduler._lr if self.scheduler else None)
# compute value loss
grad, value_loss = _update_value(self.value.act,
self.value.state_dict,
sampled_states,
sampled_values,
sampled_returns,
self._value_loss_scale,
self._clip_predicted_values,
self._value_clip,
self._alpha)
# optimization step (value)
self.value_optimizer = self.value_optimizer.step(grad, self.value, self.scheduler._lr if self.scheduler else None)
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
if self._learning_rate_scheduler:
if isinstance(self.scheduler, KLAdaptiveLR):
self.scheduler.step(np.mean(kl_divergences))
# record data
self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches))
if self._entropy_loss_scale:
self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches))
self.track_data("Policy / Standard deviation", stddev.mean().item())
if self._learning_rate_scheduler:
self.track_data("Learning / Learning rate", self.scheduler._lr)
| 26,496 | Python | 47.001812 | 161 | 0.585409 |
Toni-SM/skrl/skrl/agents/jax/rpo/__init__.py | from skrl.agents.jax.rpo.rpo import RPO, RPO_DEFAULT_CONFIG
| 60 | Python | 29.499985 | 59 | 0.8 |
Toni-SM/skrl/skrl/resources/schedulers/torch/kl_adaptive.py | from typing import Optional, Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
class KLAdaptiveLR(_LRScheduler):
def __init__(self,
optimizer: torch.optim.Optimizer,
kl_threshold: float = 0.008,
min_lr: float = 1e-6,
max_lr: float = 1e-2,
kl_factor: float = 2,
lr_factor: float = 1.5,
last_epoch: int = -1,
verbose: bool = False) -> None:
"""Adaptive KL scheduler
Adjusts the learning rate according to the KL divergence.
The implementation is adapted from the rl_games library
(https://github.com/Denys88/rl_games/blob/master/rl_games/common/schedulers.py)
.. note::
This scheduler is only available for PPO at the moment.
Applying it to other agents will not change the learning rate
Example::
>>> scheduler = KLAdaptiveLR(optimizer, kl_threshold=0.01)
>>> for epoch in range(100):
>>> # ...
>>> kl_divergence = ...
>>> scheduler.step(kl_divergence)
:param optimizer: Wrapped optimizer
:type optimizer: torch.optim.Optimizer
:param kl_threshold: Threshold for KL divergence (default: ``0.008``)
:type kl_threshold: float, optional
:param min_lr: Lower bound for learning rate (default: ``1e-6``)
:type min_lr: float, optional
:param max_lr: Upper bound for learning rate (default: ``1e-2``)
:type max_lr: float, optional
:param kl_factor: The number used to modify the KL divergence threshold (default: ``2``)
:type kl_factor: float, optional
:param lr_factor: The number used to modify the learning rate (default: ``1.5``)
:type lr_factor: float, optional
:param last_epoch: The index of last epoch (default: ``-1``)
:type last_epoch: int, optional
:param verbose: Verbose mode (default: ``False``)
:type verbose: bool, optional
"""
super().__init__(optimizer, last_epoch, verbose)
self.kl_threshold = kl_threshold
self.min_lr = min_lr
self.max_lr = max_lr
self._kl_factor = kl_factor
self._lr_factor = lr_factor
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
def step(self, kl: Optional[Union[torch.Tensor, float]] = None, epoch: Optional[int] = None) -> None:
"""
Step scheduler
Example::
>>> kl = torch.distributions.kl_divergence(p, q)
>>> kl
tensor([0.0332, 0.0500, 0.0383, ..., 0.0076, 0.0240, 0.0164])
>>> scheduler.step(kl.mean())
>>> kl = 0.0046
>>> scheduler.step(kl)
:param kl: KL divergence (default: ``None``)
If None, no adjustment is made.
If tensor, the number of elements must be 1
:type kl: torch.Tensor, float or None, optional
:param epoch: Epoch (default: ``None``)
:type epoch: int, optional
"""
if kl is not None:
for group in self.optimizer.param_groups:
if kl > self.kl_threshold * self._kl_factor:
group['lr'] = max(group['lr'] / self._lr_factor, self.min_lr)
elif kl < self.kl_threshold / self._kl_factor:
group['lr'] = min(group['lr'] * self._lr_factor, self.max_lr)
self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
| 3,588 | Python | 38.010869 | 105 | 0.557971 |
Toni-SM/skrl/skrl/resources/schedulers/torch/__init__.py | from skrl.resources.schedulers.torch.kl_adaptive import KLAdaptiveLR
KLAdaptiveRL = KLAdaptiveLR # known typo (compatibility with versions prior to 1.0.0)
| 158 | Python | 30.799994 | 86 | 0.810127 |
Toni-SM/skrl/skrl/resources/schedulers/jax/kl_adaptive.py | from typing import Optional, Union
import numpy as np
class KLAdaptiveLR:
def __init__(self,
init_value: float,
kl_threshold: float = 0.008,
min_lr: float = 1e-6,
max_lr: float = 1e-2,
kl_factor: float = 2,
lr_factor: float = 1.5) -> None:
"""Adaptive KL scheduler
Adjusts the learning rate according to the KL divergence.
The implementation is adapted from the rl_games library
(https://github.com/Denys88/rl_games/blob/master/rl_games/common/schedulers.py)
.. note::
This scheduler is only available for PPO at the moment.
Applying it to other agents will not change the learning rate
Example::
>>> scheduler = KLAdaptiveLR(init_value=1e-3, kl_threshold=0.01)
>>> for epoch in range(100):
>>> # ...
>>> kl_divergence = ...
>>> scheduler.step(kl_divergence)
>>> scheduler.lr # get the updated learning rate
:param init_value: Initial learning rate
:type init_value: float
:param kl_threshold: Threshold for KL divergence (default: ``0.008``)
:type kl_threshold: float, optional
:param min_lr: Lower bound for learning rate (default: ``1e-6``)
:type min_lr: float, optional
:param max_lr: Upper bound for learning rate (default: ``1e-2``)
:type max_lr: float, optional
:param kl_factor: The number used to modify the KL divergence threshold (default: ``2``)
:type kl_factor: float, optional
:param lr_factor: The number used to modify the learning rate (default: ``1.5``)
:type lr_factor: float, optional
"""
self.kl_threshold = kl_threshold
self.min_lr = min_lr
self.max_lr = max_lr
self._kl_factor = kl_factor
self._lr_factor = lr_factor
self._lr = init_value
@property
def lr(self) -> float:
"""Learning rate
"""
return self._lr
def step(self, kl: Optional[Union[np.ndarray, float]] = None) -> None:
"""
Step scheduler
Example::
>>> kl = [0.0332, 0.0500, 0.0383, 0.0456, 0.0076, 0.0240, 0.0164]
>>> kl
[0.0332, 0.05, 0.0383, 0.0456, 0.0076, 0.024, 0.0164]
>>> scheduler.step(np.mean(kl))
>>> kl = 0.0046
>>> scheduler.step(kl)
:param kl: KL divergence (default: ``None``)
If None, no adjustment is made.
If array, the number of elements must be 1
:type kl: np.ndarray, float or None, optional
"""
if kl is not None:
if kl > self.kl_threshold * self._kl_factor:
self._lr = max(self._lr / self._lr_factor, self.min_lr)
elif kl < self.kl_threshold / self._kl_factor:
self._lr = min(self._lr * self._lr_factor, self.max_lr)
# Alias to maintain naming compatibility with Optax schedulers
# https://optax.readthedocs.io/en/latest/api.html#schedules
kl_adaptive = KLAdaptiveLR
| 3,168 | Python | 34.211111 | 96 | 0.553662 |
Toni-SM/skrl/skrl/resources/schedulers/jax/__init__.py | from skrl.resources.schedulers.jax.kl_adaptive import KLAdaptiveLR, kl_adaptive
KLAdaptiveRL = KLAdaptiveLR # known typo (compatibility with versions prior to 1.0.0)
| 169 | Python | 32.999993 | 86 | 0.804734 |
Toni-SM/skrl/skrl/resources/preprocessors/torch/running_standard_scaler.py | from typing import Optional, Tuple, Union
import gym
import gymnasium
import numpy as np
import torch
import torch.nn as nn
class RunningStandardScaler(nn.Module):
def __init__(self,
size: Union[int, Tuple[int], gym.Space, gymnasium.Space],
epsilon: float = 1e-8,
clip_threshold: float = 5.0,
device: Optional[Union[str, torch.device]] = None) -> None:
"""Standardize the input data by removing the mean and scaling by the standard deviation
The implementation is adapted from the rl_games library
(https://github.com/Denys88/rl_games/blob/master/rl_games/algos_torch/running_mean_std.py)
Example::
>>> running_standard_scaler = RunningStandardScaler(size=2)
>>> data = torch.rand(3, 2) # tensor of shape (N, 2)
>>> running_standard_scaler(data)
tensor([[0.1954, 0.3356],
[0.9719, 0.4163],
[0.8540, 0.1982]])
:param size: Size of the input space
:type size: int, tuple or list of integers, gym.Space, or gymnasium.Space
:param epsilon: Small number to avoid division by zero (default: ``1e-8``)
:type epsilon: float
:param clip_threshold: Threshold to clip the data (default: ``5.0``)
:type clip_threshold: float
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
"""
super().__init__()
self.epsilon = epsilon
self.clip_threshold = clip_threshold
if device is None:
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = torch.device(device)
size = self._get_space_size(size)
self.register_buffer("running_mean", torch.zeros(size, dtype=torch.float64, device=self.device))
self.register_buffer("running_variance", torch.ones(size, dtype=torch.float64, device=self.device))
self.register_buffer("current_count", torch.ones((), dtype=torch.float64, device=self.device))
def _get_space_size(self, space: Union[int, Tuple[int], gym.Space, gymnasium.Space]) -> int:
"""Get the size (number of elements) of a space
:param space: Space or shape from which to obtain the number of elements
:type space: int, tuple or list of integers, gym.Space, or gymnasium.Space
:raises ValueError: If the space is not supported
        :return: Size of the space (number of elements)
        :rtype: int
"""
if type(space) in [int, float]:
return int(space)
elif type(space) in [tuple, list]:
return np.prod(space)
elif issubclass(type(space), gym.Space):
if issubclass(type(space), gym.spaces.Discrete):
return 1
elif issubclass(type(space), gym.spaces.Box):
return np.prod(space.shape)
elif issubclass(type(space), gym.spaces.Dict):
return sum([self._get_space_size(space.spaces[key]) for key in space.spaces])
elif issubclass(type(space), gymnasium.Space):
if issubclass(type(space), gymnasium.spaces.Discrete):
return 1
elif issubclass(type(space), gymnasium.spaces.Box):
return np.prod(space.shape)
elif issubclass(type(space), gymnasium.spaces.Dict):
return sum([self._get_space_size(space.spaces[key]) for key in space.spaces])
raise ValueError(f"Space type {type(space)} not supported")
def _parallel_variance(self, input_mean: torch.Tensor, input_var: torch.Tensor, input_count: int) -> None:
"""Update internal variables using the parallel algorithm for computing variance
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
:param input_mean: Mean of the input data
:type input_mean: torch.Tensor
:param input_var: Variance of the input data
:type input_var: torch.Tensor
:param input_count: Batch size of the input data
:type input_count: int
"""
delta = input_mean - self.running_mean
total_count = self.current_count + input_count
M2 = (self.running_variance * self.current_count) + (input_var * input_count) \
+ delta ** 2 * self.current_count * input_count / total_count
# update internal variables
self.running_mean = self.running_mean + delta * input_count / total_count
self.running_variance = M2 / total_count
self.current_count = total_count
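    # Sketch of the merge performed above: two batches with statistics (mean_a, var_a, n_a)
    # and (mean_b, var_b, n_b) are combined without storing samples:
    #   delta = mean_b - mean_a
    #   mean  = mean_a + delta * n_b / (n_a + n_b)
    #   M2    = var_a * n_a + var_b * n_b + delta ** 2 * n_a * n_b / (n_a + n_b)
    #   var   = M2 / (n_a + n_b)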
def _compute(self, x: torch.Tensor, train: bool = False, inverse: bool = False) -> torch.Tensor:
"""Compute the standardization of the input data
:param x: Input tensor
:type x: torch.Tensor
:param train: Whether to train the standardizer (default: ``False``)
:type train: bool, optional
:param inverse: Whether to inverse the standardizer to scale back the data (default: ``False``)
:type inverse: bool, optional
:return: Standardized tensor
:rtype: torch.Tensor
"""
if train:
if x.dim() == 3:
self._parallel_variance(torch.mean(x, dim=(0, 1)), torch.var(x, dim=(0, 1)), x.shape[0] * x.shape[1])
else:
self._parallel_variance(torch.mean(x, dim=0), torch.var(x, dim=0), x.shape[0])
# scale back the data to the original representation
if inverse:
return torch.sqrt(self.running_variance.float()) \
* torch.clamp(x, min=-self.clip_threshold, max=self.clip_threshold) + self.running_mean.float()
# standardization by centering and scaling
return torch.clamp((x - self.running_mean.float()) / (torch.sqrt(self.running_variance.float()) + self.epsilon),
min=-self.clip_threshold,
max=self.clip_threshold)
def forward(self,
x: torch.Tensor,
train: bool = False,
inverse: bool = False,
no_grad: bool = True) -> torch.Tensor:
"""Forward pass of the standardizer
Example::
>>> x = torch.rand(3, 2, device="cuda:0")
>>> running_standard_scaler(x)
tensor([[0.6933, 0.1905],
[0.3806, 0.3162],
[0.1140, 0.0272]], device='cuda:0')
>>> running_standard_scaler(x, train=True)
tensor([[ 0.8681, -0.6731],
[ 0.0560, -0.3684],
[-0.6360, -1.0690]], device='cuda:0')
>>> running_standard_scaler(x, inverse=True)
tensor([[0.6260, 0.5468],
[0.5056, 0.5987],
[0.4029, 0.4795]], device='cuda:0')
:param x: Input tensor
:type x: torch.Tensor
:param train: Whether to train the standardizer (default: ``False``)
:type train: bool, optional
:param inverse: Whether to inverse the standardizer to scale back the data (default: ``False``)
:type inverse: bool, optional
:param no_grad: Whether to disable the gradient computation (default: ``True``)
:type no_grad: bool, optional
:return: Standardized tensor
:rtype: torch.Tensor
"""
if no_grad:
with torch.no_grad():
return self._compute(x, train, inverse)
return self._compute(x, train, inverse)
| 7,719 | Python | 42.370786 | 120 | 0.588807 |
Toni-SM/skrl/skrl/resources/preprocessors/torch/__init__.py | from skrl.resources.preprocessors.torch.running_standard_scaler import RunningStandardScaler
| 93 | Python | 45.999977 | 92 | 0.892473 |
Toni-SM/skrl/skrl/resources/preprocessors/jax/running_standard_scaler.py | from typing import Mapping, Optional, Tuple, Union
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl import config
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@jax.jit
def _copyto(dst, src):
"""NumPy function copyto not yet implemented
"""
return dst.at[:].set(src)
@jax.jit
def _parallel_variance(running_mean: jax.Array,
running_variance: jax.Array,
current_count: jax.Array,
array: jax.Array) -> Tuple[jax.Array, jax.Array, jax.Array]: # yapf: disable
# ddof = 1: https://github.com/pytorch/pytorch/issues/50010
if array.ndim == 3:
input_mean = jnp.mean(array, axis=(0, 1))
input_var = jnp.var(array, axis=(0, 1), ddof=1)
input_count = array.shape[0] * array.shape[1]
else:
input_mean = jnp.mean(array, axis=0)
input_var = jnp.var(array, axis=0, ddof=1)
input_count = array.shape[0]
delta = input_mean - running_mean
total_count = current_count + input_count
M2 = (running_variance * current_count) + (input_var * input_count) \
+ delta ** 2 * current_count * input_count / total_count
return running_mean + delta * input_count / total_count, M2 / total_count, total_count
@jax.jit
def _inverse(running_mean: jax.Array,
running_variance: jax.Array,
clip_threshold: float,
array: jax.Array) -> jax.Array: # yapf: disable
return jnp.sqrt(running_variance) * jnp.clip(array, -clip_threshold, clip_threshold) + running_mean
@jax.jit
def _standardization(running_mean: jax.Array,
running_variance: jax.Array,
clip_threshold: float,
epsilon: float,
array: jax.Array) -> jax.Array:
return jnp.clip((array - running_mean) / (jnp.sqrt(running_variance) + epsilon), -clip_threshold, clip_threshold)
class RunningStandardScaler:
def __init__(self,
size: Union[int, Tuple[int], gym.Space, gymnasium.Space],
epsilon: float = 1e-8,
clip_threshold: float = 5.0,
device: Optional[Union[str, jax.Device]] = None) -> None:
"""Standardize the input data by removing the mean and scaling by the standard deviation
The implementation is adapted from the rl_games library
(https://github.com/Denys88/rl_games/blob/master/rl_games/algos_torch/running_mean_std.py)
Example::
>>> running_standard_scaler = RunningStandardScaler(size=2)
>>> data = jax.random.uniform(jax.random.PRNGKey(0), (3,2)) # tensor of shape (N, 2)
>>> running_standard_scaler(data)
Array([[0.57450044, 0.09968603],
[0.7419659 , 0.8941783 ],
[0.59656656, 0.45325184]], dtype=float32)
:param size: Size of the input space
:type size: int, tuple or list of integers, gym.Space, or gymnasium.Space
:param epsilon: Small number to avoid division by zero (default: ``1e-8``)
:type epsilon: float
:param clip_threshold: Threshold to clip the data (default: ``5.0``)
:type clip_threshold: float
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
"""
self._jax = config.jax.backend == "jax"
self.epsilon = epsilon
self.clip_threshold = clip_threshold
if device is None:
self.device = jax.devices()[0]
else:
self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0]
size = self._get_space_size(size)
if self._jax:
self.running_mean = jnp.zeros(size, dtype=jnp.float32)
self.running_variance = jnp.ones(size, dtype=jnp.float32)
self.current_count = jnp.ones((1,), dtype=jnp.float32)
else:
self.running_mean = np.zeros(size, dtype=np.float32)
self.running_variance = np.ones(size, dtype=np.float32)
self.current_count = np.ones((1,), dtype=np.float32)
@property
def state_dict(self) -> Mapping[str, Union[np.ndarray, jax.Array]]:
"""Dictionary containing references to the whole state of the module
"""
class _StateDict:
def __init__(self, params):
self.params = params
def replace(self, params):
return params
return _StateDict({
"running_mean": self.running_mean,
"running_variance": self.running_variance,
"current_count": self.current_count
})
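    # Note: the minimal _StateDict wrapper above mimics the ``.params`` / ``.replace``
    # interface of flax-style state containers, presumably so the scaler can be saved and
    # loaded by the same checkpointing code as the models.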
@state_dict.setter
def state_dict(self, value: Mapping[str, Union[np.ndarray, jax.Array]]) -> None:
if self._jax:
self.running_mean = _copyto(self.running_mean, value["running_mean"])
self.running_variance = _copyto(self.running_variance, value["running_variance"])
self.current_count = _copyto(self.current_count, value["current_count"])
else:
np.copyto(self.running_mean, value["running_mean"])
np.copyto(self.running_variance, value["running_variance"])
np.copyto(self.current_count, value["current_count"])
def _get_space_size(self, space: Union[int, Tuple[int], gym.Space, gymnasium.Space]) -> int:
"""Get the size (number of elements) of a space
:param space: Space or shape from which to obtain the number of elements
:type space: int, tuple or list of integers, gym.Space, or gymnasium.Space
:raises ValueError: If the space is not supported
        :return: Size of the space (number of elements)
        :rtype: int
"""
if type(space) in [int, float]:
return int(space)
elif type(space) in [tuple, list]:
return np.prod(space)
elif issubclass(type(space), gym.Space):
if issubclass(type(space), gym.spaces.Discrete):
return 1
elif issubclass(type(space), gym.spaces.Box):
return np.prod(space.shape)
elif issubclass(type(space), gym.spaces.Dict):
return sum([self._get_space_size(space.spaces[key]) for key in space.spaces])
elif issubclass(type(space), gymnasium.Space):
if issubclass(type(space), gymnasium.spaces.Discrete):
return 1
elif issubclass(type(space), gymnasium.spaces.Box):
return np.prod(space.shape)
elif issubclass(type(space), gymnasium.spaces.Dict):
return sum([self._get_space_size(space.spaces[key]) for key in space.spaces])
raise ValueError(f"Space type {type(space)} not supported")
def _parallel_variance(self,
input_mean: Union[np.ndarray, jax.Array],
input_var: Union[np.ndarray, jax.Array],
input_count: int) -> None:
"""Update internal variables using the parallel algorithm for computing variance
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
:param input_mean: Mean of the input data
:type input_mean: np.ndarray or jax.Array
:param input_var: Variance of the input data
:type input_var: np.ndarray or jax.Array
:param input_count: Batch size of the input data
:type input_count: int
"""
delta = input_mean - self.running_mean
total_count = self.current_count + input_count
M2 = (self.running_variance * self.current_count) + (input_var * input_count) \
+ delta ** 2 * self.current_count * input_count / total_count
# update internal variables
self.running_mean = self.running_mean + delta * input_count / total_count
self.running_variance = M2 / total_count
self.current_count = total_count
def __call__(self,
x: Union[np.ndarray, jax.Array],
train: bool = False,
inverse: bool = False) -> Union[np.ndarray, jax.Array]:
"""Forward pass of the standardizer
Example::
>>> x = jax.random.uniform(jax.random.PRNGKey(0), (3,2))
>>> running_standard_scaler(x)
Array([[0.57450044, 0.09968603],
[0.7419659 , 0.8941783 ],
[0.59656656, 0.45325184]], dtype=float32)
>>> running_standard_scaler(x, train=True)
Array([[ 0.167439 , -0.4292293 ],
[ 0.45878986, 0.8719094 ],
[ 0.20582889, 0.14980486]], dtype=float32)
>>> running_standard_scaler(x, inverse=True)
Array([[0.80847514, 0.4226486 ],
[0.9047325 , 0.90777594],
[0.8211585 , 0.6385405 ]], dtype=float32)
:param x: Input tensor
:type x: np.ndarray or jax.Array
:param train: Whether to train the standardizer (default: ``False``)
:type train: bool, optional
:param inverse: Whether to inverse the standardizer to scale back the data (default: ``False``)
:type inverse: bool, optional
:return: Standardized tensor
:rtype: np.ndarray or jax.Array
"""
if train:
if self._jax:
self.running_mean, self.running_variance, self.current_count = \
_parallel_variance(self.running_mean, self.running_variance, self.current_count, x)
else:
# ddof = 1: https://github.com/pytorch/pytorch/issues/50010
if x.ndim == 3:
self._parallel_variance(np.mean(x, axis=(0, 1)),
np.var(x, axis=(0, 1), ddof=1),
x.shape[0] * x.shape[1])
else:
self._parallel_variance(np.mean(x, axis=0), np.var(x, axis=0, ddof=1), x.shape[0])
# scale back the data to the original representation
if inverse:
if self._jax:
return _inverse(self.running_mean, self.running_variance, self.clip_threshold, x)
return np.sqrt(self.running_variance) * np.clip(x, -self.clip_threshold,
self.clip_threshold) + self.running_mean
# standardization by centering and scaling
if self._jax:
return _standardization(self.running_mean, self.running_variance, self.clip_threshold, self.epsilon, x)
return np.clip((x - self.running_mean) / (np.sqrt(self.running_variance) + self.epsilon),
a_min=-self.clip_threshold,
a_max=self.clip_threshold)
| 10,976 | Python | 42.216535 | 117 | 0.580813 |
Toni-SM/skrl/skrl/resources/preprocessors/jax/__init__.py | from skrl.resources.preprocessors.jax.running_standard_scaler import RunningStandardScaler
| 91 | Python | 44.999978 | 90 | 0.89011 |
Toni-SM/skrl/skrl/resources/noises/torch/base.py | from typing import Optional, Tuple, Union
import torch
class Noise():
def __init__(self, device: Optional[Union[str, torch.device]] = None) -> None:
"""Base class representing a noise
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
Custom noises should override the ``sample`` method::
import torch
from skrl.resources.noises.torch import Noise
class CustomNoise(Noise):
def __init__(self, device=None):
super().__init__(device)
def sample(self, size):
return torch.rand(size, device=self.device)
"""
if device is None:
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
else:
self.device = torch.device(device)
def sample_like(self, tensor: torch.Tensor) -> torch.Tensor:
"""Sample a noise with the same size (shape) as the input tensor
This method will call the sampling method as follows ``.sample(tensor.shape)``
:param tensor: Input tensor used to determine output tensor size (shape)
:type tensor: torch.Tensor
:return: Sampled noise
:rtype: torch.Tensor
Example::
>>> x = torch.rand(3, 2, device="cuda:0")
>>> noise.sample_like(x)
tensor([[-0.0423, -0.1325],
[-0.0639, -0.0957],
[-0.1367, 0.1031]], device='cuda:0')
"""
return self.sample(tensor.shape)
def sample(self, size: Union[Tuple[int], torch.Size]) -> torch.Tensor:
"""Noise sampling method to be implemented by the inheriting classes
:param size: Shape of the sampled tensor
:type size: tuple or list of int, or torch.Size
:raises NotImplementedError: The method is not implemented by the inheriting classes
:return: Sampled noise
:rtype: torch.Tensor
"""
raise NotImplementedError("The sampling method (.sample()) is not implemented")
| 2,241 | Python | 34.031249 | 98 | 0.58456 |
Toni-SM/skrl/skrl/resources/noises/torch/ornstein_uhlenbeck.py | from typing import Optional, Tuple, Union
import torch
from torch.distributions import Normal
from skrl.resources.noises.torch import Noise
class OrnsteinUhlenbeckNoise(Noise):
def __init__(self,
theta: float,
sigma: float,
base_scale: float,
mean: float = 0,
std: float = 1,
device: Optional[Union[str, torch.device]] = None) -> None:
"""Class representing an Ornstein-Uhlenbeck noise
:param theta: Factor to apply to current internal state
:type theta: float
:param sigma: Factor to apply to the normal distribution
:type sigma: float
:param base_scale: Factor to apply to returned noise
:type base_scale: float
:param mean: Mean of the normal distribution (default: ``0.0``)
:type mean: float, optional
:param std: Standard deviation of the normal distribution (default: ``1.0``)
:type std: float, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
Example::
>>> noise = OrnsteinUhlenbeckNoise(theta=0.1, sigma=0.2, base_scale=0.5)
"""
super().__init__(device)
self.state = 0
self.theta = theta
self.sigma = sigma
self.base_scale = base_scale
self.distribution = Normal(loc=torch.tensor(mean, device=self.device, dtype=torch.float32),
scale=torch.tensor(std, device=self.device, dtype=torch.float32))
def sample(self, size: Union[Tuple[int], torch.Size]) -> torch.Tensor:
"""Sample an Ornstein-Uhlenbeck noise
:param size: Shape of the sampled tensor
:type size: tuple or list of int, or torch.Size
:return: Sampled noise
:rtype: torch.Tensor
Example::
>>> noise.sample((3, 2))
tensor([[-0.0452, 0.0162],
[ 0.0649, -0.0708],
[-0.0211, 0.0066]], device='cuda:0')
>>> x = torch.rand(3, 2, device="cuda:0")
>>> noise.sample(x.shape)
tensor([[-0.0540, 0.0461],
[ 0.1117, -0.1157],
[-0.0074, 0.0420]], device='cuda:0')
"""
if hasattr(self.state, "shape") and self.state.shape != torch.Size(size):
self.state = 0
self.state += -self.state * self.theta + self.sigma * self.distribution.sample(size)
return self.base_scale * self.state
| 2,696 | Python | 35.445945 | 100 | 0.560831 |
Toni-SM/skrl/skrl/resources/noises/torch/__init__.py | from skrl.resources.noises.torch.base import Noise # isort:skip
from skrl.resources.noises.torch.gaussian import GaussianNoise
from skrl.resources.noises.torch.ornstein_uhlenbeck import OrnsteinUhlenbeckNoise
| 211 | Python | 41.399992 | 81 | 0.853081 |
Toni-SM/skrl/skrl/resources/noises/torch/gaussian.py | from typing import Optional, Tuple, Union
import torch
from torch.distributions import Normal
from skrl.resources.noises.torch import Noise
class GaussianNoise(Noise):
def __init__(self, mean: float, std: float, device: Optional[Union[str, torch.device]] = None) -> None:
"""Class representing a Gaussian noise
:param mean: Mean of the normal distribution
:type mean: float
:param std: Standard deviation of the normal distribution
:type std: float
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
Example::
>>> noise = GaussianNoise(mean=0, std=1)
"""
super().__init__(device)
self.distribution = Normal(loc=torch.tensor(mean, device=self.device, dtype=torch.float32),
scale=torch.tensor(std, device=self.device, dtype=torch.float32))
def sample(self, size: Union[Tuple[int], torch.Size]) -> torch.Tensor:
"""Sample a Gaussian noise
:param size: Shape of the sampled tensor
:type size: tuple or list of int, or torch.Size
:return: Sampled noise
:rtype: torch.Tensor
Example::
>>> noise.sample((3, 2))
tensor([[-0.4901, 1.3357],
[-1.2141, 0.3323],
[-0.0889, -1.1651]], device='cuda:0')
>>> x = torch.rand(3, 2, device="cuda:0")
>>> noise.sample(x.shape)
tensor([[0.5398, 1.2009],
[0.0307, 1.3065],
[0.2082, 0.6116]], device='cuda:0')
"""
return self.distribution.sample(size)
| 1,820 | Python | 33.35849 | 107 | 0.564835 |
Toni-SM/skrl/skrl/resources/noises/jax/base.py | from typing import Optional, Tuple, Union
import jax
import numpy as np
from skrl import config
class Noise():
def __init__(self, device: Optional[Union[str, jax.Device]] = None) -> None:
"""Base class representing a noise
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
Custom noises should override the ``sample`` method::
import jax
from skrl.resources.noises.jax import Noise
class CustomNoise(Noise):
def __init__(self, device=None):
super().__init__(device)
def sample(self, size):
return jax.random.uniform(jax.random.PRNGKey(0), size)
"""
self._jax = config.jax.backend == "jax"
if device is None:
self.device = jax.devices()[0]
else:
self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0]
def sample_like(self, tensor: Union[np.ndarray, jax.Array]) -> Union[np.ndarray, jax.Array]:
"""Sample a noise with the same size (shape) as the input tensor
This method will call the sampling method as follows ``.sample(tensor.shape)``
:param tensor: Input tensor used to determine output tensor size (shape)
:type tensor: np.ndarray or jax.Array
:return: Sampled noise
:rtype: np.ndarray or jax.Array
Example::
>>> x = jax.random.uniform(jax.random.PRNGKey(0), (3, 2))
>>> noise.sample_like(x)
Array([[0.57450044, 0.09968603],
[0.7419659 , 0.8941783 ],
[0.59656656, 0.45325184]], dtype=float32)
"""
return self.sample(tensor.shape)
def sample(self, size: Tuple[int]) -> Union[np.ndarray, jax.Array]:
"""Noise sampling method to be implemented by the inheriting classes
:param size: Shape of the sampled tensor
:type size: tuple or list of int
:raises NotImplementedError: The method is not implemented by the inheriting classes
:return: Sampled noise
:rtype: np.ndarray or jax.Array
"""
raise NotImplementedError("The sampling method (.sample()) is not implemented")
| 2,413 | Python | 33.985507 | 98 | 0.596353 |
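Following the pattern shown in the base-class docstring, a custom noise only needs to override sample. The sketch below is illustrative (simplified key handling, not part of the library):

import jax

from skrl.resources.noises.jax import Noise

class UniformNoise(Noise):
    def __init__(self, low=-1.0, high=1.0, device=None):
        super().__init__(device)
        self.low = low
        self.high = high
        self._key = jax.random.PRNGKey(0)  # illustrative; the library draws its key from config.jax.key

    def sample(self, size):
        # split the key so successive calls return different samples
        self._key, subkey = jax.random.split(self._key)
        return jax.random.uniform(subkey, size, minval=self.low, maxval=self.high)

noise = UniformNoise(low=-0.5, high=0.5)
samples = noise.sample((3, 2))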
Toni-SM/skrl/skrl/resources/noises/jax/ornstein_uhlenbeck.py | from typing import Optional, Tuple, Union
from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
from skrl import config
from skrl.resources.noises.jax import Noise
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@partial(jax.jit, static_argnames=("shape"))
def _sample(theta, sigma, state, mean, std, key, iterator, shape):
subkey = jax.random.fold_in(key, iterator)
return state * theta + sigma * (jax.random.normal(subkey, shape) * std + mean)
class OrnsteinUhlenbeckNoise(Noise):
def __init__(self,
theta: float,
sigma: float,
base_scale: float,
mean: float = 0,
std: float = 1,
device: Optional[Union[str, jax.Device]] = None) -> None:
"""Class representing an Ornstein-Uhlenbeck noise
:param theta: Factor to apply to current internal state
:type theta: float
:param sigma: Factor to apply to the normal distribution
:type sigma: float
:param base_scale: Factor to apply to returned noise
:type base_scale: float
:param mean: Mean of the normal distribution (default: ``0.0``)
:type mean: float, optional
:param std: Standard deviation of the normal distribution (default: ``1.0``)
:type std: float, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
Example::
>>> noise = OrnsteinUhlenbeckNoise(theta=0.1, sigma=0.2, base_scale=0.5)
"""
super().__init__(device)
self.state = 0
self.theta = theta
self.sigma = sigma
self.base_scale = base_scale
if self._jax:
self.mean = jnp.array(mean)
self.std = jnp.array(std)
self._i = 0
self._key = config.jax.key
else:
self.mean = np.array(mean)
self.std = np.array(std)
def sample(self, size: Tuple[int]) -> Union[np.ndarray, jax.Array]:
"""Sample an Ornstein-Uhlenbeck noise
:param size: Shape of the sampled tensor
:type size: tuple or list of int
:return: Sampled noise
:rtype: np.ndarray or jax.Array
Example::
>>> noise.sample((3, 2))
Array([[ 0.01878439, -0.12833427],
[ 0.06494182, 0.12490594],
[ 0.024447 , -0.01174496]], dtype=float32)
>>> x = jax.random.uniform(jax.random.PRNGKey(0), (3, 2))
>>> noise.sample(x.shape)
Array([[ 0.17988093, -1.2289404 ],
[ 0.6218886 , 1.1961104 ],
[ 0.23410667, -0.11247082]], dtype=float32)
"""
if hasattr(self.state, "shape") and self.state.shape != size:
self.state = 0
if self._jax:
self._i += 1
self.state = _sample(self.theta, self.sigma, self.state, self.mean, self.std, self._key, self._i, size)
else:
self.state += -self.state * self.theta + self.sigma * np.random.normal(self.mean, self.std, size)
return self.base_scale * self.state
| 3,360 | Python | 34.378947 | 115 | 0.569048 |
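Usage sketch for the class above: successive calls reuse the internal state, so the returned noise is temporally correlated, and the state is reset to zero whenever the requested shape changes (per the hasattr check in sample).

from skrl.resources.noises.jax import OrnsteinUhlenbeckNoise

noise = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.2, base_scale=0.5)
a = noise.sample((3, 2))  # first draw, internal state starts at 0
b = noise.sample((3, 2))  # correlated with the previous draw
c = noise.sample((6, 2))  # shape changed -> internal state reset before sampling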
Toni-SM/skrl/skrl/resources/noises/jax/__init__.py | from skrl.resources.noises.jax.base import Noise # isort:skip
from skrl.resources.noises.jax.gaussian import GaussianNoise
from skrl.resources.noises.jax.ornstein_uhlenbeck import OrnsteinUhlenbeckNoise
| 205 | Python | 40.199992 | 79 | 0.84878 |
Toni-SM/skrl/skrl/resources/noises/jax/gaussian.py | from typing import Optional, Tuple, Union
from functools import partial
import jax
import jax.numpy as jnp
import numpy as np
from skrl import config
from skrl.resources.noises.jax import Noise
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@partial(jax.jit, static_argnames=("shape"))
def _sample(mean, std, key, iterator, shape):
subkey = jax.random.fold_in(key, iterator)
return jax.random.normal(subkey, shape) * std + mean
class GaussianNoise(Noise):
def __init__(self, mean: float, std: float, device: Optional[Union[str, jax.Device]] = None) -> None:
"""Class representing a Gaussian noise
:param mean: Mean of the normal distribution
:type mean: float
:param std: Standard deviation of the normal distribution
:type std: float
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
Example::
>>> noise = GaussianNoise(mean=0, std=1)
"""
super().__init__(device)
if self._jax:
self._i = 0
self._key = config.jax.key
self.mean = jnp.array(mean)
self.std = jnp.array(std)
else:
self.mean = np.array(mean)
self.std = np.array(std)
def sample(self, size: Tuple[int]) -> Union[np.ndarray, jax.Array]:
"""Sample a Gaussian noise
:param size: Shape of the sampled tensor
:type size: tuple or list of int
:return: Sampled noise
:rtype: np.ndarray or jax.Array
Example::
>>> noise.sample((3, 2))
Array([[ 0.01878439, -0.12833427],
[ 0.06494182, 0.12490594],
[ 0.024447 , -0.01174496]], dtype=float32)
>>> x = jax.random.uniform(jax.random.PRNGKey(0), (3, 2))
>>> noise.sample(x.shape)
Array([[ 0.17988093, -1.2289404 ],
[ 0.6218886 , 1.1961104 ],
[ 0.23410667, -0.11247082]], dtype=float32)
"""
if self._jax:
self._i += 1
return _sample(self.mean, self.std, self._key, self._i, size)
return np.random.normal(self.mean, self.std, size)
| 2,395 | Python | 31.821917 | 105 | 0.573278 |
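The JIT-compiled _sample helper above avoids threading PRNG keys by folding a per-call counter into a fixed key. The standalone sketch below illustrates that pattern (the key is illustrative; the library takes it from config.jax.key):

import jax

key = jax.random.PRNGKey(42)
draws = []
for i in range(1, 4):
    # fold_in(key, i) deterministically derives a distinct subkey per counter value
    subkey = jax.random.fold_in(key, i)
    draws.append(jax.random.normal(subkey, (2,)) * 0.1 + 0.0)  # std=0.1, mean=0.0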
Toni-SM/skrl/skrl/resources/optimizers/jax/__init__.py | from skrl.resources.optimizers.jax.adam import Adam
| 52 | Python | 25.499987 | 51 | 0.846154 |
Toni-SM/skrl/skrl/resources/optimizers/jax/adam.py | from typing import Optional
import functools
import flax
import jax
import optax
from skrl.models.jax import Model
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@functools.partial(jax.jit, static_argnames=("transformation"))
def _step(transformation, grad, state, state_dict):
# optax transform
params, optimizer_state = transformation.update(grad, state, state_dict.params)
# apply transformation
params = optax.apply_updates(state_dict.params, params)
return optimizer_state, state_dict.replace(params=params)
@functools.partial(jax.jit, static_argnames=("transformation"))
def _step_with_scale(transformation, grad, state, state_dict, scale):
# optax transform
params, optimizer_state = transformation.update(grad, state, state_dict.params)
# custom scale
# https://optax.readthedocs.io/en/latest/api.html?#optax.scale
params = jax.tree_util.tree_map(lambda params: scale * params, params)
# apply transformation
params = optax.apply_updates(state_dict.params, params)
return optimizer_state, state_dict.replace(params=params)
class Adam:
def __new__(cls, model: Model, lr: float = 1e-3, grad_norm_clip: float = 0, scale: bool = True) -> "Optimizer":
"""Adam optimizer
Adapted from `Optax's Adam <https://optax.readthedocs.io/en/latest/api.html?#adam>`_
to support custom scale (learning rate)
:param model: Model
:type model: skrl.models.jax.Model
:param lr: Learning rate (default: ``1e-3``)
:type lr: float, optional
:param grad_norm_clip: Clipping coefficient for the norm of the gradients (default: ``0``).
Disabled if less than or equal to zero
:type grad_norm_clip: float, optional
:param scale: Whether to instantiate the optimizer as-is or remove the scaling step (default: ``True``).
Remove the scaling step if a custom learning rate is to be applied during optimization steps
:type scale: bool, optional
:return: Adam optimizer
:rtype: flax.struct.PyTreeNode
Example::
>>> optimizer = Adam(model=policy, lr=5e-4)
            >>> # step the optimizer given a computed gradient (grad)
>>> optimizer = optimizer.step(grad, policy)
# apply custom learning rate during optimization steps
>>> optimizer = Adam(model=policy, lr=5e-4, scale=False)
            >>> # step the optimizer given a computed gradient and an updated learning rate (lr)
>>> optimizer = optimizer.step(grad, policy, lr)
"""
class Optimizer(flax.struct.PyTreeNode):
"""Optimizer
This class is the result of isolating the Optax optimizer,
which is mixed with the model parameters, from Flax's TrainState class
https://flax.readthedocs.io/en/latest/api_reference/flax.training.html#train-state
"""
transformation: optax.GradientTransformation = flax.struct.field(pytree_node=False)
state: optax.OptState = flax.struct.field(pytree_node=True)
@classmethod
def _create(cls, *, transformation, state, **kwargs):
return cls(transformation=transformation, state=state, **kwargs)
def step(self, grad: jax.Array, model: Model, lr: Optional[float] = None) -> "Optimizer":
"""Performs a single optimization step
:param grad: Gradients
:type grad: jax.Array
:param model: Model
:type model: skrl.models.jax.Model
:param lr: Learning rate.
If given, a scale optimization step will be performed
:type lr: float, optional
:return: Optimizer
:rtype: flax.struct.PyTreeNode
"""
if lr is None:
optimizer_state, model.state_dict = _step(self.transformation, grad, self.state, model.state_dict)
else:
optimizer_state, model.state_dict = _step_with_scale(self.transformation, grad, self.state, model.state_dict, -lr)
return self.replace(state=optimizer_state)
# default optax transformation
if scale:
transformation = optax.adam(learning_rate=lr)
# optax transformation without scaling step
else:
transformation = optax.scale_by_adam()
# clip updates using their global norm
if grad_norm_clip > 0:
transformation = optax.chain(optax.clip_by_global_norm(grad_norm_clip), transformation)
return Optimizer._create(transformation=transformation, state=transformation.init(model.state_dict.params))
| 4,818 | Python | 41.646017 | 134 | 0.634703 |
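A minimal sketch of one optimization step with the Adam wrapper above. The loss is a placeholder and `policy` is a hypothetical skrl JAX model; the only facts taken from the code are that the model exposes state_dict.params and that Optimizer.step(grad, model) returns the updated optimizer.

import jax
import jax.numpy as jnp

def l2_loss(params):
    # placeholder objective: sum of squared parameters (purely illustrative)
    return sum(jnp.sum(jnp.square(leaf)) for leaf in jax.tree_util.tree_leaves(params))

# optimizer = Adam(model=policy, lr=5e-4, grad_norm_clip=0.5)
# grad = jax.grad(l2_loss)(policy.state_dict.params)
# optimizer = optimizer.step(grad, policy)   # or .step(grad, policy, lr) when created with scale=False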
Toni-SM/skrl/skrl/trainers/torch/base.py | from typing import List, Optional, Union
import atexit
import sys
import tqdm
import torch
from skrl import logger
from skrl.agents.torch import Agent
from skrl.envs.wrappers.torch import Wrapper
def generate_equally_spaced_scopes(num_envs: int, num_simultaneous_agents: int) -> List[int]:
"""Generate a list of equally spaced scopes for the agents
:param num_envs: Number of environments
:type num_envs: int
:param num_simultaneous_agents: Number of simultaneous agents
:type num_simultaneous_agents: int
:raises ValueError: If the number of simultaneous agents is greater than the number of environments
:return: List of equally spaced scopes
:rtype: List[int]
"""
scopes = [int(num_envs / num_simultaneous_agents)] * num_simultaneous_agents
if sum(scopes):
scopes[-1] += num_envs - sum(scopes)
else:
raise ValueError(f"The number of simultaneous agents ({num_simultaneous_agents}) is greater than the number of environments ({num_envs})")
return scopes
class Trainer:
def __init__(self,
env: Wrapper,
agents: Union[Agent, List[Agent]],
agents_scope: Optional[List[int]] = None,
cfg: Optional[dict] = None) -> None:
"""Base class for trainers
:param env: Environment to train on
:type env: skrl.envs.wrappers.torch.Wrapper
:param agents: Agents to train
:type agents: Union[Agent, List[Agent]]
:param agents_scope: Number of environments for each agent to train on (default: ``None``)
:type agents_scope: tuple or list of int, optional
:param cfg: Configuration dictionary (default: ``None``)
:type cfg: dict, optional
"""
self.cfg = cfg if cfg is not None else {}
self.env = env
self.agents = agents
self.agents_scope = agents_scope if agents_scope is not None else []
# get configuration
self.timesteps = self.cfg.get("timesteps", 0)
self.headless = self.cfg.get("headless", False)
self.disable_progressbar = self.cfg.get("disable_progressbar", False)
self.close_environment_at_exit = self.cfg.get("close_environment_at_exit", True)
self.initial_timestep = 0
# setup agents
self.num_simultaneous_agents = 0
self._setup_agents()
# register environment closing if configured
if self.close_environment_at_exit:
@atexit.register
def close_env():
logger.info("Closing environment")
self.env.close()
logger.info("Environment closed")
def __str__(self) -> str:
"""Generate a string representation of the trainer
:return: Representation of the trainer as string
:rtype: str
"""
string = f"Trainer: {self}"
string += f"\n |-- Number of parallelizable environments: {self.env.num_envs}"
string += f"\n |-- Number of simultaneous agents: {self.num_simultaneous_agents}"
string += "\n |-- Agents and scopes:"
if self.num_simultaneous_agents > 1:
for agent, scope in zip(self.agents, self.agents_scope):
string += f"\n | |-- agent: {type(agent)}"
string += f"\n | | |-- scope: {scope[1] - scope[0]} environments ({scope[0]}:{scope[1]})"
else:
string += f"\n | |-- agent: {type(self.agents)}"
string += f"\n | | |-- scope: {self.env.num_envs} environment(s)"
return string
def _setup_agents(self) -> None:
"""Setup agents for training
:raises ValueError: Invalid setup
"""
# validate agents and their scopes
if type(self.agents) in [tuple, list]:
# single agent
if len(self.agents) == 1:
self.num_simultaneous_agents = 1
self.agents = self.agents[0]
self.agents_scope = [1]
# parallel agents
elif len(self.agents) > 1:
self.num_simultaneous_agents = len(self.agents)
# check scopes
if not len(self.agents_scope):
logger.warning("The agents' scopes are empty, they will be generated as equal as possible")
self.agents_scope = [int(self.env.num_envs / len(self.agents))] * len(self.agents)
if sum(self.agents_scope):
self.agents_scope[-1] += self.env.num_envs - sum(self.agents_scope)
else:
raise ValueError(f"The number of agents ({len(self.agents)}) is greater than the number of parallelizable environments ({self.env.num_envs})")
elif len(self.agents_scope) != len(self.agents):
raise ValueError(f"The number of agents ({len(self.agents)}) doesn't match the number of scopes ({len(self.agents_scope)})")
elif sum(self.agents_scope) != self.env.num_envs:
raise ValueError(f"The scopes ({sum(self.agents_scope)}) don't cover the number of parallelizable environments ({self.env.num_envs})")
# generate agents' scopes
index = 0
for i in range(len(self.agents_scope)):
index += self.agents_scope[i]
self.agents_scope[i] = (index - self.agents_scope[i], index)
else:
raise ValueError("A list of agents is expected")
else:
self.num_simultaneous_agents = 1
def train(self) -> None:
"""Train the agents
:raises NotImplementedError: Not implemented
"""
raise NotImplementedError
def eval(self) -> None:
"""Evaluate the agents
:raises NotImplementedError: Not implemented
"""
raise NotImplementedError
def single_agent_train(self) -> None:
"""Train agent
        This method executes the following steps in a loop:
- Pre-interaction
- Compute actions
- Interact with the environments
- Render scene
- Record transitions
- Post-interaction
- Reset environments
"""
assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents"
assert self.env.num_agents == 1, "This method is not allowed for multi-agents"
# reset env
states, infos = self.env.reset()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# pre-interaction
self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps)
# compute actions
with torch.no_grad():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
# record the environments' transitions
self.agents.record_transition(states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
# post-interaction
self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
if self.env.num_envs > 1:
states = next_states
else:
if terminated.any() or truncated.any():
with torch.no_grad():
states, infos = self.env.reset()
else:
states = next_states
def single_agent_eval(self) -> None:
"""Evaluate agent
        This method executes the following steps in a loop:
- Compute actions (sequentially)
- Interact with the environments
- Render scene
- Reset environments
"""
assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents"
assert self.env.num_agents == 1, "This method is not allowed for multi-agents"
# reset env
states, infos = self.env.reset()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# compute actions
with torch.no_grad():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
# write data to TensorBoard
self.agents.record_transition(states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
if self.env.num_envs > 1:
states = next_states
else:
if terminated.any() or truncated.any():
with torch.no_grad():
states, infos = self.env.reset()
else:
states = next_states
def multi_agent_train(self) -> None:
"""Train multi-agents
        This method executes the following steps in a loop:
- Pre-interaction
- Compute actions
- Interact with the environments
- Render scene
- Record transitions
- Post-interaction
- Reset environments
"""
assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents"
assert self.env.num_agents > 1, "This method is not allowed for single-agent"
# reset env
states, infos = self.env.reset()
shared_states = infos.get("shared_states", None)
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# pre-interaction
self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps)
# compute actions
with torch.no_grad():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
shared_next_states = infos.get("shared_states", None)
infos["shared_states"] = shared_states
infos["shared_next_states"] = shared_next_states
# render scene
if not self.headless:
self.env.render()
# record the environments' transitions
self.agents.record_transition(states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
# post-interaction
self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
with torch.no_grad():
if not self.env.agents:
states, infos = self.env.reset()
shared_states = infos.get("shared_states", None)
else:
states = next_states
shared_states = shared_next_states
def multi_agent_eval(self) -> None:
"""Evaluate multi-agents
        This method executes the following steps in a loop:
- Compute actions (sequentially)
- Interact with the environments
- Render scene
- Reset environments
"""
assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents"
assert self.env.num_agents > 1, "This method is not allowed for single-agent"
# reset env
states, infos = self.env.reset()
shared_states = infos.get("shared_states", None)
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# compute actions
with torch.no_grad():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
shared_next_states = infos.get("shared_states", None)
infos["shared_states"] = shared_states
infos["shared_next_states"] = shared_next_states
# render scene
if not self.headless:
self.env.render()
# write data to TensorBoard
self.agents.record_transition(states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
if not self.env.agents:
states, infos = self.env.reset()
shared_states = infos.get("shared_states", None)
else:
states = next_states
shared_states = shared_next_states
| 15,410 | Python | 40.539083 | 166 | 0.53355 |
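A worked example for generate_equally_spaced_scopes defined above: the remainder of the integer division is added to the last agent's scope.

from skrl.trainers.torch import generate_equally_spaced_scopes

print(generate_equally_spaced_scopes(num_envs=10, num_simultaneous_agents=3))  # [3, 3, 4]
print(generate_equally_spaced_scopes(num_envs=12, num_simultaneous_agents=4))  # [3, 3, 3, 3]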
Toni-SM/skrl/skrl/trainers/torch/__init__.py | from skrl.trainers.torch.base import Trainer, generate_equally_spaced_scopes # isort:skip
from skrl.trainers.torch.parallel import ParallelTrainer
from skrl.trainers.torch.sequential import SequentialTrainer
from skrl.trainers.torch.step import StepTrainer
| 259 | Python | 42.333326 | 90 | 0.849421 |
Toni-SM/skrl/skrl/trainers/torch/step.py | from typing import Any, List, Optional, Tuple, Union
import copy
import sys
import tqdm
import torch
from skrl.agents.torch import Agent
from skrl.envs.wrappers.torch import Wrapper
from skrl.trainers.torch import Trainer
# [start-config-dict-torch]
STEP_TRAINER_DEFAULT_CONFIG = {
"timesteps": 100000, # number of timesteps to train for
"headless": False, # whether to use headless mode (no rendering)
"disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY
"close_environment_at_exit": True, # whether to close the environment on normal program termination
}
# [end-config-dict-torch]
class StepTrainer(Trainer):
def __init__(self,
env: Wrapper,
agents: Union[Agent, List[Agent]],
agents_scope: Optional[List[int]] = None,
cfg: Optional[dict] = None) -> None:
"""Step-by-step trainer
Train agents by controlling the training/evaluation loop step by step
:param env: Environment to train on
:type env: skrl.envs.wrappers.torch.Wrapper
:param agents: Agents to train
:type agents: Union[Agent, List[Agent]]
:param agents_scope: Number of environments for each agent to train on (default: ``None``)
:type agents_scope: tuple or list of int, optional
:param cfg: Configuration dictionary (default: ``None``).
See STEP_TRAINER_DEFAULT_CONFIG for default values
:type cfg: dict, optional
"""
_cfg = copy.deepcopy(STEP_TRAINER_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
agents_scope = agents_scope if agents_scope is not None else []
super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
# init agents
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.init(trainer_cfg=self.cfg)
else:
self.agents.init(trainer_cfg=self.cfg)
self._timestep = 0
self._progress = None
self.states = None
def train(self, timestep: Optional[int] = None, timesteps: Optional[int] = None) -> \
Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
"""Execute a training iteration
This method executes the following steps once:
- Pre-interaction (sequentially if num_simultaneous_agents > 1)
- Compute actions (sequentially if num_simultaneous_agents > 1)
- Interact with the environments
- Render scene
- Record transitions (sequentially if num_simultaneous_agents > 1)
- Post-interaction (sequentially if num_simultaneous_agents > 1)
- Reset environments
:param timestep: Current timestep (default: ``None``).
If None, the current timestep will be carried by an internal variable
:type timestep: int, optional
:param timesteps: Total number of timesteps (default: ``None``).
If None, the total number of timesteps is obtained from the trainer's config
:type timesteps: int, optional
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of torch.Tensor and any other info
"""
if timestep is None:
self._timestep += 1
timestep = self._timestep
timesteps = self.timesteps if timesteps is None else timesteps
if self._progress is None:
self._progress = tqdm.tqdm(total=timesteps, disable=self.disable_progressbar, file=sys.stdout)
self._progress.update(n=1)
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("train")
else:
self.agents.set_running_mode("train")
# reset env
if self.states is None:
self.states, infos = self.env.reset()
if self.num_simultaneous_agents == 1:
# pre-interaction
self.agents.pre_interaction(timestep=timestep, timesteps=timesteps)
# compute actions
with torch.no_grad():
actions = self.agents.act(self.states, timestep=timestep, timesteps=timesteps)[0]
else:
# pre-interaction
for agent in self.agents:
agent.pre_interaction(timestep=timestep, timesteps=timesteps)
# compute actions
with torch.no_grad():
actions = torch.vstack([agent.act(self.states[scope[0]:scope[1]], timestep=timestep, timesteps=timesteps)[0] \
for agent, scope in zip(self.agents, self.agents_scope)])
with torch.no_grad():
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
if self.num_simultaneous_agents == 1:
# record the environments' transitions
with torch.no_grad():
self.agents.record_transition(states=self.states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=timesteps)
# post-interaction
self.agents.post_interaction(timestep=timestep, timesteps=timesteps)
else:
# record the environments' transitions
with torch.no_grad():
for agent, scope in zip(self.agents, self.agents_scope):
agent.record_transition(states=self.states[scope[0]:scope[1]],
actions=actions[scope[0]:scope[1]],
rewards=rewards[scope[0]:scope[1]],
next_states=next_states[scope[0]:scope[1]],
terminated=terminated[scope[0]:scope[1]],
truncated=truncated[scope[0]:scope[1]],
infos=infos,
timestep=timestep,
timesteps=timesteps)
# post-interaction
for agent in self.agents:
agent.post_interaction(timestep=timestep, timesteps=timesteps)
# reset environments
with torch.no_grad():
if terminated.any() or truncated.any():
self.states, infos = self.env.reset()
else:
self.states = next_states
return next_states, rewards, terminated, truncated, infos
def eval(self, timestep: Optional[int] = None, timesteps: Optional[int] = None) -> \
Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]:
"""Evaluate the agents sequentially
This method executes the following steps in loop:
- Compute actions (sequentially if num_simultaneous_agents > 1)
- Interact with the environments
- Render scene
- Reset environments
:param timestep: Current timestep (default: ``None``).
If None, the current timestep will be carried by an internal variable
:type timestep: int, optional
:param timesteps: Total number of timesteps (default: ``None``).
If None, the total number of timesteps is obtained from the trainer's config
:type timesteps: int, optional
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of torch.Tensor and any other info
"""
if timestep is None:
self._timestep += 1
timestep = self._timestep
timesteps = self.timesteps if timesteps is None else timesteps
if self._progress is None:
self._progress = tqdm.tqdm(total=timesteps, disable=self.disable_progressbar, file=sys.stdout)
self._progress.update(n=1)
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("eval")
else:
self.agents.set_running_mode("eval")
# reset env
if self.states is None:
self.states, infos = self.env.reset()
with torch.no_grad():
if self.num_simultaneous_agents == 1:
# compute actions
actions = self.agents.act(self.states, timestep=timestep, timesteps=timesteps)[0]
else:
# compute actions
actions = torch.vstack([agent.act(self.states[scope[0]:scope[1]], timestep=timestep, timesteps=timesteps)[0] \
for agent, scope in zip(self.agents, self.agents_scope)])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
if self.num_simultaneous_agents == 1:
# write data to TensorBoard
self.agents.record_transition(states=self.states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=timesteps)
super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=timesteps)
else:
# write data to TensorBoard
for agent, scope in zip(self.agents, self.agents_scope):
agent.record_transition(states=self.states[scope[0]:scope[1]],
actions=actions[scope[0]:scope[1]],
rewards=rewards[scope[0]:scope[1]],
next_states=next_states[scope[0]:scope[1]],
terminated=terminated[scope[0]:scope[1]],
truncated=truncated[scope[0]:scope[1]],
infos=infos,
timestep=timestep,
timesteps=timesteps)
super(type(agent), agent).post_interaction(timestep=timestep, timesteps=timesteps)
# reset environments
if terminated.any() or truncated.any():
self.states, infos = self.env.reset()
else:
self.states = next_states
return next_states, rewards, terminated, truncated, infos
| 11,519 | Python | 42.308271 | 126 | 0.537894 |
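A minimal sketch of driving the StepTrainer above from user code; `env` and `agent` are assumed to be an already-wrapped environment and a configured skrl agent.

from skrl.trainers.torch import StepTrainer

cfg = {"timesteps": 5000, "headless": True}
trainer = StepTrainer(env=env, agents=agent, cfg=cfg)

for timestep in range(cfg["timesteps"]):
    # each call performs a single interaction and returns the transition,
    # so custom logic can be inserted between environment steps
    next_states, rewards, terminated, truncated, infos = trainer.train(timestep=timestep)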
Toni-SM/skrl/skrl/trainers/torch/parallel.py | from typing import List, Optional, Union
import copy
import sys
import tqdm
import torch
import torch.multiprocessing as mp
from skrl.agents.torch import Agent
from skrl.envs.wrappers.torch import Wrapper
from skrl.trainers.torch import Trainer
# [start-config-dict-torch]
PARALLEL_TRAINER_DEFAULT_CONFIG = {
"timesteps": 100000, # number of timesteps to train for
"headless": False, # whether to use headless mode (no rendering)
"disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY
"close_environment_at_exit": True, # whether to close the environment on normal program termination
}
# [end-config-dict-torch]
def fn_processor(process_index, *args):
print(f"[INFO] Processor {process_index}: started")
pipe = args[0][process_index]
queue = args[1][process_index]
barrier = args[2]
scope = args[3][process_index]
trainer_cfg = args[4]
agent = None
_states = None
_actions = None
# wait for the main process to start all the workers
barrier.wait()
while True:
msg = pipe.recv()
task = msg['task']
# terminate process
if task == 'terminate':
break
# initialize agent
elif task == 'init':
agent = queue.get()
agent.init(trainer_cfg=trainer_cfg)
print(f"[INFO] Processor {process_index}: init agent {type(agent).__name__} with scope {scope}")
barrier.wait()
# execute agent's pre-interaction step
elif task == "pre_interaction":
agent.pre_interaction(timestep=msg['timestep'], timesteps=msg['timesteps'])
barrier.wait()
# get agent's actions
elif task == "act":
_states = queue.get()[scope[0]:scope[1]]
with torch.no_grad():
_actions = agent.act(_states, timestep=msg['timestep'], timesteps=msg['timesteps'])[0]
if not _actions.is_cuda:
_actions.share_memory_()
queue.put(_actions)
barrier.wait()
# record agent's experience
elif task == "record_transition":
with torch.no_grad():
agent.record_transition(states=_states,
actions=_actions,
rewards=queue.get()[scope[0]:scope[1]],
next_states=queue.get()[scope[0]:scope[1]],
terminated=queue.get()[scope[0]:scope[1]],
truncated=queue.get()[scope[0]:scope[1]],
infos=queue.get(),
timestep=msg['timestep'],
timesteps=msg['timesteps'])
barrier.wait()
# execute agent's post-interaction step
elif task == "post_interaction":
agent.post_interaction(timestep=msg['timestep'], timesteps=msg['timesteps'])
barrier.wait()
# write data to TensorBoard (evaluation)
elif task == "eval-record_transition-post_interaction":
with torch.no_grad():
agent.record_transition(states=_states,
actions=_actions,
rewards=queue.get()[scope[0]:scope[1]],
next_states=queue.get()[scope[0]:scope[1]],
terminated=queue.get()[scope[0]:scope[1]],
truncated=queue.get()[scope[0]:scope[1]],
infos=queue.get(),
timestep=msg['timestep'],
timesteps=msg['timesteps'])
super(type(agent), agent).post_interaction(timestep=msg['timestep'], timesteps=msg['timesteps'])
barrier.wait()
class ParallelTrainer(Trainer):
def __init__(self,
env: Wrapper,
agents: Union[Agent, List[Agent]],
agents_scope: Optional[List[int]] = None,
cfg: Optional[dict] = None) -> None:
"""Parallel trainer
Train agents in parallel using multiple processes
:param env: Environment to train on
:type env: skrl.envs.wrappers.torch.Wrapper
:param agents: Agents to train
:type agents: Union[Agent, List[Agent]]
:param agents_scope: Number of environments for each agent to train on (default: ``None``)
:type agents_scope: tuple or list of int, optional
:param cfg: Configuration dictionary (default: ``None``).
See PARALLEL_TRAINER_DEFAULT_CONFIG for default values
:type cfg: dict, optional
"""
_cfg = copy.deepcopy(PARALLEL_TRAINER_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
agents_scope = agents_scope if agents_scope is not None else []
super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
mp.set_start_method(method='spawn', force=True)
def train(self) -> None:
"""Train the agents in parallel
        This method executes the following steps in a loop:
        - Pre-interaction (in parallel)
- Compute actions (in parallel)
- Interact with the environments
- Render scene
- Record transitions (in parallel)
- Post-interaction (in parallel)
- Reset environments
"""
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("train")
else:
self.agents.set_running_mode("train")
# non-simultaneous agents
if self.num_simultaneous_agents == 1:
self.agents.init(trainer_cfg=self.cfg)
# single-agent
if self.env.num_agents == 1:
self.single_agent_train()
# multi-agent
else:
self.multi_agent_train()
return
# initialize multiprocessing variables
queues = []
producer_pipes = []
consumer_pipes = []
barrier = mp.Barrier(self.num_simultaneous_agents + 1)
processes = []
for i in range(self.num_simultaneous_agents):
pipe_read, pipe_write = mp.Pipe(duplex=False)
producer_pipes.append(pipe_write)
consumer_pipes.append(pipe_read)
queues.append(mp.Queue())
# move tensors to shared memory
for agent in self.agents:
if agent.memory is not None:
agent.memory.share_memory()
for model in agent.models.values():
try:
model.share_memory()
except RuntimeError:
pass
# spawn and wait for all processes to start
for i in range(self.num_simultaneous_agents):
process = mp.Process(target=fn_processor,
args=(i, consumer_pipes, queues, barrier, self.agents_scope, self.cfg),
daemon=True)
processes.append(process)
process.start()
barrier.wait()
# initialize agents
for pipe, queue, agent in zip(producer_pipes, queues, self.agents):
pipe.send({'task': 'init'})
queue.put(agent)
barrier.wait()
# reset env
states, infos = self.env.reset()
if not states.is_cuda:
states.share_memory_()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# pre-interaction
for pipe in producer_pipes:
pipe.send({"task": "pre_interaction", "timestep": timestep, "timesteps": self.timesteps})
barrier.wait()
# compute actions
with torch.no_grad():
for pipe, queue in zip(producer_pipes, queues):
pipe.send({"task": "act", "timestep": timestep, "timesteps": self.timesteps})
queue.put(states)
barrier.wait()
actions = torch.vstack([queue.get() for queue in queues])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
# record the environments' transitions
if not rewards.is_cuda:
rewards.share_memory_()
if not next_states.is_cuda:
next_states.share_memory_()
if not terminated.is_cuda:
terminated.share_memory_()
if not truncated.is_cuda:
truncated.share_memory_()
for pipe, queue in zip(producer_pipes, queues):
pipe.send({"task": "record_transition", "timestep": timestep, "timesteps": self.timesteps})
queue.put(rewards)
queue.put(next_states)
queue.put(terminated)
queue.put(truncated)
queue.put(infos)
barrier.wait()
# post-interaction
for pipe in producer_pipes:
pipe.send({"task": "post_interaction", "timestep": timestep, "timesteps": self.timesteps})
barrier.wait()
# reset environments
with torch.no_grad():
if terminated.any() or truncated.any():
states, infos = self.env.reset()
if not states.is_cuda:
states.share_memory_()
else:
states.copy_(next_states)
# terminate processes
for pipe in producer_pipes:
pipe.send({"task": "terminate"})
# join processes
for process in processes:
process.join()
def eval(self) -> None:
"""Evaluate the agents sequentially
This method executes the following steps in loop:
- Compute actions (in parallel)
- Interact with the environments
- Render scene
- Reset environments
"""
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("eval")
else:
self.agents.set_running_mode("eval")
# non-simultaneous agents
if self.num_simultaneous_agents == 1:
self.agents.init(trainer_cfg=self.cfg)
# single-agent
if self.env.num_agents == 1:
self.single_agent_eval()
# multi-agent
else:
self.multi_agent_eval()
return
# initialize multiprocessing variables
queues = []
producer_pipes = []
consumer_pipes = []
barrier = mp.Barrier(self.num_simultaneous_agents + 1)
processes = []
for i in range(self.num_simultaneous_agents):
pipe_read, pipe_write = mp.Pipe(duplex=False)
producer_pipes.append(pipe_write)
consumer_pipes.append(pipe_read)
queues.append(mp.Queue())
# move tensors to shared memory
for agent in self.agents:
if agent.memory is not None:
agent.memory.share_memory()
for model in agent.models.values():
if model is not None:
try:
model.share_memory()
except RuntimeError:
pass
# spawn and wait for all processes to start
for i in range(self.num_simultaneous_agents):
process = mp.Process(target=fn_processor,
args=(i, consumer_pipes, queues, barrier, self.agents_scope, self.cfg),
daemon=True)
processes.append(process)
process.start()
barrier.wait()
# initialize agents
for pipe, queue, agent in zip(producer_pipes, queues, self.agents):
pipe.send({'task': 'init'})
queue.put(agent)
barrier.wait()
# reset env
states, infos = self.env.reset()
if not states.is_cuda:
states.share_memory_()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# compute actions
with torch.no_grad():
for pipe, queue in zip(producer_pipes, queues):
pipe.send({"task": "act", "timestep": timestep, "timesteps": self.timesteps})
queue.put(states)
barrier.wait()
actions = torch.vstack([queue.get() for queue in queues])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
# write data to TensorBoard
if not rewards.is_cuda:
rewards.share_memory_()
if not next_states.is_cuda:
next_states.share_memory_()
if not terminated.is_cuda:
terminated.share_memory_()
if not truncated.is_cuda:
truncated.share_memory_()
for pipe, queue in zip(producer_pipes, queues):
pipe.send({"task": "eval-record_transition-post_interaction",
"timestep": timestep,
"timesteps": self.timesteps})
queue.put(rewards)
queue.put(next_states)
queue.put(terminated)
queue.put(truncated)
queue.put(infos)
barrier.wait()
# reset environments
if terminated.any() or truncated.any():
states, infos = self.env.reset()
if not states.is_cuda:
states.share_memory_()
else:
states.copy_(next_states)
# terminate processes
for pipe in producer_pipes:
pipe.send({"task": "terminate"})
# join processes
for process in processes:
process.join()
| 14,758 | Python | 36.176322 | 131 | 0.521751 |
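Usage sketch for the ParallelTrainer above with two simultaneous agents; `env`, `agent_a` and `agent_b` are assumed to exist and the environment is assumed to expose 64 parallel sub-environments.

from skrl.trainers.torch import ParallelTrainer

cfg = {"timesteps": 100000, "headless": True}
trainer = ParallelTrainer(env=env,
                          agents=[agent_a, agent_b],
                          agents_scope=[32, 32],  # environments assigned to each agent
                          cfg=cfg)
trainer.train()  # each agent is stepped in its own process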
Toni-SM/skrl/skrl/trainers/torch/sequential.py | from typing import List, Optional, Union
import copy
import sys
import tqdm
import torch
from skrl.agents.torch import Agent
from skrl.envs.wrappers.torch import Wrapper
from skrl.trainers.torch import Trainer
# [start-config-dict-torch]
SEQUENTIAL_TRAINER_DEFAULT_CONFIG = {
"timesteps": 100000, # number of timesteps to train for
"headless": False, # whether to use headless mode (no rendering)
"disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY
"close_environment_at_exit": True, # whether to close the environment on normal program termination
}
# [end-config-dict-torch]
class SequentialTrainer(Trainer):
def __init__(self,
env: Wrapper,
agents: Union[Agent, List[Agent]],
agents_scope: Optional[List[int]] = None,
cfg: Optional[dict] = None) -> None:
"""Sequential trainer
Train agents sequentially (i.e., one after the other in each interaction with the environment)
:param env: Environment to train on
:type env: skrl.envs.wrappers.torch.Wrapper
:param agents: Agents to train
:type agents: Union[Agent, List[Agent]]
:param agents_scope: Number of environments for each agent to train on (default: ``None``)
:type agents_scope: tuple or list of int, optional
:param cfg: Configuration dictionary (default: ``None``).
See SEQUENTIAL_TRAINER_DEFAULT_CONFIG for default values
:type cfg: dict, optional
"""
_cfg = copy.deepcopy(SEQUENTIAL_TRAINER_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
agents_scope = agents_scope if agents_scope is not None else []
super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
# init agents
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.init(trainer_cfg=self.cfg)
else:
self.agents.init(trainer_cfg=self.cfg)
def train(self) -> None:
"""Train the agents sequentially
        This method executes the following steps in a loop:
- Pre-interaction (sequentially)
- Compute actions (sequentially)
- Interact with the environments
- Render scene
- Record transitions (sequentially)
- Post-interaction (sequentially)
- Reset environments
"""
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("train")
else:
self.agents.set_running_mode("train")
# non-simultaneous agents
if self.num_simultaneous_agents == 1:
# single-agent
if self.env.num_agents == 1:
self.single_agent_train()
# multi-agent
else:
self.multi_agent_train()
return
# reset env
states, infos = self.env.reset()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# pre-interaction
for agent in self.agents:
agent.pre_interaction(timestep=timestep, timesteps=self.timesteps)
# compute actions
with torch.no_grad():
actions = torch.vstack([agent.act(states[scope[0]:scope[1]], timestep=timestep, timesteps=self.timesteps)[0] \
for agent, scope in zip(self.agents, self.agents_scope)])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
# record the environments' transitions
for agent, scope in zip(self.agents, self.agents_scope):
agent.record_transition(states=states[scope[0]:scope[1]],
actions=actions[scope[0]:scope[1]],
rewards=rewards[scope[0]:scope[1]],
next_states=next_states[scope[0]:scope[1]],
terminated=terminated[scope[0]:scope[1]],
truncated=truncated[scope[0]:scope[1]],
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
# post-interaction
for agent in self.agents:
agent.post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
with torch.no_grad():
if terminated.any() or truncated.any():
states, infos = self.env.reset()
else:
states = next_states
def eval(self) -> None:
"""Evaluate the agents sequentially
        This method executes the following steps in a loop:
- Compute actions (sequentially)
- Interact with the environments
- Render scene
- Reset environments
"""
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("eval")
else:
self.agents.set_running_mode("eval")
# non-simultaneous agents
if self.num_simultaneous_agents == 1:
# single-agent
if self.env.num_agents == 1:
self.single_agent_eval()
# multi-agent
else:
self.multi_agent_eval()
return
# reset env
states, infos = self.env.reset()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# compute actions
with torch.no_grad():
actions = torch.vstack([agent.act(states[scope[0]:scope[1]], timestep=timestep, timesteps=self.timesteps)[0] \
for agent, scope in zip(self.agents, self.agents_scope)])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
# write data to TensorBoard
for agent, scope in zip(self.agents, self.agents_scope):
agent.record_transition(states=states[scope[0]:scope[1]],
actions=actions[scope[0]:scope[1]],
rewards=rewards[scope[0]:scope[1]],
next_states=next_states[scope[0]:scope[1]],
terminated=terminated[scope[0]:scope[1]],
truncated=truncated[scope[0]:scope[1]],
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
super(type(agent), agent).post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
if terminated.any() or truncated.any():
states, infos = self.env.reset()
else:
states = next_states
| 7,732 | Python | 39.276041 | 131 | 0.535049 |
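Usage sketch for the SequentialTrainer above; `env` and `agent` are assumed to be an already-wrapped environment and a configured agent.

from skrl.trainers.torch import SequentialTrainer

cfg = {"timesteps": 50000, "headless": True, "disable_progressbar": False}
trainer = SequentialTrainer(env=env, agents=agent, cfg=cfg)

trainer.train()  # run the full training loop
trainer.eval()   # then evaluate with the same trainer instance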
Toni-SM/skrl/skrl/trainers/jax/base.py | from typing import List, Optional, Union
import atexit
import contextlib
import sys
import tqdm
from skrl import logger
from skrl.agents.jax import Agent
from skrl.envs.wrappers.jax import Wrapper
def generate_equally_spaced_scopes(num_envs: int, num_simultaneous_agents: int) -> List[int]:
"""Generate a list of equally spaced scopes for the agents
:param num_envs: Number of environments
:type num_envs: int
:param num_simultaneous_agents: Number of simultaneous agents
:type num_simultaneous_agents: int
:raises ValueError: If the number of simultaneous agents is greater than the number of environments
:return: List of equally spaced scopes
:rtype: List[int]
"""
scopes = [int(num_envs / num_simultaneous_agents)] * num_simultaneous_agents
if sum(scopes):
scopes[-1] += num_envs - sum(scopes)
else:
raise ValueError(f"The number of simultaneous agents ({num_simultaneous_agents}) is greater than the number of environments ({num_envs})")
return scopes
class Trainer:
def __init__(self,
env: Wrapper,
agents: Union[Agent, List[Agent]],
agents_scope: Optional[List[int]] = None,
cfg: Optional[dict] = None) -> None:
"""Base class for trainers
:param env: Environment to train on
:type env: skrl.envs.wrappers.jax.Wrapper
:param agents: Agents to train
:type agents: Union[Agent, List[Agent]]
:param agents_scope: Number of environments for each agent to train on (default: ``None``)
:type agents_scope: tuple or list of int, optional
:param cfg: Configuration dictionary (default: ``None``)
:type cfg: dict, optional
"""
self.cfg = cfg if cfg is not None else {}
self.env = env
self.agents = agents
self.agents_scope = agents_scope if agents_scope is not None else []
# get configuration
self.timesteps = self.cfg.get("timesteps", 0)
self.headless = self.cfg.get("headless", False)
self.disable_progressbar = self.cfg.get("disable_progressbar", False)
self.close_environment_at_exit = self.cfg.get("close_environment_at_exit", True)
self.initial_timestep = 0
# setup agents
self.num_simultaneous_agents = 0
self._setup_agents()
# register environment closing if configured
if self.close_environment_at_exit:
@atexit.register
def close_env():
logger.info("Closing environment")
self.env.close()
logger.info("Environment closed")
def __str__(self) -> str:
"""Generate a string representation of the trainer
:return: Representation of the trainer as string
:rtype: str
"""
string = f"Trainer: {self}"
string += f"\n |-- Number of parallelizable environments: {self.env.num_envs}"
string += f"\n |-- Number of simultaneous agents: {self.num_simultaneous_agents}"
string += "\n |-- Agents and scopes:"
if self.num_simultaneous_agents > 1:
for agent, scope in zip(self.agents, self.agents_scope):
string += f"\n | |-- agent: {type(agent)}"
string += f"\n | | |-- scope: {scope[1] - scope[0]} environments ({scope[0]}:{scope[1]})"
else:
string += f"\n | |-- agent: {type(self.agents)}"
string += f"\n | | |-- scope: {self.env.num_envs} environment(s)"
return string
def _setup_agents(self) -> None:
"""Setup agents for training
:raises ValueError: Invalid setup
"""
# validate agents and their scopes
if type(self.agents) in [tuple, list]:
# single agent
if len(self.agents) == 1:
self.num_simultaneous_agents = 1
self.agents = self.agents[0]
self.agents_scope = [1]
# parallel agents
elif len(self.agents) > 1:
self.num_simultaneous_agents = len(self.agents)
# check scopes
if not len(self.agents_scope):
logger.warning("The agents' scopes are empty, they will be generated as equal as possible")
self.agents_scope = [int(self.env.num_envs / len(self.agents))] * len(self.agents)
if sum(self.agents_scope):
self.agents_scope[-1] += self.env.num_envs - sum(self.agents_scope)
else:
raise ValueError(f"The number of agents ({len(self.agents)}) is greater than the number of parallelizable environments ({self.env.num_envs})")
elif len(self.agents_scope) != len(self.agents):
raise ValueError(f"The number of agents ({len(self.agents)}) doesn't match the number of scopes ({len(self.agents_scope)})")
elif sum(self.agents_scope) != self.env.num_envs:
raise ValueError(f"The scopes ({sum(self.agents_scope)}) don't cover the number of parallelizable environments ({self.env.num_envs})")
# generate agents' scopes
index = 0
for i in range(len(self.agents_scope)):
index += self.agents_scope[i]
self.agents_scope[i] = (index - self.agents_scope[i], index)
else:
raise ValueError("A list of agents is expected")
else:
self.num_simultaneous_agents = 1
def train(self) -> None:
"""Train the agents
:raises NotImplementedError: Not implemented
"""
raise NotImplementedError
def eval(self) -> None:
"""Evaluate the agents
:raises NotImplementedError: Not implemented
"""
raise NotImplementedError
def single_agent_train(self) -> None:
"""Train agent
        This method executes the following steps in a loop:
- Pre-interaction
- Compute actions
- Interact with the environments
- Render scene
- Record transitions
- Post-interaction
- Reset environments
"""
assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents"
assert self.env.num_agents == 1, "This method is not allowed for multi-agents"
# reset env
states, infos = self.env.reset()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# pre-interaction
self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps)
# compute actions
with contextlib.nullcontext():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
# record the environments' transitions
with contextlib.nullcontext():
self.agents.record_transition(states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
# post-interaction
self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
if self.env.num_envs > 1:
states = next_states
else:
if terminated.any() or truncated.any():
with contextlib.nullcontext():
states, infos = self.env.reset()
else:
states = next_states
def single_agent_eval(self) -> None:
"""Evaluate agent
        This method executes the following steps in a loop:
- Compute actions (sequentially)
- Interact with the environments
- Render scene
- Reset environments
"""
assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents"
assert self.env.num_agents == 1, "This method is not allowed for multi-agents"
# reset env
states, infos = self.env.reset()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# compute actions
with contextlib.nullcontext():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
with contextlib.nullcontext():
# write data to TensorBoard
self.agents.record_transition(states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
if self.env.num_envs > 1:
states = next_states
else:
if terminated.any() or truncated.any():
with contextlib.nullcontext():
states, infos = self.env.reset()
else:
states = next_states
def multi_agent_train(self) -> None:
"""Train multi-agents
        This method executes the following steps in a loop:
- Pre-interaction
- Compute actions
- Interact with the environments
- Render scene
- Record transitions
- Post-interaction
- Reset environments
"""
assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents"
assert self.env.num_agents > 1, "This method is not allowed for single-agent"
# reset env
states, infos = self.env.reset()
shared_states = infos.get("shared_states", None)
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# pre-interaction
self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps)
# compute actions
with contextlib.nullcontext():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
shared_next_states = infos.get("shared_states", None)
infos["shared_states"] = shared_states
infos["shared_next_states"] = shared_next_states
# render scene
if not self.headless:
self.env.render()
# record the environments' transitions
with contextlib.nullcontext():
self.agents.record_transition(states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
# post-interaction
self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
with contextlib.nullcontext():
if not self.env.agents:
states, infos = self.env.reset()
shared_states = infos.get("shared_states", None)
else:
states = next_states
shared_states = shared_next_states
def multi_agent_eval(self) -> None:
"""Evaluate multi-agents
        This method executes the following steps in a loop:
- Compute actions (sequentially)
- Interact with the environments
- Render scene
- Reset environments
"""
assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents"
assert self.env.num_agents > 1, "This method is not allowed for single-agent"
# reset env
states, infos = self.env.reset()
shared_states = infos.get("shared_states", None)
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# compute actions
with contextlib.nullcontext():
actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0]
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
shared_next_states = infos.get("shared_states", None)
infos["shared_states"] = shared_states
infos["shared_next_states"] = shared_next_states
# render scene
if not self.headless:
self.env.render()
with contextlib.nullcontext():
# write data to TensorBoard
self.agents.record_transition(states=states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
if not self.env.agents:
states, infos = self.env.reset()
shared_states = infos.get("shared_states", None)
else:
states = next_states
shared_states = shared_next_states
| 15,531 | Python | 40.529412 | 166 | 0.540274 |
Toni-SM/skrl/skrl/trainers/jax/__init__.py | from skrl.trainers.jax.base import Trainer, generate_equally_spaced_scopes # isort:skip
from skrl.trainers.jax.sequential import SequentialTrainer
from skrl.trainers.jax.step import StepTrainer
| 196 | Python | 38.399992 | 88 | 0.836735 |
Toni-SM/skrl/skrl/trainers/jax/step.py | from typing import Any, List, Optional, Tuple, Union
import contextlib
import copy
import sys
import tqdm
import jax
import jax.numpy as jnp
import numpy as np
from skrl.agents.jax import Agent
from skrl.envs.wrappers.jax import Wrapper
from skrl.trainers.jax import Trainer
# [start-config-dict-jax]
STEP_TRAINER_DEFAULT_CONFIG = {
"timesteps": 100000, # number of timesteps to train for
"headless": False, # whether to use headless mode (no rendering)
"disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY
"close_environment_at_exit": True, # whether to close the environment on normal program termination
}
# [end-config-dict-jax]
class StepTrainer(Trainer):
def __init__(self,
env: Wrapper,
agents: Union[Agent, List[Agent]],
agents_scope: Optional[List[int]] = None,
cfg: Optional[dict] = None) -> None:
"""Step-by-step trainer
Train agents by controlling the training/evaluation loop step by step
:param env: Environment to train on
:type env: skrl.envs.wrappers.jax.Wrapper
:param agents: Agents to train
:type agents: Union[Agent, List[Agent]]
:param agents_scope: Number of environments for each agent to train on (default: ``None``)
:type agents_scope: tuple or list of int, optional
:param cfg: Configuration dictionary (default: ``None``).
See STEP_TRAINER_DEFAULT_CONFIG for default values
:type cfg: dict, optional
"""
_cfg = copy.deepcopy(STEP_TRAINER_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
agents_scope = agents_scope if agents_scope is not None else []
super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
# init agents
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.init(trainer_cfg=self.cfg)
else:
self.agents.init(trainer_cfg=self.cfg)
self._timestep = 0
self._progress = None
self.states = None
def train(self, timestep: Optional[int] = None, timesteps: Optional[int] = None) -> \
Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array],
Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]:
"""Execute a training iteration
This method executes the following steps once:
- Pre-interaction (sequentially if num_simultaneous_agents > 1)
- Compute actions (sequentially if num_simultaneous_agents > 1)
- Interact with the environments
- Render scene
- Record transitions (sequentially if num_simultaneous_agents > 1)
- Post-interaction (sequentially if num_simultaneous_agents > 1)
- Reset environments
:param timestep: Current timestep (default: ``None``).
If None, the current timestep will be carried by an internal variable
:type timestep: int, optional
:param timesteps: Total number of timesteps (default: ``None``).
If None, the total number of timesteps is obtained from the trainer's config
:type timesteps: int, optional
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of np.ndarray or jax.Array and any other info
"""
if timestep is None:
self._timestep += 1
timestep = self._timestep
timesteps = self.timesteps if timesteps is None else timesteps
if self._progress is None:
self._progress = tqdm.tqdm(total=timesteps, disable=self.disable_progressbar, file=sys.stdout)
self._progress.update(n=1)
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("train")
else:
self.agents.set_running_mode("train")
# reset env
if self.states is None:
self.states, infos = self.env.reset()
if self.num_simultaneous_agents == 1:
# pre-interaction
self.agents.pre_interaction(timestep=timestep, timesteps=timesteps)
# compute actions
with contextlib.nullcontext():
actions = self.agents.act(self.states, timestep=timestep, timesteps=timesteps)[0]
else:
# pre-interaction
for agent in self.agents:
agent.pre_interaction(timestep=timestep, timesteps=timesteps)
# compute actions
with contextlib.nullcontext():
actions = jnp.vstack([agent.act(self.states[scope[0]:scope[1]], timestep=timestep, timesteps=timesteps)[0] \
for agent, scope in zip(self.agents, self.agents_scope)])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
if self.num_simultaneous_agents == 1:
# record the environments' transitions
with contextlib.nullcontext():
self.agents.record_transition(states=self.states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=timesteps)
# post-interaction
self.agents.post_interaction(timestep=timestep, timesteps=timesteps)
else:
# record the environments' transitions
with contextlib.nullcontext():
for agent, scope in zip(self.agents, self.agents_scope):
agent.record_transition(states=self.states[scope[0]:scope[1]],
actions=actions[scope[0]:scope[1]],
rewards=rewards[scope[0]:scope[1]],
next_states=next_states[scope[0]:scope[1]],
terminated=terminated[scope[0]:scope[1]],
truncated=truncated[scope[0]:scope[1]],
infos=infos,
timestep=timestep,
timesteps=timesteps)
# post-interaction
for agent in self.agents:
agent.post_interaction(timestep=timestep, timesteps=timesteps)
# reset environments
with contextlib.nullcontext():
if terminated.any() or truncated.any():
self.states, infos = self.env.reset()
else:
self.states = next_states
return next_states, rewards, terminated, truncated, infos
def eval(self, timestep: Optional[int] = None, timesteps: Optional[int] = None) -> \
Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array],
Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]:
"""Evaluate the agents sequentially
This method executes the following steps in loop:
- Compute actions (sequentially if num_simultaneous_agents > 1)
- Interact with the environments
- Render scene
- Reset environments
:param timestep: Current timestep (default: ``None``).
If None, the current timestep will be carried by an internal variable
:type timestep: int, optional
:param timesteps: Total number of timesteps (default: ``None``).
If None, the total number of timesteps is obtained from the trainer's config
:type timesteps: int, optional
:return: Observation, reward, terminated, truncated, info
:rtype: tuple of np.ndarray or jax.Array and any other info
"""
if timestep is None:
self._timestep += 1
timestep = self._timestep
timesteps = self.timesteps if timesteps is None else timesteps
if self._progress is None:
self._progress = tqdm.tqdm(total=timesteps, disable=self.disable_progressbar, file=sys.stdout)
self._progress.update(n=1)
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("eval")
else:
self.agents.set_running_mode("eval")
# reset env
if self.states is None:
self.states, infos = self.env.reset()
with contextlib.nullcontext():
if self.num_simultaneous_agents == 1:
# compute actions
actions = self.agents.act(self.states, timestep=timestep, timesteps=timesteps)[0]
else:
# compute actions
actions = jnp.vstack([agent.act(self.states[scope[0]:scope[1]], timestep=timestep, timesteps=timesteps)[0] \
for agent, scope in zip(self.agents, self.agents_scope)])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
with contextlib.nullcontext():
if self.num_simultaneous_agents == 1:
# write data to TensorBoard
self.agents.record_transition(states=self.states,
actions=actions,
rewards=rewards,
next_states=next_states,
terminated=terminated,
truncated=truncated,
infos=infos,
timestep=timestep,
timesteps=timesteps)
super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=timesteps)
else:
# write data to TensorBoard
for agent, scope in zip(self.agents, self.agents_scope):
agent.record_transition(states=self.states[scope[0]:scope[1]],
actions=actions[scope[0]:scope[1]],
rewards=rewards[scope[0]:scope[1]],
next_states=next_states[scope[0]:scope[1]],
terminated=terminated[scope[0]:scope[1]],
truncated=truncated[scope[0]:scope[1]],
infos=infos,
timestep=timestep,
timesteps=timesteps)
super(type(agent), agent).post_interaction(timestep=timestep, timesteps=timesteps)
# reset environments
if terminated.any() or truncated.any():
self.states, infos = self.env.reset()
else:
self.states = next_states
return next_states, rewards, terminated, truncated, infos
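# Usage sketch (illustrative; `env` and `agent` are assumed to be an already-wrapped
# skrl environment and an initialized skrl agent):
#
#   trainer = StepTrainer(env=env, agents=agent, cfg={"timesteps": 5000})
#   for timestep in range(5000):
#       next_states, rewards, terminated, truncated, infos = trainer.train(timestep=timestep)
#       # ... custom per-step logic (logging, curriculum, early stopping, etc.) can run here ...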
| 11,759 | Python | 42.394834 | 124 | 0.544434 |
Toni-SM/skrl/skrl/trainers/jax/sequential.py | from typing import List, Optional, Union
import contextlib
import copy
import sys
import tqdm
import jax.numpy as jnp
from skrl.agents.jax import Agent
from skrl.envs.wrappers.jax import Wrapper
from skrl.trainers.jax import Trainer
# [start-config-dict-jax]
SEQUENTIAL_TRAINER_DEFAULT_CONFIG = {
"timesteps": 100000, # number of timesteps to train for
"headless": False, # whether to use headless mode (no rendering)
"disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY
"close_environment_at_exit": True, # whether to close the environment on normal program termination
}
# [end-config-dict-jax]
class SequentialTrainer(Trainer):
def __init__(self,
env: Wrapper,
agents: Union[Agent, List[Agent]],
agents_scope: Optional[List[int]] = None,
cfg: Optional[dict] = None) -> None:
"""Sequential trainer
Train agents sequentially (i.e., one after the other in each interaction with the environment)
:param env: Environment to train on
:type env: skrl.envs.wrappers.jax.Wrapper
:param agents: Agents to train
:type agents: Union[Agent, List[Agent]]
:param agents_scope: Number of environments for each agent to train on (default: ``None``)
:type agents_scope: tuple or list of int, optional
:param cfg: Configuration dictionary (default: ``None``).
See SEQUENTIAL_TRAINER_DEFAULT_CONFIG for default values
:type cfg: dict, optional
"""
_cfg = copy.deepcopy(SEQUENTIAL_TRAINER_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
agents_scope = agents_scope if agents_scope is not None else []
super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg)
# init agents
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.init(trainer_cfg=self.cfg)
else:
self.agents.init(trainer_cfg=self.cfg)
def train(self) -> None:
"""Train the agents sequentially
        This method executes the following steps in a loop:
- Pre-interaction (sequentially)
- Compute actions (sequentially)
- Interact with the environments
- Render scene
- Record transitions (sequentially)
- Post-interaction (sequentially)
- Reset environments
"""
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("train")
else:
self.agents.set_running_mode("train")
# non-simultaneous agents
if self.num_simultaneous_agents == 1:
# single-agent
if self.env.num_agents == 1:
self.single_agent_train()
# multi-agent
else:
self.multi_agent_train()
return
# reset env
states, infos = self.env.reset()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# pre-interaction
for agent in self.agents:
agent.pre_interaction(timestep=timestep, timesteps=self.timesteps)
# compute actions
with contextlib.nullcontext():
actions = jnp.vstack([agent.act(states[scope[0]:scope[1]], timestep=timestep, timesteps=self.timesteps)[0] \
for agent, scope in zip(self.agents, self.agents_scope)])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
# record the environments' transitions
with contextlib.nullcontext():
for agent, scope in zip(self.agents, self.agents_scope):
agent.record_transition(states=states[scope[0]:scope[1]],
actions=actions[scope[0]:scope[1]],
rewards=rewards[scope[0]:scope[1]],
next_states=next_states[scope[0]:scope[1]],
terminated=terminated[scope[0]:scope[1]],
truncated=truncated[scope[0]:scope[1]],
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
# post-interaction
for agent in self.agents:
agent.post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
with contextlib.nullcontext():
if terminated.any() or truncated.any():
states, infos = self.env.reset()
else:
states = next_states
def eval(self) -> None:
"""Evaluate the agents sequentially
        This method executes the following steps in a loop:
- Compute actions (sequentially)
- Interact with the environments
- Render scene
- Reset environments
"""
# set running mode
if self.num_simultaneous_agents > 1:
for agent in self.agents:
agent.set_running_mode("eval")
else:
self.agents.set_running_mode("eval")
# non-simultaneous agents
if self.num_simultaneous_agents == 1:
# single-agent
if self.env.num_agents == 1:
self.single_agent_eval()
# multi-agent
else:
self.multi_agent_eval()
return
# reset env
states, infos = self.env.reset()
for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout):
# compute actions
with contextlib.nullcontext():
actions = jnp.vstack([agent.act(states[scope[0]:scope[1]], timestep=timestep, timesteps=self.timesteps)[0] \
for agent, scope in zip(self.agents, self.agents_scope)])
# step the environments
next_states, rewards, terminated, truncated, infos = self.env.step(actions)
# render scene
if not self.headless:
self.env.render()
with contextlib.nullcontext():
# write data to TensorBoard
for agent, scope in zip(self.agents, self.agents_scope):
agent.record_transition(states=states[scope[0]:scope[1]],
actions=actions[scope[0]:scope[1]],
rewards=rewards[scope[0]:scope[1]],
next_states=next_states[scope[0]:scope[1]],
terminated=terminated[scope[0]:scope[1]],
truncated=truncated[scope[0]:scope[1]],
infos=infos,
timestep=timestep,
timesteps=self.timesteps)
super(type(agent), agent).post_interaction(timestep=timestep, timesteps=self.timesteps)
# reset environments
if terminated.any() or truncated.any():
states, infos = self.env.reset()
else:
states = next_states
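# Usage sketch (illustrative; `env`, `agent_a` and `agent_b` are assumptions). The
# `agents_scope` entries give the number of environments assigned to each agent,
# matching the constructor documentation above.
#
#   trainer = SequentialTrainer(env=env, agents=[agent_a, agent_b], agents_scope=[256, 256],
#                               cfg={"timesteps": 100000, "headless": True})
#   trainer.train()
#   trainer.eval()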
| 7,810 | Python | 39.05641 | 131 | 0.540973 |
Toni-SM/skrl/skrl/models/torch/base.py | from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import collections
import gym
import gymnasium
import numpy as np
import torch
from skrl import logger
class Model(torch.nn.Module):
def __init__(self,
observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
device: Optional[Union[str, torch.device]] = None) -> None:
"""Base class representing a function approximator
The following properties are defined:
- ``device`` (torch.device): Device to be used for the computations
- ``observation_space`` (int, sequence of int, gym.Space, gymnasium.Space): Observation/state space
- ``action_space`` (int, sequence of int, gym.Space, gymnasium.Space): Action space
- ``num_observations`` (int): Number of elements in the observation/state space
- ``num_actions`` (int): Number of elements in the action space
:param observation_space: Observation/state space or shape.
The ``num_observations`` property will contain the size of that space
:type observation_space: int, sequence of int, gym.Space, gymnasium.Space
:param action_space: Action space or shape.
The ``num_actions`` property will contain the size of that space
:type action_space: int, sequence of int, gym.Space, gymnasium.Space
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
Custom models should override the ``act`` method::
            import torch
            import torch.nn as nn
            import torch.nn.functional as F
            from skrl.models.torch import Model
class CustomModel(Model):
def __init__(self, observation_space, action_space, device="cuda:0"):
Model.__init__(self, observation_space, action_space, device)
self.layer_1 = nn.Linear(self.num_observations, 64)
self.layer_2 = nn.Linear(64, self.num_actions)
def act(self, inputs, role=""):
x = F.relu(self.layer_1(inputs["states"]))
x = F.relu(self.layer_2(x))
return x, None, {}
"""
super(Model, self).__init__()
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device)
self.observation_space = observation_space
self.action_space = action_space
self.num_observations = None if observation_space is None else self._get_space_size(observation_space)
self.num_actions = None if action_space is None else self._get_space_size(action_space)
self._random_distribution = None
def _get_space_size(self,
space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
number_of_elements: bool = True) -> int:
"""Get the size (number of elements) of a space
:param space: Space or shape from which to obtain the number of elements
:type space: int, sequence of int, gym.Space, or gymnasium.Space
:param number_of_elements: Whether the number of elements occupied by the space is returned (default: ``True``).
If ``False``, the shape of the space is returned.
It only affects Discrete and MultiDiscrete spaces
:type number_of_elements: bool, optional
:raises ValueError: If the space is not supported
:return: Size of the space (number of elements)
:rtype: int
Example::
# from int
>>> model._get_space_size(2)
2
# from sequence of int
>>> model._get_space_size([2, 3])
6
# Box space
>>> space = gym.spaces.Box(low=-1, high=1, shape=(2, 3))
>>> model._get_space_size(space)
6
# Discrete space
>>> space = gym.spaces.Discrete(4)
>>> model._get_space_size(space)
4
>>> model._get_space_size(space, number_of_elements=False)
1
# MultiDiscrete space
>>> space = gym.spaces.MultiDiscrete([5, 3, 2])
>>> model._get_space_size(space)
10
>>> model._get_space_size(space, number_of_elements=False)
3
# Dict space
>>> space = gym.spaces.Dict({'a': gym.spaces.Box(low=-1, high=1, shape=(2, 3)),
... 'b': gym.spaces.Discrete(4)})
>>> model._get_space_size(space)
10
>>> model._get_space_size(space, number_of_elements=False)
7
"""
size = None
if type(space) in [int, float]:
size = space
elif type(space) in [tuple, list]:
size = np.prod(space)
elif issubclass(type(space), gym.Space):
if issubclass(type(space), gym.spaces.Discrete):
if number_of_elements:
size = space.n
else:
size = 1
elif issubclass(type(space), gym.spaces.MultiDiscrete):
if number_of_elements:
size = np.sum(space.nvec)
else:
size = space.nvec.shape[0]
elif issubclass(type(space), gym.spaces.Box):
size = np.prod(space.shape)
elif issubclass(type(space), gym.spaces.Dict):
size = sum([self._get_space_size(space.spaces[key], number_of_elements) for key in space.spaces])
elif issubclass(type(space), gymnasium.Space):
if issubclass(type(space), gymnasium.spaces.Discrete):
if number_of_elements:
size = space.n
else:
size = 1
elif issubclass(type(space), gymnasium.spaces.MultiDiscrete):
if number_of_elements:
size = np.sum(space.nvec)
else:
size = space.nvec.shape[0]
elif issubclass(type(space), gymnasium.spaces.Box):
size = np.prod(space.shape)
elif issubclass(type(space), gymnasium.spaces.Dict):
size = sum([self._get_space_size(space.spaces[key], number_of_elements) for key in space.spaces])
if size is None:
raise ValueError(f"Space type {type(space)} not supported")
return int(size)
def tensor_to_space(self,
tensor: torch.Tensor,
space: Union[gym.Space, gymnasium.Space],
start: int = 0) -> Union[torch.Tensor, dict]:
"""Map a flat tensor to a Gym/Gymnasium space
The mapping is done in the following way:
- Tensors belonging to Discrete spaces are returned without modification
- Tensors belonging to Box spaces are reshaped to the corresponding space shape
keeping the first dimension (number of samples) as they are
- Tensors belonging to Dict spaces are mapped into a dictionary with the same keys as the original space
:param tensor: Tensor to map from
:type tensor: torch.Tensor
:param space: Space to map the tensor to
:type space: gym.Space or gymnasium.Space
:param start: Index of the first element of the tensor to map (default: ``0``)
:type start: int, optional
:raises ValueError: If the space is not supported
:return: Mapped tensor or dictionary
:rtype: torch.Tensor or dict
Example::
>>> space = gym.spaces.Dict({'a': gym.spaces.Box(low=-1, high=1, shape=(2, 3)),
... 'b': gym.spaces.Discrete(4)})
>>> tensor = torch.tensor([[-0.3, -0.2, -0.1, 0.1, 0.2, 0.3, 2]])
>>>
>>> model.tensor_to_space(tensor, space)
{'a': tensor([[[-0.3000, -0.2000, -0.1000],
[ 0.1000, 0.2000, 0.3000]]]),
'b': tensor([[2.]])}
"""
if issubclass(type(space), gym.Space):
if issubclass(type(space), gym.spaces.Discrete):
return tensor
elif issubclass(type(space), gym.spaces.Box):
return tensor.view(tensor.shape[0], *space.shape)
elif issubclass(type(space), gym.spaces.Dict):
output = {}
for k in sorted(space.keys()):
end = start + self._get_space_size(space[k], number_of_elements=False)
output[k] = self.tensor_to_space(tensor[:, start:end], space[k], end)
start = end
return output
else:
if issubclass(type(space), gymnasium.spaces.Discrete):
return tensor
elif issubclass(type(space), gymnasium.spaces.Box):
return tensor.view(tensor.shape[0], *space.shape)
elif issubclass(type(space), gymnasium.spaces.Dict):
output = {}
for k in sorted(space.keys()):
end = start + self._get_space_size(space[k], number_of_elements=False)
output[k] = self.tensor_to_space(tensor[:, start:end], space[k], end)
start = end
return output
raise ValueError(f"Space type {type(space)} not supported")
def random_act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, None, Mapping[str, Union[torch.Tensor, Any]]]:
"""Act randomly according to the action space
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:raises NotImplementedError: Unsupported action space
:return: Model output. The first component is the action to be taken by the agent
:rtype: tuple of torch.Tensor, None, and dict
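        Example::
            # illustrative only: output shape depends on the action space
            >>> states = torch.rand(32, model.num_observations, device=model.device)
            >>> actions, _, _ = model.random_act({"states": states})
            >>> print(actions.shape)   # e.g. torch.Size([32, 8]) for a Box action space with shape (8,)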
"""
# discrete action space (Discrete)
if issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete):
return torch.randint(self.action_space.n, (inputs["states"].shape[0], 1), device=self.device), None, {}
# continuous action space (Box)
elif issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box):
if self._random_distribution is None:
self._random_distribution = torch.distributions.uniform.Uniform(
low=torch.tensor(self.action_space.low[0], device=self.device, dtype=torch.float32),
high=torch.tensor(self.action_space.high[0], device=self.device, dtype=torch.float32))
return self._random_distribution.sample(sample_shape=(inputs["states"].shape[0], self.num_actions)), None, {}
else:
raise NotImplementedError(f"Action space type ({type(self.action_space)}) not supported")
def init_parameters(self, method_name: str = "normal_", *args, **kwargs) -> None:
"""Initialize the model parameters according to the specified method name
Method names are from the `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ module.
Allowed method names are *uniform_*, *normal_*, *constant_*, etc.
:param method_name: `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ method name (default: ``"normal_"``)
:type method_name: str, optional
:param args: Positional arguments of the method to be called
:type args: tuple, optional
:param kwargs: Key-value arguments of the method to be called
:type kwargs: dict, optional
Example::
# initialize all parameters with an orthogonal distribution with a gain of 0.5
>>> model.init_parameters("orthogonal_", gain=0.5)
# initialize all parameters as a sparse matrix with a sparsity of 0.1
>>> model.init_parameters("sparse_", sparsity=0.1)
"""
for parameters in self.parameters():
exec(f"torch.nn.init.{method_name}(parameters, *args, **kwargs)")
def init_weights(self, method_name: str = "orthogonal_", *args, **kwargs) -> None:
"""Initialize the model weights according to the specified method name
Method names are from the `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ module.
Allowed method names are *uniform_*, *normal_*, *constant_*, etc.
The following layers will be initialized:
- torch.nn.Linear
:param method_name: `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ method name (default: ``"orthogonal_"``)
:type method_name: str, optional
:param args: Positional arguments of the method to be called
:type args: tuple, optional
:param kwargs: Key-value arguments of the method to be called
:type kwargs: dict, optional
Example::
# initialize all weights with uniform distribution in range [-0.1, 0.1]
>>> model.init_weights(method_name="uniform_", a=-0.1, b=0.1)
# initialize all weights with normal distribution with mean 0 and standard deviation 0.25
>>> model.init_weights(method_name="normal_", mean=0.0, std=0.25)
"""
def _update_weights(module, method_name, args, kwargs):
for layer in module:
if isinstance(layer, torch.nn.Sequential):
_update_weights(layer, method_name, args, kwargs)
elif isinstance(layer, torch.nn.Linear):
exec(f"torch.nn.init.{method_name}(layer.weight, *args, **kwargs)")
_update_weights(self.children(), method_name, args, kwargs)
def init_biases(self, method_name: str = "constant_", *args, **kwargs) -> None:
"""Initialize the model biases according to the specified method name
Method names are from the `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ module.
Allowed method names are *uniform_*, *normal_*, *constant_*, etc.
The following layers will be initialized:
- torch.nn.Linear
:param method_name: `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ method name (default: ``"constant_"``)
:type method_name: str, optional
:param args: Positional arguments of the method to be called
:type args: tuple, optional
:param kwargs: Key-value arguments of the method to be called
:type kwargs: dict, optional
Example::
# initialize all biases with a constant value (0)
>>> model.init_biases(method_name="constant_", val=0)
# initialize all biases with normal distribution with mean 0 and standard deviation 0.25
>>> model.init_biases(method_name="normal_", mean=0.0, std=0.25)
"""
def _update_biases(module, method_name, args, kwargs):
for layer in module:
if isinstance(layer, torch.nn.Sequential):
_update_biases(layer, method_name, args, kwargs)
elif isinstance(layer, torch.nn.Linear):
exec(f"torch.nn.init.{method_name}(layer.bias, *args, **kwargs)")
_update_biases(self.children(), method_name, args, kwargs)
def get_specification(self) -> Mapping[str, Any]:
"""Returns the specification of the model
The following keys are used by the agents for initialization:
- ``"rnn"``: Recurrent Neural Network (RNN) specification for RNN, LSTM and GRU layers/cells
- ``"sizes"``: List of RNN shapes (number of layers, number of environments, number of features in the RNN state).
There must be as many tuples as there are states in the recurrent layer/cell. E.g., LSTM has 2 states (hidden and cell).
:return: Dictionary containing advanced specification of the model
:rtype: dict
Example::
# model with a LSTM layer.
# - number of layers: 1
# - number of environments: 4
# - number of features in the RNN state: 64
>>> model.get_specification()
{'rnn': {'sizes': [(1, 4, 64), (1, 4, 64)]}}
"""
return {}
def forward(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Forward pass of the model
This method calls the ``.act()`` method and returns its outputs
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function for stochastic models
or None for deterministic models. The third component is a dictionary containing extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dict
"""
return self.act(inputs, role)
def compute(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[Union[torch.Tensor, Mapping[str, Union[torch.Tensor, Any]]]]:
"""Define the computation performed (to be implemented by the inheriting classes) by the models
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:raises NotImplementedError: Child class must implement this method
:return: Computation performed by the models
:rtype: tuple of torch.Tensor and dict
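        Example::
            # illustrative override in a subclass (the ``net`` attribute is an assumption)
            >>> def compute(self, inputs, role):
            ...     return self.net(inputs["states"]), {}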
"""
raise NotImplementedError("The computation performed by the models (.compute()) is not implemented")
def act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Act according to the specified behavior (to be implemented by the inheriting classes)
Agents will call this method to obtain the decision to be taken given the state of the environment.
This method is currently implemented by the helper models (**GaussianModel**, etc.).
The classes that inherit from the latter must only implement the ``.compute()`` method
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:raises NotImplementedError: Child class must implement this method
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function for stochastic models
or None for deterministic models. The third component is a dictionary containing extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dict
"""
logger.warning("Make sure to place Mixins before Model during model definition")
raise NotImplementedError("The action to be taken by the agent (.act()) is not implemented")
def set_mode(self, mode: str) -> None:
"""Set the model mode (training or evaluation)
:param mode: Mode: ``"train"`` for training or ``"eval"`` for evaluation.
See `torch.nn.Module.train <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.train>`_
:type mode: str
:raises ValueError: If the mode is not ``"train"`` or ``"eval"``
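        Example::
            # set the model in evaluation mode
            >>> model.set_mode("eval")
            # set the model back in training mode
            >>> model.set_mode("train")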
"""
if mode == "train":
self.train(True)
elif mode == "eval":
self.train(False)
else:
raise ValueError("Invalid mode. Use 'train' for training or 'eval' for evaluation")
def save(self, path: str, state_dict: Optional[dict] = None) -> None:
"""Save the model to the specified path
:param path: Path to save the model to
:type path: str
:param state_dict: State dictionary to save (default: ``None``).
If None, the model's state_dict will be saved
:type state_dict: dict, optional
Example::
# save the current model to the specified path
>>> model.save("/tmp/model.pt")
# save an older version of the model to the specified path
>>> old_state_dict = copy.deepcopy(model.state_dict())
>>> # ...
>>> model.save("/tmp/model.pt", old_state_dict)
"""
torch.save(self.state_dict() if state_dict is None else state_dict, path)
def load(self, path: str) -> None:
"""Load the model from the specified path
The final storage device is determined by the constructor of the model
:param path: Path to load the model from
:type path: str
Example::
# load the model onto the CPU
>>> model = Model(observation_space, action_space, device="cpu")
>>> model.load("model.pt")
# load the model onto the GPU 1
>>> model = Model(observation_space, action_space, device="cuda:1")
>>> model.load("model.pt")
"""
self.load_state_dict(torch.load(path, map_location=self.device))
self.eval()
def migrate(self,
state_dict: Optional[Mapping[str, torch.Tensor]] = None,
path: Optional[str] = None,
name_map: Mapping[str, str] = {},
auto_mapping: bool = True,
verbose: bool = False) -> bool:
"""Migrate the specified extrernal model's state dict to the current model
The final storage device is determined by the constructor of the model
Only one of ``state_dict`` or ``path`` can be specified.
        The ``path`` parameter allows automatically loading the ``state_dict`` only from files generated
by the *rl_games* and *stable-baselines3* libraries at the moment
        For ambiguous models (where two or more parameters of the source or current model have equal shapes),
        it is necessary to define the ``name_map``, at least for those parameters, to perform the migration successfully
:param state_dict: External model's state dict to migrate from (default: ``None``)
:type state_dict: Mapping[str, torch.Tensor], optional
:param path: Path to the external checkpoint to migrate from (default: ``None``)
:type path: str, optional
:param name_map: Name map to use for the migration (default: ``{}``).
Keys are the current parameter names and values are the external parameter names
:type name_map: Mapping[str, str], optional
:param auto_mapping: Automatically map the external state dict to the current state dict (default: ``True``)
:type auto_mapping: bool, optional
:param verbose: Show model names and migration (default: ``False``)
:type verbose: bool, optional
:raises ValueError: If neither or both of ``state_dict`` and ``path`` parameters have been set
:raises ValueError: If the correct file type cannot be identified from the ``path`` parameter
:return: True if the migration was successful, False otherwise.
Migration is successful if all parameters of the current model are found in the external model
:rtype: bool
Example::
# migrate a rl_games checkpoint with unambiguous state_dict
>>> model.migrate(path="./runs/Ant/nn/Ant.pth")
True
# migrate a rl_games checkpoint with ambiguous state_dict
>>> model.migrate(path="./runs/Cartpole/nn/Cartpole.pth", verbose=False)
[skrl:WARNING] Ambiguous match for log_std_parameter <- [value_mean_std.running_mean, value_mean_std.running_var, a2c_network.sigma]
[skrl:WARNING] Ambiguous match for net.0.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias]
[skrl:WARNING] Ambiguous match for net.2.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias]
[skrl:WARNING] Ambiguous match for net.4.weight <- [a2c_network.value.weight, a2c_network.mu.weight]
[skrl:WARNING] Ambiguous match for net.4.bias <- [a2c_network.value.bias, a2c_network.mu.bias]
[skrl:WARNING] Multiple use of a2c_network.actor_mlp.0.bias -> [net.0.bias, net.2.bias]
[skrl:WARNING] Multiple use of a2c_network.actor_mlp.2.bias -> [net.0.bias, net.2.bias]
False
>>> name_map = {"log_std_parameter": "a2c_network.sigma",
... "net.0.bias": "a2c_network.actor_mlp.0.bias",
... "net.2.bias": "a2c_network.actor_mlp.2.bias",
... "net.4.weight": "a2c_network.mu.weight",
... "net.4.bias": "a2c_network.mu.bias"}
>>> model.migrate(path="./runs/Cartpole/nn/Cartpole.pth", name_map=name_map, verbose=True)
[skrl:INFO] Models
[skrl:INFO] |-- current: 7 items
[skrl:INFO] | |-- log_std_parameter : torch.Size([1])
[skrl:INFO] | |-- net.0.weight : torch.Size([32, 4])
[skrl:INFO] | |-- net.0.bias : torch.Size([32])
[skrl:INFO] | |-- net.2.weight : torch.Size([32, 32])
[skrl:INFO] | |-- net.2.bias : torch.Size([32])
[skrl:INFO] | |-- net.4.weight : torch.Size([1, 32])
[skrl:INFO] | |-- net.4.bias : torch.Size([1])
[skrl:INFO] |-- source: 15 items
[skrl:INFO] | |-- value_mean_std.running_mean : torch.Size([1])
[skrl:INFO] | |-- value_mean_std.running_var : torch.Size([1])
[skrl:INFO] | |-- value_mean_std.count : torch.Size([])
[skrl:INFO] | |-- running_mean_std.running_mean : torch.Size([4])
[skrl:INFO] | |-- running_mean_std.running_var : torch.Size([4])
[skrl:INFO] | |-- running_mean_std.count : torch.Size([])
[skrl:INFO] | |-- a2c_network.sigma : torch.Size([1])
[skrl:INFO] | |-- a2c_network.actor_mlp.0.weight : torch.Size([32, 4])
[skrl:INFO] | |-- a2c_network.actor_mlp.0.bias : torch.Size([32])
[skrl:INFO] | |-- a2c_network.actor_mlp.2.weight : torch.Size([32, 32])
[skrl:INFO] | |-- a2c_network.actor_mlp.2.bias : torch.Size([32])
[skrl:INFO] | |-- a2c_network.value.weight : torch.Size([1, 32])
[skrl:INFO] | |-- a2c_network.value.bias : torch.Size([1])
[skrl:INFO] | |-- a2c_network.mu.weight : torch.Size([1, 32])
[skrl:INFO] | |-- a2c_network.mu.bias : torch.Size([1])
[skrl:INFO] Migration
[skrl:INFO] |-- map: log_std_parameter <- a2c_network.sigma
[skrl:INFO] |-- auto: net.0.weight <- a2c_network.actor_mlp.0.weight
[skrl:INFO] |-- map: net.0.bias <- a2c_network.actor_mlp.0.bias
[skrl:INFO] |-- auto: net.2.weight <- a2c_network.actor_mlp.2.weight
[skrl:INFO] |-- map: net.2.bias <- a2c_network.actor_mlp.2.bias
[skrl:INFO] |-- map: net.4.weight <- a2c_network.mu.weight
[skrl:INFO] |-- map: net.4.bias <- a2c_network.mu.bias
False
# migrate a stable-baselines3 checkpoint with unambiguous state_dict
>>> model.migrate(path="./ddpg_pendulum.zip")
True
# migrate from any exported model by loading its state_dict (unambiguous state_dict)
>>> state_dict = torch.load("./external_model.pt")
>>> model.migrate(state_dict=state_dict)
True
"""
if (state_dict is not None) + (path is not None) != 1:
raise ValueError("Exactly one of state_dict or path may be specified")
# load state_dict from path
if path is not None:
state_dict = {}
# rl_games checkpoint
if path.endswith(".pt") or path.endswith(".pth"):
checkpoint = torch.load(path, map_location=self.device)
if type(checkpoint) is dict:
state_dict = checkpoint.get("model", {})
# stable-baselines3
elif path.endswith(".zip"):
import zipfile
try:
archive = zipfile.ZipFile(path, 'r')
with archive.open('policy.pth', mode="r") as file:
state_dict = torch.load(file, map_location=self.device)
except KeyError as e:
logger.warning(str(e))
state_dict = {}
else:
raise ValueError("Cannot identify file type")
# show state_dict
if verbose:
logger.info("Models")
logger.info(f" |-- current: {len(self.state_dict().keys())} items")
for name, tensor in self.state_dict().items():
logger.info(f" | |-- {name} : {list(tensor.shape)}")
logger.info(f" |-- source: {len(state_dict.keys())} items")
for name, tensor in state_dict.items():
logger.info(f" | |-- {name} : {list(tensor.shape)}")
logger.info("Migration")
# migrate the state_dict to current model
new_state_dict = collections.OrderedDict()
match_counter = collections.defaultdict(list)
used_counter = collections.defaultdict(list)
for name, tensor in self.state_dict().items():
for external_name, external_tensor in state_dict.items():
# mapped names
if name_map.get(name, "") == external_name:
if tensor.shape == external_tensor.shape:
new_state_dict[name] = external_tensor
match_counter[name].append(external_name)
used_counter[external_name].append(name)
if verbose:
logger.info(f" |-- map: {name} <- {external_name}")
break
else:
logger.warning(f"Shape mismatch for {name} <- {external_name} : {tensor.shape} != {external_tensor.shape}")
# auto-mapped names
if auto_mapping and name not in name_map:
if tensor.shape == external_tensor.shape:
if name.endswith(".weight"):
if external_name.endswith(".weight"):
new_state_dict[name] = external_tensor
match_counter[name].append(external_name)
used_counter[external_name].append(name)
if verbose:
logger.info(f" |-- auto: {name} <- {external_name}")
elif name.endswith(".bias"):
if external_name.endswith(".bias"):
new_state_dict[name] = external_tensor
match_counter[name].append(external_name)
used_counter[external_name].append(name)
if verbose:
logger.info(f" |-- auto: {name} <- {external_name}")
else:
if not external_name.endswith(".weight") and not external_name.endswith(".bias"):
new_state_dict[name] = external_tensor
match_counter[name].append(external_name)
used_counter[external_name].append(name)
if verbose:
logger.info(f" |-- auto: {name} <- {external_name}")
# show ambiguous matches
status = True
for name, tensor in self.state_dict().items():
if len(match_counter.get(name, [])) > 1:
logger.warning("Ambiguous match for {} <- [{}]".format(name, ", ".join(match_counter.get(name, []))))
status = False
# show missing matches
for name, tensor in self.state_dict().items():
if not match_counter.get(name, []):
logger.warning(f"Missing match for {name}")
status = False
# show multiple uses
for name, tensor in state_dict.items():
if len(used_counter.get(name, [])) > 1:
logger.warning("Multiple use of {} -> [{}]".format(name, ", ".join(used_counter.get(name, []))))
status = False
# load new state dict
self.load_state_dict(new_state_dict, strict=False)
self.eval()
return status
def freeze_parameters(self, freeze: bool = True) -> None:
"""Freeze or unfreeze internal parameters
- Freeze: disable gradient computation (``parameters.requires_grad = False``)
- Unfreeze: enable gradient computation (``parameters.requires_grad = True``)
:param freeze: Freeze the internal parameters if True, otherwise unfreeze them (default: ``True``)
:type freeze: bool, optional
Example::
# freeze model parameters
>>> model.freeze_parameters(True)
# unfreeze model parameters
>>> model.freeze_parameters(False)
"""
for parameters in self.parameters():
parameters.requires_grad = not freeze
def update_parameters(self, model: torch.nn.Module, polyak: float = 1) -> None:
"""Update internal parameters by hard or soft (polyak averaging) update
- Hard update: :math:`\\theta = \\theta_{net}`
- Soft (polyak averaging) update: :math:`\\theta = (1 - \\rho) \\theta + \\rho \\theta_{net}`
:param model: Model used to update the internal parameters
:type model: torch.nn.Module (skrl.models.torch.Model)
:param polyak: Polyak hyperparameter between 0 and 1 (default: ``1``).
A hard update is performed when its value is 1
:type polyak: float, optional
Example::
# hard update (from source model)
>>> model.update_parameters(source_model)
# soft update (from source model)
>>> model.update_parameters(source_model, polyak=0.005)
"""
with torch.no_grad():
# hard update
if polyak == 1:
for parameters, model_parameters in zip(self.parameters(), model.parameters()):
parameters.data.copy_(model_parameters.data)
# soft update (use in-place operations to avoid creating new parameters)
else:
for parameters, model_parameters in zip(self.parameters(), model.parameters()):
parameters.data.mul_(1 - polyak)
parameters.data.add_(polyak * model_parameters.data)
| 36,772 | Python | 48.293566 | 144 | 0.571794 |
Toni-SM/skrl/skrl/models/torch/tabular.py | from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import torch
from skrl.models.torch import Model
class TabularMixin:
def __init__(self, num_envs: int = 1, role: str = "") -> None:
"""Tabular mixin model
:param num_envs: Number of environments (default: 1)
:type num_envs: int, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
# define the model
>>> import torch
>>> from skrl.models.torch import Model, TabularMixin
>>>
>>> class GreedyPolicy(TabularMixin, Model):
... def __init__(self, observation_space, action_space, device="cuda:0", num_envs=1):
... Model.__init__(self, observation_space, action_space, device)
... TabularMixin.__init__(self, num_envs)
...
... self.table = torch.ones((num_envs, self.num_observations, self.num_actions),
... dtype=torch.float32, device=self.device)
...
... def compute(self, inputs, role):
... actions = torch.argmax(self.table[torch.arange(self.num_envs).view(-1, 1), inputs["states"]],
... dim=-1, keepdim=True).view(-1,1)
... return actions, {}
...
>>> # given an observation_space: gym.spaces.Discrete with n=100
>>> # and an action_space: gym.spaces.Discrete with n=5
>>> model = GreedyPolicy(observation_space, action_space, num_envs=1)
>>>
>>> print(model)
GreedyPolicy(
(table): Tensor(shape=[1, 100, 5])
)
"""
self.num_envs = num_envs
def __repr__(self) -> str:
"""String representation of an object as torch.nn.Module
"""
lines = []
for name in self._get_tensor_names():
tensor = getattr(self, name)
lines.append(f"({name}): {tensor.__class__.__name__}(shape={list(tensor.shape)})")
main_str = self.__class__.__name__ + '('
if lines:
main_str += "\n {}\n".format("\n ".join(lines))
main_str += ')'
return main_str
def _get_tensor_names(self) -> Sequence[str]:
"""Get the names of the tensors that the model is using
:return: Tensor names
:rtype: sequence of str
"""
tensors = []
for attr in dir(self):
if not attr.startswith("__") and issubclass(type(getattr(self, attr)), torch.Tensor):
tensors.append(attr)
return sorted(tensors)
def act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Act in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:return: Model output. The first component is the action to be taken by the agent.
The second component is ``None``. The third component is a dictionary containing extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dict
Example::
>>> # given a batch of sample states with shape (1, 100)
>>> actions, _, outputs = model.act({"states": states})
>>> print(actions[0], outputs)
tensor([[3]], device='cuda:0') {}
"""
actions, outputs = self.compute(inputs, role)
return actions, None, outputs
def table(self) -> torch.Tensor:
"""Return the Q-table
:return: Q-table
:rtype: torch.Tensor
Example::
>>> output = model.table()
>>> print(output.shape)
torch.Size([1, 100, 5])
"""
return self.q_table
def to(self, *args, **kwargs) -> Model:
"""Move the model to a different device
:param args: Arguments to pass to the method
:type args: tuple
:param kwargs: Keyword arguments to pass to the method
:type kwargs: dict
:return: Model moved to the specified device
:rtype: Model
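        Example::
            # move the model (including its internal tensors) to the CPU
            >>> model = model.to("cpu")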
"""
Model.to(self, *args, **kwargs)
for name in self._get_tensor_names():
setattr(self, name, getattr(self, name).to(*args, **kwargs))
return self
def state_dict(self, *args, **kwargs) -> Mapping:
"""Returns a dictionary containing a whole state of the module
:return: A dictionary containing a whole state of the module
:rtype: dict
"""
_state_dict = {name: getattr(self, name) for name in self._get_tensor_names()}
Model.state_dict(self, destination=_state_dict)
return _state_dict
def load_state_dict(self, state_dict: Mapping, strict: bool = True) -> None:
"""Copies parameters and buffers from state_dict into this module and its descendants
:param state_dict: A dict containing parameters and persistent buffers
:type state_dict: dict
:param strict: Whether to strictly enforce that the keys in state_dict match the keys
returned by this module's state_dict() function (default: ``True``)
:type strict: bool, optional
"""
Model.load_state_dict(self, state_dict, strict=False)
for name, tensor in state_dict.items():
if hasattr(self, name) and isinstance(getattr(self, name), torch.Tensor):
_tensor = getattr(self, name)
if isinstance(_tensor, torch.Tensor):
if _tensor.shape == tensor.shape and _tensor.dtype == tensor.dtype:
setattr(self, name, tensor)
else:
raise ValueError(f"Tensor shape ({_tensor.shape} vs {tensor.shape}) or dtype ({_tensor.dtype} vs {tensor.dtype}) mismatch")
else:
raise ValueError(f"{name} is not a tensor of {self.__class__.__name__}")
def save(self, path: str, state_dict: Optional[dict] = None) -> None:
"""Save the model to the specified path
:param path: Path to save the model to
:type path: str
:param state_dict: State dictionary to save (default: ``None``).
If None, the model's state_dict will be saved
:type state_dict: dict, optional
Example::
# save the current model to the specified path
>>> model.save("/tmp/model.pt")
"""
# TODO: save state_dict
torch.save({name: getattr(self, name) for name in self._get_tensor_names()}, path)
def load(self, path: str) -> None:
"""Load the model from the specified path
The final storage device is determined by the constructor of the model
:param path: Path to load the model from
:type path: str
:raises ValueError: If the models are not compatible
Example::
# load the model onto the CPU
>>> model = Model(observation_space, action_space, device="cpu")
>>> model.load("model.pt")
# load the model onto the GPU 1
>>> model = Model(observation_space, action_space, device="cuda:1")
>>> model.load("model.pt")
"""
tensors = torch.load(path)
for name, tensor in tensors.items():
if hasattr(self, name) and isinstance(getattr(self, name), torch.Tensor):
_tensor = getattr(self, name)
if isinstance(_tensor, torch.Tensor):
if _tensor.shape == tensor.shape and _tensor.dtype == tensor.dtype:
setattr(self, name, tensor)
else:
raise ValueError(f"Tensor shape ({_tensor.shape} vs {tensor.shape}) or dtype ({_tensor.dtype} vs {tensor.dtype}) mismatch")
else:
raise ValueError(f"{name} is not a tensor of {self.__class__.__name__}")
| 8,489 | Python | 39.428571 | 147 | 0.552715 |
Toni-SM/skrl/skrl/models/torch/__init__.py | from skrl.models.torch.base import Model # isort:skip
from skrl.models.torch.categorical import CategoricalMixin
from skrl.models.torch.deterministic import DeterministicMixin
from skrl.models.torch.gaussian import GaussianMixin
from skrl.models.torch.multicategorical import MultiCategoricalMixin
from skrl.models.torch.multivariate_gaussian import MultivariateGaussianMixin
from skrl.models.torch.tabular import TabularMixin
| 429 | Python | 46.777773 | 77 | 0.869464 |
Toni-SM/skrl/skrl/models/torch/deterministic.py | from typing import Any, Mapping, Tuple, Union
import gym
import gymnasium
import torch
class DeterministicMixin:
def __init__(self, clip_actions: bool = False, role: str = "") -> None:
"""Deterministic mixin model (deterministic model)
:param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``)
:type clip_actions: bool, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
# define the model
>>> import torch
>>> import torch.nn as nn
>>> from skrl.models.torch import Model, DeterministicMixin
>>>
>>> class Value(DeterministicMixin, Model):
... def __init__(self, observation_space, action_space, device="cuda:0", clip_actions=False):
... Model.__init__(self, observation_space, action_space, device)
... DeterministicMixin.__init__(self, clip_actions)
...
... self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
... nn.ELU(),
... nn.Linear(32, 32),
... nn.ELU(),
... nn.Linear(32, 1))
...
... def compute(self, inputs, role):
... return self.net(inputs["states"]), {}
...
>>> # given an observation_space: gym.spaces.Box with shape (60,)
>>> # and an action_space: gym.spaces.Box with shape (8,)
>>> model = Value(observation_space, action_space)
>>>
>>> print(model)
Value(
(net): Sequential(
(0): Linear(in_features=60, out_features=32, bias=True)
(1): ELU(alpha=1.0)
(2): Linear(in_features=32, out_features=32, bias=True)
(3): ELU(alpha=1.0)
(4): Linear(in_features=32, out_features=1, bias=True)
)
)
"""
self._clip_actions = clip_actions and (issubclass(type(self.action_space), gym.Space) or \
issubclass(type(self.action_space), gymnasium.Space))
if self._clip_actions:
self._clip_actions_min = torch.tensor(self.action_space.low, device=self.device, dtype=torch.float32)
self._clip_actions_max = torch.tensor(self.action_space.high, device=self.device, dtype=torch.float32)
def act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Act deterministically in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:return: Model output. The first component is the action to be taken by the agent.
The second component is ``None``. The third component is a dictionary containing extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dict
Example::
>>> # given a batch of sample states with shape (4096, 60)
>>> actions, _, outputs = model.act({"states": states})
>>> print(actions.shape, outputs)
torch.Size([4096, 1]) {}
"""
# map from observations/states to actions
actions, outputs = self.compute(inputs, role)
# clip actions
if self._clip_actions:
actions = torch.clamp(actions, min=self._clip_actions_min, max=self._clip_actions_max)
return actions, None, outputs
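# Usage sketch (illustrative, reusing the ``Value`` class from the mixin docstring above;
# the observation/action spaces and batch size are assumptions):
#
#   model = Value(observation_space, action_space)
#   values, _, _ = model.act({"states": torch.rand(4096, 60, device=model.device)})
#   # values.shape -> torch.Size([4096, 1])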
| 4,136 | Python | 43.483871 | 124 | 0.544246 |
Toni-SM/skrl/skrl/models/torch/gaussian.py | from typing import Any, Mapping, Tuple, Union
import gym
import gymnasium
import torch
from torch.distributions import Normal
class GaussianMixin:
def __init__(self,
clip_actions: bool = False,
clip_log_std: bool = True,
min_log_std: float = -20,
max_log_std: float = 2,
reduction: str = "sum",
role: str = "") -> None:
"""Gaussian mixin model (stochastic model)
:param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``)
:type clip_actions: bool, optional
:param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: ``True``)
:type clip_log_std: bool, optional
:param min_log_std: Minimum value of the log standard deviation if ``clip_log_std`` is True (default: ``-20``)
:type min_log_std: float, optional
:param max_log_std: Maximum value of the log standard deviation if ``clip_log_std`` is True (default: ``2``)
:type max_log_std: float, optional
:param reduction: Reduction method for returning the log probability density function: (default: ``"sum"``).
                          Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If ``"none"``, the log probability density
function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)``
:type reduction: str, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:raises ValueError: If the reduction method is not valid
Example::
# define the model
>>> import torch
>>> import torch.nn as nn
>>> from skrl.models.torch import Model, GaussianMixin
>>>
>>> class Policy(GaussianMixin, Model):
... def __init__(self, observation_space, action_space, device="cuda:0",
... clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
... Model.__init__(self, observation_space, action_space, device)
... GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
...
... self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
... nn.ELU(),
... nn.Linear(32, 32),
... nn.ELU(),
... nn.Linear(32, self.num_actions))
... self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
...
... def compute(self, inputs, role):
... return self.net(inputs["states"]), self.log_std_parameter, {}
...
>>> # given an observation_space: gym.spaces.Box with shape (60,)
>>> # and an action_space: gym.spaces.Box with shape (8,)
>>> model = Policy(observation_space, action_space)
>>>
>>> print(model)
Policy(
(net): Sequential(
(0): Linear(in_features=60, out_features=32, bias=True)
(1): ELU(alpha=1.0)
(2): Linear(in_features=32, out_features=32, bias=True)
(3): ELU(alpha=1.0)
(4): Linear(in_features=32, out_features=8, bias=True)
)
)
"""
self._clip_actions = clip_actions and (issubclass(type(self.action_space), gym.Space) or \
issubclass(type(self.action_space), gymnasium.Space))
if self._clip_actions:
self._clip_actions_min = torch.tensor(self.action_space.low, device=self.device, dtype=torch.float32)
self._clip_actions_max = torch.tensor(self.action_space.high, device=self.device, dtype=torch.float32)
self._clip_log_std = clip_log_std
self._log_std_min = min_log_std
self._log_std_max = max_log_std
self._log_std = None
self._num_samples = None
self._distribution = None
if reduction not in ["mean", "sum", "prod", "none"]:
raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'")
self._reduction = torch.mean if reduction == "mean" else torch.sum if reduction == "sum" \
else torch.prod if reduction == "prod" else None
def act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Act stochastically in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function.
The third component is a dictionary containing the mean actions ``"mean_actions"``
and extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dict
Example::
>>> # given a batch of sample states with shape (4096, 60)
>>> actions, log_prob, outputs = model.act({"states": states})
>>> print(actions.shape, log_prob.shape, outputs["mean_actions"].shape)
torch.Size([4096, 8]) torch.Size([4096, 1]) torch.Size([4096, 8])
"""
# map from states/observations to mean actions and log standard deviations
mean_actions, log_std, outputs = self.compute(inputs, role)
# clamp log standard deviations
if self._clip_log_std:
log_std = torch.clamp(log_std, self._log_std_min, self._log_std_max)
self._log_std = log_std
self._num_samples = mean_actions.shape[0]
# distribution
self._distribution = Normal(mean_actions, log_std.exp())
# sample using the reparameterization trick
actions = self._distribution.rsample()
# clip actions
if self._clip_actions:
actions = torch.clamp(actions, min=self._clip_actions_min, max=self._clip_actions_max)
# log of the probability density function
log_prob = self._distribution.log_prob(inputs.get("taken_actions", actions))
if self._reduction is not None:
log_prob = self._reduction(log_prob, dim=-1)
if log_prob.dim() != actions.dim():
log_prob = log_prob.unsqueeze(-1)
outputs["mean_actions"] = mean_actions
return actions, log_prob, outputs
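        # Illustrative usage sketch (hypothetical variable names, not from the original source):
        # re-evaluating the log-probability of previously taken actions, e.g. for an on-policy update:
        #   >>> _, new_log_prob, _ = model.act({"states": states, "taken_actions": actions}, role="policy")
        #   >>> ratio = (new_log_prob - old_log_prob).exp()  # old_log_prob assumed stored at rollout time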
def get_entropy(self, role: str = "") -> torch.Tensor:
"""Compute and return the entropy of the model
:return: Entropy of the model
:rtype: torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> entropy = model.get_entropy()
>>> print(entropy.shape)
torch.Size([4096, 8])
"""
if self._distribution is None:
return torch.tensor(0.0, device=self.device)
return self._distribution.entropy().to(self.device)
def get_log_std(self, role: str = "") -> torch.Tensor:
"""Return the log standard deviation of the model
:return: Log standard deviation of the model
:rtype: torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> log_std = model.get_log_std()
>>> print(log_std.shape)
torch.Size([4096, 8])
"""
return self._log_std.repeat(self._num_samples, 1)
def distribution(self, role: str = "") -> torch.distributions.Normal:
"""Get the current distribution of the model
:return: Distribution of the model
:rtype: torch.distributions.Normal
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> distribution = model.distribution()
>>> print(distribution)
Normal(loc: torch.Size([4096, 8]), scale: torch.Size([4096, 8]))
"""
return self._distribution
| 8,814 | Python | 43.075 | 139 | 0.562401 |
Toni-SM/skrl/skrl/models/torch/multicategorical.py | from typing import Any, Mapping, Sequence, Tuple, Union
import torch
from torch.distributions import Categorical
class MultiCategoricalMixin:
def __init__(self, unnormalized_log_prob: bool = True, reduction: str = "sum", role: str = "") -> None:
"""MultiCategorical mixin model (stochastic model)
        :param unnormalized_log_prob: Flag to indicate how the model's output should be interpreted (default: ``True``).
If True, the model's output is interpreted as unnormalized log probabilities
(it can be any real number), otherwise as normalized probabilities
(the output must be non-negative, finite and have a non-zero sum)
:type unnormalized_log_prob: bool, optional
        :param reduction: Reduction method for returning the log probability density function (default: ``"sum"``).
                          Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If ``"none"``, the log probability density
                          function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)``
        :type reduction: str, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:raises ValueError: If the reduction method is not valid
Example::
# define the model
>>> import torch
>>> import torch.nn as nn
>>> from skrl.models.torch import Model, MultiCategoricalMixin
>>>
>>> class Policy(MultiCategoricalMixin, Model):
... def __init__(self, observation_space, action_space, device="cuda:0", unnormalized_log_prob=True, reduction="sum"):
... Model.__init__(self, observation_space, action_space, device)
... MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
...
... self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
... nn.ELU(),
... nn.Linear(32, 32),
... nn.ELU(),
... nn.Linear(32, self.num_actions))
...
... def compute(self, inputs, role):
... return self.net(inputs["states"]), {}
...
>>> # given an observation_space: gym.spaces.Box with shape (4,)
>>> # and an action_space: gym.spaces.MultiDiscrete with nvec = [3, 2]
>>> model = Policy(observation_space, action_space)
>>>
>>> print(model)
Policy(
(net): Sequential(
(0): Linear(in_features=4, out_features=32, bias=True)
(1): ELU(alpha=1.0)
(2): Linear(in_features=32, out_features=32, bias=True)
(3): ELU(alpha=1.0)
(4): Linear(in_features=32, out_features=5, bias=True)
)
)
"""
self._unnormalized_log_prob = unnormalized_log_prob
self._distributions = []
if reduction not in ["mean", "sum", "prod", "none"]:
raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'")
self._reduction = torch.mean if reduction == "mean" else torch.sum if reduction == "sum" \
else torch.prod if reduction == "prod" else None
def act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Act stochastically in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function.
The third component is a dictionary containing the network output ``"net_output"``
and extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dict
Example::
>>> # given a batch of sample states with shape (4096, 4)
>>> actions, log_prob, outputs = model.act({"states": states})
>>> print(actions.shape, log_prob.shape, outputs["net_output"].shape)
torch.Size([4096, 2]) torch.Size([4096, 1]) torch.Size([4096, 5])
"""
# map from states/observations to normalized probabilities or unnormalized log probabilities
net_output, outputs = self.compute(inputs, role)
# unnormalized log probabilities
if self._unnormalized_log_prob:
self._distributions = [Categorical(logits=logits) for logits in torch.split(net_output, self.action_space.nvec.tolist(), dim=-1)]
# normalized probabilities
else:
self._distributions = [Categorical(probs=probs) for probs in torch.split(net_output, self.action_space.nvec.tolist(), dim=-1)]
# actions
actions = torch.stack([distribution.sample() for distribution in self._distributions], dim=-1)
# log of the probability density function
log_prob = torch.stack([distribution.log_prob(_actions.view(-1)) for _actions, distribution \
in zip(torch.unbind(inputs.get("taken_actions", actions), dim=-1), self._distributions)], dim=-1)
if self._reduction is not None:
log_prob = self._reduction(log_prob, dim=-1)
if log_prob.dim() != actions.dim():
log_prob = log_prob.unsqueeze(-1)
outputs["net_output"] = net_output
return actions, log_prob, outputs
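        # Illustrative note (assumed example, not part of the original source): for an action
        # space with nvec = [3, 2], net_output has 5 columns; torch.split produces logit blocks
        # of shapes (N, 3) and (N, 2), one Categorical distribution per sub-action, and the
        # sampled actions are stacked into a tensor of shape (N, 2).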
def get_entropy(self, role: str = "") -> torch.Tensor:
"""Compute and return the entropy of the model
:return: Entropy of the model
:rtype: torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> entropy = model.get_entropy()
>>> print(entropy.shape)
torch.Size([4096, 1])
"""
if self._distributions:
entropy = torch.stack([distribution.entropy().to(self.device) for distribution in self._distributions], dim=-1)
if self._reduction is not None:
return self._reduction(entropy, dim=-1).unsqueeze(-1)
return entropy
return torch.tensor(0.0, device=self.device)
def distribution(self, role: str = "") -> torch.distributions.Categorical:
"""Get the current distribution of the model
        :return: First distribution of the model
        :rtype: torch.distributions.Categorical
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> distribution = model.distribution()
>>> print(distribution)
Categorical(probs: torch.Size([10, 3]), logits: torch.Size([10, 3]))
"""
        # TODO: find a way to integrate the distribution functions (e.g. stddev) into the class
return self._distributions[0]
| 7,655 | Python | 48.076923 | 141 | 0.568125 |
Toni-SM/skrl/skrl/models/torch/multivariate_gaussian.py | from typing import Any, Mapping, Tuple, Union
import gym
import gymnasium
import torch
from torch.distributions import MultivariateNormal
class MultivariateGaussianMixin:
def __init__(self,
clip_actions: bool = False,
clip_log_std: bool = True,
min_log_std: float = -20,
max_log_std: float = 2,
role: str = "") -> None:
"""Multivariate Gaussian mixin model (stochastic model)
:param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``)
:type clip_actions: bool, optional
:param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: ``True``)
:type clip_log_std: bool, optional
:param min_log_std: Minimum value of the log standard deviation if ``clip_log_std`` is True (default: ``-20``)
:type min_log_std: float, optional
:param max_log_std: Maximum value of the log standard deviation if ``clip_log_std`` is True (default: ``2``)
:type max_log_std: float, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
# define the model
>>> import torch
>>> import torch.nn as nn
>>> from skrl.models.torch import Model, MultivariateGaussianMixin
>>>
>>> class Policy(MultivariateGaussianMixin, Model):
... def __init__(self, observation_space, action_space, device="cuda:0",
... clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2):
... Model.__init__(self, observation_space, action_space, device)
... MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
...
... self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
... nn.ELU(),
... nn.Linear(32, 32),
... nn.ELU(),
... nn.Linear(32, self.num_actions))
... self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))
...
... def compute(self, inputs, role):
... return self.net(inputs["states"]), self.log_std_parameter, {}
...
>>> # given an observation_space: gym.spaces.Box with shape (60,)
>>> # and an action_space: gym.spaces.Box with shape (8,)
>>> model = Policy(observation_space, action_space)
>>>
>>> print(model)
Policy(
(net): Sequential(
(0): Linear(in_features=60, out_features=32, bias=True)
(1): ELU(alpha=1.0)
(2): Linear(in_features=32, out_features=32, bias=True)
(3): ELU(alpha=1.0)
(4): Linear(in_features=32, out_features=8, bias=True)
)
)
"""
self._clip_actions = clip_actions and (issubclass(type(self.action_space), gym.Space) or \
issubclass(type(self.action_space), gymnasium.Space))
if self._clip_actions:
self._clip_actions_min = torch.tensor(self.action_space.low, device=self.device, dtype=torch.float32)
self._clip_actions_max = torch.tensor(self.action_space.high, device=self.device, dtype=torch.float32)
self._clip_log_std = clip_log_std
self._log_std_min = min_log_std
self._log_std_max = max_log_std
self._log_std = None
self._num_samples = None
self._distribution = None
def act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Act stochastically in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function.
The third component is a dictionary containing the mean actions ``"mean_actions"``
and extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dict
Example::
>>> # given a batch of sample states with shape (4096, 60)
>>> actions, log_prob, outputs = model.act({"states": states})
>>> print(actions.shape, log_prob.shape, outputs["mean_actions"].shape)
torch.Size([4096, 8]) torch.Size([4096, 1]) torch.Size([4096, 8])
"""
# map from states/observations to mean actions and log standard deviations
mean_actions, log_std, outputs = self.compute(inputs, role)
# clamp log standard deviations
if self._clip_log_std:
log_std = torch.clamp(log_std, self._log_std_min, self._log_std_max)
self._log_std = log_std
self._num_samples = mean_actions.shape[0]
# distribution
covariance = torch.diag(log_std.exp() * log_std.exp())
self._distribution = MultivariateNormal(mean_actions, scale_tril=covariance)
# sample using the reparameterization trick
actions = self._distribution.rsample()
# clip actions
if self._clip_actions:
actions = torch.clamp(actions, min=self._clip_actions_min, max=self._clip_actions_max)
# log of the probability density function
log_prob = self._distribution.log_prob(inputs.get("taken_actions", actions))
if log_prob.dim() != actions.dim():
log_prob = log_prob.unsqueeze(-1)
outputs["mean_actions"] = mean_actions
return actions, log_prob, outputs
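        # Illustrative note (added for clarity, not part of the original implementation):
        # MultivariateNormal.log_prob already returns the joint log-density over all action
        # dimensions (shape (num_samples,)), so it is only unsqueezed to (num_samples, 1);
        # no per-dimension reduction is needed as in the independent Gaussian case.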
def get_entropy(self, role: str = "") -> torch.Tensor:
"""Compute and return the entropy of the model
:return: Entropy of the model
:rtype: torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> entropy = model.get_entropy()
>>> print(entropy.shape)
torch.Size([4096])
"""
if self._distribution is None:
return torch.tensor(0.0, device=self.device)
return self._distribution.entropy().to(self.device)
def get_log_std(self, role: str = "") -> torch.Tensor:
"""Return the log standard deviation of the model
:return: Log standard deviation of the model
:rtype: torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> log_std = model.get_log_std()
>>> print(log_std.shape)
torch.Size([4096, 8])
"""
return self._log_std.repeat(self._num_samples, 1)
def distribution(self, role: str = "") -> torch.distributions.MultivariateNormal:
"""Get the current distribution of the model
:return: Distribution of the model
:rtype: torch.distributions.MultivariateNormal
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> distribution = model.distribution()
>>> print(distribution)
MultivariateNormal(loc: torch.Size([4096, 8]), scale_tril: torch.Size([4096, 8, 8]))
"""
return self._distribution
| 8,041 | Python | 42.005347 | 124 | 0.567342 |
Toni-SM/skrl/skrl/models/torch/categorical.py | from typing import Any, Mapping, Tuple, Union
import torch
from torch.distributions import Categorical
class CategoricalMixin:
def __init__(self, unnormalized_log_prob: bool = True, role: str = "") -> None:
"""Categorical mixin model (stochastic model)
        :param unnormalized_log_prob: Flag to indicate how the model's output should be interpreted (default: ``True``).
If True, the model's output is interpreted as unnormalized log probabilities
(it can be any real number), otherwise as normalized probabilities
(the output must be non-negative, finite and have a non-zero sum)
:type unnormalized_log_prob: bool, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
# define the model
>>> import torch
>>> import torch.nn as nn
>>> from skrl.models.torch import Model, CategoricalMixin
>>>
>>> class Policy(CategoricalMixin, Model):
... def __init__(self, observation_space, action_space, device="cuda:0", unnormalized_log_prob=True):
... Model.__init__(self, observation_space, action_space, device)
... CategoricalMixin.__init__(self, unnormalized_log_prob)
...
... self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
... nn.ELU(),
... nn.Linear(32, 32),
... nn.ELU(),
... nn.Linear(32, self.num_actions))
...
... def compute(self, inputs, role):
... return self.net(inputs["states"]), {}
...
>>> # given an observation_space: gym.spaces.Box with shape (4,)
>>> # and an action_space: gym.spaces.Discrete with n = 2
>>> model = Policy(observation_space, action_space)
>>>
>>> print(model)
Policy(
(net): Sequential(
(0): Linear(in_features=4, out_features=32, bias=True)
(1): ELU(alpha=1.0)
(2): Linear(in_features=32, out_features=32, bias=True)
(3): ELU(alpha=1.0)
(4): Linear(in_features=32, out_features=2, bias=True)
)
)
"""
self._unnormalized_log_prob = unnormalized_log_prob
self._distribution = None
def act(self,
inputs: Mapping[str, Union[torch.Tensor, Any]],
role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]:
"""Act stochastically in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function.
The third component is a dictionary containing the network output ``"net_output"``
and extra output values
:rtype: tuple of torch.Tensor, torch.Tensor or None, and dict
Example::
>>> # given a batch of sample states with shape (4096, 4)
>>> actions, log_prob, outputs = model.act({"states": states})
>>> print(actions.shape, log_prob.shape, outputs["net_output"].shape)
torch.Size([4096, 1]) torch.Size([4096, 1]) torch.Size([4096, 2])
"""
# map from states/observations to normalized probabilities or unnormalized log probabilities
net_output, outputs = self.compute(inputs, role)
# unnormalized log probabilities
if self._unnormalized_log_prob:
self._distribution = Categorical(logits=net_output)
# normalized probabilities
else:
self._distribution = Categorical(probs=net_output)
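        # Illustrative note (added for clarity, not part of the original implementation):
        # Categorical(logits=z) is equivalent to Categorical(probs=softmax(z)), so passing
        # unnormalized log-probabilities avoids requiring the network output to already be
        # a valid (non-negative, normalized) probability distribution.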
# actions and log of the probability density function
actions = self._distribution.sample()
log_prob = self._distribution.log_prob(inputs.get("taken_actions", actions).view(-1))
outputs["net_output"] = net_output
return actions.unsqueeze(-1), log_prob.unsqueeze(-1), outputs
def get_entropy(self, role: str = "") -> torch.Tensor:
"""Compute and return the entropy of the model
:return: Entropy of the model
:rtype: torch.Tensor
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> entropy = model.get_entropy()
>>> print(entropy.shape)
torch.Size([4096, 1])
"""
if self._distribution is None:
return torch.tensor(0.0, device=self.device)
return self._distribution.entropy().to(self.device)
def distribution(self, role: str = "") -> torch.distributions.Categorical:
"""Get the current distribution of the model
:return: Distribution of the model
:rtype: torch.distributions.Categorical
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
>>> distribution = model.distribution()
>>> print(distribution)
Categorical(probs: torch.Size([4096, 2]), logits: torch.Size([4096, 2]))
"""
return self._distribution
| 5,941 | Python | 43.343283 | 118 | 0.555967 |
Toni-SM/skrl/skrl/models/jax/base.py | from typing import Any, Callable, Mapping, Optional, Sequence, Tuple, Union
import gym
import gymnasium
import flax
import jax
import numpy as np
from skrl import config
class StateDict(flax.struct.PyTreeNode):
apply_fn: Callable = flax.struct.field(pytree_node=False)
params: flax.core.FrozenDict[str, Any] = flax.struct.field(pytree_node=True)
@classmethod
def create(cls, *, apply_fn, params, **kwargs):
return cls(apply_fn=apply_fn, params=params, **kwargs)
class Model(flax.linen.Module):
observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space]
action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space]
device: Optional[Union[str, jax.Device]] = None
def __init__(self,
observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
device: Optional[Union[str, jax.Device]] = None,
parent: Optional[Any] = None,
name: Optional[str] = None) -> None:
"""Base class representing a function approximator
The following properties are defined:
- ``device`` (jax.Device): Device to be used for the computations
- ``observation_space`` (int, sequence of int, gym.Space, gymnasium.Space): Observation/state space
- ``action_space`` (int, sequence of int, gym.Space, gymnasium.Space): Action space
- ``num_observations`` (int): Number of elements in the observation/state space
- ``num_actions`` (int): Number of elements in the action space
:param observation_space: Observation/state space or shape.
The ``num_observations`` property will contain the size of that space
:type observation_space: int, sequence of int, gym.Space, gymnasium.Space
:param action_space: Action space or shape.
The ``num_actions`` property will contain the size of that space
:type action_space: int, sequence of int, gym.Space, gymnasium.Space
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param parent: The parent Module of this Module (default: ``None``).
It is a Flax reserved attribute
:type parent: str, optional
:param name: The name of this Module (default: ``None``).
It is a Flax reserved attribute
:type name: str, optional
Custom models should override the ``act`` method::
import flax.linen as nn
from skrl.models.jax import Model
class CustomModel(Model):
def __init__(self, observation_space, action_space, device=None, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
# https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError
flax.linen.Module.__post_init__(self)
@nn.compact
def __call__(self, inputs, role):
x = nn.relu(nn.Dense(64)(inputs["states"]))
x = nn.relu(nn.Dense(self.num_actions)(x))
return x, None, {}
"""
self._jax = config.jax.backend == "jax"
if device is None:
self.device = jax.devices()[0]
else:
self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0]
self.observation_space = observation_space
self.action_space = action_space
self.num_observations = None if observation_space is None else self._get_space_size(observation_space)
self.num_actions = None if action_space is None else self._get_space_size(action_space)
self.state_dict: StateDict
self.training = False
# https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.ReservedModuleAttributeError
self.parent = parent
self.name = name
def init_state_dict(self,
role: str,
inputs: Mapping[str, Union[np.ndarray, jax.Array]] = {},
key: Optional[jax.Array] = None) -> None:
"""Initialize state dictionary
        :param role: Role played by the model
:type role: str
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
If not specified, the keys will be populated with observation and action space samples
:type inputs: dict of np.ndarray or jax.Array, optional
:param key: Pseudo-random number generator (PRNG) key (default: ``None``).
If not provided, the skrl's PRNG key (``config.jax.key``) will be used
:type key: jax.Array, optional
"""
if not inputs:
inputs = {"states": self.observation_space.sample(), "taken_actions": self.action_space.sample()}
if key is None:
key = config.jax.key
if isinstance(inputs["states"], (int, np.int32, np.int64)):
inputs["states"] = np.array(inputs["states"]).reshape(-1,1)
# init internal state dict
self.state_dict = StateDict.create(apply_fn=self.apply, params=self.init(key, inputs, role))
def _get_space_size(self,
space: Union[int, Sequence[int], gym.Space, gymnasium.Space],
number_of_elements: bool = True) -> int:
"""Get the size (number of elements) of a space
:param space: Space or shape from which to obtain the number of elements
:type space: int, sequence of int, gym.Space, or gymnasium.Space
:param number_of_elements: Whether the number of elements occupied by the space is returned (default: ``True``).
If ``False``, the shape of the space is returned.
It only affects Discrete and MultiDiscrete spaces
:type number_of_elements: bool, optional
:raises ValueError: If the space is not supported
:return: Size of the space (number of elements)
:rtype: int
Example::
# from int
>>> model._get_space_size(2)
2
# from sequence of int
>>> model._get_space_size([2, 3])
6
# Box space
>>> space = gym.spaces.Box(low=-1, high=1, shape=(2, 3))
>>> model._get_space_size(space)
6
# Discrete space
>>> space = gym.spaces.Discrete(4)
>>> model._get_space_size(space)
4
>>> model._get_space_size(space, number_of_elements=False)
1
# MultiDiscrete space
>>> space = gym.spaces.MultiDiscrete([5, 3, 2])
>>> model._get_space_size(space)
10
>>> model._get_space_size(space, number_of_elements=False)
3
# Dict space
>>> space = gym.spaces.Dict({'a': gym.spaces.Box(low=-1, high=1, shape=(2, 3)),
... 'b': gym.spaces.Discrete(4)})
>>> model._get_space_size(space)
10
>>> model._get_space_size(space, number_of_elements=False)
7
"""
size = None
if type(space) in [int, float]:
size = space
elif type(space) in [tuple, list]:
size = np.prod(space)
elif issubclass(type(space), gym.Space):
if issubclass(type(space), gym.spaces.Discrete):
if number_of_elements:
size = space.n
else:
size = 1
elif issubclass(type(space), gym.spaces.MultiDiscrete):
if number_of_elements:
size = np.sum(space.nvec)
else:
size = space.nvec.shape[0]
elif issubclass(type(space), gym.spaces.Box):
size = np.prod(space.shape)
elif issubclass(type(space), gym.spaces.Dict):
size = sum([self._get_space_size(space.spaces[key], number_of_elements) for key in space.spaces])
elif issubclass(type(space), gymnasium.Space):
if issubclass(type(space), gymnasium.spaces.Discrete):
if number_of_elements:
size = space.n
else:
size = 1
elif issubclass(type(space), gymnasium.spaces.MultiDiscrete):
if number_of_elements:
size = np.sum(space.nvec)
else:
size = space.nvec.shape[0]
elif issubclass(type(space), gymnasium.spaces.Box):
size = np.prod(space.shape)
elif issubclass(type(space), gymnasium.spaces.Dict):
size = sum([self._get_space_size(space.spaces[key], number_of_elements) for key in space.spaces])
if size is None:
raise ValueError(f"Space type {type(space)} not supported")
return int(size)
def tensor_to_space(self,
tensor: Union[np.ndarray, jax.Array],
space: Union[gym.Space, gymnasium.Space],
start: int = 0) -> Union[Union[np.ndarray, jax.Array], dict]:
"""Map a flat tensor to a Gym/Gymnasium space
The mapping is done in the following way:
- Tensors belonging to Discrete spaces are returned without modification
- Tensors belonging to Box spaces are reshaped to the corresponding space shape
keeping the first dimension (number of samples) as they are
- Tensors belonging to Dict spaces are mapped into a dictionary with the same keys as the original space
:param tensor: Tensor to map from
:type tensor: np.ndarray or jax.Array
:param space: Space to map the tensor to
:type space: gym.Space or gymnasium.Space
:param start: Index of the first element of the tensor to map (default: ``0``)
:type start: int, optional
:raises ValueError: If the space is not supported
:return: Mapped tensor or dictionary
:rtype: np.ndarray or jax.Array, or dict
Example::
>>> space = gym.spaces.Dict({'a': gym.spaces.Box(low=-1, high=1, shape=(2, 3)),
... 'b': gym.spaces.Discrete(4)})
>>> tensor = jnp.array([[-0.3, -0.2, -0.1, 0.1, 0.2, 0.3, 2]])
>>>
>>> model.tensor_to_space(tensor, space)
{'a': Array([[[-0.3, -0.2, -0.1],
[ 0.1, 0.2, 0.3]]], dtype=float32),
'b': Array([[2.]], dtype=float32)}
"""
if issubclass(type(space), gym.Space):
if issubclass(type(space), gym.spaces.Discrete):
return tensor
elif issubclass(type(space), gym.spaces.Box):
return tensor.reshape(tensor.shape[0], *space.shape)
elif issubclass(type(space), gym.spaces.Dict):
output = {}
for k in sorted(space.keys()):
end = start + self._get_space_size(space[k], number_of_elements=False)
output[k] = self.tensor_to_space(tensor[:, start:end], space[k], end)
start = end
return output
else:
if issubclass(type(space), gymnasium.spaces.Discrete):
return tensor
elif issubclass(type(space), gymnasium.spaces.Box):
return tensor.reshape(tensor.shape[0], *space.shape)
elif issubclass(type(space), gymnasium.spaces.Dict):
output = {}
for k in sorted(space.keys()):
end = start + self._get_space_size(space[k], number_of_elements=False)
output[k] = self.tensor_to_space(tensor[:, start:end], space[k], end)
start = end
return output
raise ValueError(f"Space type {type(space)} not supported")
def random_act(self,
inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]],
role: str = "",
params: Optional[jax.Array] = None) -> Tuple[Union[np.ndarray, jax.Array], Union[Union[np.ndarray, jax.Array], None], Mapping[str, Union[Union[np.ndarray, jax.Array], Any]]]:
"""Act randomly according to the action space
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically np.ndarray or jax.Array
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:param params: Parameters used to compute the output (default: ``None``).
If ``None``, internal parameters will be used
:type params: jnp.array
:raises NotImplementedError: Unsupported action space
:return: Model output. The first component is the action to be taken by the agent
:rtype: tuple of np.ndarray or jax.Array, None, and dict
"""
# discrete action space (Discrete)
if issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete):
actions = np.random.randint(self.action_space.n, size=(inputs["states"].shape[0], 1))
# continuous action space (Box)
elif issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box):
actions = np.random.uniform(low=self.action_space.low[0], high=self.action_space.high[0], size=(inputs["states"].shape[0], self.num_actions))
else:
raise NotImplementedError(f"Action space type ({type(self.action_space)}) not supported")
if self._jax:
return jax.device_put(actions), None, {}
return actions, None, {}
def init_parameters(self, method_name: str = "normal", *args, **kwargs) -> None:
"""Initialize the model parameters according to the specified method name
Method names are from the `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ module.
Allowed method names are *uniform*, *normal*, *constant*, etc.
:param method_name: `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ method name (default: ``"normal"``)
:type method_name: str, optional
:param args: Positional arguments of the method to be called
:type args: tuple, optional
:param kwargs: Key-value arguments of the method to be called
:type kwargs: dict, optional
Example::
# initialize all parameters with an orthogonal distribution with a scale of 0.5
>>> model.init_parameters("orthogonal", scale=0.5)
# initialize all parameters as a normal distribution with a standard deviation of 0.1
>>> model.init_parameters("normal", stddev=0.1)
"""
if method_name in ["ones", "zeros"]:
method = eval(f"flax.linen.initializers.{method_name}")
else:
method = eval(f"flax.linen.initializers.{method_name}(*args, **kwargs)")
params = jax.tree_util.tree_map(lambda param: method(config.jax.key, param.shape), self.state_dict.params)
self.state_dict = self.state_dict.replace(params=params)
def init_weights(self, method_name: str = "normal", *args, **kwargs) -> None:
"""Initialize the model weights according to the specified method name
Method names are from the `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ module.
Allowed method names are *uniform*, *normal*, *constant*, etc.
:param method_name: `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ method name (default: ``"normal"``)
:type method_name: str, optional
:param args: Positional arguments of the method to be called
:type args: tuple, optional
:param kwargs: Key-value arguments of the method to be called
:type kwargs: dict, optional
Example::
            # initialize all weights with a uniform distribution in the range [0, 0.1)
            >>> model.init_weights(method_name="uniform", scale=0.1)
            # initialize all weights with a normal distribution with a standard deviation of 0.25
            >>> model.init_weights(method_name="normal", stddev=0.25)
"""
if method_name in ["ones", "zeros"]:
method = eval(f"flax.linen.initializers.{method_name}")
else:
method = eval(f"flax.linen.initializers.{method_name}(*args, **kwargs)")
params = jax.tree_util.tree_map_with_path(lambda path, param: method(config.jax.key, param.shape) if path[-1].key == "kernel" else param,
self.state_dict.params)
self.state_dict = self.state_dict.replace(params=params)
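        # Illustrative note (added for clarity, not part of the original implementation):
        # Flax stores Dense layer weights under the "kernel" key and biases under "bias",
        # so filtering on path[-1].key == "kernel" re-initializes only the weight matrices.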
    def init_biases(self, method_name: str = "constant", *args, **kwargs) -> None:
"""Initialize the model biases according to the specified method name
Method names are from the `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ module.
Allowed method names are *uniform*, *normal*, *constant*, etc.
        :param method_name: `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ method name (default: ``"constant"``)
:type method_name: str, optional
:param args: Positional arguments of the method to be called
:type args: tuple, optional
:param kwargs: Key-value arguments of the method to be called
:type kwargs: dict, optional
Example::
            # initialize all biases with a constant value (0)
            >>> model.init_biases(method_name="constant", value=0.0)
            # initialize all biases with a normal distribution with a standard deviation of 0.25
            >>> model.init_biases(method_name="normal", stddev=0.25)
"""
if method_name in ["ones", "zeros"]:
method = eval(f"flax.linen.initializers.{method_name}")
else:
method = eval(f"flax.linen.initializers.{method_name}(*args, **kwargs)")
params = jax.tree_util.tree_map_with_path(lambda path, param: method(config.jax.key, param.shape) if path[-1].key == "bias" else param,
self.state_dict.params)
self.state_dict = self.state_dict.replace(params=params)
def get_specification(self) -> Mapping[str, Any]:
"""Returns the specification of the model
The following keys are used by the agents for initialization:
- ``"rnn"``: Recurrent Neural Network (RNN) specification for RNN, LSTM and GRU layers/cells
- ``"sizes"``: List of RNN shapes (number of layers, number of environments, number of features in the RNN state).
There must be as many tuples as there are states in the recurrent layer/cell. E.g., LSTM has 2 states (hidden and cell).
:return: Dictionary containing advanced specification of the model
:rtype: dict
Example::
            # model with an LSTM layer.
# - number of layers: 1
# - number of environments: 4
# - number of features in the RNN state: 64
>>> model.get_specification()
{'rnn': {'sizes': [(1, 4, 64), (1, 4, 64)]}}
"""
return {}
def act(self,
inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]],
role: str = "",
params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]:
"""Act according to the specified behavior (to be implemented by the inheriting classes)
Agents will call this method to obtain the decision to be taken given the state of the environment.
The classes that inherit from the latter must only implement the ``.__call__()`` method
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically np.ndarray or jax.Array
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:param params: Parameters used to compute the output (default: ``None``).
If ``None``, internal parameters will be used
:type params: jnp.array
:raises NotImplementedError: Child class must implement this method
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function for stochastic models
or None for deterministic models. The third component is a dictionary containing extra output values
:rtype: tuple of jax.Array, jax.Array or None, and dict
"""
raise NotImplementedError
def set_mode(self, mode: str) -> None:
"""Set the model mode (training or evaluation)
:param mode: Mode: ``"train"`` for training or ``"eval"`` for evaluation
:type mode: str
:raises ValueError: If the mode is not ``"train"`` or ``"eval"``
"""
if mode == "train":
self.training = True
elif mode == "eval":
self.training = False
else:
raise ValueError("Invalid mode. Use 'train' for training or 'eval' for evaluation")
def save(self, path: str, state_dict: Optional[dict] = None) -> None:
"""Save the model to the specified path
:param path: Path to save the model to
:type path: str
:param state_dict: State dictionary to save (default: ``None``).
If None, the model's state_dict will be saved
:type state_dict: dict, optional
Example::
# save the current model to the specified path
>>> model.save("/tmp/model.flax")
# TODO: save an older version of the model to the specified path
"""
# HACK: Does it make sense to use https://github.com/google/orbax
with open(path, "wb") as file:
file.write(flax.serialization.to_bytes(self.state_dict.params if state_dict is None else state_dict.params))
def load(self, path: str) -> None:
"""Load the model from the specified path
:param path: Path to load the model from
:type path: str
Example::
# load the model
>>> model = Model(observation_space, action_space)
>>> model.load("model.flax")
"""
# HACK: Does it make sense to use https://github.com/google/orbax
with open(path, "rb") as file:
params = flax.serialization.from_bytes(self.state_dict.params, file.read())
self.state_dict = self.state_dict.replace(params=params)
self.set_mode("eval")
def migrate(self,
state_dict: Optional[Mapping[str, Any]] = None,
path: Optional[str] = None,
name_map: Mapping[str, str] = {},
auto_mapping: bool = True,
verbose: bool = False) -> bool:
"""Migrate the specified extrernal model's state dict to the current model
.. warning::
This method is not implemented yet, just maintains compatibility with other ML frameworks
:raises NotImplementedError: Not implemented
"""
raise NotImplementedError
def freeze_parameters(self, freeze: bool = True) -> None:
"""Freeze or unfreeze internal parameters
.. note::
This method does nothing, just maintains compatibility with other ML frameworks
:param freeze: Freeze the internal parameters if True, otherwise unfreeze them (default: ``True``)
:type freeze: bool, optional
Example::
# freeze model parameters
>>> model.freeze_parameters(True)
# unfreeze model parameters
>>> model.freeze_parameters(False)
"""
pass
def update_parameters(self, model: flax.linen.Module, polyak: float = 1) -> None:
"""Update internal parameters by hard or soft (polyak averaging) update
- Hard update: :math:`\\theta = \\theta_{net}`
- Soft (polyak averaging) update: :math:`\\theta = (1 - \\rho) \\theta + \\rho \\theta_{net}`
:param model: Model used to update the internal parameters
:type model: flax.linen.Module (skrl.models.jax.Model)
:param polyak: Polyak hyperparameter between 0 and 1 (default: ``1``).
A hard update is performed when its value is 1
:type polyak: float, optional
Example::
# hard update (from source model)
>>> model.update_parameters(source_model)
# soft update (from source model)
>>> model.update_parameters(source_model, polyak=0.005)
"""
# hard update
if polyak == 1:
self.state_dict = self.state_dict.replace(params=model.state_dict.params)
# soft update
else:
# HACK: Does it make sense to use https://optax.readthedocs.io/en/latest/api.html?#optax.incremental_update
params = jax.tree_util.tree_map(lambda params, model_params: polyak * model_params + (1 - polyak) * params,
self.state_dict.params, model.state_dict.params)
self.state_dict = self.state_dict.replace(params=params)
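        # Illustrative note (assumed values, not part of the original source): with polyak=0.005,
        # a current parameter of 1.0 and a source parameter of 2.0, the soft update yields
        # (1 - 0.005) * 1.0 + 0.005 * 2.0 = 1.005, i.e. the parameters slowly track the source model.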
| 26,683 | Python | 45.732049 | 193 | 0.590901 |
Toni-SM/skrl/skrl/models/jax/__init__.py | from skrl.models.jax.base import Model # isort:skip
from skrl.models.jax.categorical import CategoricalMixin
from skrl.models.jax.deterministic import DeterministicMixin
from skrl.models.jax.gaussian import GaussianMixin
from skrl.models.jax.multicategorical import MultiCategoricalMixin
| 290 | Python | 40.571423 | 66 | 0.858621 |
Toni-SM/skrl/skrl/models/jax/deterministic.py | from typing import Any, Mapping, Optional, Tuple, Union
import gym
import gymnasium
import flax
import jax
import jax.numpy as jnp
import numpy as np
class DeterministicMixin:
def __init__(self, clip_actions: bool = False, role: str = "") -> None:
"""Deterministic mixin model (deterministic model)
:param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``)
:type clip_actions: bool, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
# define the model
>>> import flax.linen as nn
>>> from skrl.models.jax import Model, DeterministicMixin
>>>
>>> class Value(DeterministicMixin, Model):
... def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
... Model.__init__(self, observation_space, action_space, device, **kwargs)
... DeterministicMixin.__init__(self, clip_actions)
...
... @nn.compact # marks the given module method allowing inlined submodules
... def __call__(self, inputs, role):
            ...         x = nn.elu(nn.Dense(32)(inputs["states"]))
... x = nn.elu(nn.Dense(32)(x))
... x = nn.Dense(1)(x)
... return x, {}
...
>>> # given an observation_space: gym.spaces.Box with shape (60,)
>>> # and an action_space: gym.spaces.Box with shape (8,)
>>> model = Value(observation_space, action_space)
>>>
>>> print(model)
Value(
# attributes
observation_space = Box(-1.0, 1.0, (60,), float32)
action_space = Box(-1.0, 1.0, (8,), float32)
device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0)
)
"""
if not hasattr(self, "_d_clip_actions"):
self._d_clip_actions = {}
self._d_clip_actions[role] = clip_actions and (issubclass(type(self.action_space), gym.Space) or \
issubclass(type(self.action_space), gymnasium.Space))
if self._d_clip_actions[role]:
self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32)
self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32)
# https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError
flax.linen.Module.__post_init__(self)
def act(self,
inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]],
role: str = "",
params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]:
"""Act deterministically in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically np.ndarray or jax.Array
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:param params: Parameters used to compute the output (default: ``None``).
If ``None``, internal parameters will be used
:type params: jnp.array
:return: Model output. The first component is the action to be taken by the agent.
The second component is ``None``. The third component is a dictionary containing extra output values
:rtype: tuple of jax.Array, jax.Array or None, and dict
Example::
>>> # given a batch of sample states with shape (4096, 60)
>>> actions, _, outputs = model.act({"states": states})
>>> print(actions.shape, outputs)
(4096, 1) {}
"""
# map from observations/states to actions
actions, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role)
# clip actions
if self._d_clip_actions[role] if role in self._d_clip_actions else self._d_clip_actions[""]:
actions = jnp.clip(actions, a_min=self.clip_actions_min, a_max=self.clip_actions_max)
return actions, None, outputs
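        # Illustrative usage sketch (hypothetical variable names, not from the original source):
        #   >>> values, _, _ = model.act({"states": states}, role="value")
        #   >>> print(values.shape)  # (num_samples, 1) for a model with a single output unit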
| 4,542 | Python | 44.888888 | 129 | 0.57948 |
Toni-SM/skrl/skrl/models/jax/gaussian.py | from typing import Any, Mapping, Optional, Tuple, Union
from functools import partial
import gym
import gymnasium
import flax
import jax
import jax.numpy as jnp
import numpy as np
from skrl import config
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@partial(jax.jit, static_argnames=("reduction"))
def _gaussian(loc,
log_std,
log_std_min,
log_std_max,
clip_actions_min,
clip_actions_max,
taken_actions,
key,
reduction):
# clamp log standard deviations
log_std = jnp.clip(log_std, a_min=log_std_min, a_max=log_std_max)
# distribution
scale = jnp.exp(log_std)
# sample actions
actions = jax.random.normal(key, loc.shape) * scale + loc
# clip actions
actions = jnp.clip(actions, a_min=clip_actions_min, a_max=clip_actions_max)
# log of the probability density function
taken_actions = actions if taken_actions is None else taken_actions
log_prob = -jnp.square(taken_actions - loc) / (2 * jnp.square(scale)) - jnp.log(scale) - 0.5 * jnp.log(2 * jnp.pi)
if reduction is not None:
log_prob = reduction(log_prob, axis=-1)
if log_prob.ndim != actions.ndim:
log_prob = jnp.expand_dims(log_prob, -1)
return actions, log_prob, log_std, scale
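# Illustrative note (added for clarity, not part of the original implementation): the log-probability
# above is the element-wise log-density of a univariate Normal distribution,
#   log N(x; loc, scale) = -(x - loc)^2 / (2 * scale^2) - log(scale) - 0.5 * log(2 * pi),
# computed per action dimension and optionally reduced over the last axis.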
@jax.jit
def _entropy(scale):
return 0.5 + 0.5 * jnp.log(2 * jnp.pi) + jnp.log(scale)
class GaussianMixin:
def __init__(self,
clip_actions: bool = False,
clip_log_std: bool = True,
min_log_std: float = -20,
max_log_std: float = 2,
reduction: str = "sum",
role: str = "") -> None:
"""Gaussian mixin model (stochastic model)
:param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``)
:type clip_actions: bool, optional
:param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: ``True``)
:type clip_log_std: bool, optional
:param min_log_std: Minimum value of the log standard deviation if ``clip_log_std`` is True (default: ``-20``)
:type min_log_std: float, optional
:param max_log_std: Maximum value of the log standard deviation if ``clip_log_std`` is True (default: ``2``)
:type max_log_std: float, optional
        :param reduction: Reduction method for returning the log probability density function (default: ``"sum"``).
                          Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If ``"none"``, the log probability density
                          function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)``
        :type reduction: str, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:raises ValueError: If the reduction method is not valid
Example::
# define the model
>>> import flax.linen as nn
>>> from skrl.models.jax import Model, GaussianMixin
>>>
>>> class Policy(GaussianMixin, Model):
... def __init__(self, observation_space, action_space, device=None,
... clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
... Model.__init__(self, observation_space, action_space, device, **kwargs)
... GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
...
... def setup(self):
... self.layer_1 = nn.Dense(32)
... self.layer_2 = nn.Dense(32)
... self.layer_3 = nn.Dense(self.num_actions)
...
... self.log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions))
...
... def __call__(self, inputs, role):
... x = nn.elu(self.layer_1(inputs["states"]))
... x = nn.elu(self.layer_2(x))
... return self.layer_3(x), self.log_std_parameter, {}
...
>>> # given an observation_space: gym.spaces.Box with shape (60,)
>>> # and an action_space: gym.spaces.Box with shape (8,)
>>> model = Policy(observation_space, action_space)
>>>
>>> print(model)
Policy(
# attributes
observation_space = Box(-1.0, 1.0, (60,), float32)
action_space = Box(-1.0, 1.0, (8,), float32)
device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0)
)
"""
self._clip_actions = clip_actions and (issubclass(type(self.action_space), gym.Space) or \
issubclass(type(self.action_space), gymnasium.Space))
if self._clip_actions:
self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32)
self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32)
else:
self.clip_actions_min = -jnp.inf
self.clip_actions_max = jnp.inf
self._clip_log_std = clip_log_std
if self._clip_log_std:
self._log_std_min = min_log_std
self._log_std_max = max_log_std
else:
self._log_std_min = -jnp.inf
self._log_std_max = jnp.inf
if reduction not in ["mean", "sum", "prod", "none"]:
raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'")
self._reduction = jnp.mean if reduction == "mean" else jnp.sum if reduction == "sum" \
else jnp.prod if reduction == "prod" else None
self._i = 0
self._key = config.jax.key
# https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError
flax.linen.Module.__post_init__(self)
def act(self,
inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]],
role: str = "",
params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]:
"""Act stochastically in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically np.ndarray or jax.Array
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:param params: Parameters used to compute the output (default: ``None``).
If ``None``, internal parameters will be used
:type params: jnp.array
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function.
The third component is a dictionary containing the mean actions ``"mean_actions"``
and extra output values
:rtype: tuple of jax.Array, jax.Array or None, and dict
Example::
>>> # given a batch of sample states with shape (4096, 60)
>>> actions, log_prob, outputs = model.act({"states": states})
>>> print(actions.shape, log_prob.shape, outputs["mean_actions"].shape)
(4096, 8) (4096, 1) (4096, 8)
"""
self._i += 1
subkey = jax.random.fold_in(self._key, self._i)
inputs["key"] = subkey
# map from states/observations to mean actions and log standard deviations
mean_actions, log_std, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role)
actions, log_prob, log_std, stddev = _gaussian(mean_actions,
log_std,
self._log_std_min,
self._log_std_max,
self.clip_actions_min,
self.clip_actions_max,
inputs.get("taken_actions", None),
subkey,
self._reduction)
outputs["mean_actions"] = mean_actions
# avoid jax.errors.UnexpectedTracerError
outputs["log_std"] = log_std
outputs["stddev"] = stddev
return actions, log_prob, outputs
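        # Illustrative note (added for clarity, not part of the original implementation):
        # jax.random.fold_in derives a fresh, deterministic subkey from the base PRNG key and
        # the internal call counter, so successive calls sample different actions while
        # remaining reproducible for a given config.jax.key and call order.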
def get_entropy(self, stddev: jax.Array, role: str = "") -> jax.Array:
"""Compute and return the entropy of the model
        :param stddev: Model standard deviation
        :type stddev: jax.Array
        :param role: Role played by the model (default: ``""``)
        :type role: str, optional
:return: Entropy of the model
:rtype: jax.Array
Example::
# given a standard deviation array: stddev
>>> entropy = model.get_entropy(stddev)
>>> print(entropy.shape)
(4096, 8)
"""
return _entropy(stddev)
| 9,496 | Python | 42.764977 | 139 | 0.548757 |
Toni-SM/skrl/skrl/models/jax/multicategorical.py | from typing import Any, Mapping, Optional, Tuple, Union
from functools import partial
import flax
import jax
import jax.numpy as jnp
import numpy as np
from skrl import config
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@partial(jax.jit, static_argnames=("unnormalized_log_prob"))
def _categorical(net_output,
unnormalized_log_prob,
taken_actions,
key):
# normalize
if unnormalized_log_prob:
logits = net_output - jax.scipy.special.logsumexp(net_output, axis=-1, keepdims=True)
# probs = jax.nn.softmax(logits)
else:
probs = net_output / net_output.sum(-1, keepdims=True)
eps = jnp.finfo(probs.dtype).eps
logits = jnp.log(probs.clip(min=eps, max=1 - eps))
# sample actions
actions = jax.random.categorical(key, logits, axis=-1, shape=None)
# log of the probability density function
taken_actions = actions if taken_actions is None else taken_actions.astype(jnp.int32).reshape(-1)
log_prob = jax.nn.log_softmax(logits)[jnp.arange(taken_actions.shape[0]), taken_actions]
return actions.reshape(-1, 1), log_prob.reshape(-1, 1)
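# Illustrative note (assumed shapes, not part of the original source): for a sub-action with
# 3 categories and a batch of N samples, net_output has shape (N, 3) and the helper returns
# integer actions of shape (N, 1) together with their log-probabilities of shape (N, 1);
# when unnormalized_log_prob is True, the logits are normalized by subtracting their logsumexp.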
@jax.jit
def _entropy(logits):
logits = logits - jax.scipy.special.logsumexp(logits, axis=-1, keepdims=True)
logits = logits.clip(min=jnp.finfo(logits.dtype).min)
p_log_p = logits * jax.nn.softmax(logits)
return -p_log_p.sum(-1)
class MultiCategoricalMixin:
def __init__(self, unnormalized_log_prob: bool = True, reduction: str = "sum", role: str = "") -> None:
"""MultiCategorical mixin model (stochastic model)
        :param unnormalized_log_prob: Flag to indicate how the model's output should be interpreted (default: ``True``).
If True, the model's output is interpreted as unnormalized log probabilities
(it can be any real number), otherwise as normalized probabilities
(the output must be non-negative, finite and have a non-zero sum)
:type unnormalized_log_prob: bool, optional
        :param reduction: Reduction method for returning the log probability density function (default: ``"sum"``).
                          Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If ``"none"``, the log probability density
                          function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)``
        :type reduction: str, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:raises ValueError: If the reduction method is not valid
Example::
# define the model
>>> import flax.linen as nn
>>> from skrl.models.jax import Model, MultiCategoricalMixin
>>>
>>> class Policy(MultiCategoricalMixin, Model):
... def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, reduction="sum", **kwargs):
... Model.__init__(self, observation_space, action_space, device, **kwargs)
... MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction)
...
... @nn.compact # marks the given module method allowing inlined submodules
... def __call__(self, inputs, role):
... x = nn.elu(nn.Dense(32)(inputs["states"]))
... x = nn.elu(nn.Dense(32)(x))
... x = nn.Dense(self.num_actions)(x)
... return x, {}
...
>>> # given an observation_space: gym.spaces.Box with shape (4,)
>>> # and an action_space: gym.spaces.MultiDiscrete with nvec = [3, 2]
>>> model = Policy(observation_space, action_space)
>>>
>>> print(model)
Policy(
# attributes
observation_space = Box(-1.0, 1.0, (4,), float32)
action_space = MultiDiscrete([3 2])
device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0)
)
"""
self._unnormalized_log_prob = unnormalized_log_prob
if reduction not in ["mean", "sum", "prod", "none"]:
raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'")
self._reduction = jnp.mean if reduction == "mean" else jnp.sum if reduction == "sum" \
else jnp.prod if reduction == "prod" else None
self._i = 0
self._key = config.jax.key
self._action_space_nvec = np.cumsum(self.action_space.nvec).tolist()
self._action_space_shape = self._get_space_size(self.action_space, number_of_elements=False)
# https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError
flax.linen.Module.__post_init__(self)
def act(self,
inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]],
role: str = "",
params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]:
"""Act stochastically in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically np.ndarray or jax.Array
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:param params: Parameters used to compute the output (default: ``None``).
If ``None``, internal parameters will be used
:type params: jnp.array
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function.
The third component is a dictionary containing the network output ``"net_output"``
and extra output values
:rtype: tuple of jax.Array, jax.Array or None, and dict
Example::
>>> # given a batch of sample states with shape (4096, 4)
>>> actions, log_prob, outputs = model.act({"states": states})
>>> print(actions.shape, log_prob.shape, outputs["net_output"].shape)
(4096, 2) (4096, 1) (4096, 5)
"""
self._i += 1
subkey = jax.random.fold_in(self._key, self._i)
inputs["key"] = subkey
# map from states/observations to normalized probabilities or unnormalized log probabilities
net_output, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role)
# split inputs
net_outputs = jnp.split(net_output, self._action_space_nvec, axis=-1)
if "taken_actions" in inputs:
taken_actions = jnp.split(inputs["taken_actions"], self._action_space_shape, axis=-1)
else:
taken_actions = [None] * self._action_space_shape
# compute actions and log_prob
actions, log_prob = [], []
for _net_output, _taken_actions in zip(net_outputs, taken_actions):
_actions, _log_prob = _categorical(_net_output,
self._unnormalized_log_prob,
_taken_actions,
subkey)
actions.append(_actions)
log_prob.append(_log_prob)
actions = jnp.concatenate(actions, axis=-1)
log_prob = jnp.concatenate(log_prob, axis=-1)
if self._reduction is not None:
log_prob = self._reduction(log_prob, axis=-1)
if log_prob.ndim != actions.ndim:
log_prob = jnp.expand_dims(log_prob, -1)
outputs["net_output"] = net_output
# avoid jax.errors.UnexpectedTracerError
outputs["stddev"] = jnp.full_like(log_prob, jnp.nan)
return actions, log_prob, outputs
def get_entropy(self, logits: jax.Array, role: str = "") -> jax.Array:
"""Compute and return the entropy of the model
        :param logits: Model's logits
        :type logits: jax.Array
        :param role: Role played by the model (default: ``""``)
        :type role: str, optional
        :return: Entropy of the model
        :rtype: jax.Array
        Example::
            # given the model's logits
            >>> entropy = model.get_entropy(logits)
            >>> print(entropy.shape)
            (4096,)
"""
return _entropy(logits)
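# Illustrative sketch (not part of the original module): the bookkeeping `act` performs for a
# MultiDiscrete action space. For nvec = [3, 2] the network produces 3 + 2 = 5 logits per sample,
# which are split into one block per sub-action before sampling. The values below are made-up.
def _example_multidiscrete_split():
    import numpy as np
    import jax.numpy as jnp
    nvec = [3, 2]
    net_output = jnp.ones((4, sum(nvec)))  # batch of 4 samples, 5 logits each
    split_indices = np.cumsum(nvec).tolist()  # [3, 5], as stored in self._action_space_nvec
    net_outputs = jnp.split(net_output, split_indices, axis=-1)
    # first block: shape (4, 3), second block: shape (4, 2); the trailing empty block produced
    # by the last index is ignored by the zip in `act`
    return [o.shape for o in net_outputs[:len(nvec)]]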
| 8,769 | Python | 44.440414 | 140 | 0.583647 |
Toni-SM/skrl/skrl/models/jax/categorical.py | from typing import Any, Mapping, Optional, Tuple, Union
from functools import partial
import flax
import jax
import jax.numpy as jnp
import numpy as np
from skrl import config
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@partial(jax.jit, static_argnames=("unnormalized_log_prob",))
def _categorical(net_output,
unnormalized_log_prob,
taken_actions,
key):
# normalize
if unnormalized_log_prob:
logits = net_output - jax.scipy.special.logsumexp(net_output, axis=-1, keepdims=True)
# probs = jax.nn.softmax(logits)
else:
probs = net_output / net_output.sum(-1, keepdims=True)
eps = jnp.finfo(probs.dtype).eps
logits = jnp.log(probs.clip(min=eps, max=1 - eps))
# sample actions
actions = jax.random.categorical(key, logits, axis=-1, shape=None)
# log of the probability density function
taken_actions = actions if taken_actions is None else taken_actions.astype(jnp.int32).reshape(-1)
log_prob = jax.nn.log_softmax(logits)[jnp.arange(taken_actions.shape[0]), taken_actions]
return actions.reshape(-1, 1), log_prob.reshape(-1, 1)
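# Illustrative sketch (not part of the original module): the `unnormalized_log_prob=False` branch
# above treats the network output as non-negative probabilities, normalizes them to sum to 1 and
# converts them to logits by clipping and taking the log. The probabilities below are made-up.
def _example_normalized_probabilities():
    import jax
    import jax.numpy as jnp
    key = jax.random.PRNGKey(0)
    probs = jnp.array([[0.2, 0.8], [0.5, 0.5]])  # rows already sum to 1
    actions, log_prob = _categorical(probs, False, None, key)
    # actions: shape (2, 1); log_prob: shape (2, 1), e.g. log(0.8) if action 1 was sampled
    return actions, log_prob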
@jax.jit
def _entropy(logits):
logits = logits - jax.scipy.special.logsumexp(logits, axis=-1, keepdims=True)
logits = logits.clip(min=jnp.finfo(logits.dtype).min)
p_log_p = logits * jax.nn.softmax(logits)
return -p_log_p.sum(-1)
class CategoricalMixin:
def __init__(self, unnormalized_log_prob: bool = True, role: str = "") -> None:
"""Categorical mixin model (stochastic model)
        :param unnormalized_log_prob: Flag to indicate how the model's output is to be interpreted (default: ``True``).
If True, the model's output is interpreted as unnormalized log probabilities
(it can be any real number), otherwise as normalized probabilities
(the output must be non-negative, finite and have a non-zero sum)
:type unnormalized_log_prob: bool, optional
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
Example::
# define the model
>>> import flax.linen as nn
>>> from skrl.models.jax import Model, CategoricalMixin
>>>
>>> class Policy(CategoricalMixin, Model):
... def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs):
... Model.__init__(self, observation_space, action_space, device, **kwargs)
... CategoricalMixin.__init__(self, unnormalized_log_prob)
...
... @nn.compact # marks the given module method allowing inlined submodules
... def __call__(self, inputs, role):
... x = nn.elu(nn.Dense(32)(inputs["states"]))
... x = nn.elu(nn.Dense(32)(x))
... x = nn.Dense(self.num_actions)(x)
... return x, {}
...
>>> # given an observation_space: gym.spaces.Box with shape (4,)
>>> # and an action_space: gym.spaces.Discrete with n = 2
>>> model = Policy(observation_space, action_space)
>>>
>>> print(model)
Policy(
# attributes
observation_space = Box(-1.0, 1.0, (4,), float32)
action_space = Discrete(2)
device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0)
)
"""
self._unnormalized_log_prob = unnormalized_log_prob
self._i = 0
self._key = config.jax.key
# https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError
flax.linen.Module.__post_init__(self)
def act(self,
inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]],
role: str = "",
params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]:
"""Act stochastically in response to the state of the environment
:param inputs: Model inputs. The most common keys are:
- ``"states"``: state of the environment used to make the decision
- ``"taken_actions"``: actions taken by the policy for the given states
:type inputs: dict where the values are typically np.ndarray or jax.Array
        :param role: Role played by the model (default: ``""``)
:type role: str, optional
:param params: Parameters used to compute the output (default: ``None``).
If ``None``, internal parameters will be used
:type params: jnp.array
:return: Model output. The first component is the action to be taken by the agent.
The second component is the log of the probability density function.
The third component is a dictionary containing the network output ``"net_output"``
and extra output values
:rtype: tuple of jax.Array, jax.Array or None, and dict
Example::
>>> # given a batch of sample states with shape (4096, 4)
>>> actions, log_prob, outputs = model.act({"states": states})
>>> print(actions.shape, log_prob.shape, outputs["net_output"].shape)
(4096, 1) (4096, 1) (4096, 2)
"""
self._i += 1
subkey = jax.random.fold_in(self._key, self._i)
inputs["key"] = subkey
# map from states/observations to normalized probabilities or unnormalized log probabilities
net_output, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role)
actions, log_prob = _categorical(net_output,
self._unnormalized_log_prob,
inputs.get("taken_actions", None),
subkey)
outputs["net_output"] = net_output
# avoid jax.errors.UnexpectedTracerError
outputs["stddev"] = jnp.full_like(log_prob, jnp.nan)
return actions, log_prob, outputs
def get_entropy(self, logits: jax.Array, role: str = "") -> jax.Array:
"""Compute and return the entropy of the model
        :param logits: Model's logits
        :type logits: jax.Array
        :param role: Role played by the model (default: ``""``)
        :type role: str, optional
        :return: Entropy of the model
        :rtype: jax.Array
        Example::
            # given the model's logits
            >>> entropy = model.get_entropy(logits)
            >>> print(entropy.shape)
            (4096,)
"""
return _entropy(logits)
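# Illustrative sketch (not part of the original module): `act` derives a fresh PRNG subkey on every
# call by folding an increasing counter into the module key (`jax.random.fold_in` above), so
# repeated calls draw different samples without the caller having to manage split keys. The key
# below is made-up.
def _example_fold_in_subkeys():
    import jax
    key = jax.random.PRNGKey(0)
    # each subkey is deterministic given (key, i) but different for every counter value
    return [jax.random.fold_in(key, i) for i in range(1, 4)]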
| 6,846 | Python | 42.062893 | 129 | 0.580193 |
Toni-SM/skrl/skrl/multi_agents/torch/base.py | from typing import Any, Mapping, Optional, Sequence, Union
import collections
import copy
import datetime
import os
import gym
import gymnasium
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from skrl import logger
from skrl.memories.torch import Memory
from skrl.models.torch import Model
class MultiAgent:
def __init__(self,
possible_agents: Sequence[str],
models: Mapping[str, Mapping[str, Model]],
memories: Optional[Mapping[str, Memory]] = None,
observation_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None,
action_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None) -> None:
"""Base class that represent a RL multi-agent
:param possible_agents: Name of all possible agents the environment could generate
:type possible_agents: list of str
:param models: Models used by the agents.
External keys are environment agents' names. Internal keys are the models required by the algorithm
:type models: nested dictionary of skrl.models.torch.Model
        :param memories: Memories to store the transitions.
:type memories: dictionary of skrl.memory.torch.Memory, optional
:param observation_spaces: Observation/state spaces or shapes (default: ``None``)
:type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param action_spaces: Action spaces or shapes (default: ``None``)
:type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
self.possible_agents = possible_agents
self.num_agents = len(self.possible_agents)
self.models = models
self.memories = memories
self.observation_spaces = observation_spaces
self.action_spaces = action_spaces
self.cfg = cfg if cfg is not None else {}
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device)
# convert the models to their respective device
for _models in self.models.values():
for model in _models.values():
if model is not None:
model.to(model.device)
self.tracking_data = collections.defaultdict(list)
self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000)
self._track_rewards = collections.deque(maxlen=100)
self._track_timesteps = collections.deque(maxlen=100)
self._cumulative_rewards = None
self._cumulative_timesteps = None
self.training = True
# checkpoint
self.checkpoint_modules = {uid: {} for uid in self.possible_agents}
self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000)
self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False)
self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": True, "modules": {}}
# experiment directory
directory = self.cfg.get("experiment", {}).get("directory", "")
experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "")
if not directory:
directory = os.path.join(os.getcwd(), "runs")
if not experiment_name:
experiment_name = f"{datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S-%f')}_{self.__class__.__name__}"
self.experiment_dir = os.path.join(directory, experiment_name)
def __str__(self) -> str:
"""Generate a representation of the agent as string
:return: Representation of the agent as string
:rtype: str
"""
string = f"Multi-agent: {repr(self)}"
for k, v in self.cfg.items():
if type(v) is dict:
string += f"\n |-- {k}"
for k1, v1 in v.items():
string += f"\n | |-- {k1}: {v1}"
else:
string += f"\n |-- {k}: {v}"
return string
def _as_dict(self, _input: Any) -> Mapping[str, Any]:
"""Convert a configuration value into a dictionary according to the number of agents
:param _input: Configuration value
:type _input: Any
        :raises ValueError: If the configuration value is a dictionary whose keys do not cover all possible agents
        :return: Configuration value as a dictionary
        :rtype: dict of any configuration value
"""
if _input and isinstance(_input, collections.abc.Mapping):
if set(_input) < set(self.possible_agents):
logger.error("The configuration value does not match possible agents")
raise ValueError("The configuration value does not match possible agents")
elif set(_input) >= set(self.possible_agents):
return _input
return {name: copy.deepcopy(_input) for name in self.possible_agents}
def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any:
"""Empty preprocess method
This method is defined because PyTorch multiprocessing can't pickle lambdas
:param _input: Input to preprocess
:type _input: Any
:return: Preprocessed input
:rtype: Any
"""
return _input
def _get_internal_value(self, _module: Any) -> Any:
"""Get internal module/variable state/value
:param _module: Module or variable
:type _module: Any
:return: Module/variable state/value
:rtype: Any
"""
return _module.state_dict() if hasattr(_module, "state_dict") else _module
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
This method should be called before the agent is used.
        It will initialize the TensorBoard writer (and optionally Weights & Biases) and create the checkpoints directory
:param trainer_cfg: Trainer configuration
:type trainer_cfg: dict, optional
"""
# setup Weights & Biases
if self.cfg.get("experiment", {}).get("wandb", False):
# save experiment config
trainer_cfg = trainer_cfg if trainer_cfg is not None else {}
try:
models_cfg = {uid: {k: v.net._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents}
except AttributeError:
models_cfg = {uid: {k: v._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents}
            config = {**self.cfg, **trainer_cfg, **models_cfg}
# set default values
wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {}))
wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1])
wandb_kwargs.setdefault("sync_tensorboard", True)
wandb_kwargs.setdefault("config", {})
wandb_kwargs["config"].update(config)
# init Weights & Biases
import wandb
wandb.init(**wandb_kwargs)
# main entry to log data for consumption and visualization by TensorBoard
if self.write_interval > 0:
self.writer = SummaryWriter(log_dir=self.experiment_dir)
if self.checkpoint_interval > 0:
os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True)
def track_data(self, tag: str, value: float) -> None:
"""Track data to TensorBoard
Currently only scalar data are supported
:param tag: Data identifier (e.g. 'Loss / policy loss')
:type tag: str
:param value: Value to track
:type value: float
"""
self.tracking_data[tag].append(value)
def write_tracking_data(self, timestep: int, timesteps: int) -> None:
"""Write tracking data to TensorBoard
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
for k, v in self.tracking_data.items():
if k.endswith("(min)"):
self.writer.add_scalar(k, np.min(v), timestep)
elif k.endswith("(max)"):
self.writer.add_scalar(k, np.max(v), timestep)
else:
self.writer.add_scalar(k, np.mean(v), timestep)
# reset data containers for next iteration
self._track_rewards.clear()
self._track_timesteps.clear()
self.tracking_data.clear()
def write_checkpoint(self, timestep: int, timesteps: int) -> None:
"""Write checkpoint (modules) to disk
The checkpoints are saved in the directory 'checkpoints' in the experiment directory.
The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time.
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"))
# separated modules
if self.checkpoint_store_separately:
for uid in self.possible_agents:
for name, module in self.checkpoint_modules[uid].items():
torch.save(self._get_internal_value(module),
os.path.join(self.experiment_dir, "checkpoints", f"{uid}_{name}_{tag}.pt"))
# whole agent
else:
modules = {uid: {name: self._get_internal_value(module) for name, module in self.checkpoint_modules[uid].items()} \
for uid in self.possible_agents}
torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pt"))
# best modules
if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]:
# separated modules
if self.checkpoint_store_separately:
for uid in self.possible_agents:
for name in self.checkpoint_modules[uid].keys():
torch.save(self.checkpoint_best_modules["modules"][uid][name],
os.path.join(self.experiment_dir, "checkpoints", f"best_{uid}_{name}.pt"))
# whole agent
else:
modules = {uid: {name: self.checkpoint_best_modules["modules"][uid][name] \
for name in self.checkpoint_modules[uid].keys()} for uid in self.possible_agents}
torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", "best_agent.pt"))
self.checkpoint_best_modules["saved"] = True
def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: dictionary of torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:raises NotImplementedError: The method is not implemented by the inheriting classes
:return: Actions
:rtype: torch.Tensor
"""
raise NotImplementedError
def record_transition(self,
states: Mapping[str, torch.Tensor],
actions: Mapping[str, torch.Tensor],
rewards: Mapping[str, torch.Tensor],
next_states: Mapping[str, torch.Tensor],
terminated: Mapping[str, torch.Tensor],
truncated: Mapping[str, torch.Tensor],
infos: Mapping[str, Any],
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory (to be implemented by the inheriting classes)
Inheriting classes must call this method to record episode information (rewards, timesteps, etc.).
In addition to recording environment transition (such as states, rewards, etc.), agent information can be recorded.
:param states: Observations/states of the environment used to make the decision
:type states: dictionary of torch.Tensor
:param actions: Actions taken by the agent
:type actions: dictionary of torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: dictionary of torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: dictionary of torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: dictionary of torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: dictionary of torch.Tensor
:param infos: Additional information about the environment
:type infos: dictionary of any supported type
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
_rewards = next(iter(rewards.values()))
# compute the cumulative sum of the rewards and timesteps
if self._cumulative_rewards is None:
self._cumulative_rewards = torch.zeros_like(_rewards, dtype=torch.float32)
self._cumulative_timesteps = torch.zeros_like(_rewards, dtype=torch.int32)
self._cumulative_rewards.add_(_rewards)
self._cumulative_timesteps.add_(1)
# check ended episodes
finished_episodes = (next(iter(terminated.values())) + next(iter(truncated.values()))).nonzero(as_tuple=False)
if finished_episodes.numel():
            # store cumulative rewards and timesteps
self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist())
self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist())
# reset the cumulative rewards and timesteps
self._cumulative_rewards[finished_episodes] = 0
self._cumulative_timesteps[finished_episodes] = 0
# record data
if self.write_interval > 0:
self.tracking_data["Reward / Instantaneous reward (max)"].append(torch.max(_rewards).item())
self.tracking_data["Reward / Instantaneous reward (min)"].append(torch.min(_rewards).item())
self.tracking_data["Reward / Instantaneous reward (mean)"].append(torch.mean(_rewards).item())
if len(self._track_rewards):
track_rewards = np.array(self._track_rewards)
track_timesteps = np.array(self._track_timesteps)
self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards))
self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards))
self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards))
self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps))
self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps))
self.tracking_data["Episode / Total timesteps (mean)"].append(np.mean(track_timesteps))
def set_mode(self, mode: str) -> None:
"""Set the model mode (training or evaluation)
:param mode: Mode: 'train' for training or 'eval' for evaluation
:type mode: str
"""
for _models in self.models.values():
for model in _models.values():
if model is not None:
model.set_mode(mode)
def set_running_mode(self, mode: str) -> None:
"""Set the current running mode (training or evaluation)
This method sets the value of the ``training`` property (boolean).
This property can be used to know if the agent is running in training or evaluation mode.
:param mode: Mode: 'train' for training or 'eval' for evaluation
:type mode: str
"""
self.training = mode == "train"
def save(self, path: str) -> None:
"""Save the agent to the specified path
:param path: Path to save the model to
:type path: str
"""
modules = {uid: {name: self._get_internal_value(module) for name, module in self.checkpoint_modules[uid].items()} \
for uid in self.possible_agents}
torch.save(modules, path)
def load(self, path: str) -> None:
"""Load the model from the specified path
The final storage device is determined by the constructor of the model
:param path: Path to load the model from
:type path: str
"""
modules = torch.load(path, map_location=self.device)
if type(modules) is dict:
for uid in self.possible_agents:
if uid not in modules:
logger.warning(f"Cannot load modules for {uid}. The agent doesn't have such an instance")
continue
for name, data in modules[uid].items():
module = self.checkpoint_modules[uid].get(name, None)
if module is not None:
if hasattr(module, "load_state_dict"):
module.load_state_dict(data)
if hasattr(module, "eval"):
module.eval()
else:
raise NotImplementedError
else:
logger.warning(f"Cannot load the {uid}:{name} module. The agent doesn't have such an instance")
def migrate(self,
path: str,
name_map: Mapping[str, Mapping[str, str]] = {},
auto_mapping: bool = True,
verbose: bool = False) -> bool:
"""Migrate the specified extrernal checkpoint to the current agent
The final storage device is determined by the constructor of the agent.
For ambiguous models (where 2 or more parameters, for source or current model, have equal shape)
it is necessary to define the ``name_map``, at least for those parameters, to perform the migration successfully
:param path: Path to the external checkpoint to migrate from
:type path: str
:param name_map: Name map to use for the migration (default: ``{}``).
Keys are the current parameter names and values are the external parameter names
:type name_map: Mapping[str, Mapping[str, str]], optional
:param auto_mapping: Automatically map the external state dict to the current state dict (default: ``True``)
:type auto_mapping: bool, optional
:param verbose: Show model names and migration (default: ``False``)
:type verbose: bool, optional
:raises ValueError: If the correct file type cannot be identified from the ``path`` parameter
:return: True if the migration was successful, False otherwise.
Migration is successful if all parameters of the current model are found in the external model
:rtype: bool
"""
raise NotImplementedError
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
timestep += 1
# update best models and write checkpoints
if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval:
# update best models
reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31))
if reward > self.checkpoint_best_modules["reward"]:
self.checkpoint_best_modules["timestep"] = timestep
self.checkpoint_best_modules["reward"] = reward
self.checkpoint_best_modules["saved"] = False
self.checkpoint_best_modules["modules"] = {uid: {k: copy.deepcopy(self._get_internal_value(v)) \
for k, v in self.checkpoint_modules[uid].items()} for uid in self.possible_agents}
# write checkpoints
self.write_checkpoint(timestep, timesteps)
# write to tensorboard
if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval:
self.write_tracking_data(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:raises NotImplementedError: The method is not implemented by the inheriting classes
"""
raise NotImplementedError
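# Illustrative sketch (not part of the library): the minimal surface a concrete multi-agent
# implementation provides on top of this base class. The random policy and the empty update
# below are placeholders, not a real algorithm.
class _ExampleRandomMultiAgent(MultiAgent):
    def act(self, states, timestep, timesteps):
        # placeholder decision rule: query each agent's policy model for random actions
        return {uid: self.models[uid]["policy"].random_act({"states": states[uid]}, role="policy")[0]
                for uid in self.possible_agents}
    def _update(self, timestep, timesteps):
        # a real algorithm would sample its memories and optimize its models here
        pass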
| 22,137 | Python | 45.024948 | 128 | 0.610787 |
Toni-SM/skrl/skrl/multi_agents/torch/__init__.py | from skrl.multi_agents.torch.base import MultiAgent
| 52 | Python | 25.499987 | 51 | 0.846154 |
Toni-SM/skrl/skrl/multi_agents/torch/mappo/mappo.py | from typing import Any, Mapping, Optional, Sequence, Union
import copy
import itertools
import gym
import gymnasium
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.memories.torch import Memory
from skrl.models.torch import Model
from skrl.multi_agents.torch import MultiAgent
from skrl.resources.schedulers.torch import KLAdaptiveLR
# [start-config-dict-torch]
MAPPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"shared_state_preprocessor": None, # shared state preprocessor class (see skrl.resources.preprocessors)
"shared_state_preprocessor_kwargs": {}, # shared state preprocessor's kwargs (e.g. {"size": env.shared_observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 1.0, # value loss scaling factor
"kl_threshold": 0, # KL divergence threshold for early stopping
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-torch]
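# Illustrative sketch (not part of the library): overriding the defaults above. The MAPPO
# constructor deep-copies MAPPO_DEFAULT_CONFIG and updates it with the user-provided `cfg`,
# so passing only the keys that differ (e.g. {"rollouts": 32}) works as well. The values
# below are made-up.
def _example_mappo_config():
    cfg = copy.deepcopy(MAPPO_DEFAULT_CONFIG)
    cfg["rollouts"] = 32
    cfg["mini_batches"] = 4
    cfg["experiment"]["write_interval"] = 500
    return cfg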
class MAPPO(MultiAgent):
def __init__(self,
possible_agents: Sequence[str],
models: Mapping[str, Model],
memories: Optional[Mapping[str, Memory]] = None,
observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None,
shared_observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None) -> None:
"""Multi-Agent Proximal Policy Optimization (MAPPO)
https://arxiv.org/abs/2103.01955
:param possible_agents: Name of all possible agents the environment could generate
:type possible_agents: list of str
:param models: Models used by the agents.
External keys are environment agents' names. Internal keys are the models required by the algorithm
:type models: nested dictionary of skrl.models.torch.Model
        :param memories: Memories to store the transitions.
:type memories: dictionary of skrl.memory.torch.Memory, optional
:param observation_spaces: Observation/state spaces or shapes (default: ``None``)
:type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param action_spaces: Action spaces or shapes (default: ``None``)
:type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
        :param shared_observation_spaces: Shared observation/state spaces or shapes (default: ``None``)
:type shared_observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
"""
_cfg = copy.deepcopy(MAPPO_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(possible_agents=possible_agents,
models=models,
memories=memories,
observation_spaces=observation_spaces,
action_spaces=action_spaces,
device=device,
cfg=_cfg)
self.shared_observation_spaces = shared_observation_spaces
# models
self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents}
self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents}
for uid in self.possible_agents:
self.checkpoint_modules[uid]["policy"] = self.policies[uid]
self.checkpoint_modules[uid]["value"] = self.values[uid]
# configuration
self._learning_epochs = self._as_dict(self.cfg["learning_epochs"])
self._mini_batches = self._as_dict(self.cfg["mini_batches"])
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"])
self._ratio_clip = self._as_dict(self.cfg["ratio_clip"])
self._value_clip = self._as_dict(self.cfg["value_clip"])
self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"])
self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"])
self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"])
self._kl_threshold = self._as_dict(self.cfg["kl_threshold"])
self._learning_rate = self._as_dict(self.cfg["learning_rate"])
self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"])
self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"])
self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"])
self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"])
self._shared_state_preprocessor = self._as_dict(self.cfg["shared_state_preprocessor"])
self._shared_state_preprocessor_kwargs = self._as_dict(self.cfg["shared_state_preprocessor_kwargs"])
self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"])
self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"])
self._discount_factor = self._as_dict(self.cfg["discount_factor"])
self._lambda = self._as_dict(self.cfg["lambda"])
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"])
# set up optimizer and learning rate scheduler
self.optimizers = {}
self.schedulers = {}
for uid in self.possible_agents:
policy = self.policies[uid]
value = self.values[uid]
if policy is not None and value is not None:
if policy is value:
optimizer = torch.optim.Adam(policy.parameters(), lr=self._learning_rate[uid])
else:
optimizer = torch.optim.Adam(itertools.chain(policy.parameters(), value.parameters()),
lr=self._learning_rate[uid])
self.optimizers[uid] = optimizer
if self._learning_rate_scheduler[uid] is not None:
self.schedulers[uid] = self._learning_rate_scheduler[uid](optimizer, **self._learning_rate_scheduler_kwargs[uid])
self.checkpoint_modules[uid]["optimizer"] = self.optimizers[uid]
# set up preprocessors
if self._state_preprocessor[uid] is not None:
self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid]
else:
self._state_preprocessor[uid] = self._empty_preprocessor
if self._shared_state_preprocessor[uid] is not None:
self._shared_state_preprocessor[uid] = self._shared_state_preprocessor[uid](**self._shared_state_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["shared_state_preprocessor"] = self._shared_state_preprocessor[uid]
else:
self._shared_state_preprocessor[uid] = self._empty_preprocessor
if self._value_preprocessor[uid] is not None:
self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid]
else:
self._value_preprocessor[uid] = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memories
for uid in self.possible_agents:
self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=torch.float32)
self.memories[uid].create_tensor(name="shared_states", size=self.shared_observation_spaces[uid], dtype=torch.float32)
self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=torch.float32)
self.memories[uid].create_tensor(name="rewards", size=1, dtype=torch.float32)
self.memories[uid].create_tensor(name="terminated", size=1, dtype=torch.bool)
self.memories[uid].create_tensor(name="log_prob", size=1, dtype=torch.float32)
self.memories[uid].create_tensor(name="values", size=1, dtype=torch.float32)
self.memories[uid].create_tensor(name="returns", size=1, dtype=torch.float32)
self.memories[uid].create_tensor(name="advantages", size=1, dtype=torch.float32)
# tensors sampled during training
self._tensors_names = ["states", "shared_states", "actions", "log_prob", "values", "returns", "advantages"]
# create temporary variables needed for storage and computation
self._current_log_prob = []
self._current_shared_next_states = []
def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policies
:param states: Environment's states
:type states: dictionary of torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: torch.Tensor
"""
# # sample random actions
# # TODO: fix for stochasticity, rnn and log_prob
# if timestep < self._random_timesteps:
# return self.policy.random_act({"states": states}, role="policy")
# sample stochastic actions
data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents]
actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)}
log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)}
outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)}
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: Mapping[str, torch.Tensor],
actions: Mapping[str, torch.Tensor],
rewards: Mapping[str, torch.Tensor],
next_states: Mapping[str, torch.Tensor],
terminated: Mapping[str, torch.Tensor],
truncated: Mapping[str, torch.Tensor],
infos: Mapping[str, Any],
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: dictionary of torch.Tensor
:param actions: Actions taken by the agent
:type actions: dictionary of torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: dictionary of torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: dictionary of torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: dictionary of torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: dictionary of torch.Tensor
:param infos: Additional information about the environment
:type infos: dictionary of any supported type
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memories:
shared_states = infos["shared_states"]
self._current_shared_next_states = infos["shared_next_states"]
for uid in self.possible_agents:
# reward shaping
if self._rewards_shaper is not None:
rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps)
# compute values
values, _, _ = self.values[uid].act({"states": self._shared_state_preprocessor[uid](shared_states[uid])}, role="value")
values = self._value_preprocessor[uid](values, inverse=True)
                # time-limit (truncation) bootstrapping
if self._time_limit_bootstrap[uid]:
rewards[uid] += self._discount_factor[uid] * values * truncated[uid]
                # store transition in memory
self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid],
terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values,
shared_states=shared_states[uid])
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
def compute_gae(rewards: torch.Tensor,
dones: torch.Tensor,
values: torch.Tensor,
next_values: torch.Tensor,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> torch.Tensor:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: torch.Tensor
:param dones: Signals to indicate that episodes have ended
:type dones: torch.Tensor
:param values: Values obtained by the agent
:type values: torch.Tensor
:param next_values: Next values obtained by the agent
:type next_values: torch.Tensor
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
:return: Generalized Advantage Estimator
:rtype: torch.Tensor
"""
advantage = 0
advantages = torch.zeros_like(rewards)
not_dones = dones.logical_not()
memory_size = rewards.shape[0]
# advantages computation
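            # A_i = (r_i - V_i) + gamma * not_done_i * (V_{i+1} + lam * A_{i+1})  (GAE recurrence)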
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else last_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
for uid in self.possible_agents:
policy = self.policies[uid]
value = self.values[uid]
memory = self.memories[uid]
# compute returns and advantages
with torch.no_grad():
value.train(False)
last_values, _, _ = value.act({"states": self._shared_state_preprocessor[uid](self._current_shared_next_states[uid].float())}, role="value")
value.train(True)
last_values = self._value_preprocessor[uid](last_values, inverse=True)
values = memory.get_tensor_by_name("values")
returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"),
dones=memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor[uid],
lambda_coefficient=self._lambda[uid])
memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True))
memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True))
memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid])
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs[uid]):
kl_divergences = []
# mini-batches loop
for sampled_states, sampled_shared_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages \
in sampled_batches:
sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch)
sampled_shared_states = self._shared_state_preprocessor[uid](sampled_shared_states, train=not epoch)
_, next_log_prob, _ = policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy")
# compute approximate KL divergence
with torch.no_grad():
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean()
kl_divergences.append(kl_divergence)
# early stopping with KL divergence
if self._kl_threshold[uid] and kl_divergence > self._kl_threshold[uid]:
break
# compute entropy loss
if self._entropy_loss_scale[uid]:
entropy_loss = -self._entropy_loss_scale[uid] * policy.get_entropy(role="policy").mean()
else:
entropy_loss = 0
# compute policy loss
ratio = torch.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip[uid], 1.0 + self._ratio_clip[uid])
policy_loss = -torch.min(surrogate, surrogate_clipped).mean()
# compute value loss
predicted_values, _, _ = value.act({"states": sampled_shared_states}, role="value")
                    if self._clip_predicted_values[uid]:
predicted_values = sampled_values + torch.clip(predicted_values - sampled_values,
min=-self._value_clip[uid],
max=self._value_clip[uid])
value_loss = self._value_loss_scale[uid] * F.mse_loss(sampled_returns, predicted_values)
# optimization step
self.optimizers[uid].zero_grad()
(policy_loss + entropy_loss + value_loss).backward()
if self._grad_norm_clip[uid] > 0:
if policy is value:
nn.utils.clip_grad_norm_(policy.parameters(), self._grad_norm_clip[uid])
else:
nn.utils.clip_grad_norm_(itertools.chain(policy.parameters(), value.parameters()), self._grad_norm_clip[uid])
self.optimizers[uid].step()
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale[uid]:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
if self._learning_rate_scheduler[uid]:
if isinstance(self.schedulers[uid], KLAdaptiveLR):
self.schedulers[uid].step(torch.tensor(kl_divergences).mean())
else:
self.schedulers[uid].step()
# record data
self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
            if self._entropy_loss_scale[uid]:
self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
self.track_data(f"Policy / Standard deviation ({uid})", policy.distribution(role="policy").stddev.mean().item())
if self._learning_rate_scheduler[uid]:
self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid].get_last_lr()[0])
| 25,533 | Python | 51.110204 | 156 | 0.596013 |
Toni-SM/skrl/skrl/multi_agents/torch/mappo/__init__.py | from skrl.multi_agents.torch.mappo.mappo import MAPPO, MAPPO_DEFAULT_CONFIG
| 76 | Python | 37.499981 | 75 | 0.828947 |
Toni-SM/skrl/skrl/multi_agents/torch/ippo/__init__.py | from skrl.multi_agents.torch.ippo.ippo import IPPO, IPPO_DEFAULT_CONFIG
| 72 | Python | 35.499982 | 71 | 0.819444 |
Toni-SM/skrl/skrl/multi_agents/torch/ippo/ippo.py | from typing import Any, Mapping, Optional, Sequence, Union
import copy
import itertools
import gym
import gymnasium
import torch
import torch.nn as nn
import torch.nn.functional as F
from skrl.memories.torch import Memory
from skrl.models.torch import Model
from skrl.multi_agents.torch import MultiAgent
from skrl.resources.schedulers.torch import KLAdaptiveLR
# [start-config-dict-torch]
IPPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 1.0, # value loss scaling factor
"kl_threshold": 0, # KL divergence threshold for early stopping
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-torch]
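# Illustrative sketch (not part of the library): most IPPO hyperparameters are passed through
# `MultiAgent._as_dict`, so they can be given either as a single value shared by all agents or as
# a dictionary keyed by agent name (the keys must cover all possible agents). The agent names and
# values below are made-up.
def _example_per_agent_config():
    cfg = copy.deepcopy(IPPO_DEFAULT_CONFIG)
    cfg["learning_rate"] = {"agent_0": 1e-3, "agent_1": 5e-4}  # one learning rate per agent
    cfg["entropy_loss_scale"] = 0.01  # a single value is replicated for every agent
    return cfg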
class IPPO(MultiAgent):
def __init__(self,
possible_agents: Sequence[str],
models: Mapping[str, Model],
memories: Optional[Mapping[str, Memory]] = None,
observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
device: Optional[Union[str, torch.device]] = None,
cfg: Optional[dict] = None) -> None:
"""Independent Proximal Policy Optimization (IPPO)
https://arxiv.org/abs/2011.09533
:param possible_agents: Name of all possible agents the environment could generate
:type possible_agents: list of str
:param models: Models used by the agents.
External keys are environment agents' names. Internal keys are the models required by the algorithm
:type models: nested dictionary of skrl.models.torch.Model
        :param memories: Memories to store the transitions.
:type memories: dictionary of skrl.memory.torch.Memory, optional
:param observation_spaces: Observation/state spaces or shapes (default: ``None``)
:type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param action_spaces: Action spaces or shapes (default: ``None``)
:type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
_cfg = copy.deepcopy(IPPO_DEFAULT_CONFIG)
_cfg.update(cfg if cfg is not None else {})
super().__init__(possible_agents=possible_agents,
models=models,
memories=memories,
observation_spaces=observation_spaces,
action_spaces=action_spaces,
device=device,
cfg=_cfg)
# models
self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents}
self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents}
for uid in self.possible_agents:
self.checkpoint_modules[uid]["policy"] = self.policies[uid]
self.checkpoint_modules[uid]["value"] = self.values[uid]
# configuration
self._learning_epochs = self._as_dict(self.cfg["learning_epochs"])
self._mini_batches = self._as_dict(self.cfg["mini_batches"])
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"])
self._ratio_clip = self._as_dict(self.cfg["ratio_clip"])
self._value_clip = self._as_dict(self.cfg["value_clip"])
self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"])
self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"])
self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"])
self._kl_threshold = self._as_dict(self.cfg["kl_threshold"])
self._learning_rate = self._as_dict(self.cfg["learning_rate"])
self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"])
self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"])
self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"])
self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"])
self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"])
self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"])
self._discount_factor = self._as_dict(self.cfg["discount_factor"])
self._lambda = self._as_dict(self.cfg["lambda"])
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"])
# set up optimizer and learning rate scheduler
self.optimizers = {}
self.schedulers = {}
for uid in self.possible_agents:
policy = self.policies[uid]
value = self.values[uid]
if policy is not None and value is not None:
if policy is value:
optimizer = torch.optim.Adam(policy.parameters(), lr=self._learning_rate[uid])
else:
optimizer = torch.optim.Adam(itertools.chain(policy.parameters(), value.parameters()),
lr=self._learning_rate[uid])
self.optimizers[uid] = optimizer
if self._learning_rate_scheduler[uid] is not None:
self.schedulers[uid] = self._learning_rate_scheduler[uid](optimizer, **self._learning_rate_scheduler_kwargs[uid])
self.checkpoint_modules[uid]["optimizer"] = self.optimizers[uid]
# set up preprocessors
if self._state_preprocessor[uid] is not None:
self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid]
else:
self._state_preprocessor[uid] = self._empty_preprocessor
if self._value_preprocessor[uid] is not None:
self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid]
else:
self._value_preprocessor[uid] = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memories
for uid in self.possible_agents:
self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=torch.float32)
self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=torch.float32)
self.memories[uid].create_tensor(name="rewards", size=1, dtype=torch.float32)
self.memories[uid].create_tensor(name="terminated", size=1, dtype=torch.bool)
self.memories[uid].create_tensor(name="log_prob", size=1, dtype=torch.float32)
self.memories[uid].create_tensor(name="values", size=1, dtype=torch.float32)
self.memories[uid].create_tensor(name="returns", size=1, dtype=torch.float32)
self.memories[uid].create_tensor(name="advantages", size=1, dtype=torch.float32)
# tensors sampled during training
self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"]
# create temporary variables needed for storage and computation
self._current_log_prob = []
self._current_next_states = []
def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor:
"""Process the environment's states to make a decision (actions) using the main policies
:param states: Environment's states
:type states: dictionary of torch.Tensor
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: torch.Tensor
"""
# # sample random actions
# # TODO: fix for stochasticity, rnn and log_prob
# if timestep < self._random_timesteps:
# return self.policy.random_act({"states": states}, role="policy")
# sample stochastic actions
data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents]
actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)}
log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)}
outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)}
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: Mapping[str, torch.Tensor],
actions: Mapping[str, torch.Tensor],
rewards: Mapping[str, torch.Tensor],
next_states: Mapping[str, torch.Tensor],
terminated: Mapping[str, torch.Tensor],
truncated: Mapping[str, torch.Tensor],
infos: Mapping[str, Any],
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: dictionary of torch.Tensor
:param actions: Actions taken by the agent
:type actions: dictionary of torch.Tensor
:param rewards: Instant rewards achieved by the current actions
:type rewards: dictionary of torch.Tensor
:param next_states: Next observations/states of the environment
:type next_states: dictionary of torch.Tensor
:param terminated: Signals to indicate that episodes have terminated
:type terminated: dictionary of torch.Tensor
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: dictionary of torch.Tensor
:param infos: Additional information about the environment
:type infos: dictionary of any supported type
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memories:
self._current_next_states = next_states
for uid in self.possible_agents:
# reward shaping
if self._rewards_shaper is not None:
rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps)
# compute values
values, _, _ = self.values[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="value")
values = self._value_preprocessor[uid](values, inverse=True)
                # time-limit (truncation) bootstrapping
if self._time_limit_bootstrap[uid]:
rewards[uid] += self._discount_factor[uid] * values * truncated[uid]
                # store the transition in memory
self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid],
terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
def compute_gae(rewards: torch.Tensor,
dones: torch.Tensor,
values: torch.Tensor,
next_values: torch.Tensor,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> torch.Tensor:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: torch.Tensor
:param dones: Signals to indicate that episodes have ended
:type dones: torch.Tensor
:param values: Values obtained by the agent
:type values: torch.Tensor
:param next_values: Next values obtained by the agent
:type next_values: torch.Tensor
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
:return: Generalized Advantage Estimator
:rtype: torch.Tensor
"""
advantage = 0
advantages = torch.zeros_like(rewards)
not_dones = dones.logical_not()
memory_size = rewards.shape[0]
# advantages computation
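            # the single expression inside the loop below folds the two-step GAE recurrence
            #   delta_i = r_i - V(s_i) + gamma * (1 - d_i) * V(s_{i+1})
            #   A_i     = delta_i + gamma * lambda * (1 - d_i) * A_{i+1}
            # where the (1 - d_i) mask stops values and advantages from leaking across episode boundaries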
for i in reversed(range(memory_size)):
                next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
for uid in self.possible_agents:
policy = self.policies[uid]
value = self.values[uid]
memory = self.memories[uid]
# compute returns and advantages
with torch.no_grad():
value.train(False)
last_values, _, _ = value.act({"states": self._state_preprocessor[uid](self._current_next_states[uid].float())}, role="value")
value.train(True)
last_values = self._value_preprocessor[uid](last_values, inverse=True)
values = memory.get_tensor_by_name("values")
returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"),
dones=memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor[uid],
lambda_coefficient=self._lambda[uid])
memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True))
memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True))
memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid])
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs[uid]):
kl_divergences = []
# mini-batches loop
for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches:
sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch)
_, next_log_prob, _ = policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy")
# compute approximate KL divergence
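                    # the estimator below, (r - 1) - log(r) with r = pi_new / pi_old, is a
                    # low-variance, non-negative approximation of KL(pi_old || pi_new)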
with torch.no_grad():
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean()
kl_divergences.append(kl_divergence)
# early stopping with KL divergence
if self._kl_threshold[uid] and kl_divergence > self._kl_threshold[uid]:
break
# compute entropy loss
if self._entropy_loss_scale[uid]:
entropy_loss = -self._entropy_loss_scale[uid] * policy.get_entropy(role="policy").mean()
else:
entropy_loss = 0
# compute policy loss
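                    # clipped surrogate objective: maximize min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t),
                    # where r_t = pi_new(a_t|s_t) / pi_old(a_t|s_t) is recovered from the stored log-probabilities
                    # (the sign is flipped below since the optimizer minimizes the loss)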
ratio = torch.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip[uid], 1.0 + self._ratio_clip[uid])
policy_loss = -torch.min(surrogate, surrogate_clipped).mean()
# compute value loss
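                    # optional PPO-style value clipping: keep the new value prediction within
                    # +/- value_clip of the value recorded at rollout time before computing the MSE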
predicted_values, _, _ = value.act({"states": sampled_states}, role="value")
                    if self._clip_predicted_values[uid]:
predicted_values = sampled_values + torch.clip(predicted_values - sampled_values,
min=-self._value_clip[uid],
max=self._value_clip[uid])
value_loss = self._value_loss_scale[uid] * F.mse_loss(sampled_returns, predicted_values)
# optimization step
self.optimizers[uid].zero_grad()
(policy_loss + entropy_loss + value_loss).backward()
if self._grad_norm_clip[uid] > 0:
if policy is value:
nn.utils.clip_grad_norm_(policy.parameters(), self._grad_norm_clip[uid])
else:
nn.utils.clip_grad_norm_(itertools.chain(policy.parameters(), value.parameters()), self._grad_norm_clip[uid])
self.optimizers[uid].step()
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale[uid]:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
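                # a KL-adaptive scheduler adjusts the learning rate from the mean KL divergence
                # measured over this epoch's mini-batches; other schedulers follow their own step() schedule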
if self._learning_rate_scheduler[uid]:
if isinstance(self.schedulers[uid], KLAdaptiveLR):
self.schedulers[uid].step(torch.tensor(kl_divergences).mean())
else:
self.schedulers[uid].step()
# record data
self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
            if self._entropy_loss_scale[uid]:
self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
self.track_data(f"Policy / Standard deviation ({uid})", policy.distribution(role="policy").stddev.mean().item())
if self._learning_rate_scheduler[uid]:
self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid].get_last_lr()[0])
| 23,730 | Python | 49.491489 | 153 | 0.590223 |
Toni-SM/skrl/skrl/multi_agents/jax/base.py | from typing import Any, Mapping, Optional, Sequence, Union
import collections
import copy
import datetime
import os
import pickle
import gym
import gymnasium
import flax
import jax
import numpy as np
from skrl import config, logger
from skrl.memories.jax import Memory
from skrl.models.jax import Model
class MultiAgent:
def __init__(self,
possible_agents: Sequence[str],
models: Mapping[str, Mapping[str, Model]],
memories: Optional[Mapping[str, Memory]] = None,
observation_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None,
action_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Base class that represent a RL multi-agent
:param possible_agents: Name of all possible agents the environment could generate
:type possible_agents: list of str
:param models: Models used by the agents.
External keys are environment agents' names. Internal keys are the models required by the algorithm
:type models: nested dictionary of skrl.models.jax.Model
        :param memories: Memories to store the transitions.
:type memories: dictionary of skrl.memory.jax.Memory, optional
:param observation_spaces: Observation/state spaces or shapes (default: ``None``)
:type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param action_spaces: Action spaces or shapes (default: ``None``)
:type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
self._jax = config.jax.backend == "jax"
self.possible_agents = possible_agents
self.num_agents = len(self.possible_agents)
self.models = models
self.memories = memories
self.observation_spaces = observation_spaces
self.action_spaces = action_spaces
self.cfg = cfg if cfg is not None else {}
if device is None:
self.device = jax.devices()[0]
else:
self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0]
# convert the models to their respective device
for _models in self.models.values():
for model in _models.values():
if model is not None:
pass
self.tracking_data = collections.defaultdict(list)
self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000)
self._track_rewards = collections.deque(maxlen=100)
self._track_timesteps = collections.deque(maxlen=100)
self._cumulative_rewards = None
self._cumulative_timesteps = None
self.training = True
# checkpoint
self.checkpoint_modules = {uid: {} for uid in self.possible_agents}
self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000)
self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False)
self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": True, "modules": {}}
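        # note: the "best" modules are refreshed at checkpoint intervals in post_interaction()
        # when the tracked mean total reward improves, and flushed to disk by write_checkpoint()
        # while "saved" is False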
# experiment directory
directory = self.cfg.get("experiment", {}).get("directory", "")
experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "")
if not directory:
directory = os.path.join(os.getcwd(), "runs")
if not experiment_name:
experiment_name = f"{datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S-%f')}_{self.__class__.__name__}"
self.experiment_dir = os.path.join(directory, experiment_name)
def __str__(self) -> str:
"""Generate a representation of the agent as string
:return: Representation of the agent as string
:rtype: str
"""
string = f"Multi-agent: {repr(self)}"
for k, v in self.cfg.items():
if type(v) is dict:
string += f"\n |-- {k}"
for k1, v1 in v.items():
string += f"\n | |-- {k1}: {v1}"
else:
string += f"\n |-- {k}: {v}"
return string
def _as_dict(self, _input: Any) -> Mapping[str, Any]:
"""Convert a configuration value into a dictionary according to the number of agents
:param _input: Configuration value
:type _input: Any
        :raises ValueError: The configuration value is a dictionary whose keys do not match the possible agents
        :return: Configuration value as a dictionary (one entry per possible agent)
        :rtype: dictionary of any configuration value
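
        Example::

            # illustrative only, assuming possible_agents = ["agent_0", "agent_1"]
            >>> self._as_dict(0.99)
            {'agent_0': 0.99, 'agent_1': 0.99}
            >>> self._as_dict({"agent_0": 0.99, "agent_1": 0.95})
            {'agent_0': 0.99, 'agent_1': 0.95}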
"""
if _input and isinstance(_input, collections.abc.Mapping):
if set(_input) < set(self.possible_agents):
logger.error("The configuration value does not match possible agents")
raise ValueError("The configuration value does not match possible agents")
elif set(_input) >= set(self.possible_agents):
return _input
try:
return {name: copy.deepcopy(_input) for name in self.possible_agents}
except TypeError:
return {name: _input for name in self.possible_agents}
def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any:
"""Empty preprocess method
        This method is defined because multiprocessing can't pickle lambdas
:param _input: Input to preprocess
:type _input: Any
:return: Preprocessed input
:rtype: Any
"""
return _input
def _get_internal_value(self, _module: Any) -> Any:
"""Get internal module/variable state/value
:param _module: Module or variable
:type _module: Any
:return: Module/variable state/value
:rtype: Any
"""
return _module.state_dict.params if hasattr(_module, "state_dict") else _module
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
This method should be called before the agent is used.
        It will initialize the TensorBoard writer (and optionally Weights & Biases) and create the checkpoints directory
:param trainer_cfg: Trainer configuration
:type trainer_cfg: dict, optional
"""
# setup Weights & Biases
if self.cfg.get("experiment", {}).get("wandb", False):
# save experiment config
trainer_cfg = trainer_cfg if trainer_cfg is not None else {}
try:
models_cfg = {uid: {k: v.net._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents}
except AttributeError:
models_cfg = {uid: {k: v._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents}
            config = {**self.cfg, **trainer_cfg, **models_cfg}
# set default values
wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {}))
wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1])
wandb_kwargs.setdefault("sync_tensorboard", True)
wandb_kwargs.setdefault("config", {})
wandb_kwargs["config"].update(config)
# init Weights & Biases
import wandb
wandb.init(**wandb_kwargs)
# main entry to log data for consumption and visualization by TensorBoard
if self.write_interval > 0:
self.writer = None
# tensorboard via torch SummaryWriter
try:
from torch.utils.tensorboard import SummaryWriter
self.writer = SummaryWriter(log_dir=self.experiment_dir)
except ImportError as e:
pass
# tensorboard via tensorflow
if self.writer is None:
try:
import tensorflow
class _SummaryWriter:
def __init__(self, log_dir):
self.writer = tensorflow.summary.create_file_writer(logdir=log_dir)
def add_scalar(self, tag, value, step):
with self.writer.as_default():
tensorflow.summary.scalar(tag, value, step=step)
self.writer = _SummaryWriter(log_dir=self.experiment_dir)
except ImportError as e:
pass
# tensorboard via tensorboardX
if self.writer is None:
try:
import tensorboardX
self.writer = tensorboardX.SummaryWriter(log_dir=self.experiment_dir)
except ImportError as e:
pass
# show warnings and exit
if self.writer is None:
                logger.warning("No package found to write events to TensorBoard.")
logger.warning("Set agent's `write_interval` setting to 0 to disable writing")
logger.warning("or install one of the following packages:")
logger.warning(" - PyTorch: https://pytorch.org/get-started/locally")
logger.warning(" - TensorFlow: https://www.tensorflow.org/install")
logger.warning(" - TensorboardX: https://github.com/lanpa/tensorboardX#install")
logger.warning("The current running process will be terminated.")
exit()
if self.checkpoint_interval > 0:
os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True)
def track_data(self, tag: str, value: float) -> None:
"""Track data to TensorBoard
Currently only scalar data are supported
:param tag: Data identifier (e.g. 'Loss / policy loss')
:type tag: str
:param value: Value to track
:type value: float
"""
self.tracking_data[tag].append(value)
def write_tracking_data(self, timestep: int, timesteps: int) -> None:
"""Write tracking data to TensorBoard
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
for k, v in self.tracking_data.items():
if k.endswith("(min)"):
self.writer.add_scalar(k, np.min(v), timestep)
elif k.endswith("(max)"):
self.writer.add_scalar(k, np.max(v), timestep)
else:
self.writer.add_scalar(k, np.mean(v), timestep)
# reset data containers for next iteration
self._track_rewards.clear()
self._track_timesteps.clear()
self.tracking_data.clear()
def write_checkpoint(self, timestep: int, timesteps: int) -> None:
"""Write checkpoint (modules) to disk
The checkpoints are saved in the directory 'checkpoints' in the experiment directory.
The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time.
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"))
# separated modules
if self.checkpoint_store_separately:
for uid in self.possible_agents:
for name, module in self.checkpoint_modules[uid].items():
with open(os.path.join(self.experiment_dir, "checkpoints", f"{uid}_{name}_{tag}.pickle"), "wb") as file:
pickle.dump(flax.serialization.to_bytes(self._get_internal_value(module)), file, protocol=4)
# whole agent
else:
modules = {uid: {name: flax.serialization.to_bytes(self._get_internal_value(module)) for name, module in self.checkpoint_modules[uid].items()} \
for uid in self.possible_agents}
with open(os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pickle"), "wb") as file:
pickle.dump(modules, file, protocol=4)
# best modules
if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]:
# separated modules
if self.checkpoint_store_separately:
for uid in self.possible_agents:
                    for name, module in self.checkpoint_modules[uid].items():
with open(os.path.join(self.experiment_dir, "checkpoints", f"best_{uid}_{name}.pickle"), "wb") as file:
pickle.dump(flax.serialization.to_bytes(self.checkpoint_best_modules["modules"][uid][name]), file, protocol=4)
# whole agent
else:
modules = {uid: {name: flax.serialization.to_bytes(self.checkpoint_best_modules["modules"][uid][name]) \
for name in self.checkpoint_modules[uid].keys()} for uid in self.possible_agents}
with open(os.path.join(self.experiment_dir, "checkpoints", "best_agent.pickle"), "wb") as file:
pickle.dump(modules, file, protocol=4)
self.checkpoint_best_modules["saved"] = True
def act(self, states: Mapping[str, Union[np.ndarray, jax.Array]], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policy
:param states: Environment's states
:type states: dictionary of np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:raises NotImplementedError: The method is not implemented by the inheriting classes
:return: Actions
:rtype: np.ndarray or jax.Array
"""
raise NotImplementedError
def record_transition(self,
states: Mapping[str, Union[np.ndarray, jax.Array]],
actions: Mapping[str, Union[np.ndarray, jax.Array]],
rewards: Mapping[str, Union[np.ndarray, jax.Array]],
next_states: Mapping[str, Union[np.ndarray, jax.Array]],
terminated: Mapping[str, Union[np.ndarray, jax.Array]],
truncated: Mapping[str, Union[np.ndarray, jax.Array]],
infos: Mapping[str, Any],
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory (to be implemented by the inheriting classes)
Inheriting classes must call this method to record episode information (rewards, timesteps, etc.).
In addition to recording environment transition (such as states, rewards, etc.), agent information can be recorded.
:param states: Observations/states of the environment used to make the decision
:type states: dictionary of np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: dictionary of np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: dictionary of np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: dictionary of np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: dictionary of np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: dictionary of np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: dictionary of any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
if self.write_interval > 0:
_rewards = next(iter(rewards.values()))
_terminated = next(iter(terminated.values()))
_truncated = next(iter(truncated.values()))
# compute the cumulative sum of the rewards and timesteps
if self._cumulative_rewards is None:
self._cumulative_rewards = np.zeros_like(_rewards, dtype=np.float32)
self._cumulative_timesteps = np.zeros_like(_rewards, dtype=np.int32)
# TODO: find a better way to avoid https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError
if self._jax:
_rewards = jax.device_get(_rewards)
_terminated = jax.device_get(_terminated)
_truncated = jax.device_get(_truncated)
self._cumulative_rewards += _rewards
self._cumulative_timesteps += 1
# check ended episodes
finished_episodes = (_terminated + _truncated).nonzero()[0]
if finished_episodes.size:
                # store cumulative rewards and timesteps
self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist())
self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist())
# reset the cumulative rewards and timesteps
self._cumulative_rewards[finished_episodes] = 0
self._cumulative_timesteps[finished_episodes] = 0
# record data
self.tracking_data["Reward / Instantaneous reward (max)"].append(np.max(_rewards).item())
self.tracking_data["Reward / Instantaneous reward (min)"].append(np.min(_rewards).item())
self.tracking_data["Reward / Instantaneous reward (mean)"].append(np.mean(_rewards).item())
if len(self._track_rewards):
track_rewards = np.array(self._track_rewards)
track_timesteps = np.array(self._track_timesteps)
self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards))
self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards))
self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards))
self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps))
self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps))
self.tracking_data["Episode / Total timesteps (mean)"].append(np.mean(track_timesteps))
def set_mode(self, mode: str) -> None:
"""Set the model mode (training or evaluation)
:param mode: Mode: 'train' for training or 'eval' for evaluation
:type mode: str
"""
for _models in self.models.values():
for model in _models.values():
if model is not None:
model.set_mode(mode)
def set_running_mode(self, mode: str) -> None:
"""Set the current running mode (training or evaluation)
This method sets the value of the ``training`` property (boolean).
This property can be used to know if the agent is running in training or evaluation mode.
:param mode: Mode: 'train' for training or 'eval' for evaluation
:type mode: str
"""
self.training = mode == "train"
def save(self, path: str) -> None:
"""Save the agent to the specified path
:param path: Path to save the model to
:type path: str
"""
modules = {uid: {name: flax.serialization.to_bytes(self._get_internal_value(module)) \
for name, module in self.checkpoint_modules[uid].items()} for uid in self.possible_agents}
# HACK: Does it make sense to use https://github.com/google/orbax
# file.write(flax.serialization.to_bytes(modules))
with open(path, "wb") as file:
pickle.dump(modules, file, protocol=4)
def load(self, path: str) -> None:
"""Load the model from the specified path
:param path: Path to load the model from
:type path: str
"""
with open(path, "rb") as file:
modules = pickle.load(file)
if type(modules) is dict:
for uid in self.possible_agents:
if uid not in modules:
logger.warning(f"Cannot load modules for {uid}. The agent doesn't have such an instance")
continue
for name, data in modules[uid].items():
module = self.checkpoint_modules[uid].get(name, None)
if module is not None:
if hasattr(module, "load_state_dict"):
params = flax.serialization.from_bytes(module.state_dict.params, data)
module.state_dict = module.state_dict.replace(params=params)
else:
pass # TODO:raise NotImplementedError
else:
logger.warning(f"Cannot load the {uid}:{name} module. The agent doesn't have such an instance")
def migrate(self,
path: str,
name_map: Mapping[str, Mapping[str, str]] = {},
auto_mapping: bool = True,
                verbose: bool = False) -> bool:
        """Migrate the specified external checkpoint to the current agent
:raises NotImplementedError: Not yet implemented
"""
raise NotImplementedError
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
timestep += 1
# update best models and write checkpoints
if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval:
# update best models
reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31))
if reward > self.checkpoint_best_modules["reward"]:
self.checkpoint_best_modules["timestep"] = timestep
self.checkpoint_best_modules["reward"] = reward
self.checkpoint_best_modules["saved"] = False
self.checkpoint_best_modules["modules"] = {uid: {k: copy.deepcopy(self._get_internal_value(v)) \
for k, v in self.checkpoint_modules[uid].items()} for uid in self.possible_agents}
# write checkpoints
self.write_checkpoint(timestep, timesteps)
# write to tensorboard
if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval:
self.write_tracking_data(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:raises NotImplementedError: The method is not implemented by the inheriting classes
"""
raise NotImplementedError
| 24,232 | Python | 45.246183 | 156 | 0.599373 |
Toni-SM/skrl/skrl/multi_agents/jax/__init__.py | from skrl.multi_agents.jax.base import MultiAgent
| 50 | Python | 24.499988 | 49 | 0.84 |
Toni-SM/skrl/skrl/multi_agents/jax/mappo/mappo.py | from typing import Any, Mapping, Optional, Sequence, Union
import copy
import functools
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.multi_agents.jax import MultiAgent
from skrl.resources.optimizers.jax import Adam
from skrl.resources.schedulers.jax import KLAdaptiveLR
# [start-config-dict-jax]
MAPPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"shared_state_preprocessor": None, # shared state preprocessor class (see skrl.resources.preprocessors)
"shared_state_preprocessor_kwargs": {}, # shared state preprocessor's kwargs (e.g. {"size": env.shared_observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 1.0, # value loss scaling factor
"kl_threshold": 0, # KL divergence threshold for early stopping
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
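# note (illustrative): the defaults above are usually copied and selectively overridden before
# being passed to the agent, e.g.
# cfg = copy.deepcopy(MAPPO_DEFAULT_CONFIG)
# cfg["rollouts"] = 32
# cfg["mini_batches"] = 4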
def compute_gae(rewards: np.ndarray,
dones: np.ndarray,
values: np.ndarray,
next_values: np.ndarray,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> np.ndarray:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: np.ndarray
:param dones: Signals to indicate that episodes have ended
:type dones: np.ndarray
:param values: Values obtained by the agent
:type values: np.ndarray
:param next_values: Next values obtained by the agent
:type next_values: np.ndarray
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
:return: Generalized Advantage Estimator
:rtype: np.ndarray
"""
advantage = 0
advantages = np.zeros_like(rewards)
not_dones = np.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
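# note: compute_gae above serves the numpy backend, while this jit-compiled variant is used when
# config.jax.backend == "jax"; it is functionally identical but uses the functional index update
# .at[i].set(...) because JAX arrays cannot be modified in place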
@jax.jit
def _compute_gae(rewards: jax.Array,
dones: jax.Array,
values: jax.Array,
next_values: jax.Array,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> jax.Array:
advantage = 0
advantages = jnp.zeros_like(rewards)
not_dones = jnp.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages = advantages.at[i].set(advantage)
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
@functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale"))
def _update_policy(policy_act,
policy_state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
ratio_clip,
get_entropy,
entropy_loss_scale):
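    # the loss is defined as a closure over the sampled mini-batch so that jax.value_and_grad
    # can differentiate it with respect to the policy parameters only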
# compute policy loss
def _policy_loss(params):
_, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions}, "policy", params)
# compute approximate KL divergence
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean()
# compute policy loss
ratio = jnp.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
# compute entropy loss
entropy_loss = 0
if entropy_loss_scale:
entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean()
return -jnp.minimum(surrogate, surrogate_clipped).mean(), (entropy_loss, kl_divergence, outputs["stddev"])
(policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params)
return grad, policy_loss, entropy_loss, kl_divergence, stddev
@functools.partial(jax.jit, static_argnames=("value_act", "clip_predicted_values"))
def _update_value(value_act,
value_state_dict,
sampled_states,
sampled_values,
sampled_returns,
value_loss_scale,
clip_predicted_values,
value_clip):
# compute value loss
def _value_loss(params):
predicted_values, _, _ = value_act({"states": sampled_states}, "value", params)
if clip_predicted_values:
predicted_values = sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip)
return value_loss_scale * ((sampled_returns - predicted_values) ** 2).mean()
value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params)
return grad, value_loss
class MAPPO(MultiAgent):
def __init__(self,
possible_agents: Sequence[str],
models: Mapping[str, Model],
memories: Optional[Mapping[str, Memory]] = None,
observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None,
shared_observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None) -> None:
"""Multi-Agent Proximal Policy Optimization (MAPPO)
https://arxiv.org/abs/2103.01955
:param possible_agents: Name of all possible agents the environment could generate
:type possible_agents: list of str
:param models: Models used by the agents.
External keys are environment agents' names. Internal keys are the models required by the algorithm
:type models: nested dictionary of skrl.models.jax.Model
        :param memories: Memories to store the transitions.
:type memories: dictionary of skrl.memory.jax.Memory, optional
:param observation_spaces: Observation/state spaces or shapes (default: ``None``)
:type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param action_spaces: Action spaces or shapes (default: ``None``)
:type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
:param shared_observation_spaces: Shared observation/state space or shape (default: ``None``)
:type shared_observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
"""
        # _cfg = copy.deepcopy(MAPPO_DEFAULT_CONFIG)  # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = MAPPO_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(possible_agents=possible_agents,
models=models,
memories=memories,
observation_spaces=observation_spaces,
action_spaces=action_spaces,
device=device,
cfg=_cfg)
self.shared_observation_spaces = shared_observation_spaces
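        # note: following the centralized-training/decentralized-execution scheme, each agent's
        # value function receives the shared (centralized) observation while its policy keeps
        # acting on the agent's local observation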
# models
self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents}
self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents}
for uid in self.possible_agents:
self.checkpoint_modules[uid]["policy"] = self.policies[uid]
self.checkpoint_modules[uid]["value"] = self.values[uid]
# configuration
self._learning_epochs = self._as_dict(self.cfg["learning_epochs"])
self._mini_batches = self._as_dict(self.cfg["mini_batches"])
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"])
self._ratio_clip = self._as_dict(self.cfg["ratio_clip"])
self._value_clip = self._as_dict(self.cfg["value_clip"])
self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"])
self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"])
self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"])
self._kl_threshold = self._as_dict(self.cfg["kl_threshold"])
self._learning_rate = self._as_dict(self.cfg["learning_rate"])
self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"])
self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"])
self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"])
self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"])
self._shared_state_preprocessor = self._as_dict(self.cfg["shared_state_preprocessor"])
self._shared_state_preprocessor_kwargs = self._as_dict(self.cfg["shared_state_preprocessor_kwargs"])
self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"])
self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"])
self._discount_factor = self._as_dict(self.cfg["discount_factor"])
self._lambda = self._as_dict(self.cfg["lambda"])
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"])
# set up optimizer and learning rate scheduler
self.policy_optimizer = {}
self.value_optimizer = {}
self.schedulers = {}
for uid in self.possible_agents:
policy = self.policies[uid]
value = self.values[uid]
if policy is not None and value is not None:
# scheduler
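                # when a KL-adaptive scheduler is used, the optimizer is created with scale=False
                # and the scheduler's current learning rate is passed explicitly on every optimizer
                # step; other scheduler classes transform the learning rate here, before the
                # optimizers are instantiated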
scale = True
self.schedulers[uid] = None
if self._learning_rate_scheduler[uid] is not None:
if self._learning_rate_scheduler[uid] == KLAdaptiveLR:
scale = False
self.schedulers[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid])
else:
self._learning_rate[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid])
# optimizer
self.policy_optimizer[uid] = Adam(model=policy, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale)
self.value_optimizer[uid] = Adam(model=value, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale)
self.checkpoint_modules[uid]["policy_optimizer"] = self.policy_optimizer[uid]
self.checkpoint_modules[uid]["value_optimizer"] = self.value_optimizer[uid]
# set up preprocessors
if self._state_preprocessor[uid] is not None:
self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid]
else:
self._state_preprocessor[uid] = self._empty_preprocessor
if self._shared_state_preprocessor[uid] is not None:
self._shared_state_preprocessor[uid] = self._shared_state_preprocessor[uid](**self._shared_state_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["shared_state_preprocessor"] = self._shared_state_preprocessor[uid]
else:
self._shared_state_preprocessor[uid] = self._empty_preprocessor
if self._value_preprocessor[uid] is not None:
self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid]
else:
self._value_preprocessor[uid] = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memories
for uid in self.possible_agents:
self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=jnp.float32)
self.memories[uid].create_tensor(name="shared_states", size=self.shared_observation_spaces[uid], dtype=jnp.float32)
self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=jnp.float32)
self.memories[uid].create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memories[uid].create_tensor(name="terminated", size=1, dtype=jnp.int8)
self.memories[uid].create_tensor(name="log_prob", size=1, dtype=jnp.float32)
self.memories[uid].create_tensor(name="values", size=1, dtype=jnp.float32)
self.memories[uid].create_tensor(name="returns", size=1, dtype=jnp.float32)
self.memories[uid].create_tensor(name="advantages", size=1, dtype=jnp.float32)
# tensors sampled during training
self._tensors_names = ["states", "shared_states", "actions", "log_prob", "values", "returns", "advantages"]
# create temporary variables needed for storage and computation
self._current_log_prob = []
self._current_shared_next_states = []
# set up models for just-in-time compilation with XLA
for uid in self.possible_agents:
self.policies[uid].apply = jax.jit(self.policies[uid].apply, static_argnums=2)
if self.values[uid] is not None:
self.values[uid].apply = jax.jit(self.values[uid].apply, static_argnums=2)
def act(self, states: Mapping[str, Union[np.ndarray, jax.Array]], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policies
:param states: Environment's states
:type states: dictionary of np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
# # sample random actions
# # TODO: fix for stochasticity, rnn and log_prob
# if timestep < self._random_timesteps:
# return self.policy.random_act({"states": states}, role="policy")
# sample stochastic actions
data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents]
actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)}
log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)}
outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)}
if not self._jax: # numpy backend
            actions = {uid: jax.device_get(_actions) for uid, _actions in actions.items()}
            log_prob = {uid: jax.device_get(_log_prob) for uid, _log_prob in log_prob.items()}
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: Mapping[str, Union[np.ndarray, jax.Array]],
actions: Mapping[str, Union[np.ndarray, jax.Array]],
rewards: Mapping[str, Union[np.ndarray, jax.Array]],
next_states: Mapping[str, Union[np.ndarray, jax.Array]],
terminated: Mapping[str, Union[np.ndarray, jax.Array]],
truncated: Mapping[str, Union[np.ndarray, jax.Array]],
infos: Mapping[str, Any],
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: dictionary of np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: dictionary of np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: dictionary of np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: dictionary of np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: dictionary of np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: dictionary of np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: dictionary of any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memories:
shared_states = infos["shared_states"]
self._current_shared_next_states = infos["shared_next_states"]
for uid in self.possible_agents:
# reward shaping
if self._rewards_shaper is not None:
rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps)
# compute values
values, _, _ = self.values[uid].act({"states": self._shared_state_preprocessor[uid](shared_states[uid])}, role="value")
if not self._jax: # numpy backend
values = jax.device_get(values)
values = self._value_preprocessor[uid](values, inverse=True)
                # time-limit (truncation) bootstrapping
if self._time_limit_bootstrap[uid]:
rewards[uid] += self._discount_factor[uid] * values * truncated[uid]
                # store the transition in memory
self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid],
terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values,
shared_states=shared_states[uid])
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
for uid in self.possible_agents:
policy = self.policies[uid]
value = self.values[uid]
memory = self.memories[uid]
# compute returns and advantages
value.training = False
last_values, _, _ = value.act({"states": self._shared_state_preprocessor[uid](self._current_shared_next_states[uid])}, role="value") # TODO: .float()
value.training = True
if not self._jax: # numpy backend
last_values = jax.device_get(last_values)
last_values = self._value_preprocessor[uid](last_values, inverse=True)
values = memory.get_tensor_by_name("values")
if self._jax:
returns, advantages = _compute_gae(rewards=memory.get_tensor_by_name("rewards"),
dones=memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor[uid],
lambda_coefficient=self._lambda[uid])
else:
returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"),
dones=memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor[uid],
lambda_coefficient=self._lambda[uid])
memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True))
memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True))
memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid])
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs[uid]):
kl_divergences = []
# mini-batches loop
for sampled_states, sampled_shared_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages \
in sampled_batches:
sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch)
sampled_shared_states = self._shared_state_preprocessor[uid](sampled_shared_states, train=not epoch)
# compute policy loss
grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(policy.act,
policy.state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
self._ratio_clip[uid],
policy.get_entropy,
self._entropy_loss_scale[uid])
kl_divergences.append(kl_divergence.item())
# early stopping with KL divergence
if self._kl_threshold[uid] and kl_divergence > self._kl_threshold[uid]:
break
# optimization step (policy)
self.policy_optimizer[uid] = self.policy_optimizer[uid].step(grad, policy, self.schedulers[uid]._lr if self.schedulers[uid] else None)
# compute value loss
grad, value_loss = _update_value(value.act,
value.state_dict,
sampled_shared_states,
sampled_values,
sampled_returns,
self._value_loss_scale[uid],
self._clip_predicted_values[uid],
self._value_clip[uid])
# optimization step (value)
self.value_optimizer[uid] = self.value_optimizer[uid].step(grad, value, self.schedulers[uid]._lr if self.schedulers[uid] else None)
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale[uid]:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
if self._learning_rate_scheduler[uid]:
if isinstance(self.schedulers[uid], KLAdaptiveLR):
self.schedulers[uid].step(np.mean(kl_divergences))
# record data
self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
            if self._entropy_loss_scale[uid]:
self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
self.track_data(f"Policy / Standard deviation ({uid})", stddev.mean().item())
if self._learning_rate_scheduler[uid]:
self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid]._lr)
| 30,433 | Python | 50.846678 | 162 | 0.588309 |
Toni-SM/skrl/skrl/multi_agents/jax/mappo/__init__.py | from skrl.multi_agents.jax.mappo.mappo import MAPPO, MAPPO_DEFAULT_CONFIG
| 74 | Python | 36.499982 | 73 | 0.824324 |
Toni-SM/skrl/skrl/multi_agents/jax/ippo/__init__.py | from skrl.multi_agents.jax.ippo.ippo import IPPO, IPPO_DEFAULT_CONFIG
| 70 | Python | 34.499983 | 69 | 0.814286 |
Toni-SM/skrl/skrl/multi_agents/jax/ippo/ippo.py | from typing import Any, Mapping, Optional, Sequence, Union
import copy
import functools
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl.memories.jax import Memory
from skrl.models.jax import Model
from skrl.multi_agents.jax import MultiAgent
from skrl.resources.optimizers.jax import Adam
from skrl.resources.schedulers.jax import KLAdaptiveLR
# [start-config-dict-jax]
IPPO_DEFAULT_CONFIG = {
"rollouts": 16, # number of rollouts before updating
"learning_epochs": 8, # number of learning epochs during each update
"mini_batches": 2, # number of mini batches during each learning epoch
"discount_factor": 0.99, # discount factor (gamma)
"lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages
"learning_rate": 1e-3, # learning rate
"learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler)
"learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3})
"state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors)
"state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space})
"value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors)
"value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1})
"random_timesteps": 0, # random exploration steps
"learning_starts": 0, # learning starts after this many steps
"grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients
"ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective
"value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True)
"clip_predicted_values": False, # clip predicted values during value loss computation
"entropy_loss_scale": 0.0, # entropy loss scaling factor
"value_loss_scale": 1.0, # value loss scaling factor
"kl_threshold": 0, # KL divergence threshold for early stopping
"rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward
"time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation)
"experiment": {
"directory": "", # experiment's parent directory
"experiment_name": "", # experiment name
"write_interval": 250, # TensorBoard writing interval (timesteps)
"checkpoint_interval": 1000, # interval for checkpoints (timesteps)
"store_separately": False, # whether to store checkpoints separately
"wandb": False, # whether to use Weights & Biases
"wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
}
}
# [end-config-dict-jax]
def compute_gae(rewards: np.ndarray,
dones: np.ndarray,
values: np.ndarray,
next_values: np.ndarray,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> np.ndarray:
"""Compute the Generalized Advantage Estimator (GAE)
:param rewards: Rewards obtained by the agent
:type rewards: np.ndarray
:param dones: Signals to indicate that episodes have ended
:type dones: np.ndarray
:param values: Values obtained by the agent
:type values: np.ndarray
:param next_values: Next values obtained by the agent
:type next_values: np.ndarray
:param discount_factor: Discount factor
:type discount_factor: float
:param lambda_coefficient: Lambda coefficient
:type lambda_coefficient: float
:return: Generalized Advantage Estimator
:rtype: np.ndarray
"""
advantage = 0
advantages = np.zeros_like(rewards)
not_dones = np.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages[i] = advantage
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@jax.jit
def _compute_gae(rewards: jax.Array,
dones: jax.Array,
values: jax.Array,
next_values: jax.Array,
discount_factor: float = 0.99,
lambda_coefficient: float = 0.95) -> jax.Array:
advantage = 0
advantages = jnp.zeros_like(rewards)
not_dones = jnp.logical_not(dones)
memory_size = rewards.shape[0]
# advantages computation
for i in reversed(range(memory_size)):
next_values = values[i + 1] if i < memory_size - 1 else next_values
advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
advantages = advantages.at[i].set(advantage)
# returns computation
returns = advantages + values
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
return returns, advantages
@functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale"))
def _update_policy(policy_act,
policy_state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
ratio_clip,
get_entropy,
entropy_loss_scale):
# compute policy loss
def _policy_loss(params):
_, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions}, "policy", params)
# compute approximate KL divergence
ratio = next_log_prob - sampled_log_prob
kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean()
# compute policy loss
ratio = jnp.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
# compute entropy loss
entropy_loss = 0
if entropy_loss_scale:
entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean()
return -jnp.minimum(surrogate, surrogate_clipped).mean(), (entropy_loss, kl_divergence, outputs["stddev"])
(policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params)
return grad, policy_loss, entropy_loss, kl_divergence, stddev
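# Hedged, standalone illustration (toy log-probabilities and advantages; 0.2 mirrors the
# "ratio_clip" default) of the clipped surrogate term computed inside _policy_loss above:
# the objective keeps the most pessimistic (minimum) of the unclipped and clipped terms.
def _example_clipped_surrogate(ratio_clip=0.2):
    old_log_prob = jnp.array([-1.0, -0.5, -2.0])
    new_log_prob = jnp.array([-0.8, -0.7, -1.0])
    advantages = jnp.array([1.0, -1.0, 0.5])
    ratio = jnp.exp(new_log_prob - old_log_prob)
    surrogate = advantages * ratio
    surrogate_clipped = advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
    return -jnp.minimum(surrogate, surrogate_clipped).mean()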
@functools.partial(jax.jit, static_argnames=("value_act", "clip_predicted_values"))
def _update_value(value_act,
value_state_dict,
sampled_states,
sampled_values,
sampled_returns,
value_loss_scale,
clip_predicted_values,
value_clip):
# compute value loss
def _value_loss(params):
predicted_values, _, _ = value_act({"states": sampled_states}, "value", params)
if clip_predicted_values:
predicted_values = sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip)
return value_loss_scale * ((sampled_returns - predicted_values) ** 2).mean()
value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params)
return grad, value_loss
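# Hedged illustration (toy numbers) of the value-clipping step in _value_loss above: the new value
# prediction is kept within +/- value_clip of the value recorded when the transition was collected.
def _example_clipped_value(value_clip=0.2):
    sampled_values = jnp.array([1.0, 0.0, -1.0])  # values stored during the rollout
    predicted_values = jnp.array([1.5, 0.1, -0.5])  # values predicted during the update
    return sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip)  # [1.2, 0.1, -0.8]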
class IPPO(MultiAgent):
def __init__(self,
possible_agents: Sequence[str],
models: Mapping[str, Model],
memories: Optional[Mapping[str, Memory]] = None,
observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None,
device: Optional[Union[str, jax.Device]] = None,
cfg: Optional[dict] = None) -> None:
"""Independent Proximal Policy Optimization (IPPO)
https://arxiv.org/abs/2011.09533
:param possible_agents: Name of all possible agents the environment could generate
:type possible_agents: list of str
:param models: Models used by the agents.
External keys are environment agents' names. Internal keys are the models required by the algorithm
:type models: nested dictionary of skrl.models.jax.Model
        :param memories: Memories to store the transitions.
        :type memories: dictionary of skrl.memories.jax.Memory, optional
:param observation_spaces: Observation/state spaces or shapes (default: ``None``)
:type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param action_spaces: Action spaces or shapes (default: ``None``)
:type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param cfg: Configuration dictionary
:type cfg: dict
"""
# _cfg = copy.deepcopy(IPPO_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object
_cfg = IPPO_DEFAULT_CONFIG
_cfg.update(cfg if cfg is not None else {})
super().__init__(possible_agents=possible_agents,
models=models,
memories=memories,
observation_spaces=observation_spaces,
action_spaces=action_spaces,
device=device,
cfg=_cfg)
# models
self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents}
self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents}
for uid in self.possible_agents:
self.checkpoint_modules[uid]["policy"] = self.policies[uid]
self.checkpoint_modules[uid]["value"] = self.values[uid]
# configuration
self._learning_epochs = self._as_dict(self.cfg["learning_epochs"])
self._mini_batches = self._as_dict(self.cfg["mini_batches"])
self._rollouts = self.cfg["rollouts"]
self._rollout = 0
self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"])
self._ratio_clip = self._as_dict(self.cfg["ratio_clip"])
self._value_clip = self._as_dict(self.cfg["value_clip"])
self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"])
self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"])
self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"])
self._kl_threshold = self._as_dict(self.cfg["kl_threshold"])
self._learning_rate = self._as_dict(self.cfg["learning_rate"])
self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"])
self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"])
self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"])
self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"])
self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"])
self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"])
self._discount_factor = self._as_dict(self.cfg["discount_factor"])
self._lambda = self._as_dict(self.cfg["lambda"])
self._random_timesteps = self.cfg["random_timesteps"]
self._learning_starts = self.cfg["learning_starts"]
self._rewards_shaper = self.cfg["rewards_shaper"]
self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"])
# set up optimizer and learning rate scheduler
self.policy_optimizer = {}
self.value_optimizer = {}
self.schedulers = {}
for uid in self.possible_agents:
policy = self.policies[uid]
value = self.values[uid]
if policy is not None and value is not None:
# scheduler
scale = True
self.schedulers[uid] = None
if self._learning_rate_scheduler[uid] is not None:
if self._learning_rate_scheduler[uid] == KLAdaptiveLR:
scale = False
self.schedulers[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid])
else:
self._learning_rate[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid])
# optimizer
self.policy_optimizer[uid] = Adam(model=policy, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale)
self.value_optimizer[uid] = Adam(model=value, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale)
self.checkpoint_modules[uid]["policy_optimizer"] = self.policy_optimizer[uid]
self.checkpoint_modules[uid]["value_optimizer"] = self.value_optimizer[uid]
# set up preprocessors
if self._state_preprocessor[uid] is not None:
self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid]
else:
self._state_preprocessor[uid] = self._empty_preprocessor
if self._value_preprocessor[uid] is not None:
self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid])
self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid]
else:
self._value_preprocessor[uid] = self._empty_preprocessor
def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None:
"""Initialize the agent
"""
super().init(trainer_cfg=trainer_cfg)
self.set_mode("eval")
# create tensors in memories
for uid in self.possible_agents:
self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=jnp.float32)
self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=jnp.float32)
self.memories[uid].create_tensor(name="rewards", size=1, dtype=jnp.float32)
self.memories[uid].create_tensor(name="terminated", size=1, dtype=jnp.int8)
self.memories[uid].create_tensor(name="log_prob", size=1, dtype=jnp.float32)
self.memories[uid].create_tensor(name="values", size=1, dtype=jnp.float32)
self.memories[uid].create_tensor(name="returns", size=1, dtype=jnp.float32)
self.memories[uid].create_tensor(name="advantages", size=1, dtype=jnp.float32)
# tensors sampled during training
self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"]
# create temporary variables needed for storage and computation
self._current_log_prob = []
self._current_next_states = []
# set up models for just-in-time compilation with XLA
for uid in self.possible_agents:
self.policies[uid].apply = jax.jit(self.policies[uid].apply, static_argnums=2)
if self.values[uid] is not None:
self.values[uid].apply = jax.jit(self.values[uid].apply, static_argnums=2)
def act(self, states: Mapping[str, Union[np.ndarray, jax.Array]], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]:
"""Process the environment's states to make a decision (actions) using the main policies
:param states: Environment's states
:type states: dictionary of np.ndarray or jax.Array
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
:return: Actions
:rtype: np.ndarray or jax.Array
"""
# # sample random actions
# # TODO: fix for stochasticity, rnn and log_prob
# if timestep < self._random_timesteps:
# return self.policy.random_act({"states": states}, role="policy")
# sample stochastic actions
data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents]
actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)}
log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)}
outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)}
if not self._jax: # numpy backend
            actions = {uid: jax.device_get(_actions) for uid, _actions in actions.items()}
            log_prob = {uid: jax.device_get(_log_prob) for uid, _log_prob in log_prob.items()}
self._current_log_prob = log_prob
return actions, log_prob, outputs
def record_transition(self,
states: Mapping[str, Union[np.ndarray, jax.Array]],
actions: Mapping[str, Union[np.ndarray, jax.Array]],
rewards: Mapping[str, Union[np.ndarray, jax.Array]],
next_states: Mapping[str, Union[np.ndarray, jax.Array]],
terminated: Mapping[str, Union[np.ndarray, jax.Array]],
truncated: Mapping[str, Union[np.ndarray, jax.Array]],
infos: Mapping[str, Any],
timestep: int,
timesteps: int) -> None:
"""Record an environment transition in memory
:param states: Observations/states of the environment used to make the decision
:type states: dictionary of np.ndarray or jax.Array
:param actions: Actions taken by the agent
:type actions: dictionary of np.ndarray or jax.Array
:param rewards: Instant rewards achieved by the current actions
:type rewards: dictionary of np.ndarray or jax.Array
:param next_states: Next observations/states of the environment
:type next_states: dictionary of np.ndarray or jax.Array
:param terminated: Signals to indicate that episodes have terminated
:type terminated: dictionary of np.ndarray or jax.Array
:param truncated: Signals to indicate that episodes have been truncated
:type truncated: dictionary of np.ndarray or jax.Array
:param infos: Additional information about the environment
:type infos: dictionary of any type supported by the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps)
if self.memories:
self._current_next_states = next_states
for uid in self.possible_agents:
# reward shaping
if self._rewards_shaper is not None:
rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps)
# compute values
values, _, _ = self.values[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="value")
if not self._jax: # numpy backend
values = jax.device_get(values)
values = self._value_preprocessor[uid](values, inverse=True)
                # time-limit (truncation) bootstrapping
if self._time_limit_bootstrap[uid]:
rewards[uid] += self._discount_factor[uid] * values * truncated[uid]
                # store the transition in memory
self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid],
terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values)
def pre_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called before the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
pass
def post_interaction(self, timestep: int, timesteps: int) -> None:
"""Callback called after the interaction with the environment
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
self._rollout += 1
if not self._rollout % self._rollouts and timestep >= self._learning_starts:
self.set_mode("train")
self._update(timestep, timesteps)
self.set_mode("eval")
# write tracking data and checkpoints
super().post_interaction(timestep, timesteps)
def _update(self, timestep: int, timesteps: int) -> None:
"""Algorithm's main update step
:param timestep: Current timestep
:type timestep: int
:param timesteps: Number of timesteps
:type timesteps: int
"""
for uid in self.possible_agents:
policy = self.policies[uid]
value = self.values[uid]
memory = self.memories[uid]
# compute returns and advantages
value.training = False
last_values, _, _ = value.act({"states": self._state_preprocessor[uid](self._current_next_states[uid])}, role="value") # TODO: .float()
value.training = True
if not self._jax: # numpy backend
last_values = jax.device_get(last_values)
last_values = self._value_preprocessor[uid](last_values, inverse=True)
values = memory.get_tensor_by_name("values")
if self._jax:
returns, advantages = _compute_gae(rewards=memory.get_tensor_by_name("rewards"),
dones=memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor[uid],
lambda_coefficient=self._lambda[uid])
else:
returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"),
dones=memory.get_tensor_by_name("terminated"),
values=values,
next_values=last_values,
discount_factor=self._discount_factor[uid],
lambda_coefficient=self._lambda[uid])
memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True))
memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True))
memory.set_tensor_by_name("advantages", advantages)
# sample mini-batches from memory
sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid])
cumulative_policy_loss = 0
cumulative_entropy_loss = 0
cumulative_value_loss = 0
# learning epochs
for epoch in range(self._learning_epochs[uid]):
kl_divergences = []
# mini-batches loop
for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches:
sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch)
# compute policy loss
grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(policy.act,
policy.state_dict,
sampled_states,
sampled_actions,
sampled_log_prob,
sampled_advantages,
self._ratio_clip[uid],
policy.get_entropy,
self._entropy_loss_scale[uid])
kl_divergences.append(kl_divergence.item())
# early stopping with KL divergence
if self._kl_threshold[uid] and kl_divergence > self._kl_threshold[uid]:
break
# optimization step (policy)
self.policy_optimizer[uid] = self.policy_optimizer[uid].step(grad, policy, self.schedulers[uid]._lr if self.schedulers[uid] else None)
# compute value loss
grad, value_loss = _update_value(value.act,
value.state_dict,
sampled_states,
sampled_values,
sampled_returns,
self._value_loss_scale[uid],
self._clip_predicted_values[uid],
self._value_clip[uid])
# optimization step (value)
self.value_optimizer[uid] = self.value_optimizer[uid].step(grad, value, self.schedulers[uid]._lr if self.schedulers[uid] else None)
# update cumulative losses
cumulative_policy_loss += policy_loss.item()
cumulative_value_loss += value_loss.item()
if self._entropy_loss_scale[uid]:
cumulative_entropy_loss += entropy_loss.item()
# update learning rate
if self._learning_rate_scheduler[uid]:
if isinstance(self.schedulers[uid], KLAdaptiveLR):
self.schedulers[uid].step(np.mean(kl_divergences))
# record data
self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
            if self._entropy_loss_scale[uid]:
self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid]))
self.track_data(f"Policy / Standard deviation ({uid})", stddev.mean().item())
if self._learning_rate_scheduler[uid]:
self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid]._lr)
| 28,632 | Python | 49.499118 | 156 | 0.583054 |
Toni-SM/skrl/skrl/utils/control.py | import isaacgym.torch_utils as torch_utils
import torch
def ik(jacobian_end_effector,
current_position, current_orientation,
goal_position, goal_orientation,
damping_factor=0.05):
"""
Damped Least Squares method: https://www.math.ucsd.edu/~sbuss/ResearchWeb/ikmethods/iksurvey.pdf
"""
# compute position and orientation error
position_error = goal_position - current_position
q_r = torch_utils.quat_mul(goal_orientation, torch_utils.quat_conjugate(current_orientation))
orientation_error = q_r[:, 0:3] * torch.sign(q_r[:, 3]).unsqueeze(-1)
dpose = torch.cat([position_error, orientation_error], -1).unsqueeze(-1)
# solve damped least squares (dO = J.T * V)
transpose = torch.transpose(jacobian_end_effector, 1, 2)
lmbda = torch.eye(6).to(jacobian_end_effector.device) * (damping_factor ** 2)
return (transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ dpose)
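# Hedged usage sketch (tensor names and shapes are illustrative; an Isaac Gym installation is
# assumed): given a batch of end-effector Jacobians (num_envs, 6, num_dofs), positions (num_envs, 3)
# and xyzw quaternions (num_envs, 4), the returned joint displacement is typically added to the
# current DOF position targets.
def _example_ik_step(jacobian_ee, hand_pos, hand_rot, goal_pos, goal_rot, dof_targets):
    delta_dof = ik(jacobian_ee, hand_pos, hand_rot, goal_pos, goal_rot, damping_factor=0.05)
    return dof_targets + delta_dof.squeeze(-1)  # (num_envs, num_dofs)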
def osc(jacobian_end_effector, mass_matrix,
current_position, current_orientation,
goal_position, goal_orientation,
current_dof_velocities,
kp=5, kv=2):
"""
https://studywolf.wordpress.com/2013/09/17/robot-control-4-operation-space-control/
"""
mass_matrix_end_effector = torch.inverse(jacobian_end_effector @ torch.inverse(mass_matrix) @ torch.transpose(jacobian_end_effector, 1, 2))
# compute position and orientation error
position_error = kp * (goal_position - current_position)
q_r = torch_utils.quat_mul(goal_orientation, torch_utils.quat_conjugate(current_orientation))
orientation_error = q_r[:, 0:3] * torch.sign(q_r[:, 3]).unsqueeze(-1)
dpose = torch.cat([position_error, orientation_error], -1)
return torch.transpose(jacobian_end_effector, 1, 2) @ mass_matrix_end_effector @ (kp * dpose).unsqueeze(-1) - kv * mass_matrix @ current_dof_velocities
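# Hedged usage sketch (illustrative names; an Isaac Gym installation is assumed): the returned
# tensor contains joint-space efforts of shape (num_envs, num_dofs, 1). Note that the DOF
# velocities are expected with a trailing singleton dimension, i.e. (num_envs, num_dofs, 1),
# so that the mass-matrix product above is well defined.
def _example_osc_step(jacobian_ee, mass_matrix, hand_pos, hand_rot, goal_pos, goal_rot, dof_vel):
    efforts = osc(jacobian_ee, mass_matrix, hand_pos, hand_rot, goal_pos, goal_rot, dof_vel, kp=5, kv=2)
    return efforts.squeeze(-1)  # (num_envs, num_dofs), e.g. to be applied as DOF actuation forces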
| 1,888 | Python | 40.977777 | 155 | 0.692267 |
Toni-SM/skrl/skrl/utils/huggingface.py | from skrl import __version__, logger
def download_model_from_huggingface(repo_id: str, filename: str = "agent.pt") -> str:
"""Download a model from Hugging Face Hub
:param repo_id: Hugging Face user or organization name and a repo name separated by a ``/``
:type repo_id: str
:param filename: The name of the model file in the repo (default: ``"agent.pt"``)
:type filename: str, optional
:raises ImportError: The Hugging Face Hub package (huggingface-hub) is not installed
:raises huggingface_hub.utils._errors.HfHubHTTPError: Any HTTP error raised in Hugging Face Hub
:return: Local path of file or if networking is off, last version of file cached on disk
:rtype: str
Example::
# download trained agent from the skrl organization (https://huggingface.co/skrl)
>>> from skrl.utils.huggingface import download_model_from_huggingface
>>> download_model_from_huggingface("skrl/OmniIsaacGymEnvs-Cartpole-PPO")
'/home/user/.cache/huggingface/hub/models--skrl--OmniIsaacGymEnvs-Cartpole-PPO/snapshots/892e629903de6bf3ef102ae760406a5dd0f6f873/agent.pt'
# download model (e.g. "policy.pth") from another user/organization (e.g. "org/ddpg-Pendulum-v1")
>>> from skrl.utils.huggingface import download_model_from_huggingface
>>> download_model_from_huggingface("org/ddpg-Pendulum-v1", "policy.pth")
'/home/user/.cache/huggingface/hub/models--org--ddpg-Pendulum-v1/snapshots/b44ee96f93ff2e296156b002a2ca4646e197ba32/policy.pth'
"""
logger.info(f"Downloading model from Hugging Face Hub: {repo_id}/{filename}")
try:
import huggingface_hub
except ImportError:
logger.error("Hugging Face Hub package is not installed. Use 'pip install huggingface-hub' to install it")
huggingface_hub = None
if huggingface_hub is None:
raise ImportError("Hugging Face Hub package is not installed. Use 'pip install huggingface-hub' to install it")
# download and cache the model from Hugging Face Hub
downloaded_model_file = huggingface_hub.hf_hub_download(repo_id=repo_id,
filename=filename,
library_name="skrl",
library_version=__version__)
return downloaded_model_file
| 2,401 | Python | 50.106382 | 147 | 0.66389 |
Toni-SM/skrl/skrl/utils/postprocessing.py | from typing import List, Tuple, Union
import collections
import csv
import glob
import os
import numpy as np
import torch
class MemoryFileIterator():
def __init__(self, pathname: str) -> None:
"""Python iterator for loading data from exported memories
The iterator will load the next memory file in the list of path names.
        The output of the iterator is a tuple of the filename and the memory data,
        where the memory data is a dictionary whose keys are the variable names and whose values are
        torch.Tensor (PyTorch), numpy.ndarray (NumPy) or lists (CSV), depending on the file format
Supported formats:
- PyTorch (pt)
- NumPy (npz)
- Comma-separated values (csv)
Expected output shapes:
- PyTorch: (memory_size, num_envs, data_size)
- NumPy: (memory_size, num_envs, data_size)
- Comma-separated values: (memory_size * num_envs, data_size)
:param pathname: String containing a path specification for the exported memories.
Python `glob <https://docs.python.org/3/library/glob.html#glob.glob>`_ method
is used to find all files matching the path specification
:type pathname: str
"""
self.n = 0
self.file_paths = sorted(glob.glob(pathname))
def __iter__(self) -> 'MemoryFileIterator':
"""Return self to make iterable"""
return self
def __next__(self) -> Tuple[str, dict]:
"""Return next batch
:return: Tuple of file name and data
:rtype: tuple
"""
if self.n >= len(self.file_paths):
raise StopIteration
if self.file_paths[self.n].endswith(".pt"):
return self._format_torch()
elif self.file_paths[self.n].endswith(".npz"):
return self._format_numpy()
elif self.file_paths[self.n].endswith(".csv"):
return self._format_csv()
else:
raise ValueError(f"Unsupported format for {self.file_paths[self.n]}. Available formats: .pt, .csv, .npz")
def _format_numpy(self) -> Tuple[str, dict]:
"""Load numpy array from file
:return: Tuple of file name and data
:rtype: tuple
"""
filename = os.path.basename(self.file_paths[self.n])
data = np.load(self.file_paths[self.n])
self.n += 1
return filename, data
def _format_torch(self) -> Tuple[str, dict]:
"""Load PyTorch tensor from file
:return: Tuple of file name and data
:rtype: tuple
"""
filename = os.path.basename(self.file_paths[self.n])
data = torch.load(self.file_paths[self.n])
self.n += 1
return filename, data
def _format_csv(self) -> Tuple[str, dict]:
"""Load CSV file from file
:return: Tuple of file name and data
:rtype: tuple
"""
filename = os.path.basename(self.file_paths[self.n])
with open(self.file_paths[self.n], 'r') as f:
reader = csv.reader(f)
# parse header
try:
header = next(reader, None)
data = collections.defaultdict(int)
for h in header:
h.split(".")[1] # check header format
data[h.split(".")[0]] += 1
names = sorted(list(data.keys()))
sizes = [data[name] for name in names]
indexes = [(low, high) for low, high in zip(np.cumsum(sizes) - np.array(sizes), np.cumsum(sizes))]
except:
self.n += 1
return filename, {}
# parse data
data = {name: [] for name in names}
for row in reader:
for name, index in zip(names, indexes):
                    data[name].append([float(item) if item not in ["True", "False"] else item == "True" \
                                       for item in row[index[0]:index[1]]])
self.n += 1
return filename, data
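# Usage sketch (the "./memories/*.pt" pattern is illustrative and assumes files exported by a skrl
# memory as PyTorch dictionaries of named tensors): iterate over the exported files and report the
# variables and their shapes found in each one.
def _example_iterate_memories(pathname="./memories/*.pt"):
    for filename, data in MemoryFileIterator(pathname):
        print(filename, {name: tuple(tensor.shape) for name, tensor in data.items()})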
class TensorboardFileIterator():
def __init__(self, pathname: str, tags: Union[str, List[str]]) -> None:
"""Python iterator for loading data from Tensorboard files
The iterator will load the next Tensorboard file in the list of path names.
The iterator's output is a tuple of the directory name and the Tensorboard variables selected by the tags.
The Tensorboard data is returned as a dictionary with the tag as the key and a list of steps and values as the value
:param pathname: String containing a path specification for the Tensorboard files.
Python `glob <https://docs.python.org/3/library/glob.html#glob.glob>`_ method
is used to find all files matching the path specification
:type pathname: str
:param tags: String or list of strings containing the tags of the variables to load
:type tags: str or list of str
"""
self.n = 0
self.file_paths = sorted(glob.glob(pathname))
self.tags = [tags] if isinstance(tags, str) else tags
def __iter__(self) -> 'TensorboardFileIterator':
"""Return self to make iterable"""
return self
def __next__(self) -> Tuple[str, dict]:
"""Return next batch
:return: Tuple of directory name and data
:rtype: tuple
"""
from tensorflow.python.summary.summary_iterator import summary_iterator
if self.n >= len(self.file_paths):
raise StopIteration
file_path = self.file_paths[self.n]
self.n += 1
data = {}
for event in summary_iterator(file_path):
try:
# get Tensorboard data
step = event.step
tag = event.summary.value[0].tag
value = event.summary.value[0].simple_value
# record data
if tag in self.tags:
if not tag in data:
data[tag] = []
data[tag].append([step, value])
except Exception as e:
pass
return os.path.dirname(file_path).split(os.sep)[-1], data
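# Usage sketch (path pattern and tag are illustrative; TensorFlow is required by the iterator):
# collect the curves of a single scalar tag from several runs, keyed by the run directory name.
def _example_iterate_tensorboard(pathname="./runs/*/events.out.tfevents.*",
                                 tag="Reward / Total reward (mean)"):
    curves = {}
    for dirname, data in TensorboardFileIterator(pathname, tags=tag):
        curves[dirname] = data.get(tag, [])  # list of [step, value] pairs
    return curves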
| 6,210 | Python | 34.289773 | 124 | 0.56876 |
Toni-SM/skrl/skrl/utils/__init__.py | from typing import Optional
import os
import random
import sys
import time
import numpy as np
from skrl import config, logger
def set_seed(seed: Optional[int] = None, deterministic: bool = False) -> int:
"""
Set the seed for the random number generators
Due to NumPy's legacy seeding constraint the seed must be between 0 and 2**32 - 1.
Otherwise a NumPy exception (``ValueError: Seed must be between 0 and 2**32 - 1``) will be raised
Modified packages:
- random
- numpy
- torch (if available)
- jax (skrl's PRNG key: ``config.jax.key``)
Example::
# fixed seed
>>> from skrl.utils import set_seed
>>> set_seed(42)
[skrl:INFO] Seed: 42
42
# random seed
>>> from skrl.utils import set_seed
>>> set_seed()
[skrl:INFO] Seed: 1776118066
1776118066
# enable deterministic. The following environment variables should be established:
# - CUDA 10.1: CUDA_LAUNCH_BLOCKING=1
# - CUDA 10.2 or later: CUBLAS_WORKSPACE_CONFIG=:16:8 or CUBLAS_WORKSPACE_CONFIG=:4096:8
>>> from skrl.utils import set_seed
>>> set_seed(42, deterministic=True)
[skrl:INFO] Seed: 42
[skrl:WARNING] PyTorch/cuDNN deterministic algorithms are enabled. This may affect performance
42
    :param seed: The seed to set. If None, a random seed will be generated (default: ``None``)
:type seed: int, optional
:param deterministic: Whether PyTorch is configured to use deterministic algorithms (default: ``False``).
The following environment variables should be established for CUDA 10.1 (``CUDA_LAUNCH_BLOCKING=1``)
and for CUDA 10.2 or later (``CUBLAS_WORKSPACE_CONFIG=:16:8`` or ``CUBLAS_WORKSPACE_CONFIG=:4096:8``).
See PyTorch `Reproducibility <https://pytorch.org/docs/stable/notes/randomness.html>`_ for details
:type deterministic: bool, optional
:return: Seed
:rtype: int
"""
# generate a random seed
if seed is None:
try:
seed = int.from_bytes(os.urandom(4), byteorder=sys.byteorder)
except NotImplementedError:
seed = int(time.time() * 1000)
        seed %= 2 ** 31  # NumPy's legacy seeding requires the seed to be between 0 and 2**32 - 1
seed = int(seed)
logger.info(f"Seed: {seed}")
# numpy
random.seed(seed)
np.random.seed(seed)
# torch
try:
import torch
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
# On CUDA 10.1, set environment variable CUDA_LAUNCH_BLOCKING=1
# On CUDA 10.2 or later, set environment variable CUBLAS_WORKSPACE_CONFIG=:16:8 or CUBLAS_WORKSPACE_CONFIG=:4096:8
logger.warning("PyTorch/cuDNN deterministic algorithms are enabled. This may affect performance")
except ImportError:
pass
except Exception as e:
logger.warning(f"PyTorch seeding error: {e}")
# jax
config.jax.key = seed
return seed
| 3,232 | Python | 31.009901 | 128 | 0.628094 |
Toni-SM/skrl/skrl/utils/isaacgym_utils.py | from typing import List, Optional
import logging
import math
import threading
import numpy as np
import torch
try:
import flask
except ImportError:
flask = None
try:
import imageio
import isaacgym
import isaacgym.torch_utils as torch_utils
from isaacgym import gymapi
except ImportError:
imageio = None
isaacgym = None
torch_utils = None
gymapi = None
class WebViewer:
def __init__(self, host: str = "127.0.0.1", port: int = 5000) -> None:
"""
Web viewer for Isaac Gym
:param host: Host address (default: "127.0.0.1")
:type host: str
:param port: Port number (default: 5000)
:type port: int
"""
self._app = flask.Flask(__name__)
self._app.add_url_rule("/", view_func=self._route_index)
self._app.add_url_rule("/_route_stream", view_func=self._route_stream)
self._app.add_url_rule("/_route_input_event", view_func=self._route_input_event, methods=["POST"])
self._log = logging.getLogger('werkzeug')
self._log.disabled = True
self._app.logger.disabled = True
self._image = None
self._camera_id = 0
self._camera_type = gymapi.IMAGE_COLOR
self._notified = False
self._wait_for_page = True
self._pause_stream = False
self._event_load = threading.Event()
self._event_stream = threading.Event()
# start server
self._thread = threading.Thread(target=lambda: \
self._app.run(host=host, port=port, debug=False, use_reloader=False), daemon=True)
self._thread.start()
print(f"\nStarting web viewer on http://{host}:{port}/\n")
def _route_index(self) -> 'flask.Response':
"""Render the web page
:return: Flask response
:rtype: flask.Response
"""
template = """<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<style>
html, body {
width: 100%; height: 100%;
margin: 0; overflow: hidden; display: block;
background-color: #000;
}
</style>
</head>
<body>
<div>
<canvas id="canvas" tabindex='1'></canvas>
</div>
<script>
var canvas, context, image;
function sendInputRequest(data){
let xmlRequest = new XMLHttpRequest();
xmlRequest.open("POST", "{{ url_for('_route_input_event') }}", true);
xmlRequest.setRequestHeader("Content-Type", "application/json");
xmlRequest.send(JSON.stringify(data));
}
window.onload = function(){
canvas = document.getElementById("canvas");
context = canvas.getContext('2d');
image = new Image();
image.src = "{{ url_for('_route_stream') }}";
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
window.addEventListener('resize', function(){
canvas.width = window.innerWidth;
canvas.height = window.innerHeight;
}, false);
window.setInterval(function(){
let ratio = image.naturalWidth / image.naturalHeight;
context.drawImage(image, 0, 0, canvas.width, canvas.width / ratio);
}, 50);
canvas.addEventListener('keydown', function(event){
if(event.keyCode != 18)
sendInputRequest({key: event.keyCode});
}, false);
canvas.addEventListener('mousemove', function(event){
if(event.buttons){
let data = {dx: event.movementX, dy: event.movementY};
if(event.altKey && event.buttons == 1){
data.key = 18;
data.mouse = "left";
}
else if(event.buttons == 2)
data.mouse = "right";
else if(event.buttons == 4)
data.mouse = "middle";
else
return;
sendInputRequest(data);
}
}, false);
canvas.addEventListener('wheel', function(event){
sendInputRequest({mouse: "wheel", dz: Math.sign(event.deltaY)});
}, false);
}
</script>
</body>
</html>
"""
self._event_load.set()
return flask.render_template_string(template)
def _route_stream(self) -> 'flask.Response':
"""Stream the image to the web page
:return: Flask response
:rtype: flask.Response
"""
return flask.Response(self._stream(), mimetype='multipart/x-mixed-replace; boundary=frame')
def _route_input_event(self) -> 'flask.Response':
"""Handle keyboard and mouse input
:return: Flask response
:rtype: flask.Response
"""
def q_mult(q1, q2):
return [q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2] - q1[3] * q2[3],
q1[0] * q2[1] + q1[1] * q2[0] + q1[2] * q2[3] - q1[3] * q2[2],
q1[0] * q2[2] + q1[2] * q2[0] + q1[3] * q2[1] - q1[1] * q2[3],
q1[0] * q2[3] + q1[3] * q2[0] + q1[1] * q2[2] - q1[2] * q2[1]]
def q_conj(q):
return [q[0], -q[1], -q[2], -q[3]]
def qv_mult(q, v):
q2 = [0] + v
return q_mult(q_mult(q, q2), q_conj(q))[1:]
def q_from_angle_axis(angle, axis):
s = math.sin(angle / 2.0)
return [math.cos(angle / 2.0), axis[0] * s, axis[1] * s, axis[2] * s]
def p_target(p, q, a=0, b=0, c=1, d=0):
v = qv_mult(q, [1, 0, 0])
p1 = [c0 + c1 for c0, c1 in zip(p, v)]
denominator = a * (p1[0] - p[0]) + b * (p1[1] - p[1]) + c * (p1[2] - p[2])
if denominator:
t = -(a * p[0] + b * p[1] + c * p[2] + d) / denominator
return [p[0] + t * (p1[0] - p[0]), p[1] + t * (p1[1] - p[1]), p[2] + t * (p1[2] - p[2])]
return v
# get keyboard and mouse inputs
data = flask.request.get_json()
key, mouse = data.get("key", None), data.get("mouse", None)
dx, dy, dz = data.get("dx", None), data.get("dy", None), data.get("dz", None)
transform = self._gym.get_camera_transform(self._sim,
self._envs[self._camera_id],
self._cameras[self._camera_id])
# zoom in/out
if mouse == "wheel":
# compute zoom vector
vector = qv_mult([transform.r.w, transform.r.x, transform.r.y, transform.r.z],
[-0.025 * dz, 0, 0])
# update transform
transform.p.x += vector[0]
transform.p.y += vector[1]
transform.p.z += vector[2]
# orbit camera
elif mouse == "left":
# convert mouse movement to angle
dx *= 0.1 * math.pi / 180
dy *= 0.1 * math.pi / 180
# compute rotation (Z-up)
q = q_from_angle_axis(dx, [0, 0, -1])
q = q_mult(q, q_from_angle_axis(dy, [1, 0, 0]))
# apply rotation
t = p_target([transform.p.x, transform.p.y, transform.p.z],
[transform.r.w, transform.r.x, transform.r.y, transform.r.z])
p = qv_mult(q, [transform.p.x - t[0], transform.p.y - t[1], transform.p.z - t[2]])
q = q_mult(q, [transform.r.w, transform.r.x, transform.r.y, transform.r.z])
# update transform
transform.p.x = p[0] + t[0]
transform.p.y = p[1] + t[1]
transform.p.z = p[2] + t[2]
transform.r.w, transform.r.x, transform.r.y, transform.r.z = q
# pan camera
elif mouse == "right":
# convert mouse movement to angle
dx *= 0.1 * math.pi / 180
dy *= 0.1 * math.pi / 180
# compute rotation (Z-up)
q = q_from_angle_axis(dx, [0, 0, -1])
q = q_mult(q, q_from_angle_axis(dy, [1, 0, 0]))
# apply rotation
q = q_mult(q, [transform.r.w, transform.r.x, transform.r.y, transform.r.z])
# update transform
transform.r.w, transform.r.x, transform.r.y, transform.r.z = q
# walk camera
elif mouse == "middle":
# compute displacement
vector = qv_mult([transform.r.w, transform.r.x, transform.r.y, transform.r.z],
[0, 0.001 * dx, 0.001 * dy])
# update transform
transform.p.x += vector[0]
transform.p.y += vector[1]
transform.p.z += vector[2]
# pause stream (V: 86)
elif key == 86:
self._pause_stream = not self._pause_stream
return flask.Response(status=200)
# change image type (T: 84)
elif key == 84:
if self._camera_type == gymapi.IMAGE_COLOR:
self._camera_type = gymapi.IMAGE_DEPTH
elif self._camera_type == gymapi.IMAGE_DEPTH:
self._camera_type = gymapi.IMAGE_COLOR
return flask.Response(status=200)
else:
return flask.Response(status=200)
self._gym.set_camera_transform(self._cameras[self._camera_id],
self._envs[self._camera_id],
transform)
return flask.Response(status=200)
def _stream(self) -> bytes:
"""Format the image to be streamed
:return: Image encoded as Content-Type
:rtype: bytes
"""
while True:
self._event_stream.wait()
# prepare image
image = imageio.imwrite("<bytes>", self._image, format="JPEG")
# stream image
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n')
self._event_stream.clear()
self._notified = False
def setup(self, gym: 'isaacgym.gymapi.Gym', sim: 'isaacgym.gymapi.Sim', envs: List[int], cameras: List[int]) -> None:
"""Setup the web viewer
:param gym: The gym
:type gym: isaacgym.gymapi.Gym
:param sim: Simulation handle
:type sim: isaacgym.gymapi.Sim
:param envs: Environment handles
:type envs: list of ints
:param cameras: Camera handles
:type cameras: list of ints
"""
self._gym = gym
self._sim = sim
self._envs = envs
self._cameras = cameras
def render(self,
fetch_results: bool = True,
step_graphics: bool = True,
render_all_camera_sensors: bool = True,
wait_for_page_load: bool = True) -> None:
"""Render and get the image from the current camera
This function must be called after the simulation is stepped (post_physics_step).
        The following Isaac Gym functions are called before getting the image.
        Each call can be skipped by setting the corresponding argument to False
- fetch_results
- step_graphics
- render_all_camera_sensors
:param fetch_results: Call Gym.fetch_results method (default: True)
:type fetch_results: bool
:param step_graphics: Call Gym.step_graphics method (default: True)
:type step_graphics: bool
:param render_all_camera_sensors: Call Gym.render_all_camera_sensors method (default: True)
:type render_all_camera_sensors: bool
:param wait_for_page_load: Wait for the page to load (default: True)
:type wait_for_page_load: bool
"""
# wait for page to load
if self._wait_for_page:
if wait_for_page_load:
if not self._event_load.is_set():
print("Waiting for web page to begin loading...")
self._event_load.wait()
self._event_load.clear()
self._wait_for_page = False
# pause stream
if self._pause_stream:
return
if self._notified:
return
# isaac gym API
if fetch_results:
self._gym.fetch_results(self._sim, True)
if step_graphics:
self._gym.step_graphics(self._sim)
if render_all_camera_sensors:
self._gym.render_all_camera_sensors(self._sim)
# get image
image = self._gym.get_camera_image(self._sim,
self._envs[self._camera_id],
self._cameras[self._camera_id],
self._camera_type)
if self._camera_type == gymapi.IMAGE_COLOR:
self._image = image.reshape(image.shape[0], -1, 4)[..., :3]
elif self._camera_type == gymapi.IMAGE_DEPTH:
self._image = -image.reshape(image.shape[0], -1)
minimum = 0 if np.isinf(np.min(self._image)) else np.min(self._image)
maximum = 5 if np.isinf(np.max(self._image)) else np.max(self._image)
self._image = np.clip(1 - (self._image - minimum) / (maximum - minimum), 0, 1)
self._image = np.uint8(255 * self._image)
else:
raise ValueError("Unsupported camera type")
# notify stream thread
self._event_stream.set()
self._notified = True
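# Hedged workflow sketch: the gym, sim, environment and camera handles are assumed to be created
# elsewhere (e.g. one camera sensor per environment). The viewer is set up once and render() is
# called after each physics step so the browser stream stays up to date.
def _example_web_viewer_loop(gym, sim, envs, cameras, num_steps=1000):
    web_viewer = WebViewer(host="127.0.0.1", port=5000)
    web_viewer.setup(gym, sim, envs, cameras)
    for _ in range(num_steps):
        gym.simulate(sim)
        web_viewer.render(fetch_results=True, step_graphics=True, render_all_camera_sensors=True)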
def ik(jacobian_end_effector: torch.Tensor,
current_position: torch.Tensor,
current_orientation: torch.Tensor,
goal_position: torch.Tensor,
goal_orientation: Optional[torch.Tensor] = None,
damping_factor: float = 0.05,
squeeze_output: bool = True) -> torch.Tensor:
"""
Inverse kinematics using damped least squares method
:param jacobian_end_effector: End effector's jacobian
:type jacobian_end_effector: torch.Tensor
:param current_position: End effector's current position
:type current_position: torch.Tensor
:param current_orientation: End effector's current orientation
:type current_orientation: torch.Tensor
:param goal_position: End effector's goal position
:type goal_position: torch.Tensor
:param goal_orientation: End effector's goal orientation (default: None)
:type goal_orientation: torch.Tensor or None
:param damping_factor: Damping factor (default: 0.05)
:type damping_factor: float
:param squeeze_output: Squeeze output (default: True)
:type squeeze_output: bool
:return: Change in joint angles
:rtype: torch.Tensor
"""
if goal_orientation is None:
goal_orientation = current_orientation
# compute error
q = torch_utils.quat_mul(goal_orientation, torch_utils.quat_conjugate(current_orientation))
error = torch.cat([goal_position - current_position, # position error
q[:, 0:3] * torch.sign(q[:, 3]).unsqueeze(-1)], # orientation error
dim=-1).unsqueeze(-1)
# solve damped least squares (dO = J.T * V)
transpose = torch.transpose(jacobian_end_effector, 1, 2)
lmbda = torch.eye(6, device=jacobian_end_effector.device) * (damping_factor ** 2)
if squeeze_output:
return (transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error).squeeze(dim=2)
else:
return transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error
def print_arguments(args):
print("")
print("Arguments")
for a in args.__dict__:
print(f" |-- {a}: {args.__getattribute__(a)}")
def print_asset_options(asset_options: 'isaacgym.gymapi.AssetOptions', asset_name: str = ""):
attrs = ["angular_damping", "armature", "collapse_fixed_joints", "convex_decomposition_from_submeshes",
"default_dof_drive_mode", "density", "disable_gravity", "fix_base_link", "flip_visual_attachments",
"linear_damping", "max_angular_velocity", "max_linear_velocity", "mesh_normal_mode", "min_particle_mass",
"override_com", "override_inertia", "replace_cylinder_with_capsule", "tendon_limit_stiffness", "thickness",
"use_mesh_materials", "use_physx_armature", "vhacd_enabled"] # vhacd_params
print("\nAsset options{}".format(f" ({asset_name})" if asset_name else ""))
for attr in attrs:
print(" |-- {}: {}".format(attr, getattr(asset_options, attr) if hasattr(asset_options, attr) else "--"))
# vhacd attributes
if attr == "vhacd_enabled" and hasattr(asset_options, attr) and getattr(asset_options, attr):
vhacd_attrs = ["alpha", "beta", "concavity", "convex_hull_approximation", "convex_hull_downsampling",
"max_convex_hulls", "max_num_vertices_per_ch", "min_volume_per_ch", "mode", "ocl_acceleration",
"pca", "plane_downsampling", "project_hull_vertices", "resolution"]
print(" |-- vhacd_params:")
for vhacd_attr in vhacd_attrs:
print(" | |-- {}: {}".format(vhacd_attr, getattr(asset_options.vhacd_params, vhacd_attr) \
if hasattr(asset_options.vhacd_params, vhacd_attr) else "--"))
def print_sim_components(gym, sim):
print("")
print("Sim components")
print(" |-- env count:", gym.get_env_count(sim))
print(" |-- actor count:", gym.get_sim_actor_count(sim))
print(" |-- rigid body count:", gym.get_sim_rigid_body_count(sim))
print(" |-- joint count:", gym.get_sim_joint_count(sim))
print(" |-- dof count:", gym.get_sim_dof_count(sim))
print(" |-- force sensor count:", gym.get_sim_force_sensor_count(sim))
def print_env_components(gym, env):
print("")
print("Env components")
print(" |-- actor count:", gym.get_actor_count(env))
print(" |-- rigid body count:", gym.get_env_rigid_body_count(env))
print(" |-- joint count:", gym.get_env_joint_count(env))
print(" |-- dof count:", gym.get_env_dof_count(env))
def print_actor_components(gym, env, actor):
print("")
print("Actor components")
print(" |-- rigid body count:", gym.get_actor_rigid_body_count(env, actor))
print(" |-- joint count:", gym.get_actor_joint_count(env, actor))
print(" |-- dof count:", gym.get_actor_dof_count(env, actor))
print(" |-- actuator count:", gym.get_actor_actuator_count(env, actor))
print(" |-- rigid shape count:", gym.get_actor_rigid_shape_count(env, actor))
print(" |-- soft body count:", gym.get_actor_soft_body_count(env, actor))
print(" |-- tendon count:", gym.get_actor_tendon_count(env, actor))
def print_dof_properties(gymapi, props):
print("")
print("DOF properties")
print(" |-- hasLimits:", props["hasLimits"])
print(" |-- lower:", props["lower"])
print(" |-- upper:", props["upper"])
print(" |-- driveMode:", props["driveMode"])
print(" | |-- {}: gymapi.DOF_MODE_NONE".format(int(gymapi.DOF_MODE_NONE)))
print(" | |-- {}: gymapi.DOF_MODE_POS".format(int(gymapi.DOF_MODE_POS)))
print(" | |-- {}: gymapi.DOF_MODE_VEL".format(int(gymapi.DOF_MODE_VEL)))
print(" | |-- {}: gymapi.DOF_MODE_EFFORT".format(int(gymapi.DOF_MODE_EFFORT)))
print(" |-- stiffness:", props["stiffness"])
print(" |-- damping:", props["damping"])
print(" |-- velocity (max):", props["velocity"])
print(" |-- effort (max):", props["effort"])
print(" |-- friction:", props["friction"])
print(" |-- armature:", props["armature"])
def print_links_and_dofs(gym, asset):
link_dict = gym.get_asset_rigid_body_dict(asset)
dof_dict = gym.get_asset_dof_dict(asset)
print("")
print("Links")
for k in link_dict:
print(f" |-- {k}: {link_dict[k]}")
print("DOFs")
for k in dof_dict:
print(f" |-- {k}: {dof_dict[k]}")
| 20,525 | Python | 39.089844 | 122 | 0.533203 |
Toni-SM/skrl/skrl/utils/omniverse_isaacgym_utils.py | from typing import Mapping, Optional
import queue
import numpy as np
import torch
from skrl import logger
def _np_quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
return np.stack([x, y, z, w], axis=-1).reshape(shape)
def _np_quat_conjugate(a):
shape = a.shape
a = a.reshape(-1, 4)
return np.concatenate((-a[:, :3], a[:, -1:]), axis=-1).reshape(shape)
def _torch_quat_mul(a, b):
assert a.shape == b.shape
shape = a.shape
a = a.reshape(-1, 4)
b = b.reshape(-1, 4)
w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3]
w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3]
ww = (z1 + x1) * (x2 + y2)
yy = (w1 - y1) * (w2 + z2)
zz = (w1 + y1) * (w2 - z2)
xx = ww + yy + zz
qq = 0.5 * (xx + (z1 - x1) * (x2 - y2))
w = qq - ww + (z1 - y1) * (y2 - z2)
x = qq - xx + (x1 + w1) * (x2 + w2)
y = qq - yy + (w1 - x1) * (y2 + z2)
z = qq - zz + (z1 + y1) * (w2 - x2)
return torch.stack([w, x, y, z], dim=-1).view(shape)
def _torch_quat_conjugate(a): # wxyz
shape = a.shape
a = a.reshape(-1, 4)
return torch.cat((a[:, :1], -a[:, 1:]), dim=-1).view(shape)
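# Quick sanity check for the helpers above (wxyz layout): multiplying by the identity quaternion
# is a no-op, and a unit quaternion times its conjugate yields the identity quaternion.
def _example_quaternion_helpers():
    identity = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
    q = torch.randn(1, 4)
    q = q / torch.linalg.norm(q, dim=-1, keepdim=True)
    assert torch.allclose(_torch_quat_mul(identity, q), q, atol=1e-5)
    assert torch.allclose(_torch_quat_mul(q, _torch_quat_conjugate(q)), identity, atol=1e-5)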
def ik(jacobian_end_effector: torch.Tensor,
current_position: torch.Tensor,
current_orientation: torch.Tensor,
goal_position: torch.Tensor,
goal_orientation: Optional[torch.Tensor] = None,
method: str = "damped least-squares",
method_cfg: Mapping[str, float] = {"scale": 1, "damping": 0.05, "min_singular_value": 1e-5},
squeeze_output: bool = True,) -> torch.Tensor:
"""Differential inverse kinematics
:param jacobian_end_effector: End effector's jacobian
:type jacobian_end_effector: torch.Tensor
:param current_position: End effector's current position
:type current_position: torch.Tensor
:param current_orientation: End effector's current orientation
:type current_orientation: torch.Tensor
:param goal_position: End effector's goal position
:type goal_position: torch.Tensor
:param goal_orientation: End effector's goal orientation (default: ``None``).
If not provided, the current orientation will be used instead.
:type goal_orientation: torch.Tensor, optional
:param method: Differential inverse kinematics formulation (default: ``"damped least-squares"``).
                   The supported methods are described in the following table:
                   +----------------------------------+----------------------------------+
                   |IK Method                         |Method tag                        |
                   +==================================+==================================+
                   |Damped least-squares              |``"damped least-squares"``        |
                   +----------------------------------+----------------------------------+
                   |Transpose                         |``"transpose"``                   |
                   +----------------------------------+----------------------------------+
                   |Pseudoinverse                     |``"pseudoinverse"``               |
                   +----------------------------------+----------------------------------+
                   |Singular-value decomposition (SVD)|``"singular-vale decomposition"`` |
                   +----------------------------------+----------------------------------+
:type method: str, optional
:param method_cfg: Method configurations (default: ``{"scale": 1, "damping": 0.05, "min_singular_value": 1e-5}``)
:type method_cfg: dict, optional
:param squeeze_output: Squeeze output (default: ``True``)
:type squeeze_output: bool, optional
:return: Change in joint angles
:rtype: torch.Tensor
"""
if goal_orientation is None:
goal_orientation = current_orientation
# torch
if isinstance(jacobian_end_effector, torch.Tensor):
# compute error
q = _torch_quat_mul(goal_orientation, _torch_quat_conjugate(current_orientation))
error = torch.cat([goal_position - current_position, # position error
q[:, 1:] * torch.sign(q[:, 0]).unsqueeze(-1)], # orientation error
dim=-1).unsqueeze(-1)
scale = method_cfg.get("scale", 1.0)
# adaptive Singular Value Decomposition (SVD)
if method == "singular-vale decomposition":
min_singular_value = method_cfg.get("min_singular_value", 1e-5)
            U, S, Vh = torch.linalg.svd(jacobian_end_effector)  # U: (batch, 6, 6), S: (batch, min(6, num_dof)), Vh: (batch, num_dof, num_dof)
inv_s = torch.where(S > min_singular_value, 1.0 / S, torch.zeros_like(S))
pseudoinverse = torch.transpose(Vh, 1, 2)[:, :, :6] @ torch.diag_embed(inv_s) @ torch.transpose(U, 1, 2)
if squeeze_output:
return (scale * pseudoinverse @ error).squeeze(dim=2)
else:
return scale * pseudoinverse @ error
# jacobian pseudoinverse
elif method == "pseudoinverse":
pseudoinverse = torch.linalg.pinv(jacobian_end_effector)
if squeeze_output:
return (scale * pseudoinverse @ error).squeeze(dim=2)
else:
return scale * pseudoinverse @ error
# jacobian transpose
elif method == "transpose":
transpose = torch.transpose(jacobian_end_effector, 1, 2)
if squeeze_output:
return (scale * transpose @ error).squeeze(dim=2)
else:
return scale * transpose @ error
# damped least-squares
elif method == "damped least-squares":
damping = method_cfg.get("damping", 0.05)
transpose = torch.transpose(jacobian_end_effector, 1, 2)
lmbda = torch.eye(jacobian_end_effector.shape[1], device=jacobian_end_effector.device) * (damping ** 2)
if squeeze_output:
return (scale * transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error).squeeze(dim=2)
else:
return scale * transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error
else:
raise ValueError("Invalid IK method")
# numpy
# TODO: test and fix this
else:
        # compute error
        q = _np_quat_mul(goal_orientation, _np_quat_conjugate(current_orientation))
        error = np.expand_dims(np.concatenate([goal_position - current_position,  # position error
                                               q[:, 0:3] * np.expand_dims(np.sign(q[:, 3]), -1)],  # orientation error
                                              axis=-1), -1)
        # solve damped least squares (dO = J.T * V)
        transpose = np.transpose(jacobian_end_effector, (0, 2, 1))
        lmbda = np.eye(6) * (method_cfg.get("damping", 0.05) ** 2)
        if squeeze_output:
            return (transpose @ np.linalg.inv(jacobian_end_effector @ transpose + lmbda) @ error).squeeze(axis=2)
        else:
            return transpose @ np.linalg.inv(jacobian_end_effector @ transpose + lmbda) @ error
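# Self-contained usage sketch with random (illustrative) data: a batch of 8 end-effector Jacobians
# for a 7-DOF arm, wxyz unit quaternions and the default damped least-squares formulation.
# The returned joint displacement has shape (8, 7).
def _example_differential_ik():
    num_envs, num_dofs = 8, 7
    jacobian_ee = torch.randn(num_envs, 6, num_dofs)
    current_position, goal_position = torch.randn(num_envs, 3), torch.randn(num_envs, 3)
    current_orientation = torch.randn(num_envs, 4)
    current_orientation = current_orientation / torch.linalg.norm(current_orientation, dim=-1, keepdim=True)
    goal_orientation = torch.randn(num_envs, 4)
    goal_orientation = goal_orientation / torch.linalg.norm(goal_orientation, dim=-1, keepdim=True)
    delta_dof = ik(jacobian_ee, current_position, current_orientation, goal_position, goal_orientation)
    return delta_dof.shape  # torch.Size([8, 7])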
def get_env_instance(headless: bool = True,
enable_livestream: bool = False,
enable_viewport: bool = False,
multi_threaded: bool = False) -> "omni.isaac.gym.vec_env.VecEnvBase":
"""
Instantiate a VecEnvBase-based object compatible with OmniIsaacGymEnvs
:param headless: Disable UI when running (default: ``True``)
:type headless: bool, optional
:param enable_livestream: Whether to enable live streaming (default: ``False``)
:type enable_livestream: bool, optional
:param enable_viewport: Whether to enable viewport (default: ``False``)
:type enable_viewport: bool, optional
:param multi_threaded: Whether to return a multi-threaded environment instance (default: ``False``)
:type multi_threaded: bool, optional
:return: Environment instance
:rtype: omni.isaac.gym.vec_env.VecEnvBase
Example::
from skrl.envs.wrappers.torch import wrap_env
from skrl.utils.omniverse_isaacgym_utils import get_env_instance
# get environment instance
env = get_env_instance(headless=True)
# parse sim configuration
from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig
sim_config = SimConfig({"test": False,
"device_id": 0,
"headless": True,
"multi_gpu": False,
"sim_device": "gpu",
"enable_livestream": False,
"task": {"name": "CustomTask",
"physics_engine": "physx",
"env": {"numEnvs": 512,
"envSpacing": 1.5,
"enableDebugVis": False,
"clipObservations": 1000.0,
"clipActions": 1.0,
"controlFrequencyInv": 4},
"sim": {"dt": 0.0083, # 1 / 120
"use_gpu_pipeline": True,
"gravity": [0.0, 0.0, -9.81],
"add_ground_plane": True,
"use_flatcache": True,
"enable_scene_query_support": False,
"enable_cameras": False,
"default_physics_material": {"static_friction": 1.0,
"dynamic_friction": 1.0,
"restitution": 0.0},
"physx": {"worker_thread_count": 4,
"solver_type": 1,
"use_gpu": True,
"solver_position_iteration_count": 4,
"solver_velocity_iteration_count": 1,
"contact_offset": 0.005,
"rest_offset": 0.0,
"bounce_threshold_velocity": 0.2,
"friction_offset_threshold": 0.04,
"friction_correlation_distance": 0.025,
"enable_sleeping": True,
"enable_stabilization": True,
"max_depenetration_velocity": 1000.0,
"gpu_max_rigid_contact_count": 524288,
"gpu_max_rigid_patch_count": 33554432,
"gpu_found_lost_pairs_capacity": 524288,
"gpu_found_lost_aggregate_pairs_capacity": 262144,
"gpu_total_aggregate_pairs_capacity": 1048576,
"gpu_max_soft_body_contacts": 1048576,
"gpu_max_particle_contacts": 1048576,
"gpu_heap_capacity": 33554432,
"gpu_temp_buffer_capacity": 16777216,
"gpu_max_num_partitions": 8}}}})
# import and setup custom task
from custom_task import CustomTask
task = CustomTask(name="CustomTask", sim_config=sim_config, env=env)
env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True)
# wrap the environment
env = wrap_env(env, "omniverse-isaacgym")
"""
from omni.isaac.gym.vec_env import TaskStopException, VecEnvBase, VecEnvMT
from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT
class _OmniIsaacGymVecEnv(VecEnvBase):
def step(self, actions):
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone()
self._task.pre_physics_step(actions)
for _ in range(self._task.control_frequency_inv):
self._world.step(render=self._render)
self.sim_frame_count += 1
observations, rewards, dones, info = self._task.post_physics_step()
return {"obs": torch.clamp(observations, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()}, \
rewards.to(self._task.rl_device).clone(), dones.to(self._task.rl_device).clone(), info.copy()
def reset(self):
self._task.reset()
actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device)
return self.step(actions)[0]
class _OmniIsaacGymTrainerMT(TrainerMT):
def run(self):
pass
def stop(self):
pass
class _OmniIsaacGymVecEnvMT(VecEnvMT):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.action_queue = queue.Queue(1)
self.data_queue = queue.Queue(1)
def run(self, trainer=None):
super().run(_OmniIsaacGymTrainerMT() if trainer is None else trainer)
def _parse_data(self, data):
self._observations = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()
self._rewards = data["rew"].to(self._task.rl_device).clone()
self._dones = data["reset"].to(self._task.rl_device).clone()
self._info = data["extras"].copy()
def step(self, actions):
if self._stop:
raise TaskStopException()
actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).clone()
self.send_actions(actions)
data = self.get_data()
return {"obs": self._observations}, self._rewards, self._dones, self._info
def reset(self):
self._task.reset()
actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device)
return self.step(actions)[0]
def close(self):
# end stop signal to main thread
self.send_actions(None)
self.stop = True
if multi_threaded:
try:
return _OmniIsaacGymVecEnvMT(headless=headless, enable_livestream=enable_livestream, enable_viewport=enable_viewport)
except TypeError:
logger.warning("Using an older version of Isaac Sim (2022.2.0 or earlier)")
return _OmniIsaacGymVecEnvMT(headless=headless) # Isaac Sim 2022.2.0 and earlier
else:
try:
return _OmniIsaacGymVecEnv(headless=headless, enable_livestream=enable_livestream, enable_viewport=enable_viewport)
except TypeError:
logger.warning("Using an older version of Isaac Sim (2022.2.0 or earlier)")
return _OmniIsaacGymVecEnv(headless=headless) # Isaac Sim 2022.2.0 and earlier
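# Illustrative sketch (not executed here): once wrapped, the environment follows the usual skrl
# training workflow. The `agent` below is assumed to be an already-configured skrl agent
# (e.g. PPO); the timestep count is an arbitrary demonstration value.
#
#   from skrl.trainers.torch import SequentialTrainer
#   trainer = SequentialTrainer(cfg={"timesteps": 16000, "headless": True}, env=env, agents=agent)
#   trainer.train()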
| 16,269 | Python | 47.858859 | 133 | 0.487737 |
Toni-SM/skrl/skrl/utils/model_instantiators/torch/__init__.py | from typing import Any, Mapping, Optional, Sequence, Tuple, Union
from enum import Enum
import gym
import gymnasium
import torch
import torch.nn as nn
from skrl.models.torch import Model # noqa
from skrl.models.torch import CategoricalMixin, DeterministicMixin, GaussianMixin, MultivariateGaussianMixin # noqa
__all__ = ["categorical_model", "deterministic_model", "gaussian_model", "multivariate_gaussian_model", "Shape"]
class Shape(Enum):
"""
Enum to select the shape of the model's inputs and outputs
"""
ONE = 1
STATES = 0
OBSERVATIONS = 0
ACTIONS = -1
STATES_ACTIONS = -2
def _get_activation_function(activation: str) -> nn.Module:
"""Get the activation function
Supported activation functions:
- "elu"
- "leaky_relu"
- "relu"
- "selu"
- "sigmoid"
- "softmax"
- "softplus"
- "softsign"
- "tanh"
:param activation: activation function name.
If activation is an empty string, a placeholder will be returned (``torch.nn.Identity()``)
:type activation: str
:raises: ValueError if activation is not a valid activation function
:return: activation function
:rtype: nn.Module
"""
if not activation:
return torch.nn.Identity()
elif activation == "relu":
return torch.nn.ReLU()
elif activation == "tanh":
return torch.nn.Tanh()
elif activation == "sigmoid":
return torch.nn.Sigmoid()
elif activation == "leaky_relu":
return torch.nn.LeakyReLU()
elif activation == "elu":
return torch.nn.ELU()
elif activation == "softplus":
return torch.nn.Softplus()
elif activation == "softsign":
return torch.nn.Softsign()
elif activation == "selu":
return torch.nn.SELU()
elif activation == "softmax":
return torch.nn.Softmax()
else:
raise ValueError(f"Unknown activation function: {activation}")
def _get_num_units_by_shape(model: Model, shape: Shape) -> int:
"""Get the number of units in a layer by shape
:param model: Model to get the number of units for
:type model: Model
:param shape: Shape of the layer
:type shape: Shape or int
:return: Number of units in the layer
:rtype: int
"""
num_units = {Shape.ONE: 1,
Shape.STATES: model.num_observations,
Shape.ACTIONS: model.num_actions,
Shape.STATES_ACTIONS: model.num_observations + model.num_actions}
try:
return num_units[shape]
    except KeyError:
return shape
def _generate_sequential(model: Model,
input_shape: Shape = Shape.STATES,
hiddens: list = [256, 256],
hidden_activation: list = ["relu", "relu"],
output_shape: Shape = Shape.ACTIONS,
output_activation: Union[str, None] = "tanh",
output_scale: Optional[int] = None) -> nn.Sequential:
"""Generate a sequential model
:param model: model to generate sequential model for
:type model: Model
:param input_shape: Shape of the input (default: Shape.STATES)
:type input_shape: Shape, optional
:param hiddens: Number of hidden units in each hidden layer
:type hiddens: int or list of ints
:param hidden_activation: Activation function for each hidden layer (default: "relu").
:type hidden_activation: list of strings
:param output_shape: Shape of the output (default: Shape.ACTIONS)
:type output_shape: Shape, optional
:param output_activation: Activation function for the output layer (default: "tanh")
:type output_activation: str or None, optional
:param output_scale: Scale of the output layer (default: None).
If None, the output layer will not be scaled
:type output_scale: int, optional
:return: sequential model
:rtype: nn.Sequential
"""
# input layer
input_layer = [nn.Linear(_get_num_units_by_shape(model, input_shape), hiddens[0])]
# hidden layers
hidden_layers = []
for i in range(len(hiddens) - 1):
hidden_layers.append(_get_activation_function(hidden_activation[i]))
hidden_layers.append(nn.Linear(hiddens[i], hiddens[i + 1]))
hidden_layers.append(_get_activation_function(hidden_activation[-1]))
# output layer
output_layer = [nn.Linear(hiddens[-1], _get_num_units_by_shape(model, output_shape))]
if output_activation is not None:
output_layer.append(_get_activation_function(output_activation))
return nn.Sequential(*input_layer, *hidden_layers, *output_layer)
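# For illustration (hypothetical sizes): for a model with 24 observations and 3 actions, the
# default arguments above (hiddens=[256, 256], hidden_activation=["relu", "relu"],
# output_activation="tanh") produce the following network:
#
#   nn.Sequential(nn.Linear(24, 256), nn.ReLU(),
#                 nn.Linear(256, 256), nn.ReLU(),
#                 nn.Linear(256, 3), nn.Tanh())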
def gaussian_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
clip_actions: bool = False,
clip_log_std: bool = True,
min_log_std: float = -20,
max_log_std: float = 2,
initial_log_std: float = 0,
input_shape: Shape = Shape.STATES,
hiddens: list = [256, 256],
hidden_activation: list = ["relu", "relu"],
output_shape: Shape = Shape.ACTIONS,
output_activation: Optional[str] = "tanh",
output_scale: float = 1.0) -> Model:
"""Instantiate a Gaussian model
:param observation_space: Observation/state space or shape (default: None).
If it is not None, the num_observations property will contain the size of that space
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None).
If it is not None, the num_actions property will contain the size of that space
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param clip_actions: Flag to indicate whether the actions should be clipped (default: False)
:type clip_actions: bool, optional
:param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: True)
:type clip_log_std: bool, optional
:param min_log_std: Minimum value of the log standard deviation (default: -20)
:type min_log_std: float, optional
:param max_log_std: Maximum value of the log standard deviation (default: 2)
:type max_log_std: float, optional
:param initial_log_std: Initial value for the log standard deviation (default: 0)
:type initial_log_std: float, optional
:param input_shape: Shape of the input (default: Shape.STATES)
:type input_shape: Shape, optional
:param hiddens: Number of hidden units in each hidden layer
:type hiddens: int or list of ints
:param hidden_activation: Activation function for each hidden layer (default: "relu").
:type hidden_activation: list of strings
:param output_shape: Shape of the output (default: Shape.ACTIONS)
:type output_shape: Shape, optional
:param output_activation: Activation function for the output layer (default: "tanh")
:type output_activation: str or None, optional
:param output_scale: Scale of the output layer (default: 1.0).
If None, the output layer will not be scaled
:type output_scale: float, optional
:return: Gaussian model instance
:rtype: Model
"""
class GaussianModel(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions,
clip_log_std, min_log_std, max_log_std, reduction="sum"):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
self.instantiator_output_scale = metadata["output_scale"]
self.instantiator_input_type = metadata["input_shape"].value
self.net = _generate_sequential(model=self,
input_shape=metadata["input_shape"],
hiddens=metadata["hiddens"],
hidden_activation=metadata["hidden_activation"],
output_shape=metadata["output_shape"],
output_activation=metadata["output_activation"],
output_scale=metadata["output_scale"])
self.log_std_parameter = nn.Parameter(metadata["initial_log_std"] \
* torch.ones(_get_num_units_by_shape(self, metadata["output_shape"])))
def compute(self, inputs, role=""):
if self.instantiator_input_type == 0:
output = self.net(inputs["states"])
elif self.instantiator_input_type == -1:
output = self.net(inputs["taken_actions"])
elif self.instantiator_input_type == -2:
output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1))
return output * self.instantiator_output_scale, self.log_std_parameter, {}
metadata = {"input_shape": input_shape,
"hiddens": hiddens,
"hidden_activation": hidden_activation,
"output_shape": output_shape,
"output_activation": output_activation,
"output_scale": output_scale,
"initial_log_std": initial_log_std}
return GaussianModel(observation_space=observation_space,
action_space=action_space,
device=device,
clip_actions=clip_actions,
clip_log_std=clip_log_std,
min_log_std=min_log_std,
max_log_std=max_log_std)
def multivariate_gaussian_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
clip_actions: bool = False,
clip_log_std: bool = True,
min_log_std: float = -20,
max_log_std: float = 2,
initial_log_std: float = 0,
input_shape: Shape = Shape.STATES,
hiddens: list = [256, 256],
hidden_activation: list = ["relu", "relu"],
output_shape: Shape = Shape.ACTIONS,
output_activation: Optional[str] = "tanh",
output_scale: float = 1.0) -> Model:
"""Instantiate a multivariate Gaussian model
:param observation_space: Observation/state space or shape (default: None).
If it is not None, the num_observations property will contain the size of that space
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None).
If it is not None, the num_actions property will contain the size of that space
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param clip_actions: Flag to indicate whether the actions should be clipped (default: False)
:type clip_actions: bool, optional
:param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: True)
:type clip_log_std: bool, optional
:param min_log_std: Minimum value of the log standard deviation (default: -20)
:type min_log_std: float, optional
:param max_log_std: Maximum value of the log standard deviation (default: 2)
:type max_log_std: float, optional
:param initial_log_std: Initial value for the log standard deviation (default: 0)
:type initial_log_std: float, optional
:param input_shape: Shape of the input (default: Shape.STATES)
:type input_shape: Shape, optional
:param hiddens: Number of hidden units in each hidden layer
:type hiddens: int or list of ints
:param hidden_activation: Activation function for each hidden layer (default: "relu").
:type hidden_activation: list of strings
:param output_shape: Shape of the output (default: Shape.ACTIONS)
:type output_shape: Shape, optional
:param output_activation: Activation function for the output layer (default: "tanh")
:type output_activation: str or None, optional
:param output_scale: Scale of the output layer (default: 1.0).
If None, the output layer will not be scaled
:type output_scale: float, optional
:return: Multivariate Gaussian model instance
:rtype: Model
"""
class MultivariateGaussianModel(MultivariateGaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions,
clip_log_std, min_log_std, max_log_std):
Model.__init__(self, observation_space, action_space, device)
MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)
self.instantiator_output_scale = metadata["output_scale"]
self.instantiator_input_type = metadata["input_shape"].value
self.net = _generate_sequential(model=self,
input_shape=metadata["input_shape"],
hiddens=metadata["hiddens"],
hidden_activation=metadata["hidden_activation"],
output_shape=metadata["output_shape"],
output_activation=metadata["output_activation"],
output_scale=metadata["output_scale"])
self.log_std_parameter = nn.Parameter(metadata["initial_log_std"] \
* torch.ones(_get_num_units_by_shape(self, metadata["output_shape"])))
def compute(self, inputs, role=""):
if self.instantiator_input_type == 0:
output = self.net(inputs["states"])
elif self.instantiator_input_type == -1:
output = self.net(inputs["taken_actions"])
elif self.instantiator_input_type == -2:
output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1))
return output * self.instantiator_output_scale, self.log_std_parameter, {}
metadata = {"input_shape": input_shape,
"hiddens": hiddens,
"hidden_activation": hidden_activation,
"output_shape": output_shape,
"output_activation": output_activation,
"output_scale": output_scale,
"initial_log_std": initial_log_std}
return MultivariateGaussianModel(observation_space=observation_space,
action_space=action_space,
device=device,
clip_actions=clip_actions,
clip_log_std=clip_log_std,
min_log_std=min_log_std,
max_log_std=max_log_std)
def deterministic_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
clip_actions: bool = False,
input_shape: Shape = Shape.STATES,
hiddens: list = [256, 256],
hidden_activation: list = ["relu", "relu"],
output_shape: Shape = Shape.ACTIONS,
output_activation: Optional[str] = "tanh",
output_scale: float = 1.0) -> Model:
"""Instantiate a deterministic model
:param observation_space: Observation/state space or shape (default: None).
If it is not None, the num_observations property will contain the size of that space
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None).
If it is not None, the num_actions property will contain the size of that space
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: False)
:type clip_actions: bool, optional
:param input_shape: Shape of the input (default: Shape.STATES)
:type input_shape: Shape, optional
:param hiddens: Number of hidden units in each hidden layer
:type hiddens: int or list of ints
:param hidden_activation: Activation function for each hidden layer (default: "relu").
:type hidden_activation: list of strings
:param output_shape: Shape of the output (default: Shape.ACTIONS)
:type output_shape: Shape, optional
:param output_activation: Activation function for the output layer (default: "tanh")
:type output_activation: str or None, optional
:param output_scale: Scale of the output layer (default: 1.0).
If None, the output layer will not be scaled
:type output_scale: float, optional
:return: Deterministic model instance
:rtype: Model
"""
class DeterministicModel(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions):
Model.__init__(self, observation_space, action_space, device)
DeterministicMixin.__init__(self, clip_actions)
self.instantiator_output_scale = metadata["output_scale"]
self.instantiator_input_type = metadata["input_shape"].value
self.net = _generate_sequential(model=self,
input_shape=metadata["input_shape"],
hiddens=metadata["hiddens"],
hidden_activation=metadata["hidden_activation"],
output_shape=metadata["output_shape"],
output_activation=metadata["output_activation"],
output_scale=metadata["output_scale"])
def compute(self, inputs, role=""):
if self.instantiator_input_type == 0:
output = self.net(inputs["states"])
elif self.instantiator_input_type == -1:
output = self.net(inputs["taken_actions"])
elif self.instantiator_input_type == -2:
output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1))
return output * self.instantiator_output_scale, {}
metadata = {"input_shape": input_shape,
"hiddens": hiddens,
"hidden_activation": hidden_activation,
"output_shape": output_shape,
"output_activation": output_activation,
"output_scale": output_scale}
return DeterministicModel(observation_space=observation_space,
action_space=action_space,
device=device,
clip_actions=clip_actions)
def categorical_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
unnormalized_log_prob: bool = True,
input_shape: Shape = Shape.STATES,
hiddens: list = [256, 256],
hidden_activation: list = ["relu", "relu"],
output_shape: Shape = Shape.ACTIONS,
output_activation: Optional[str] = None) -> Model:
"""Instantiate a categorical model
:param observation_space: Observation/state space or shape (default: None).
If it is not None, the num_observations property will contain the size of that space
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None).
If it is not None, the num_actions property will contain the size of that space
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
    :param unnormalized_log_prob: Flag to indicate how the model's output is to be interpreted (default: True).
If True, the model's output is interpreted as unnormalized log probabilities
(it can be any real number), otherwise as normalized probabilities
(the output must be non-negative, finite and have a non-zero sum)
:type unnormalized_log_prob: bool, optional
:param input_shape: Shape of the input (default: Shape.STATES)
:type input_shape: Shape, optional
:param hiddens: Number of hidden units in each hidden layer
:type hiddens: int or list of ints
:param hidden_activation: Activation function for each hidden layer (default: "relu").
:type hidden_activation: list of strings
:param output_shape: Shape of the output (default: Shape.ACTIONS)
:type output_shape: Shape, optional
:param output_activation: Activation function for the output layer (default: None)
:type output_activation: str or None, optional
:return: Categorical model instance
:rtype: Model
"""
class CategoricalModel(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob):
Model.__init__(self, observation_space, action_space, device)
CategoricalMixin.__init__(self, unnormalized_log_prob)
self.instantiator_input_type = metadata["input_shape"].value
self.net = _generate_sequential(model=self,
input_shape=metadata["input_shape"],
hiddens=metadata["hiddens"],
hidden_activation=metadata["hidden_activation"],
output_shape=metadata["output_shape"],
output_activation=metadata["output_activation"])
def compute(self, inputs, role=""):
if self.instantiator_input_type == 0:
output = self.net(inputs["states"])
elif self.instantiator_input_type == -1:
output = self.net(inputs["taken_actions"])
elif self.instantiator_input_type == -2:
output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1))
return output, {}
metadata = {"input_shape": input_shape,
"hiddens": hiddens,
"hidden_activation": hidden_activation,
"output_shape": output_shape,
"output_activation": output_activation}
return CategoricalModel(observation_space=observation_space,
action_space=action_space,
device=device,
unnormalized_log_prob=unnormalized_log_prob)
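# Illustrative sketch (hypothetical spaces, not part of the library): a categorical policy for a
# discrete-action task. With the default unnormalized_log_prob=True the network output is treated
# as logits over the available actions.
#
#   policy = categorical_model(observation_space=gymnasium.spaces.Box(low=-1.0, high=1.0, shape=(8,)),
#                              action_space=gymnasium.spaces.Discrete(4),
#                              device="cpu",
#                              hiddens=[64, 64],
#                              hidden_activation=["relu", "relu"])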
def shared_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, torch.device]] = None,
structure: str = "",
roles: Sequence[str] = [],
parameters: Sequence[Mapping[str, Any]] = []) -> Model:
"""Instantiate a shared model
:param observation_space: Observation/state space or shape (default: None).
If it is not None, the num_observations property will contain the size of that space
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None).
If it is not None, the num_actions property will contain the size of that space
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param structure: Shared model structure (default: ``""``).
Note: this parameter is ignored for the moment
:type structure: str, optional
:param roles: Organized list of model roles (default: ``[]``)
:type roles: sequence of strings, optional
:param parameters: Organized list of model instantiator parameters (default: ``[]``)
:type parameters: sequence of dict, optional
:return: Shared model instance
:rtype: Model
"""
class GaussianDeterministicModel(GaussianMixin, DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, roles, metadata):
Model.__init__(self, observation_space, action_space, device)
GaussianMixin.__init__(self,
clip_actions=metadata[0]["clip_actions"],
clip_log_std=metadata[0]["clip_log_std"],
min_log_std=metadata[0]["min_log_std"],
max_log_std=metadata[0]["max_log_std"],
role=roles[0])
DeterministicMixin.__init__(self, clip_actions=metadata[1]["clip_actions"], role=roles[1])
self._roles = roles
self.instantiator_input_type = metadata[0]["input_shape"].value
self.instantiator_output_scales = [m["output_scale"] for m in metadata]
# shared layers/network
self.net = _generate_sequential(model=self,
input_shape=metadata[0]["input_shape"],
hiddens=metadata[0]["hiddens"][:-1],
hidden_activation=metadata[0]["hidden_activation"][:-1],
output_shape=metadata[0]["hiddens"][-1],
output_activation=metadata[0]["hidden_activation"][-1])
# separated layers ("policy")
mean_layers = [nn.Linear(metadata[0]["hiddens"][-1], _get_num_units_by_shape(self, metadata[0]["output_shape"]))]
if metadata[0]["output_activation"] is not None:
mean_layers.append(_get_activation_function(metadata[0]["output_activation"]))
self.mean_net = nn.Sequential(*mean_layers)
self.log_std_parameter = nn.Parameter(metadata[0]["initial_log_std"] \
* torch.ones(_get_num_units_by_shape(self, metadata[0]["output_shape"])))
# separated layer ("value")
value_layers = [nn.Linear(metadata[1]["hiddens"][-1], _get_num_units_by_shape(self, metadata[1]["output_shape"]))]
if metadata[1]["output_activation"] is not None:
value_layers.append(_get_activation_function(metadata[1]["output_activation"]))
self.value_net = nn.Sequential(*value_layers)
def act(self, inputs, role):
if role == self._roles[0]:
return GaussianMixin.act(self, inputs, role)
elif role == self._roles[1]:
return DeterministicMixin.act(self, inputs, role)
def compute(self, inputs, role):
if self.instantiator_input_type == 0:
output = self.net(inputs["states"])
elif self.instantiator_input_type == -1:
output = self.net(inputs["taken_actions"])
elif self.instantiator_input_type == -2:
output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1))
if role == self._roles[0]:
return self.instantiator_output_scales[0] * self.mean_net(output), self.log_std_parameter, {}
elif role == self._roles[1]:
return self.instantiator_output_scales[1] * self.value_net(output), {}
# TODO: define the model using the specified structure
return GaussianDeterministicModel(observation_space=observation_space,
action_space=action_space,
device=device,
roles=roles,
metadata=parameters)
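if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library): instantiate a Gaussian
    # policy and a deterministic value function for a hypothetical environment with a
    # 24-dimensional observation space and a 3-dimensional action space, then query them with
    # random states. The spaces, layer sizes and shapes below are assumptions for demonstration,
    # and the .act() calls follow the standard skrl Model interface.
    observation_space = gymnasium.spaces.Box(low=-1.0, high=1.0, shape=(24,))
    action_space = gymnasium.spaces.Box(low=-1.0, high=1.0, shape=(3,))
    policy = gaussian_model(observation_space=observation_space,
                            action_space=action_space,
                            device="cpu",
                            hiddens=[64, 64],
                            hidden_activation=["elu", "elu"])
    value = deterministic_model(observation_space=observation_space,
                                action_space=action_space,
                                device="cpu",
                                output_shape=Shape.ONE,
                                output_activation=None)
    states = torch.rand((8, 24))
    actions, log_prob, _ = policy.act({"states": states})
    values, _, _ = value.act({"states": states})
    print(actions.shape, log_prob.shape, values.shape)  # (8, 3), (8, 1), (8, 1)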
| 30,707 | Python | 52.3125 | 126 | 0.586446 |
Toni-SM/skrl/skrl/utils/model_instantiators/jax/__init__.py | from typing import Any, Mapping, Optional, Sequence, Tuple, Union
import sys
from enum import Enum
import gym
import gymnasium
import flax.linen as nn
import jax
import jax.numpy as jnp
from skrl.models.jax import Model # noqa
from skrl.models.jax import CategoricalMixin, DeterministicMixin, GaussianMixin # noqa
__all__ = ["categorical_model", "deterministic_model", "gaussian_model", "Shape"]
class Shape(Enum):
"""
Enum to select the shape of the model's inputs and outputs
"""
ONE = 1
STATES = 0
OBSERVATIONS = 0
ACTIONS = -1
STATES_ACTIONS = -2
def _get_activation_function(activation: str) -> nn.Module:
"""Get the activation function
Supported activation functions:
- "elu"
- "leaky_relu"
- "relu"
- "selu"
- "sigmoid"
- "softmax"
- "softplus"
- "softsign"
- "tanh"
:param activation: activation function name.
If activation is an empty string, a placeholder will be returned (``lambda x: x``)
:type activation: str
:raises: ValueError if activation is not a valid activation function
:return: activation function
:rtype: nn.Module
"""
if not activation:
return lambda x: x
elif activation == "relu":
return nn.relu
elif activation == "tanh":
return nn.tanh
elif activation == "sigmoid":
return nn.sigmoid
elif activation == "leaky_relu":
return nn.leaky_relu
elif activation == "elu":
return nn.elu
elif activation == "softplus":
return nn.softplus
elif activation == "softsign":
return nn.soft_sign
elif activation == "selu":
return nn.selu
elif activation == "softmax":
return nn.softmax
else:
raise ValueError(f"Unknown activation function: {activation}")
def _get_num_units_by_shape(model: Model, shape: Shape) -> int:
"""Get the number of units in a layer by shape
:param model: Model to get the number of units for
:type model: Model
:param shape: Shape of the layer
:type shape: Shape or int
:return: Number of units in the layer
:rtype: int
"""
num_units = {Shape.ONE: 1,
Shape.STATES: model.num_observations,
Shape.ACTIONS: model.num_actions,
Shape.STATES_ACTIONS: model.num_observations + model.num_actions}
try:
return num_units[shape]
    except KeyError:
return shape
def _generate_sequential(model: Model,
input_shape: Shape = Shape.STATES,
hiddens: list = [256, 256],
hidden_activation: list = ["relu", "relu"],
output_shape: Shape = Shape.ACTIONS,
output_activation: Union[str, None] = "tanh",
output_scale: Optional[int] = None) -> nn.Sequential:
"""Generate a sequential model
:param model: model to generate sequential model for
:type model: Model
:param input_shape: Shape of the input (default: Shape.STATES)
:type input_shape: Shape, optional
:param hiddens: Number of hidden units in each hidden layer
:type hiddens: int or list of ints
:param hidden_activation: Activation function for each hidden layer (default: "relu").
:type hidden_activation: list of strings
:param output_shape: Shape of the output (default: Shape.ACTIONS)
:type output_shape: Shape, optional
:param output_activation: Activation function for the output layer (default: "tanh")
:type output_activation: str or None, optional
:param output_scale: Scale of the output layer (default: None).
If None, the output layer will not be scaled
:type output_scale: int, optional
:return: sequential model
:rtype: nn.Sequential
"""
# input layer
input_layer = [nn.Dense(hiddens[0])]
# hidden layers
hidden_layers = []
for i in range(len(hiddens) - 1):
hidden_layers.append(_get_activation_function(hidden_activation[i]))
hidden_layers.append(nn.Dense(hiddens[i + 1]))
hidden_layers.append(_get_activation_function(hidden_activation[-1]))
# output layer
output_layer = [nn.Dense(_get_num_units_by_shape(model, output_shape))]
if output_activation is not None:
output_layer.append(_get_activation_function(output_activation))
return nn.Sequential(input_layer + hidden_layers + output_layer)
def gaussian_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
clip_actions: bool = False,
clip_log_std: bool = True,
min_log_std: float = -20,
max_log_std: float = 2,
initial_log_std: float = 0,
input_shape: Shape = Shape.STATES,
hiddens: list = [256, 256],
hidden_activation: list = ["relu", "relu"],
output_shape: Shape = Shape.ACTIONS,
output_activation: Optional[str] = "tanh",
output_scale: float = 1.0) -> Model:
"""Instantiate a Gaussian model
:param observation_space: Observation/state space or shape (default: None).
If it is not None, the num_observations property will contain the size of that space
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None).
If it is not None, the num_actions property will contain the size of that space
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param clip_actions: Flag to indicate whether the actions should be clipped (default: False)
:type clip_actions: bool, optional
:param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: True)
:type clip_log_std: bool, optional
:param min_log_std: Minimum value of the log standard deviation (default: -20)
:type min_log_std: float, optional
:param max_log_std: Maximum value of the log standard deviation (default: 2)
:type max_log_std: float, optional
:param initial_log_std: Initial value for the log standard deviation (default: 0)
:type initial_log_std: float, optional
:param input_shape: Shape of the input (default: Shape.STATES)
:type input_shape: Shape, optional
:param hiddens: Number of hidden units in each hidden layer
:type hiddens: int or list of ints
:param hidden_activation: Activation function for each hidden layer (default: "relu").
:type hidden_activation: list of strings
:param output_shape: Shape of the output (default: Shape.ACTIONS)
:type output_shape: Shape, optional
:param output_activation: Activation function for the output layer (default: "tanh")
:type output_activation: str or None, optional
:param output_scale: Scale of the output layer (default: 1.0).
If None, the output layer will not be scaled
:type output_scale: float, optional
:return: Gaussian model instance
:rtype: Model
"""
class GaussianModel(GaussianMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False,
clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction)
# override the hash method for Python versions prior to 3.8 to avoid the following error:
# TypeError: Failed to hash Flax Module. The module probably contains unhashable attributes.
if sys.version_info < (3, 8):
def __hash__(self):
return id(self)
def setup(self):
self.instantiator_output_scale = metadata["output_scale"]
self.instantiator_input_type = metadata["input_shape"].value
self.net = _generate_sequential(model=self,
input_shape=metadata["input_shape"],
hiddens=metadata["hiddens"],
hidden_activation=metadata["hidden_activation"],
output_shape=metadata["output_shape"],
output_activation=metadata["output_activation"],
output_scale=metadata["output_scale"])
self.log_std_parameter = self.param("log_std_parameter", lambda _: metadata["initial_log_std"] \
* jnp.ones(_get_num_units_by_shape(self, metadata["output_shape"])))
def __call__(self, inputs, role):
if self.instantiator_input_type == 0:
output = self.net(inputs["states"])
elif self.instantiator_input_type == -1:
output = self.net(inputs["taken_actions"])
elif self.instantiator_input_type == -2:
output = self.net(jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1))
return output * self.instantiator_output_scale, self.log_std_parameter, {}
metadata = {"input_shape": input_shape,
"hiddens": hiddens,
"hidden_activation": hidden_activation,
"output_shape": output_shape,
"output_activation": output_activation,
"output_scale": output_scale,
"initial_log_std": initial_log_std}
return GaussianModel(observation_space=observation_space,
action_space=action_space,
device=device,
clip_actions=clip_actions,
clip_log_std=clip_log_std,
min_log_std=min_log_std,
max_log_std=max_log_std)
def deterministic_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
clip_actions: bool = False,
input_shape: Shape = Shape.STATES,
hiddens: list = [256, 256],
hidden_activation: list = ["relu", "relu"],
output_shape: Shape = Shape.ACTIONS,
output_activation: Optional[str] = "tanh",
output_scale: float = 1.0) -> Model:
"""Instantiate a deterministic model
:param observation_space: Observation/state space or shape (default: None).
If it is not None, the num_observations property will contain the size of that space
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None).
If it is not None, the num_actions property will contain the size of that space
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: False)
:type clip_actions: bool, optional
:param input_shape: Shape of the input (default: Shape.STATES)
:type input_shape: Shape, optional
:param hiddens: Number of hidden units in each hidden layer
:type hiddens: int or list of ints
:param hidden_activation: Activation function for each hidden layer (default: "relu").
:type hidden_activation: list of strings
:param output_shape: Shape of the output (default: Shape.ACTIONS)
:type output_shape: Shape, optional
:param output_activation: Activation function for the output layer (default: "tanh")
:type output_activation: str or None, optional
:param output_scale: Scale of the output layer (default: 1.0).
If None, the output layer will not be scaled
:type output_scale: float, optional
:return: Deterministic model instance
:rtype: Model
"""
class DeterministicModel(DeterministicMixin, Model):
def __init__(self, observation_space, action_space, device, clip_actions=False, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
DeterministicMixin.__init__(self, clip_actions)
# override the hash method for Python versions prior to 3.8 to avoid the following error:
# TypeError: Failed to hash Flax Module. The module probably contains unhashable attributes.
if sys.version_info < (3, 8):
def __hash__(self):
return id(self)
def setup(self):
self.instantiator_output_scale = metadata["output_scale"]
self.instantiator_input_type = metadata["input_shape"].value
self.net = _generate_sequential(model=self,
input_shape=metadata["input_shape"],
hiddens=metadata["hiddens"],
hidden_activation=metadata["hidden_activation"],
output_shape=metadata["output_shape"],
output_activation=metadata["output_activation"],
output_scale=metadata["output_scale"])
def __call__(self, inputs, role):
if self.instantiator_input_type == 0:
output = self.net(inputs["states"])
elif self.instantiator_input_type == -1:
output = self.net(inputs["taken_actions"])
elif self.instantiator_input_type == -2:
output = self.net(jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1))
return output * self.instantiator_output_scale, {}
metadata = {"input_shape": input_shape,
"hiddens": hiddens,
"hidden_activation": hidden_activation,
"output_shape": output_shape,
"output_activation": output_activation,
"output_scale": output_scale}
return DeterministicModel(observation_space=observation_space,
action_space=action_space,
device=device,
clip_actions=clip_actions)
def categorical_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None,
device: Optional[Union[str, jax.Device]] = None,
unnormalized_log_prob: bool = True,
input_shape: Shape = Shape.STATES,
hiddens: list = [256, 256],
hidden_activation: list = ["relu", "relu"],
output_shape: Shape = Shape.ACTIONS,
output_activation: Optional[str] = None) -> Model:
"""Instantiate a categorical model
:param observation_space: Observation/state space or shape (default: None).
If it is not None, the num_observations property will contain the size of that space
:type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param action_space: Action space or shape (default: None).
If it is not None, the num_actions property will contain the size of that space
:type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
    :param unnormalized_log_prob: Flag to indicate how the model's output is to be interpreted (default: True).
If True, the model's output is interpreted as unnormalized log probabilities
(it can be any real number), otherwise as normalized probabilities
(the output must be non-negative, finite and have a non-zero sum)
:type unnormalized_log_prob: bool, optional
:param input_shape: Shape of the input (default: Shape.STATES)
:type input_shape: Shape, optional
:param hiddens: Number of hidden units in each hidden layer
:type hiddens: int or list of ints
:param hidden_activation: Activation function for each hidden layer (default: "relu").
:type hidden_activation: list of strings
:param output_shape: Shape of the output (default: Shape.ACTIONS)
:type output_shape: Shape, optional
:param output_activation: Activation function for the output layer (default: None)
:type output_activation: str or None, optional
:return: Categorical model instance
:rtype: Model
"""
class CategoricalModel(CategoricalMixin, Model):
def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, **kwargs):
Model.__init__(self, observation_space, action_space, device, **kwargs)
CategoricalMixin.__init__(self, unnormalized_log_prob)
# override the hash method for Python versions prior to 3.8 to avoid the following error:
# TypeError: Failed to hash Flax Module. The module probably contains unhashable attributes.
if sys.version_info < (3, 8):
def __hash__(self):
return id(self)
def setup(self):
self.instantiator_input_type = metadata["input_shape"].value
self.net = _generate_sequential(model=self,
input_shape=metadata["input_shape"],
hiddens=metadata["hiddens"],
hidden_activation=metadata["hidden_activation"],
output_shape=metadata["output_shape"],
output_activation=metadata["output_activation"])
def __call__(self, inputs, role):
if self.instantiator_input_type == 0:
output = self.net(inputs["states"])
elif self.instantiator_input_type == -1:
output = self.net(inputs["taken_actions"])
elif self.instantiator_input_type == -2:
output = self.net(jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1))
return output, {}
metadata = {"input_shape": input_shape,
"hiddens": hiddens,
"hidden_activation": hidden_activation,
"output_shape": output_shape,
"output_activation": output_activation}
return CategoricalModel(observation_space=observation_space,
action_space=action_space,
device=device,
unnormalized_log_prob=unnormalized_log_prob)
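# Illustrative sketch (hypothetical spaces, not part of the library): instantiating a Gaussian
# policy mirrors the PyTorch instantiators. Since Flax creates parameters lazily, the usual skrl
# JAX workflow of initializing the model's state dict before use is assumed below.
#
#   observation_space = gymnasium.spaces.Box(low=-1.0, high=1.0, shape=(24,))
#   action_space = gymnasium.spaces.Box(low=-1.0, high=1.0, shape=(3,))
#   policy = gaussian_model(observation_space=observation_space,
#                           action_space=action_space,
#                           device="cpu",
#                           hiddens=[64, 64],
#                           hidden_activation=["elu", "elu"])
#   policy.init_state_dict("policy")  # assumed initialization step for skrl's JAX models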
| 20,151 | Python | 48.271394 | 116 | 0.599325 |
Toni-SM/skrl/skrl/memories/torch/base.py | from typing import List, Optional, Tuple, Union
import csv
import datetime
import functools
import operator
import os
import gym
import gymnasium
import numpy as np
import torch
from torch.utils.data.sampler import BatchSampler
class Memory:
def __init__(self,
memory_size: int,
num_envs: int = 1,
device: Optional[Union[str, torch.device]] = None,
export: bool = False,
export_format: str = "pt",
export_directory: str = "") -> None:
"""Base class representing a memory with circular buffers
Buffers are torch tensors with shape (memory size, number of environments, data size).
Circular buffers are implemented with two integers: a memory index and an environment index
:param memory_size: Maximum number of elements in the first dimension of each internal storage
:type memory_size: int
:param num_envs: Number of parallel environments (default: ``1``)
:type num_envs: int, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param export: Export the memory to a file (default: ``False``).
If True, the memory will be exported when the memory is filled
:type export: bool, optional
:param export_format: Export format (default: ``"pt"``).
                              Supported formats: torch (pt), numpy (npz), comma-separated values (csv)
:type export_format: str, optional
:param export_directory: Directory where the memory will be exported (default: ``""``).
If empty, the agent's experiment directory will be used
:type export_directory: str, optional
:raises ValueError: The export format is not supported
"""
self.memory_size = memory_size
self.num_envs = num_envs
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device)
# internal variables
self.filled = False
self.env_index = 0
self.memory_index = 0
self.tensors = {}
self.tensors_view = {}
self.tensors_keep_dimensions = {}
self.sampling_indexes = None
self.all_sequence_indexes = np.concatenate([np.arange(i, memory_size * num_envs + i, num_envs) for i in range(num_envs)])
# exporting data
self.export = export
self.export_format = export_format
self.export_directory = export_directory
        if self.export_format not in ["pt", "npz", "csv"]:
raise ValueError(f"Export format not supported ({self.export_format})")
def __len__(self) -> int:
"""Compute and return the current (valid) size of the memory
The valid size is calculated as the ``memory_size * num_envs`` if the memory is full (filled).
Otherwise, the ``memory_index * num_envs + env_index`` is returned
:return: Valid size
:rtype: int
"""
return self.memory_size * self.num_envs if self.filled else self.memory_index * self.num_envs + self.env_index
def _get_space_size(self,
space: Union[int, Tuple[int], gym.Space, gymnasium.Space],
keep_dimensions: bool = False) -> Union[Tuple, int]:
"""Get the size (number of elements) of a space
:param space: Space or shape from which to obtain the number of elements
:type space: int, tuple or list of integers, gym.Space, or gymnasium.Space
:param keep_dimensions: Whether or not to keep the space dimensions (default: ``False``)
:type keep_dimensions: bool, optional
:raises ValueError: If the space is not supported
:return: Size of the space. If ``keep_dimensions`` is True, the space size will be a tuple
:rtype: int or tuple of int
"""
if type(space) in [int, float]:
return (int(space),) if keep_dimensions else int(space)
elif type(space) in [tuple, list]:
return tuple(space) if keep_dimensions else np.prod(space)
elif issubclass(type(space), gym.Space):
if issubclass(type(space), gym.spaces.Discrete):
return (1,) if keep_dimensions else 1
elif issubclass(type(space), gym.spaces.MultiDiscrete):
return space.nvec.shape[0]
elif issubclass(type(space), gym.spaces.Box):
return tuple(space.shape) if keep_dimensions else np.prod(space.shape)
elif issubclass(type(space), gym.spaces.Dict):
if keep_dimensions:
raise ValueError("keep_dimensions=True cannot be used with Dict spaces")
return sum([self._get_space_size(space.spaces[key]) for key in space.spaces])
elif issubclass(type(space), gymnasium.Space):
if issubclass(type(space), gymnasium.spaces.Discrete):
return (1,) if keep_dimensions else 1
elif issubclass(type(space), gymnasium.spaces.MultiDiscrete):
return space.nvec.shape[0]
elif issubclass(type(space), gymnasium.spaces.Box):
return tuple(space.shape) if keep_dimensions else np.prod(space.shape)
elif issubclass(type(space), gymnasium.spaces.Dict):
if keep_dimensions:
raise ValueError("keep_dimensions=True cannot be used with Dict spaces")
return sum([self._get_space_size(space.spaces[key]) for key in space.spaces])
raise ValueError(f"Space type {type(space)} not supported")
def share_memory(self) -> None:
"""Share the tensors between processes
"""
for tensor in self.tensors.values():
if not tensor.is_cuda:
tensor.share_memory_()
def get_tensor_names(self) -> Tuple[str]:
"""Get the name of the internal tensors in alphabetical order
:return: Tensor names without internal prefix (_tensor_)
:rtype: tuple of strings
"""
return sorted(self.tensors.keys())
def get_tensor_by_name(self, name: str, keepdim: bool = True) -> torch.Tensor:
"""Get a tensor by its name
:param name: Name of the tensor to retrieve
:type name: str
:param keepdim: Keep the tensor's shape (memory size, number of environments, size) (default: ``True``)
If False, the returned tensor will have a shape of (memory size * number of environments, size)
:type keepdim: bool, optional
:raises KeyError: The tensor does not exist
:return: Tensor
:rtype: torch.Tensor
"""
return self.tensors[name] if keepdim else self.tensors_view[name]
def set_tensor_by_name(self, name: str, tensor: torch.Tensor) -> None:
"""Set a tensor by its name
:param name: Name of the tensor to set
:type name: str
:param tensor: Tensor to set
:type tensor: torch.Tensor
:raises KeyError: The tensor does not exist
"""
with torch.no_grad():
self.tensors[name].copy_(tensor)
def create_tensor(self,
name: str,
size: Union[int, Tuple[int], gym.Space, gymnasium.Space],
dtype: Optional[torch.dtype] = None,
keep_dimensions: bool = False) -> bool:
"""Create a new internal tensor in memory
        The tensor will have a 3-component shape (memory size, number of environments, size).
The internal representation will use _tensor_<name> as the name of the class property
:param name: Tensor name (the name has to follow the python PEP 8 style)
:type name: str
:param size: Number of elements in the last dimension (effective data size).
The product of the elements will be computed for sequences or gym/gymnasium spaces
:type size: int, tuple or list of integers, gym.Space, or gymnasium.Space
:param dtype: Data type (torch.dtype) (default: ``None``).
If None, the global default torch data type will be used
:type dtype: torch.dtype or None, optional
:param keep_dimensions: Whether or not to keep the dimensions defined through the size parameter (default: ``False``)
:type keep_dimensions: bool, optional
:raises ValueError: The tensor name exists already but the size or dtype are different
:return: True if the tensor was created, otherwise False
:rtype: bool
"""
# compute data size
size = self._get_space_size(size, keep_dimensions)
# check dtype and size if the tensor exists
if name in self.tensors:
tensor = self.tensors[name]
if tensor.size(-1) != size:
raise ValueError(f"Size of tensor {name} ({size}) doesn't match the existing one ({tensor.size(-1)})")
if dtype is not None and tensor.dtype != dtype:
raise ValueError(f"Dtype of tensor {name} ({dtype}) doesn't match the existing one ({tensor.dtype})")
return False
# define tensor shape
tensor_shape = (self.memory_size, self.num_envs, *size) if keep_dimensions else (self.memory_size, self.num_envs, size)
view_shape = (-1, *size) if keep_dimensions else (-1, size)
# create tensor (_tensor_<name>) and add it to the internal storage
setattr(self, f"_tensor_{name}", torch.zeros(tensor_shape, device=self.device, dtype=dtype))
# update internal variables
self.tensors[name] = getattr(self, f"_tensor_{name}")
self.tensors_view[name] = self.tensors[name].view(*view_shape)
self.tensors_keep_dimensions[name] = keep_dimensions
# fill the tensors (float tensors) with NaN
for tensor in self.tensors.values():
if torch.is_floating_point(tensor):
tensor.fill_(float("nan"))
return True
def reset(self) -> None:
"""Reset the memory by cleaning internal indexes and flags
Old data will be retained until overwritten, but access through the available methods will not be guaranteed
Default values of the internal indexes and flags
- filled: False
- env_index: 0
- memory_index: 0
"""
self.filled = False
self.env_index = 0
self.memory_index = 0
def add_samples(self, **tensors: torch.Tensor) -> None:
"""Record samples in memory
        Samples should be a tensor with a 2-component shape (number of environments, data size).
All tensors must be of the same shape
According to the number of environments, the following classification is made:
- one environment:
Store a single sample (tensors with one dimension) and increment the environment index (second index) by one
- number of environments less than num_envs:
Store the samples and increment the environment index (second index) by the number of the environments
- number of environments equals num_envs:
Store the samples and increment the memory index (first index) by one
:param tensors: Sampled data as key-value arguments where the keys are the names of the tensors to be modified.
Non-existing tensors will be skipped
:type tensors: dict
:raises ValueError: No tensors were provided or the tensors have incompatible shapes
"""
if not tensors:
raise ValueError("No samples to be recorded in memory. Pass samples as key-value arguments (where key is the tensor name)")
# dimensions and shapes of the tensors (assume all tensors have the dimensions of the first tensor)
tmp = tensors.get("states", tensors[next(iter(tensors))]) # ask for states first
dim, shape = tmp.ndim, tmp.shape
# multi environment (number of environments equals num_envs)
if dim == 2 and shape[0] == self.num_envs:
for name, tensor in tensors.items():
if name in self.tensors:
self.tensors[name][self.memory_index].copy_(tensor)
self.memory_index += 1
# multi environment (number of environments less than num_envs)
elif dim == 2 and shape[0] < self.num_envs:
for name, tensor in tensors.items():
if name in self.tensors:
self.tensors[name][self.memory_index, self.env_index:self.env_index + tensor.shape[0]].copy_(tensor)
self.env_index += tensor.shape[0]
        # single environment - multiple samples (a batch of samples for num_envs = 1)
elif dim == 2 and self.num_envs == 1:
for name, tensor in tensors.items():
if name in self.tensors:
num_samples = min(shape[0], self.memory_size - self.memory_index)
remaining_samples = shape[0] - num_samples
# copy the first n samples
self.tensors[name][self.memory_index:self.memory_index + num_samples].copy_(tensor[:num_samples].unsqueeze(dim=1))
self.memory_index += num_samples
# storage remaining samples
if remaining_samples > 0:
self.tensors[name][:remaining_samples].copy_(tensor[num_samples:].unsqueeze(dim=1))
self.memory_index = remaining_samples
# single environment
elif dim == 1:
for name, tensor in tensors.items():
if name in self.tensors:
self.tensors[name][self.memory_index, self.env_index].copy_(tensor)
self.env_index += 1
else:
raise ValueError(f"Expected shape (number of environments = {self.num_envs}, data size), got {shape}")
# update indexes and flags
if self.env_index >= self.num_envs:
self.env_index = 0
self.memory_index += 1
if self.memory_index >= self.memory_size:
self.memory_index = 0
self.filled = True
# export tensors to file
if self.export:
self.save(directory=self.export_directory, format=self.export_format)
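        # Illustrative example for add_samples (not executed): with num_envs = 4 and a "states"
        # tensor created with size 8, a typical call records one transition per environment and
        # advances the memory index by one:
        #
        #   memory.add_samples(states=torch.rand(4, 8), rewards=torch.rand(4, 1))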
def sample(self,
names: Tuple[str],
batch_size: int,
mini_batches: int = 1,
sequence_length: int = 1) -> List[List[torch.Tensor]]:
"""Data sampling method to be implemented by the inheriting classes
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
:param batch_size: Number of element to sample
:type batch_size: int
:param mini_batches: Number of mini-batches to sample (default: ``1``)
:type mini_batches: int, optional
:param sequence_length: Length of each sequence (default: ``1``)
:type sequence_length: int, optional
:raises NotImplementedError: The method has not been implemented
:return: Sampled data from tensors sorted according to their position in the list of names.
The sampled tensors will have the following shape: (batch size, data size)
:rtype: list of torch.Tensor list
"""
raise NotImplementedError("The sampling method (.sample()) is not implemented")
def sample_by_index(self, names: Tuple[str], indexes: Union[tuple, np.ndarray, torch.Tensor], mini_batches: int = 1) -> List[List[torch.Tensor]]:
"""Sample data from memory according to their indexes
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
:param indexes: Indexes used for sampling
:type indexes: tuple or list, numpy.ndarray or torch.Tensor
:param mini_batches: Number of mini-batches to sample (default: ``1``)
:type mini_batches: int, optional
:return: Sampled data from tensors sorted according to their position in the list of names.
The sampled tensors will have the following shape: (number of indexes, data size)
:rtype: list of torch.Tensor list
"""
if mini_batches > 1:
batches = BatchSampler(indexes, batch_size=len(indexes) // mini_batches, drop_last=True)
return [[self.tensors_view[name][batch] for name in names] for batch in batches]
return [[self.tensors_view[name][indexes] for name in names]]
def sample_all(self, names: Tuple[str], mini_batches: int = 1, sequence_length: int = 1) -> List[List[torch.Tensor]]:
"""Sample all data from memory
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
:param mini_batches: Number of mini-batches to sample (default: ``1``)
:type mini_batches: int, optional
:param sequence_length: Length of each sequence (default: ``1``)
:type sequence_length: int, optional
:return: Sampled data from memory.
The sampled tensors will have the following shape: (memory size * number of environments, data size)
:rtype: list of torch.Tensor list
"""
# sequential order
if sequence_length > 1:
if mini_batches > 1:
batches = BatchSampler(self.all_sequence_indexes, batch_size=len(self.all_sequence_indexes) // mini_batches, drop_last=True)
return [[self.tensors_view[name][batch] for name in names] for batch in batches]
return [[self.tensors_view[name][self.all_sequence_indexes] for name in names]]
# default order
if mini_batches > 1:
indexes = np.arange(self.memory_size * self.num_envs)
batches = BatchSampler(indexes, batch_size=len(indexes) // mini_batches, drop_last=True)
return [[self.tensors_view[name][batch] for name in names] for batch in batches]
return [[self.tensors_view[name] for name in names]]
def get_sampling_indexes(self) -> Union[tuple, np.ndarray, torch.Tensor]:
"""Get the last indexes used for sampling
:return: Last sampling indexes
:rtype: tuple or list, numpy.ndarray or torch.Tensor
"""
return self.sampling_indexes
def save(self, directory: str = "", format: str = "pt") -> None:
"""Save the memory to a file
Supported formats:
- PyTorch (pt)
- NumPy (npz)
- Comma-separated values (csv)
:param directory: Path to the folder where the memory will be saved.
If not provided, the directory defined in the constructor will be used
:type directory: str
:param format: Format of the file where the memory will be saved (default: ``"pt"``)
:type format: str, optional
:raises ValueError: If the format is not supported
"""
if not directory:
directory = self.export_directory
os.makedirs(os.path.join(directory, "memories"), exist_ok=True)
memory_path = os.path.join(directory, "memories", \
"{}_memory_{}.{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), hex(id(self)), format))
# torch
if format == "pt":
torch.save({name: self.tensors[name] for name in self.get_tensor_names()}, memory_path)
# numpy
elif format == "npz":
np.savez(memory_path, **{name: self.tensors[name].cpu().numpy() for name in self.get_tensor_names()})
# comma-separated values
elif format == "csv":
# open csv writer # TODO: support keeping the dimensions
with open(memory_path, "a") as file:
writer = csv.writer(file)
names = self.get_tensor_names()
# write headers
headers = [[f"{name}.{i}" for i in range(self.tensors_view[name].shape[-1])] for name in names]
writer.writerow([item for sublist in headers for item in sublist])
# write rows
for i in range(len(self)):
writer.writerow(functools.reduce(operator.iconcat, [self.tensors_view[name][i].tolist() for name in names], []))
# unsupported format
else:
raise ValueError(f"Unsupported format: {format}. Available formats: pt, csv, npz")
def load(self, path: str) -> None:
"""Load the memory from a file
Supported formats:
- PyTorch (pt)
- NumPy (npz)
- Comma-separated values (csv)
:param path: Path to the file where the memory will be loaded
:type path: str
:raises ValueError: If the format is not supported
"""
# torch
if path.endswith(".pt"):
data = torch.load(path)
for name in self.get_tensor_names():
setattr(self, f"_tensor_{name}", data[name])
# numpy
elif path.endswith(".npz"):
data = np.load(path)
for name in data:
setattr(self, f"_tensor_{name}", torch.tensor(data[name]))
# comma-separated values
elif path.endswith(".csv"):
# TODO: load the memory from a csv
pass
# unsupported format
else:
raise ValueError(f"Unsupported format: {path}")
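# --- illustrative usage sketch (not part of the original module) ---
# Minimal example of the circular-buffer workflow defined above: create the tensors
# up front, record one step per ``add_samples`` call, then draw the whole buffer in
# mini-batches. Tensor names and sizes below are arbitrary.
if __name__ == "__main__":
    memory = Memory(memory_size=8, num_envs=2)
    memory.create_tensor(name="states", size=4, dtype=torch.float32)
    memory.create_tensor(name="rewards", size=1, dtype=torch.float32)
    for _ in range(8):
        memory.add_samples(states=torch.randn(2, 4), rewards=torch.randn(2, 1))
    # one pass over the whole (filled) buffer, split into 2 mini-batches of 8 samples
    for states, rewards in memory.sample_all(names=["states", "rewards"], mini_batches=2):
        assert states.shape == (8, 4) and rewards.shape == (8, 1)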
| 21,679 | Python | 45.226013 | 149 | 0.6087 |
Toni-SM/skrl/skrl/memories/torch/__init__.py | from skrl.memories.torch.base import Memory # isort:skip
from skrl.memories.torch.random import RandomMemory
| 111 | Python | 26.999993 | 57 | 0.81982 |
Toni-SM/skrl/skrl/memories/torch/random.py | from typing import List, Optional, Tuple, Union
import torch
from skrl.memories.torch import Memory
class RandomMemory(Memory):
def __init__(self,
memory_size: int,
num_envs: int = 1,
device: Optional[Union[str, torch.device]] = None,
export: bool = False,
export_format: str = "pt",
export_directory: str = "",
                 replacement: bool = True) -> None:
"""Random sampling memory
Sample a batch from memory randomly
:param memory_size: Maximum number of elements in the first dimension of each internal storage
:type memory_size: int
:param num_envs: Number of parallel environments (default: ``1``)
:type num_envs: int, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or torch.device, optional
:param export: Export the memory to a file (default: ``False``).
If True, the memory will be exported when the memory is filled
:type export: bool, optional
:param export_format: Export format (default: ``"pt"``).
Supported formats: torch (pt), numpy (np), comma separated values (csv)
:type export_format: str, optional
:param export_directory: Directory where the memory will be exported (default: ``""``).
If empty, the agent's experiment directory will be used
:type export_directory: str, optional
:param replacement: Flag to indicate whether the sample is with or without replacement (default: ``True``).
Replacement implies that a value can be selected multiple times (the batch size is always guaranteed).
Sampling without replacement will return a batch of maximum memory size if the memory size is less than the requested batch size
:type replacement: bool, optional
:raises ValueError: The export format is not supported
"""
super().__init__(memory_size, num_envs, device, export, export_format, export_directory)
self._replacement = replacement
def sample(self,
names: Tuple[str],
batch_size: int,
mini_batches: int = 1,
sequence_length: int = 1) -> List[List[torch.Tensor]]:
"""Sample a batch from memory randomly
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
:param batch_size: Number of element to sample
:type batch_size: int
:param mini_batches: Number of mini-batches to sample (default: ``1``)
:type mini_batches: int, optional
:param sequence_length: Length of each sequence (default: ``1``)
:type sequence_length: int, optional
:return: Sampled data from tensors sorted according to their position in the list of names.
The sampled tensors will have the following shape: (batch size, data size)
:rtype: list of torch.Tensor list
"""
# compute valid memory sizes
size = len(self)
if sequence_length > 1:
sequence_indexes = torch.arange(0, self.num_envs * sequence_length, self.num_envs)
size -= sequence_indexes[-1].item()
# generate random indexes
if self._replacement:
indexes = torch.randint(0, size, (batch_size,))
else:
# details about the random sampling performance can be found here:
# https://discuss.pytorch.org/t/torch-equivalent-of-numpy-random-choice/16146/19
indexes = torch.randperm(size, dtype=torch.long)[:batch_size]
# generate sequence indexes
if sequence_length > 1:
indexes = (sequence_indexes.repeat(indexes.shape[0], 1) + indexes.view(-1, 1)).view(-1)
self.sampling_indexes = indexes
return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)
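# --- illustrative usage sketch (not part of the original module) ---
# Fill a small memory and draw a random batch without replacement.
# Tensor names and sizes below are arbitrary.
if __name__ == "__main__":
    memory = RandomMemory(memory_size=16, num_envs=1, replacement=False)
    memory.create_tensor(name="actions", size=2, dtype=torch.float32)
    for _ in range(16):
        memory.add_samples(actions=torch.randn(1, 2))
    batches = memory.sample(names=["actions"], batch_size=4)
    assert batches[0][0].shape == (4, 2)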
| 4,190 | Python | 46.624999 | 156 | 0.609785 |
Toni-SM/skrl/skrl/memories/jax/base.py | from typing import List, Mapping, Optional, Tuple, Union
import csv
import datetime
import functools
import operator
import os
import gym
import gymnasium
import jax
import jax.numpy as jnp
import numpy as np
from skrl import config
# https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function
@jax.jit
def _copyto(dst, src):
"""NumPy function <function copyto at 0x7f804ee03430> not yet implemented
"""
return dst.at[:].set(src)
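# out-of-place write of ``src`` into position ``i`` of ``dst`` (one memory step, all environments)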
@jax.jit
def _copyto_i(dst, src, i):
return dst.at[i].set(src)
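# out-of-place write of ``src`` into position ``(i, j)`` of ``dst`` (one memory step, single environment)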
@jax.jit
def _copyto_i_j(dst, src, i, j):
return dst.at[i, j].set(src)
class Memory:
def __init__(self,
memory_size: int,
num_envs: int = 1,
device: Optional[jax.Device] = None,
export: bool = False,
export_format: str = "pt", # TODO: set default format for jax
export_directory: str = "") -> None:
"""Base class representing a memory with circular buffers
Buffers are jax or numpy arrays with shape (memory size, number of environments, data size).
Circular buffers are implemented with two integers: a memory index and an environment index
:param memory_size: Maximum number of elements in the first dimension of each internal storage
:type memory_size: int
:param num_envs: Number of parallel environments (default: ``1``)
:type num_envs: int, optional
:param device: Device on which a tensor/array is or will be allocated (default: ``None``).
If None, the device will be either ``"cuda"`` if available or ``"cpu"``
:type device: str or jax.Device, optional
:param export: Export the memory to a file (default: ``False``).
If True, the memory will be exported when the memory is filled
:type export: bool, optional
:param export_format: Export format (default: ``"pt"``).
                              Supported formats: torch (pt), numpy (npz), comma-separated values (csv)
:type export_format: str, optional
:param export_directory: Directory where the memory will be exported (default: ``""``).
If empty, the agent's experiment directory will be used
:type export_directory: str, optional
:raises ValueError: The export format is not supported
"""
self._jax = config.jax.backend == "jax"
self.memory_size = memory_size
self.num_envs = num_envs
if device is None:
self.device = jax.devices()[0]
else:
self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0]
# internal variables
self.filled = False
self.env_index = 0
self.memory_index = 0
self.tensors = {}
self.tensors_view = {}
self.tensors_keep_dimensions = {}
self._views = True # whether the views are not array copies
self.sampling_indexes = None
self.all_sequence_indexes = np.concatenate([np.arange(i, memory_size * num_envs + i, num_envs) for i in range(num_envs)])
# exporting data
self.export = export
self.export_format = export_format
self.export_directory = export_directory
        if self.export_format not in ["pt", "npz", "csv"]:
raise ValueError(f"Export format not supported ({self.export_format})")
def __len__(self) -> int:
"""Compute and return the current (valid) size of the memory
The valid size is calculated as the ``memory_size * num_envs`` if the memory is full (filled).
Otherwise, the ``memory_index * num_envs + env_index`` is returned
:return: Valid size
:rtype: int
"""
return self.memory_size * self.num_envs if self.filled else self.memory_index * self.num_envs + self.env_index
def _get_space_size(self,
space: Union[int, Tuple[int], gym.Space, gymnasium.Space],
keep_dimensions: bool = False) -> Union[Tuple, int]:
"""Get the size (number of elements) of a space
:param space: Space or shape from which to obtain the number of elements
:type space: int, tuple or list of integers, gym.Space, or gymnasium.Space
:param keep_dimensions: Whether or not to keep the space dimensions (default: ``False``)
:type keep_dimensions: bool, optional
:raises ValueError: If the space is not supported
:return: Size of the space. If ``keep_dimensions`` is True, the space size will be a tuple
:rtype: int or tuple of int
"""
if type(space) in [int, float]:
return (int(space),) if keep_dimensions else int(space)
elif type(space) in [tuple, list]:
return tuple(space) if keep_dimensions else np.prod(space)
elif issubclass(type(space), gym.Space):
if issubclass(type(space), gym.spaces.Discrete):
return (1,) if keep_dimensions else 1
elif issubclass(type(space), gym.spaces.MultiDiscrete):
return space.nvec.shape[0]
elif issubclass(type(space), gym.spaces.Box):
return tuple(space.shape) if keep_dimensions else np.prod(space.shape)
elif issubclass(type(space), gym.spaces.Dict):
if keep_dimensions:
raise ValueError("keep_dimensions=True cannot be used with Dict spaces")
return sum([self._get_space_size(space.spaces[key]) for key in space.spaces])
elif issubclass(type(space), gymnasium.Space):
if issubclass(type(space), gymnasium.spaces.Discrete):
return (1,) if keep_dimensions else 1
elif issubclass(type(space), gymnasium.spaces.MultiDiscrete):
return space.nvec.shape[0]
elif issubclass(type(space), gymnasium.spaces.Box):
return tuple(space.shape) if keep_dimensions else np.prod(space.shape)
elif issubclass(type(space), gymnasium.spaces.Dict):
if keep_dimensions:
raise ValueError("keep_dimensions=True cannot be used with Dict spaces")
return sum([self._get_space_size(space.spaces[key]) for key in space.spaces])
raise ValueError(f"Space type {type(space)} not supported")
def _get_tensors_view(self, name):
return self.tensors_view[name] if self._views else self.tensors[name].reshape(-1, self.tensors[name].shape[-1])
def share_memory(self) -> None:
"""Share the tensors between processes
"""
for tensor in self.tensors.values():
pass
def get_tensor_names(self) -> Tuple[str]:
"""Get the name of the internal tensors in alphabetical order
:return: Tensor names without internal prefix (_tensor_)
:rtype: tuple of strings
"""
return sorted(self.tensors.keys())
def get_tensor_by_name(self, name: str, keepdim: bool = True) -> Union[np.ndarray, jax.Array]:
"""Get a tensor by its name
:param name: Name of the tensor to retrieve
:type name: str
:param keepdim: Keep the tensor's shape (memory size, number of environments, size) (default: ``True``)
If False, the returned tensor will have a shape of (memory size * number of environments, size)
:type keepdim: bool, optional
:raises KeyError: The tensor does not exist
:return: Tensor
:rtype: np.ndarray or jax.Array
"""
return self.tensors[name] if keepdim else self._get_tensors_view(name)
def set_tensor_by_name(self, name: str, tensor: Union[np.ndarray, jax.Array]) -> None:
"""Set a tensor by its name
:param name: Name of the tensor to set
:type name: str
:param tensor: Tensor to set
:type tensor: np.ndarray or jax.Array
:raises KeyError: The tensor does not exist
"""
if self._jax:
self.tensors[name] = _copyto(self.tensors[name], tensor)
else:
np.copyto(self.tensors[name], tensor)
def create_tensor(self,
name: str,
size: Union[int, Tuple[int], gym.Space, gymnasium.Space],
dtype: Optional[np.dtype] = None,
keep_dimensions: bool = False) -> bool:
"""Create a new internal tensor in memory
The tensor will have a 3-components shape (memory size, number of environments, size).
The internal representation will use _tensor_<name> as the name of the class property
:param name: Tensor name (the name has to follow the python PEP 8 style)
:type name: str
:param size: Number of elements in the last dimension (effective data size).
The product of the elements will be computed for sequences or gym/gymnasium spaces
:type size: int, tuple or list of integers or gym.Space
:param dtype: Data type (np.dtype) (default: ``None``).
If None, the global default jax.numpy.float32 data type will be used
:type dtype: np.dtype or None, optional
:param keep_dimensions: Whether or not to keep the dimensions defined through the size parameter (default: ``False``)
:type keep_dimensions: bool, optional
:raises ValueError: The tensor name exists already but the size or dtype are different
:return: True if the tensor was created, otherwise False
:rtype: bool
"""
# compute data size
size = self._get_space_size(size, keep_dimensions)
# check dtype and size if the tensor exists
if name in self.tensors:
tensor = self.tensors[name]
if tensor.shape[-1] != size:
raise ValueError(f"Size of tensor {name} ({size}) doesn't match the existing one ({tensor.shape[-1]})")
if dtype is not None and tensor.dtype != dtype:
raise ValueError(f"Dtype of tensor {name} ({dtype}) doesn't match the existing one ({tensor.dtype})")
return False
# define tensor shape
tensor_shape = (self.memory_size, self.num_envs, *size) if keep_dimensions else (self.memory_size, self.num_envs, size)
view_shape = (-1, *size) if keep_dimensions else (-1, size)
# create tensor (_tensor_<name>) and add it to the internal storage
if self._jax:
setattr(self, f"_tensor_{name}", jnp.zeros(tensor_shape, dtype=dtype))
else:
setattr(self, f"_tensor_{name}", np.zeros(tensor_shape, dtype=dtype))
# update internal variables
self.tensors[name] = getattr(self, f"_tensor_{name}")
self.tensors_view[name] = self.tensors[name].reshape(*view_shape)
self.tensors_keep_dimensions[name] = keep_dimensions
# fill the tensors (float tensors) with NaN
for name, tensor in self.tensors.items():
if tensor.dtype == np.float32 or tensor.dtype == np.float64:
if self._jax:
self.tensors[name] = _copyto(self.tensors[name], float("nan"))
else:
self.tensors[name].fill(float("nan"))
# check views
if self._jax:
self._views = False # TODO: check if views are available
else:
self._views = self._views and self.tensors_view[name].base is self.tensors[name]
return True
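    # e.g. (illustrative): ``create_tensor("states", size=(3, 64, 64), keep_dimensions=True)`` allocates
    # an array of shape (memory_size, num_envs, 3, 64, 64), while ``create_tensor("actions", size=2)``
    # allocates an array of shape (memory_size, num_envs, 2)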
def reset(self) -> None:
"""Reset the memory by cleaning internal indexes and flags
Old data will be retained until overwritten, but access through the available methods will not be guaranteed
Default values of the internal indexes and flags
- filled: False
- env_index: 0
- memory_index: 0
"""
self.filled = False
self.env_index = 0
self.memory_index = 0
def add_samples(self, **tensors: Mapping[str, Union[np.ndarray, jax.Array]]) -> None:
"""Record samples in memory
Samples should be a tensor with 2-components shape (number of environments, data size).
All tensors must be of the same shape
According to the number of environments, the following classification is made:
- one environment:
Store a single sample (tensors with one dimension) and increment the environment index (second index) by one
- number of environments less than num_envs:
Store the samples and increment the environment index (second index) by the number of the environments
- number of environments equals num_envs:
Store the samples and increment the memory index (first index) by one
:param tensors: Sampled data as key-value arguments where the keys are the names of the tensors to be modified.
Non-existing tensors will be skipped
:type tensors: dict
:raises ValueError: No tensors were provided or the tensors have incompatible shapes
"""
if not tensors:
raise ValueError("No samples to be recorded in memory. Pass samples as key-value arguments (where key is the tensor name)")
# dimensions and shapes of the tensors (assume all tensors have the dimensions of the first tensor)
tmp = tensors.get("states", tensors[next(iter(tensors))]) # ask for states first
dim, shape = tmp.ndim, tmp.shape
# multi environment (number of environments equals num_envs)
if dim == 2 and shape[0] == self.num_envs:
if self._jax:
for name, tensor in tensors.items():
if name in self.tensors:
self.tensors[name] = _copyto_i(self.tensors[name], tensor, self.memory_index)
else:
for name, tensor in tensors.items():
if name in self.tensors:
self.tensors[name][self.memory_index] = tensor
self.memory_index += 1
# multi environment (number of environments less than num_envs)
elif dim == 2 and shape[0] < self.num_envs:
raise NotImplementedError # TODO:
for name, tensor in tensors.items():
if name in self.tensors:
self.tensors[name] = self.tensors[name].at[self.memory_index, self.env_index:self.env_index + tensor.shape[0]].set(tensor)
self.env_index += tensor.shape[0]
# single environment - multi sample (number of environments greater than num_envs (num_envs = 1))
elif dim == 2 and self.num_envs == 1:
raise NotImplementedError # TODO:
for name, tensor in tensors.items():
if name in self.tensors:
num_samples = min(shape[0], self.memory_size - self.memory_index)
remaining_samples = shape[0] - num_samples
# copy the first n samples
self.tensors[name] = self.tensors[name].at[self.memory_index:self.memory_index + num_samples].set(tensor[:num_samples].unsqueeze(dim=1))
self.memory_index += num_samples
# storage remaining samples
if remaining_samples > 0:
self.tensors[name] = self.tensors[name].at[:remaining_samples].set(tensor[num_samples:].unsqueeze(dim=1))
self.memory_index = remaining_samples
# single environment
elif dim == 1:
if self._jax:
for name, tensor in tensors.items():
if name in self.tensors:
self.tensors[name] = _copyto_i_j(self.tensors[name], tensor, self.memory_index, self.env_index)
else:
for name, tensor in tensors.items():
if name in self.tensors:
self.tensors[name][self.memory_index, self.env_index] = tensor
self.env_index += 1
else:
raise ValueError(f"Expected shape (number of environments = {self.num_envs}, data size), got {shape}")
# update indexes and flags
if self.env_index >= self.num_envs:
self.env_index = 0
self.memory_index += 1
if self.memory_index >= self.memory_size:
self.memory_index = 0
self.filled = True
# export tensors to file
if self.export:
self.save(directory=self.export_directory, format=self.export_format)
def sample(self,
names: Tuple[str],
batch_size: int,
mini_batches: int = 1,
sequence_length: int = 1) -> List[List[Union[np.ndarray, jax.Array]]]:
"""Data sampling method to be implemented by the inheriting classes
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
:param batch_size: Number of element to sample
:type batch_size: int
:param mini_batches: Number of mini-batches to sample (default: ``1``)
:type mini_batches: int, optional
:param sequence_length: Length of each sequence (default: ``1``)
:type sequence_length: int, optional
:raises NotImplementedError: The method has not been implemented
:return: Sampled data from tensors sorted according to their position in the list of names.
The sampled tensors will have the following shape: (batch size, data size)
:rtype: list of np.ndarray or jax.Array list
"""
raise NotImplementedError("The sampling method (.sample()) is not implemented")
def sample_by_index(self, names: Tuple[str], indexes: Union[tuple, np.ndarray, jax.Array], mini_batches: int = 1) -> List[List[Union[np.ndarray, jax.Array]]]:
"""Sample data from memory according to their indexes
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
:param indexes: Indexes used for sampling
:type indexes: tuple or list, np.ndarray or jax.Array
:param mini_batches: Number of mini-batches to sample (default: ``1``)
:type mini_batches: int, optional
:return: Sampled data from tensors sorted according to their position in the list of names.
The sampled tensors will have the following shape: (number of indexes, data size)
:rtype: list of np.ndarray or jax.Array list
"""
if mini_batches > 1:
batches = np.array_split(indexes, mini_batches)
views = [self._get_tensors_view(name) for name in names]
return [[view[batch] for view in views] for batch in batches]
return [[self._get_tensors_view(name)[indexes] for name in names]]
def sample_all(self, names: Tuple[str], mini_batches: int = 1, sequence_length: int = 1) -> List[List[Union[np.ndarray, jax.Array]]]:
"""Sample all data from memory
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
:param mini_batches: Number of mini-batches to sample (default: ``1``)
:type mini_batches: int, optional
:param sequence_length: Length of each sequence (default: ``1``)
:type sequence_length: int, optional
:return: Sampled data from memory.
The sampled tensors will have the following shape: (memory size * number of environments, data size)
:rtype: list of np.ndarray or jax.Array list
"""
# sequential order
if sequence_length > 1:
if mini_batches > 1:
                batches = np.array_split(self.all_sequence_indexes, mini_batches)
return [[self._get_tensors_view(name)[batch] for name in names] for batch in batches]
return [[self._get_tensors_view(name)[self.all_sequence_indexes] for name in names]]
# default order
if mini_batches > 1:
indexes = np.arange(self.memory_size * self.num_envs)
batches = np.array_split(indexes, mini_batches)
views = [self._get_tensors_view(name) for name in names]
return [[view[batch] for view in views] for batch in batches]
return [[self._get_tensors_view(name) for name in names]]
def get_sampling_indexes(self) -> Union[tuple, np.ndarray, jax.Array]:
"""Get the last indexes used for sampling
:return: Last sampling indexes
:rtype: tuple or list, np.ndarray or jax.Array
"""
return self.sampling_indexes
def save(self, directory: str = "", format: str = "pt") -> None:
"""Save the memory to a file
Supported formats:
- PyTorch (pt)
- NumPy (npz)
- Comma-separated values (csv)
:param directory: Path to the folder where the memory will be saved.
If not provided, the directory defined in the constructor will be used
:type directory: str
:param format: Format of the file where the memory will be saved (default: ``"pt"``)
:type format: str, optional
:raises ValueError: If the format is not supported
"""
if not directory:
directory = self.export_directory
os.makedirs(os.path.join(directory, "memories"), exist_ok=True)
memory_path = os.path.join(directory, "memories", \
"{}_memory_{}.{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), hex(id(self)), format))
# torch
if format == "pt":
import torch
torch.save({name: self.tensors[name] for name in self.get_tensor_names()}, memory_path)
# numpy
elif format == "npz":
np.savez(memory_path, **{name: self.tensors[name].cpu().numpy() for name in self.get_tensor_names()})
# comma-separated values
elif format == "csv":
# open csv writer # TODO: support keeping the dimensions
with open(memory_path, "a") as file:
writer = csv.writer(file)
names = self.get_tensor_names()
# write headers
headers = [[f"{name}.{i}" for i in range(self.tensors[name].shape[-1])] for name in names]
writer.writerow([item for sublist in headers for item in sublist])
# write rows
for i in range(len(self)):
writer.writerow(functools.reduce(operator.iconcat, [self.tensors[name].reshape(-1, self.tensors[name].shape[-1])[i].tolist() for name in names], []))
# unsupported format
else:
raise ValueError(f"Unsupported format: {format}. Available formats: pt, csv, npz")
def load(self, path: str) -> None:
"""Load the memory from a file
Supported formats:
- PyTorch (pt)
- NumPy (npz)
- Comma-separated values (csv)
:param path: Path to the file where the memory will be loaded
:type path: str
:raises ValueError: If the format is not supported
"""
# torch
if path.endswith(".pt"):
import torch
data = torch.load(path)
for name in self.get_tensor_names():
setattr(self, f"_tensor_{name}", jnp.array(data[name].cpu().numpy()))
# numpy
elif path.endswith(".npz"):
data = np.load(path)
for name in data:
setattr(self, f"_tensor_{name}", jnp.array(data[name]))
# comma-separated values
elif path.endswith(".csv"):
# TODO: load the memory from a csv
pass
# unsupported format
else:
raise ValueError(f"Unsupported format: {path}")
| 23,884 | Python | 44.582061 | 169 | 0.603082 |
Toni-SM/skrl/skrl/memories/jax/__init__.py | from skrl.memories.jax.base import Memory # isort:skip
from skrl.memories.jax.random import RandomMemory
| 107 | Python | 25.999994 | 55 | 0.813084 |
Toni-SM/skrl/skrl/memories/jax/random.py | from typing import List, Optional, Tuple
import jax
import numpy as np
from skrl.memories.jax import Memory
class RandomMemory(Memory):
def __init__(self,
memory_size: int,
num_envs: int = 1,
device: Optional[jax.Device] = None,
export: bool = False,
export_format: str = "pt",
export_directory: str = "",
                 replacement: bool = True) -> None:
"""Random sampling memory
Sample a batch from memory randomly
:param memory_size: Maximum number of elements in the first dimension of each internal storage
:type memory_size: int
:param num_envs: Number of parallel environments (default: ``1``)
:type num_envs: int, optional
:param device: Device on which an array is or will be allocated (default: ``None``)
:type device: jax.Device, optional
:param export: Export the memory to a file (default: ``False``).
If True, the memory will be exported when the memory is filled
:type export: bool, optional
:param export_format: Export format (default: ``"pt"``).
                              Supported formats: torch (pt), numpy (npz), comma-separated values (csv)
:type export_format: str, optional
:param export_directory: Directory where the memory will be exported (default: ``""``).
If empty, the agent's experiment directory will be used
:type export_directory: str, optional
:param replacement: Flag to indicate whether the sample is with or without replacement (default: ``True``).
Replacement implies that a value can be selected multiple times (the batch size is always guaranteed).
Sampling without replacement will return a batch of maximum memory size if the memory size is less than the requested batch size
:type replacement: bool, optional
:raises ValueError: The export format is not supported
"""
super().__init__(memory_size, num_envs, device, export, export_format, export_directory)
self._replacement = replacement
def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1) -> List[List[jax.Array]]:
"""Sample a batch from memory randomly
:param names: Tensors names from which to obtain the samples
:type names: tuple or list of strings
:param batch_size: Number of element to sample
:type batch_size: int
:param mini_batches: Number of mini-batches to sample (default: ``1``)
:type mini_batches: int, optional
:return: Sampled data from tensors sorted according to their position in the list of names.
The sampled tensors will have the following shape: (batch size, data size)
:rtype: list of jax.Array list
"""
# generate random indexes
if self._replacement:
indexes = np.random.randint(0, len(self), (batch_size,))
else:
indexes = np.random.permutation(len(self))[:batch_size]
return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)
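# --- illustrative usage sketch (not part of the original module) ---
# Mirrors the torch RandomMemory usage; the stored arrays are np.ndarray or
# jax.Array depending on the configured skrl backend. Names and sizes below are arbitrary.
if __name__ == "__main__":
    memory = RandomMemory(memory_size=16, num_envs=1, replacement=True)
    memory.create_tensor(name="states", size=3, dtype=np.float32)
    for _ in range(16):
        memory.add_samples(states=np.random.randn(1, 3).astype(np.float32))
    batches = memory.sample(names=["states"], batch_size=4)
    assert batches[0][0].shape == (4, 3)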
| 3,247 | Python | 46.072463 | 156 | 0.619341 |
Toni-SM/skrl/tests/test_agents.py | import warnings
import hypothesis
import hypothesis.strategies as st
import pytest
import torch
from skrl.agents.torch import Agent
from skrl.agents.torch.a2c import A2C
from skrl.agents.torch.amp import AMP
from skrl.agents.torch.cem import CEM
from skrl.agents.torch.ddpg import DDPG
from skrl.agents.torch.dqn import DDQN, DQN
from skrl.agents.torch.ppo import PPO
from skrl.agents.torch.q_learning import Q_LEARNING
from skrl.agents.torch.sac import SAC
from skrl.agents.torch.sarsa import SARSA
from skrl.agents.torch.td3 import TD3
from skrl.agents.torch.trpo import TRPO
from .utils import DummyModel
@pytest.fixture
def classes_and_kwargs():
return [(A2C, {"models": {"policy": DummyModel()}}),
(AMP, {"models": {"policy": DummyModel()}}),
(CEM, {"models": {"policy": DummyModel()}}),
(DDPG, {"models": {"policy": DummyModel()}}),
(DQN, {"models": {"policy": DummyModel()}}),
(DDQN, {"models": {"policy": DummyModel()}}),
(PPO, {"models": {"policy": DummyModel()}}),
(Q_LEARNING, {"models": {"policy": DummyModel()}}),
(SAC, {"models": {"policy": DummyModel()}}),
(SARSA, {"models": {"policy": DummyModel()}}),
(TD3, {"models": {"policy": DummyModel()}}),
(TRPO, {"models": {"policy": DummyModel()}})]
def test_agent(capsys, classes_and_kwargs):
for klass, kwargs in classes_and_kwargs:
cfg = {"learning_starts": 1,
"experiment": {"write_interval": 0}}
agent: Agent = klass(cfg=cfg, **kwargs)
agent.init()
agent.pre_interaction(timestep=0, timesteps=1)
        # agent.act(None, timestep=0, timesteps=1)
agent.record_transition(states=torch.tensor([]),
actions=torch.tensor([]),
rewards=torch.tensor([]),
next_states=torch.tensor([]),
terminated=torch.tensor([]),
truncated=torch.tensor([]),
infos={},
timestep=0,
timesteps=1)
agent.post_interaction(timestep=0, timesteps=1)
| 2,251 | Python | 37.169491 | 63 | 0.561972 |
Toni-SM/skrl/tests/test_examples_gymnasium.py | import os
import subprocess
import warnings
import hypothesis
import hypothesis.strategies as st
import pytest
EXAMPLE_DIR = "gymnasium"
SCRIPTS = ["ddpg_gymnasium_pendulum.py",
"cem_gymnasium_cartpole.py",
"dqn_gymnasium_cartpole.py",
"q_learning_gymnasium_frozen_lake.py"]
EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples"))
COMMANDS = [f"python {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS]
@pytest.mark.parametrize("command", COMMANDS)
def test_scripts(capsys, command):
try:
import gymnasium
except ImportError as e:
warnings.warn(f"\n\nUnable to import gymnasium ({e}).\nThis test will be skipped\n")
return
subprocess.run(command, shell=True, check=True)
| 837 | Python | 30.037036 | 124 | 0.690562 |
Toni-SM/skrl/tests/test_examples_isaacsim.py | import os
import subprocess
import warnings
import hypothesis
import hypothesis.strategies as st
import pytest
# See the following link for Omniverse Isaac Sim Python environment
# https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_python.html
PYTHON_ENVIRONMENT = "./python.sh"
EXAMPLE_DIR = "isaacsim"
SCRIPTS = ["cartpole_example_skrl.py"]
EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples"))
COMMANDS = [f"{PYTHON_ENVIRONMENT} {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS]
@pytest.mark.parametrize("command", COMMANDS)
def test_scripts(capsys, command):
try:
from omni.isaac.kit import SimulationApp
except ImportError as e:
warnings.warn(f"\n\nUnable to import SimulationApp ({e}).\nThis test will be skipped\n")
return
subprocess.run(command, shell=True, check=True)
| 932 | Python | 32.321427 | 124 | 0.732833 |
Toni-SM/skrl/tests/test_examples_omniisaacgym.py | import os
import subprocess
import warnings
import hypothesis
import hypothesis.strategies as st
import pytest
# See the following link for Omniverse Isaac Sim Python environment
# https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_python.html
PYTHON_ENVIRONMENT = "./python.sh"
EXAMPLE_DIR = "omniisaacgym"
SCRIPTS = ["ppo_cartpole.py"]
EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples"))
COMMANDS = [f"{PYTHON_ENVIRONMENT} {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)} headless=True num_envs=64" for script in SCRIPTS]
@pytest.mark.parametrize("command", COMMANDS)
def test_scripts(capsys, command):
try:
import omniisaacgymenvs
except ImportError as e:
warnings.warn(f"\n\nUnable to import omniisaacgymenvs ({e}).\nThis test will be skipped\n")
return
subprocess.run(command, shell=True, check=True)
| 939 | Python | 32.571427 | 134 | 0.734824 |
Toni-SM/skrl/tests/test_envs.py | import warnings
import hypothesis
import hypothesis.strategies as st
import pytest
import torch
from skrl.envs.torch import Wrapper, wrap_env
from .utils import DummyEnv
@pytest.fixture
def classes_and_kwargs():
return []
@pytest.mark.parametrize("wrapper", ["gym", "gymnasium", "dm", "robosuite", \
"isaacgym-preview2", "isaacgym-preview3", "isaacgym-preview4", "omniverse-isaacgym"])
def test_wrap_env(capsys, classes_and_kwargs, wrapper):
env = DummyEnv(num_envs=1)
try:
env: Wrapper = wrap_env(env=env, wrapper=wrapper)
except ValueError as e:
warnings.warn(f"{e}. This test will be skipped for '{wrapper}'")
except ModuleNotFoundError as e:
warnings.warn(f"{e}. The '{wrapper}' wrapper module is not found. This test will be skipped")
env.observation_space
env.action_space
env.state_space
env.num_envs
env.device
| 903 | Python | 24.828571 | 101 | 0.69103 |
Toni-SM/skrl/tests/test_examples_robosuite.py | import os
import subprocess
import warnings
import hypothesis
import hypothesis.strategies as st
import pytest
EXAMPLE_DIR = "robosuite"
SCRIPTS = []
EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples"))
COMMANDS = [f"python {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS]
@pytest.mark.parametrize("command", COMMANDS)
def test_scripts(capsys, command):
try:
import gym
except ImportError as e:
warnings.warn(f"\n\nUnable to import gym ({e}).\nThis test will be skipped\n")
return
subprocess.run(command, shell=True, check=True)
| 667 | Python | 26.833332 | 124 | 0.703148 |
Toni-SM/skrl/tests/test_resources_schedulers.py | import warnings
import hypothesis
import hypothesis.strategies as st
import pytest
import torch
from skrl.resources.schedulers.torch import KLAdaptiveRL
@pytest.fixture
def classes_and_kwargs():
return [(KLAdaptiveRL, {})]
@pytest.mark.parametrize("optimizer", [torch.optim.Adam([torch.ones((1,))], lr=0.1),
torch.optim.SGD([torch.ones((1,))], lr=0.1)])
def test_step(capsys, classes_and_kwargs, optimizer):
for klass, kwargs in classes_and_kwargs:
scheduler = klass(optimizer, **kwargs)
scheduler.step(0.0)
| 578 | Python | 24.173912 | 84 | 0.66782 |