Toni-SM/skrl/skrl/agents/torch/dqn/dqn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import math import gym import gymnasium import torch import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] DQN_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "update_interval": 1, # agent update interval "target_update_interval": 10, # target network update interval "exploration": { "initial_epsilon": 1.0, # initial epsilon for epsilon-greedy exploration "final_epsilon": 0.05, # final epsilon for epsilon-greedy exploration "timesteps": 1000, # timesteps for epsilon-greedy decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class DQN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Deep Q-Network (DQN) https://arxiv.org/abs/1312.5602 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(DQN_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.q_network = self.models.get("q_network", None) self.target_q_network = self.models.get("target_q_network", None) # checkpoint models self.checkpoint_modules["q_network"] = self.q_network self.checkpoint_modules["target_q_network"] = self.target_q_network if self.target_q_network is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_q_network.freeze_parameters(True) # update target networks (hard update) self.target_q_network.update_parameters(self.q_network, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._update_interval = self.cfg["update_interval"] self._target_update_interval = self.cfg["target_update_interval"] self._exploration_initial_epsilon = self.cfg["exploration"]["initial_epsilon"] self._exploration_final_epsilon = self.cfg["exploration"]["final_epsilon"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizer and learning rate scheduler if self.q_network is not None: self.optimizer = torch.optim.Adam(self.q_network.parameters(), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.int64) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: 
int :return: Actions :rtype: torch.Tensor """ states = self._state_preprocessor(states) if not self._exploration_timesteps: return torch.argmax(self.q_network.act({"states": states}, role="q_network")[0], dim=1, keepdim=True), None, None # sample random actions actions = self.q_network.random_act({"states": states}, role="q_network")[0] if timestep < self._random_timesteps: return actions, None, None # sample actions with epsilon-greedy policy epsilon = self._exploration_final_epsilon + (self._exploration_initial_epsilon - self._exploration_final_epsilon) \ * math.exp(-1.0 * timestep / self._exploration_timesteps) indexes = (torch.rand(states.shape[0], device=self.device) >= epsilon).nonzero().view(-1) if indexes.numel(): actions[indexes] = torch.argmax(self.q_network.act({"states": states[indexes]}, role="q_network")[0], dim=1, keepdim=True) # record epsilon self.track_data("Exploration / Exploration epsilon", epsilon) return actions, None, None def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts and not timestep % self._update_interval: self._update(timestep, timesteps) # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, 
sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self.tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_q_values, _, _ = self.target_q_network.act({"states": sampled_next_states}, role="target_q_network") target_q_values = torch.max(next_q_values, dim=-1, keepdim=True)[0] target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute Q-network loss q_values = torch.gather(self.q_network.act({"states": sampled_states}, role="q_network")[0], dim=1, index=sampled_actions.long()) q_network_loss = F.mse_loss(q_values, target_values) # optimize Q-network self.optimizer.zero_grad() q_network_loss.backward() self.optimizer.step() # update target network if not timestep % self._target_update_interval: self.target_q_network.update_parameters(self.q_network, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # record data self.track_data("Loss / Q-network loss", q_network_loss.item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
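A minimal usage sketch for the DQN agent defined above. The `"q_network"` / `"target_q_network"` model keys, the `DQN_DEFAULT_CONFIG` structure, and the constructor signature come from this file; the `RandomMemory` class, the `DeterministicMixin`/`Model`-based network definition, and the `env` object (any wrapped skrl environment) are assumptions about the rest of the skrl API and may need adjusting to the installed version.

```python
import copy

import torch.nn as nn

from skrl.agents.torch.dqn import DQN, DQN_DEFAULT_CONFIG
from skrl.memories.torch import RandomMemory            # assumed replay-memory class
from skrl.models.torch import DeterministicMixin, Model  # assumed model base classes


class QNetwork(DeterministicMixin, Model):
    """Simple MLP mapping states to one Q-value per discrete action."""
    def __init__(self, observation_space, action_space, device):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(),
                                 nn.Linear(64, self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


# env: assumed wrapped environment exposing observation_space / action_space / num_envs / device
models = {"q_network": QNetwork(env.observation_space, env.action_space, env.device),
          "target_q_network": QNetwork(env.observation_space, env.action_space, env.device)}

# start from the default config and override a few entries
cfg = copy.deepcopy(DQN_DEFAULT_CONFIG)
cfg["learning_starts"] = 100
cfg["exploration"]["timesteps"] = 5000

agent = DQN(models=models,
            memory=RandomMemory(memory_size=10000, num_envs=env.num_envs, device=env.device),
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device,
            cfg=cfg)
```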
Toni-SM/skrl/skrl/agents/torch/dqn/__init__.py
from skrl.agents.torch.dqn.ddqn import DDQN, DDQN_DEFAULT_CONFIG
from skrl.agents.torch.dqn.dqn import DQN, DQN_DEFAULT_CONFIG
Toni-SM/skrl/skrl/agents/torch/dqn/ddqn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import math import gym import gymnasium import torch import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] DDQN_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "update_interval": 1, # agent update interval "target_update_interval": 10, # target network update interval "exploration": { "initial_epsilon": 1.0, # initial epsilon for epsilon-greedy exploration "final_epsilon": 0.05, # final epsilon for epsilon-greedy exploration "timesteps": 1000, # timesteps for epsilon-greedy decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class DDQN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Double Deep Q-Network (DDQN) https://ojs.aaai.org/index.php/AAAI/article/view/10295 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(DDQN_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.q_network = self.models.get("q_network", None) self.target_q_network = self.models.get("target_q_network", None) # checkpoint models self.checkpoint_modules["q_network"] = self.q_network self.checkpoint_modules["target_q_network"] = self.target_q_network if self.target_q_network is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_q_network.freeze_parameters(True) # update target networks (hard update) self.target_q_network.update_parameters(self.q_network, polyak=1) # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._update_interval = self.cfg["update_interval"] self._target_update_interval = self.cfg["target_update_interval"] self._exploration_initial_epsilon = self.cfg["exploration"]["initial_epsilon"] self._exploration_final_epsilon = self.cfg["exploration"]["final_epsilon"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizer and learning rate scheduler if self.q_network is not None: self.optimizer = torch.optim.Adam(self.q_network.parameters(), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.int64) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: 
int :return: Actions :rtype: torch.Tensor """ states = self._state_preprocessor(states) if not self._exploration_timesteps: return torch.argmax(self.q_network.act({"states": states}, role="q_network")[0], dim=1, keepdim=True), None, None # sample random actions actions = self.q_network.random_act({"states": states}, role="q_network")[0] if timestep < self._random_timesteps: return actions, None, None # sample actions with epsilon-greedy policy epsilon = self._exploration_final_epsilon + (self._exploration_initial_epsilon - self._exploration_final_epsilon) \ * math.exp(-1.0 * timestep / self._exploration_timesteps) indexes = (torch.rand(states.shape[0], device=self.device) >= epsilon).nonzero().view(-1) if indexes.numel(): actions[indexes] = torch.argmax(self.q_network.act({"states": states[indexes]}, role="q_network")[0], dim=1, keepdim=True) # record epsilon self.track_data("Exploration / Exploration epsilon", epsilon) return actions, None, None def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts and not timestep % self._update_interval: self._update(timestep, timesteps) # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, 
sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self.tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values with torch.no_grad(): next_q_values, _, _ = self.target_q_network.act({"states": sampled_next_states}, role="target_q_network") target_q_values = torch.gather(next_q_values, dim=1, index=torch.argmax(self.q_network.act({"states": sampled_next_states}, \ role="q_network")[0], dim=1, keepdim=True)) target_values = sampled_rewards + self._discount_factor * sampled_dones.logical_not() * target_q_values # compute Q-network loss q_values = torch.gather(self.q_network.act({"states": sampled_states}, role="q_network")[0], dim=1, index=sampled_actions.long()) q_network_loss = F.mse_loss(q_values, target_values) # optimize Q-network self.optimizer.zero_grad() q_network_loss.backward() self.optimizer.step() # update target network if not timestep % self._target_update_interval: self.target_q_network.update_parameters(self.q_network, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # record data self.track_data("Loss / Q-network loss", q_network_loss.item()) self.track_data("Target / Target (max)", torch.max(target_values).item()) self.track_data("Target / Target (min)", torch.min(target_values).item()) self.track_data("Target / Target (mean)", torch.mean(target_values).item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
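The only substantive difference between `DDQN._update` and `DQN._update` is how the next-state Q-value entering the target is chosen. A small standalone torch snippet contrasting the two target computations (tensor names are illustrative):

```python
import torch

# Q-values for a batch of 3 next states and 4 discrete actions
q_next_online = torch.rand(3, 4)   # online network  Q(s', .)
q_next_target = torch.rand(3, 4)   # target network  Q'(s', .)

# DQN target: max over the target network's own estimates
dqn_target_q = torch.max(q_next_target, dim=-1, keepdim=True)[0]

# DDQN target: the online network selects the action, the target network evaluates it
best_actions = torch.argmax(q_next_online, dim=1, keepdim=True)
ddqn_target_q = torch.gather(q_next_target, dim=1, index=best_actions)
```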
Toni-SM/skrl/skrl/agents/torch/sarsa/__init__.py
from skrl.agents.torch.sarsa.sarsa import SARSA, SARSA_DEFAULT_CONFIG
Toni-SM/skrl/skrl/agents/torch/sarsa/sarsa.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import torch from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] SARSA_DEFAULT_CONFIG = { "discount_factor": 0.99, # discount factor (gamma) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "learning_rate": 0.5, # learning rate (alpha) "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class SARSA(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """State Action Reward State Action (SARSA) https://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.17.2539 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(SARSA_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy # configuration self._discount_factor = self.cfg["discount_factor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._learning_rate = self.cfg["learning_rate"] self._rewards_shaper = self.cfg["rewards_shaper"] # create temporary variables needed for storage and computation self._current_states = None self._current_actions = None self._current_rewards = None self._current_next_states = None self._current_dones = None def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": states}, role="policy") # sample actions from policy return self.policy.act({"states": states}, role="policy") def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self._current_states = states self._current_actions = actions self._current_rewards = rewards self._current_next_states = next_states self._current_dones = terminated + truncated if self.memory is not None: self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, 
rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self._update(timestep, timesteps) # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ q_table = self.policy.table() env_ids = torch.arange(self._current_rewards.shape[0]).view(-1, 1) # compute next actions next_actions = self.policy.act({"states": self._current_next_states}, role="policy")[0] # update Q-table q_table[env_ids, self._current_states, self._current_actions] += self._learning_rate \ * (self._current_rewards + self._discount_factor * self._current_dones.logical_not() \ * q_table[env_ids, self._current_next_states, next_actions] \ - q_table[env_ids, self._current_states, self._current_actions])
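A standalone sketch of the tabular temporal-difference update performed in `SARSA._update` above, using an illustrative Q-table and a single hand-written transition:

```python
import torch

# Illustrative tabular setup: 1 environment, 5 states, 3 actions
q_table = torch.zeros(1, 5, 3)
learning_rate, discount_factor = 0.5, 0.99

# one (s, a, r, s', a') transition; env_ids indexes the per-environment tables
env_ids = torch.tensor([[0]])
s, a, r = torch.tensor([[1]]), torch.tensor([[2]]), torch.tensor([[1.0]])
s_next, a_next = torch.tensor([[3]]), torch.tensor([[0]])
not_done = torch.tensor([[1.0]])

# SARSA update: move Q(s, a) toward r + gamma * (1 - done) * Q(s', a')
td_target = r + discount_factor * not_done * q_table[env_ids, s_next, a_next]
q_table[env_ids, s, a] += learning_rate * (td_target - q_table[env_ids, s, a])
```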
Toni-SM/skrl/skrl/agents/torch/a2c/a2c.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] A2C_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "mini_batches": 1, # number of mini batches to use for updating "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "entropy_loss_scale": 0.0, # entropy loss scaling factor "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class A2C(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Advantage Actor Critic (A2C) https://arxiv.org/abs/1602.01783 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(A2C_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: if self.policy is self.value: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) else: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) self._tensors_names = ["states", "actions", "log_prob", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None 
def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write 
tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # compute returns and advantages with torch.no_grad(): self.value.train(False) last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float())}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 kl_divergences = [] # mini-batches loop for sampled_states, sampled_actions, sampled_log_prob, sampled_returns, sampled_advantages in sampled_batches: sampled_states = self._state_preprocessor(sampled_states, train=True) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy") # compute approximate KL divergence for KLAdaptive learning rate scheduler if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss policy_loss = -(sampled_advantages * next_log_prob).mean() # 
compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states}, role="value") value_loss = F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip > 0: if self.policy is self.value: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) else: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(torch.tensor(kl_divergences).mean()) else: self.scheduler.step() # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / len(sampled_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / len(sampled_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / len(sampled_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
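A standalone sketch of the `compute_gae` helper used in `A2C._update`. Unlike the in-file closure, which captures `last_values` from the enclosing scope, this version takes it as an explicit argument; the rollout tensors at the bottom are illustrative.

```python
import torch

def compute_gae(rewards, dones, values, last_values,
                discount_factor=0.99, lambda_coefficient=0.95):
    """Generalized Advantage Estimation over a stored rollout (time-major tensors)."""
    advantages = torch.zeros_like(rewards)
    advantage = 0
    not_dones = dones.logical_not()
    memory_size = rewards.shape[0]
    # backward recursion: A_t = r_t - V_t + gamma * (1 - d_t) * (V_{t+1} + lambda * A_{t+1})
    for i in reversed(range(memory_size)):
        next_values = values[i + 1] if i < memory_size - 1 else last_values
        advantage = rewards[i] - values[i] \
                    + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
        advantages[i] = advantage
    returns = advantages + values
    # normalize advantages for the policy loss
    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
    return returns, advantages

# Illustrative rollout: 16 steps, 4 parallel environments
rewards = torch.rand(16, 4, 1)
dones = torch.zeros(16, 4, 1, dtype=torch.bool)
values = torch.rand(16, 4, 1)
last_values = torch.rand(4, 1)
returns, advantages = compute_gae(rewards, dones, values, last_values)
```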
Toni-SM/skrl/skrl/agents/torch/a2c/a2c_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] A2C_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "mini_batches": 1, # number of mini batches to use for updating "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "entropy_loss_scale": 0.0, # entropy loss scaling factor "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class A2C_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Advantage Actor Critic (A2C) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1602.01783 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(A2C_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: if self.policy is self.value: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) else: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) self._tensors_names = ["states", "actions", "terminated", "log_prob", "returns", "advantages"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] 
# used for sampling during training self._rnn_final_states = {"policy": [], "value": []} self._rnn_initial_states = {"policy": [], "value": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # value if self.value is not None: if self.policy is self.value: self._rnn_initial_states["value"] = self._rnn_initial_states["policy"] else: for i, size in enumerate(self.value.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_value_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_value_{i}") # default RNN states self._rnn_initial_states["value"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions # TODO: fix for stochasticity, rnn and log_prob if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") self._current_log_prob = log_prob if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, 
timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} values, _, outputs = self.value.act({"states": self._state_preprocessor(states), **rnn}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) if self.policy is not self.value: rnn_states.update({f"rnn_value_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["value"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) # update RNN states if self._rnn: self._rnn_final_states["value"] = self._rnn_final_states["policy"] if self.policy is self.value else outputs.get("rnn", []) # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 if self.policy is not self.value: for rnn_state in self._rnn_final_states["value"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type 
lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # compute returns and advantages with torch.no_grad(): self.value.train(False) rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), **rnn}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) rnn_policy, rnn_value = {}, {} if self._rnn: sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 kl_divergences = [] # mini-batches loop for i, (sampled_states, sampled_actions, sampled_dones, sampled_log_prob, sampled_returns, sampled_advantages) in enumerate(sampled_batches): if self._rnn: if self.policy is self.value: rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches[i]], "terminated": sampled_dones} rnn_value = rnn_policy else: rnn_policy = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "policy" in n], "terminated": sampled_dones} rnn_value = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "value" in n], "terminated": sampled_dones} sampled_states = self._state_preprocessor(sampled_states, train=True) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="policy") # compute approximate KL divergence for KLAdaptive learning rate scheduler if isinstance(self.scheduler, KLAdaptiveLR): with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss policy_loss = -(sampled_advantages * next_log_prob).mean() # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states, **rnn_value}, role="value") value_loss = F.mse_loss(sampled_returns, 
predicted_values) # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip > 0: if self.policy is self.value: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) else: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(torch.tensor(kl_divergences).mean()) else: self.scheduler.step() # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / len(sampled_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / len(sampled_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / len(sampled_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
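A minimal, self-contained sketch of the GAE recursion implemented by compute_gae in the agents above; the rewards, values and termination flags below are made-up toy numbers for illustration only and are not taken from the repository.

import torch

# toy rollout: 4 timesteps, 1 environment (made-up numbers)
rewards = torch.tensor([[1.0], [1.0], [0.0], [1.0]])
values = torch.tensor([[0.5], [0.4], [0.3], [0.2]])
dones = torch.tensor([[False], [False], [True], [False]])
last_values = torch.tensor([0.1])  # value of the states following the rollout (bootstrapped)
discount_factor, lambda_coefficient = 0.99, 0.95

advantage = 0
advantages = torch.zeros_like(rewards)
not_dones = dones.logical_not()

# same backward recursion as compute_gae above
for i in reversed(range(rewards.shape[0])):
    next_values = values[i + 1] if i < rewards.shape[0] - 1 else last_values
    advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
    advantages[i] = advantage

returns = advantages + values  # targets for the value loss
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)  # normalized advantages
print(returns.squeeze(-1), advantages.squeeze(-1))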
Toni-SM/skrl/skrl/agents/torch/a2c/__init__.py
from skrl.agents.torch.a2c.a2c import A2C, A2C_DEFAULT_CONFIG
from skrl.agents.torch.a2c.a2c_rnn import A2C_RNN
Toni-SM/skrl/skrl/agents/torch/ppo/ppo_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] PPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class PPO_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Proximal Policy Optimization (PPO) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/1707.06347 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(PPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._ratio_clip = self.cfg["ratio_clip"] self._value_clip = self.cfg["value_clip"] self._clip_predicted_values = self.cfg["clip_predicted_values"] self._value_loss_scale = self.cfg["value_loss_scale"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._kl_threshold = self.cfg["kl_threshold"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: if self.policy is self.value: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) else: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ 
super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "terminated", "log_prob", "values", "returns", "advantages"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": [], "value": []} self._rnn_initial_states = {"policy": [], "value": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # value if self.value is not None: if self.policy is self.value: self._rnn_initial_states["value"] = self._rnn_initial_states["policy"] else: for i, size in enumerate(self.value.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_value_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_value_{i}") # default RNN states self._rnn_initial_states["value"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions # TODO: fix for stochasticity, rnn and log_prob if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), **rnn}, role="policy") self._current_log_prob = log_prob if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, 
timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} values, _, outputs = self.value.act({"states": self._state_preprocessor(states), **rnn}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) if self.policy is not self.value: rnn_states.update({f"rnn_value_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["value"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) # update RNN states if self._rnn: self._rnn_final_states["value"] = self._rnn_final_states["policy"] if self.policy is self.value else outputs.get("rnn", []) # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 if self.policy is not self.value: for rnn_state in self._rnn_final_states["value"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # 
write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # compute returns and advantages with torch.no_grad(): self.value.train(False) rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), **rnn}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) rnn_policy, rnn_value = {}, {} if self._rnn: sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): kl_divergences = [] # mini-batches loop for i, (sampled_states, sampled_actions, sampled_dones, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages) in enumerate(sampled_batches): if self._rnn: if self.policy is self.value: rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches[i]], "terminated": sampled_dones} rnn_value = rnn_policy else: rnn_policy = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "policy" in n], 
"terminated": sampled_dones} rnn_value = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "value" in n], "terminated": sampled_dones} sampled_states = self._state_preprocessor(sampled_states, train=not epoch) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions, **rnn_policy}, role="policy") # compute approximate KL divergence with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # early stopping with KL divergence if self._kl_threshold and kl_divergence > self._kl_threshold: break # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss ratio = torch.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip) policy_loss = -torch.min(surrogate, surrogate_clipped).mean() # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states, **rnn_value}, role="value") if self._clip_predicted_values: predicted_values = sampled_values + torch.clip(predicted_values - sampled_values, min=-self._value_clip, max=self._value_clip) value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip > 0: if self.policy is self.value: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) else: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(torch.tensor(kl_divergences).mean()) else: self.scheduler.step() # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
Toni-SM/skrl/skrl/agents/torch/ppo/__init__.py
from skrl.agents.torch.ppo.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.agents.torch.ppo.ppo_rnn import PPO_RNN
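A minimal construction sketch for the agents exposed by this package; it assumes that `env` is an skrl-wrapped vectorized environment and that `policy` and `value` are skrl Model instances created elsewhere (none of these names come from the files in this dump).

from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.memories.torch import RandomMemory

cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 16
cfg["mini_batches"] = 2

# one memory slot per rollout step, shared by all environments
memory = RandomMemory(memory_size=cfg["rollouts"], num_envs=env.num_envs, device=env.device)

agent = PPO(models={"policy": policy, "value": value},
            memory=memory,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device,
            cfg=cfg)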
Toni-SM/skrl/skrl/agents/torch/ppo/ppo.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] PPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class PPO(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Proximal Policy Optimization (PPO) https://arxiv.org/abs/1707.06347 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(PPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._ratio_clip = self.cfg["ratio_clip"] self._value_clip = self.cfg["value_clip"] self._clip_predicted_values = self.cfg["clip_predicted_values"] self._value_loss_scale = self.cfg["value_loss_scale"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._kl_threshold = self.cfg["kl_threshold"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: if self.policy is self.value: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) else: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ 
super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, 
truncated=truncated, log_prob=self._current_log_prob, values=values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # compute returns and advantages with torch.no_grad(): self.value.train(False) last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float())}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches) 
cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): kl_divergences = [] # mini-batches loop for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches: sampled_states = self._state_preprocessor(sampled_states, train=not epoch) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy") # compute approximate KL divergence with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # early stopping with KL divergence if self._kl_threshold and kl_divergence > self._kl_threshold: break # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss ratio = torch.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip) policy_loss = -torch.min(surrogate, surrogate_clipped).mean() # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states}, role="value") if self._clip_predicted_values: predicted_values = sampled_values + torch.clip(predicted_values - sampled_values, min=-self._value_clip, max=self._value_clip) value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip > 0: if self.policy is self.value: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) else: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(torch.tensor(kl_divergences).mean()) else: self.scheduler.step() # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
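A toy-tensor sketch of the clipped surrogate objective computed in the mini-batch loop above; the advantages and log-probabilities are made-up numbers for illustration, not values produced by the repository code.

import torch

# made-up per-sample quantities for a single mini-batch
sampled_advantages = torch.tensor([0.5, -0.2, 1.0, -1.5])
sampled_log_prob = torch.tensor([-1.0, -0.8, -1.5, -0.3])   # log-probs when the actions were taken
next_log_prob = torch.tensor([-0.7, -0.9, -1.4, -0.6])      # log-probs under the current policy
ratio_clip = 0.2  # PPO_DEFAULT_CONFIG["ratio_clip"]

ratio = torch.exp(next_log_prob - sampled_log_prob)
surrogate = sampled_advantages * ratio
surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
policy_loss = -torch.min(surrogate, surrogate_clipped).mean()
print(policy_loss)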
Toni-SM/skrl/skrl/agents/torch/amp/amp.py
from typing import Any, Callable, Mapping, Optional, Tuple, Union import copy import itertools import math import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model # [start-config-dict-torch] AMP_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 6, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 5e-5, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "amp_state_preprocessor": None, # AMP state preprocessor class (see skrl.resources.preprocessors) "amp_state_preprocessor_kwargs": {}, # AMP state preprocessor's kwargs (e.g. {"size": env.amp_observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.0, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 2.5, # value loss scaling factor "discriminator_loss_scale": 5.0, # discriminator loss scaling factor "amp_batch_size": 512, # batch size for updating the reference motion dataset "task_reward_weight": 0.0, # task-reward weight (wG) "style_reward_weight": 1.0, # style-reward weight (wS) "discriminator_batch_size": 0, # batch size for computing the discriminator loss (all samples if 0) "discriminator_reward_scale": 2, # discriminator reward scaling factor "discriminator_logit_regularization_scale": 0.05, # logit regularization scale factor for the discriminator loss "discriminator_gradient_penalty_scale": 5, # gradient penalty scaling factor for the discriminator loss "discriminator_weight_decay_scale": 0.0001, # weight decay scaling factor for the discriminator loss "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class AMP(Agent): def __init__(self, models: Mapping[str, Model], memory: 
Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None, amp_observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, motion_dataset: Optional[Memory] = None, reply_buffer: Optional[Memory] = None, collect_reference_motions: Optional[Callable[[int], torch.Tensor]] = None, collect_observation: Optional[Callable[[], torch.Tensor]] = None) -> None: """Adversarial Motion Priors (AMP) https://arxiv.org/abs/2104.02180 The implementation is adapted from the NVIDIA IsaacGymEnvs (https://github.com/NVIDIA-Omniverse/IsaacGymEnvs/blob/main/isaacgymenvs/learning/amp_continuous.py) :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :param amp_observation_space: AMP observation/state space or shape (default: ``None``) :type amp_observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None :param motion_dataset: Reference motion dataset: M (default: ``None``) :type motion_dataset: skrl.memory.torch.Memory or None :param reply_buffer: Reply buffer for preventing discriminator overfitting: B (default: ``None``) :type reply_buffer: skrl.memory.torch.Memory or None :param collect_reference_motions: Callable to collect reference motions (default: ``None``) :type collect_reference_motions: Callable[[int], torch.Tensor] or None :param collect_observation: Callable to collect observation (default: ``None``) :type collect_observation: Callable[[], torch.Tensor] or None :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(AMP_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) self.amp_observation_space = amp_observation_space self.motion_dataset = motion_dataset self.reply_buffer = reply_buffer self.collect_reference_motions = collect_reference_motions self.collect_observation = collect_observation # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) self.discriminator = self.models.get("discriminator", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value self.checkpoint_modules["discriminator"] = self.discriminator # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] 
self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._ratio_clip = self.cfg["ratio_clip"] self._value_clip = self.cfg["value_clip"] self._clip_predicted_values = self.cfg["clip_predicted_values"] self._value_loss_scale = self.cfg["value_loss_scale"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._discriminator_loss_scale = self.cfg["discriminator_loss_scale"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._amp_state_preprocessor = self.cfg["amp_state_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._amp_batch_size = self.cfg["amp_batch_size"] self._task_reward_weight = self.cfg["task_reward_weight"] self._style_reward_weight = self.cfg["style_reward_weight"] self._discriminator_batch_size = self.cfg["discriminator_batch_size"] self._discriminator_reward_scale = self.cfg["discriminator_reward_scale"] self._discriminator_logit_regularization_scale = self.cfg["discriminator_logit_regularization_scale"] self._discriminator_gradient_penalty_scale = self.cfg["discriminator_gradient_penalty_scale"] self._discriminator_weight_decay_scale = self.cfg["discriminator_weight_decay_scale"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None and self.discriminator is not None: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters(), self.discriminator.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor if self._amp_state_preprocessor: self._amp_state_preprocessor = self._amp_state_preprocessor(**self.cfg["amp_state_preprocessor_kwargs"]) self.checkpoint_modules["amp_state_preprocessor"] = self._amp_state_preprocessor else: self._amp_state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) 
self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) self.memory.create_tensor(name="amp_states", size=self.amp_observation_space, dtype=torch.float32) self.memory.create_tensor(name="next_values", size=1, dtype=torch.float32) self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated", \ "log_prob", "values", "returns", "advantages", "amp_states", "next_values"] # create tensors for motion dataset and reply buffer if self.motion_dataset is not None: self.motion_dataset.create_tensor(name="states", size=self.amp_observation_space, dtype=torch.float32) self.reply_buffer.create_tensor(name="states", size=self.amp_observation_space, dtype=torch.float32) # initialize motion dataset for _ in range(math.ceil(self.motion_dataset.memory_size / self._amp_batch_size)): self.motion_dataset.add_samples(states=self.collect_reference_motions(self._amp_batch_size)) # create temporary variables needed for storage and computation self._current_log_prob = None self._current_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # use collected states if self._current_states is not None: states = self._current_states states = self._state_preprocessor(states) # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": states}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": states}, role="policy") self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # use collected states if self._current_states is not None: states = self._current_states super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: amp_states = infos["amp_obs"] # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) with 
torch.no_grad(): values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) bootstrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated with torch.no_grad(): next_values, _, _ = self.value.act({"states": self._state_preprocessor(next_states)}, role="value") next_values = self._value_preprocessor(next_values, inverse=True) next_values *= infos['terminate'].view(-1, 1).logical_not() self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, amp_states=amp_states, next_values=next_values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, amp_states=amp_states, next_values=next_values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if self.collect_observation is not None: self._current_states = self.collect_observation() def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): advantage = rewards[i] - values[i] + discount_factor * (next_values[i] + lambda_coefficient * not_dones[i] * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # update dataset of reference motions self.motion_dataset.add_samples(states=self.collect_reference_motions(self._amp_batch_size)) # compute combined rewards rewards =
self.memory.get_tensor_by_name("rewards") amp_states = self.memory.get_tensor_by_name("amp_states") with torch.no_grad(): amp_logits, _, _ = self.discriminator.act({"states": self._amp_state_preprocessor(amp_states)}, role="discriminator") style_reward = -torch.log(torch.maximum(1 - 1 / (1 + torch.exp(-amp_logits)), torch.tensor(0.0001, device=self.device))) style_reward *= self._discriminator_reward_scale combined_rewards = self._task_reward_weight * rewards + self._style_reward_weight * style_reward # compute returns and advantages values = self.memory.get_tensor_by_name("values") next_values=self.memory.get_tensor_by_name("next_values") returns, advantages = compute_gae(rewards=combined_rewards, dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=next_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self.tensors_names, mini_batches=self._mini_batches) sampled_motion_batches = self.motion_dataset.sample(names=["states"], batch_size=self.memory.memory_size * self.memory.num_envs, mini_batches=self._mini_batches) if len(self.reply_buffer): sampled_replay_batches = self.reply_buffer.sample(names=["states"], batch_size=self.memory.memory_size * self.memory.num_envs, mini_batches=self._mini_batches) else: sampled_replay_batches = [[batches[self.tensors_names.index("amp_states")]] for batches in sampled_batches] cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 cumulative_discriminator_loss = 0 # learning epochs for epoch in range(self._learning_epochs): # mini-batches loop for batch_index, (sampled_states, sampled_actions, _, _, _, \ sampled_log_prob, sampled_values, sampled_returns, sampled_advantages, \ sampled_amp_states, _) in enumerate(sampled_batches): sampled_states = self._state_preprocessor(sampled_states, train=True) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy") # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss ratio = torch.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip) policy_loss = -torch.min(surrogate, surrogate_clipped).mean() # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states}, role="value") if self._clip_predicted_values: predicted_values = sampled_values + torch.clip(predicted_values - sampled_values, min=-self._value_clip, max=self._value_clip) value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # compute discriminator loss if self._discriminator_batch_size: sampled_amp_states = self._amp_state_preprocessor(sampled_amp_states[0:self._discriminator_batch_size], train=True) sampled_amp_replay_states = self._amp_state_preprocessor( sampled_replay_batches[batch_index][0][0:self._discriminator_batch_size], train=True) sampled_amp_motion_states = self._amp_state_preprocessor( sampled_motion_batches[batch_index][0][0:self._discriminator_batch_size], train=True) else: 
sampled_amp_states = self._amp_state_preprocessor(sampled_amp_states, train=True) sampled_amp_replay_states = self._amp_state_preprocessor(sampled_replay_batches[batch_index][0], train=True) sampled_amp_motion_states = self._amp_state_preprocessor(sampled_motion_batches[batch_index][0], train=True) sampled_amp_motion_states.requires_grad_(True) amp_logits, _, _ = self.discriminator.act({"states": sampled_amp_states}, role="discriminator") amp_replay_logits, _, _ = self.discriminator.act({"states": sampled_amp_replay_states}, role="discriminator") amp_motion_logits, _, _ = self.discriminator.act({"states": sampled_amp_motion_states}, role="discriminator") amp_cat_logits = torch.cat([amp_logits, amp_replay_logits], dim=0) # discriminator prediction loss discriminator_loss = 0.5 * (nn.BCEWithLogitsLoss()(amp_cat_logits, torch.zeros_like(amp_cat_logits)) \ + nn.BCEWithLogitsLoss()(amp_motion_logits, torch.ones_like(amp_motion_logits))) # discriminator logit regularization if self._discriminator_logit_regularization_scale: logit_weights = torch.flatten(list(self.discriminator.modules())[-1].weight) discriminator_loss += self._discriminator_logit_regularization_scale * torch.sum(torch.square(logit_weights)) # discriminator gradient penalty if self._discriminator_gradient_penalty_scale: amp_motion_gradient = torch.autograd.grad(amp_motion_logits, sampled_amp_motion_states, grad_outputs=torch.ones_like(amp_motion_logits), create_graph=True, retain_graph=True, only_inputs=True) gradient_penalty = torch.sum(torch.square(amp_motion_gradient[0]), dim=-1).mean() discriminator_loss += self._discriminator_gradient_penalty_scale * gradient_penalty # discriminator weight decay if self._discriminator_weight_decay_scale: weights = [torch.flatten(module.weight) for module in self.discriminator.modules() \ if isinstance(module, torch.nn.Linear)] weight_decay = torch.sum(torch.square(torch.cat(weights, dim=-1))) discriminator_loss += self._discriminator_weight_decay_scale * weight_decay discriminator_loss *= self._discriminator_loss_scale # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss + discriminator_loss).backward() if self._grad_norm_clip > 0: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters(), self.discriminator.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() cumulative_discriminator_loss += discriminator_loss.item() # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # update AMP replay buffer self.reply_buffer.add_samples(states=amp_states.view(-1, amp_states.shape[-1])) # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Loss / Discriminator loss", cumulative_discriminator_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
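Editor's note: the style reward computed in `_update` above maps raw discriminator logits to a bounded imitation reward, r = -log(max(1 - sigmoid(logit), 1e-4)), so confident "reference motion" predictions yield a large positive reward while confident "policy motion" predictions yield a reward near zero. A minimal, self-contained sketch of that mapping (the logits below are random placeholders, not library output):

import torch

amp_logits = torch.randn(8, 1)  # placeholder discriminator outputs
# same expression as in AMP._update: -log(max(1 - sigmoid(logit), 1e-4))
style_reward = -torch.log(torch.maximum(1 - 1 / (1 + torch.exp(-amp_logits)),
                                        torch.tensor(0.0001)))
print(style_reward.squeeze())  # ~9.2 when sigmoid(logit) -> 1, ~0 when sigmoid(logit) -> 0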
Toni-SM/skrl/skrl/agents/torch/amp/__init__.py
from skrl.agents.torch.amp.amp import AMP, AMP_DEFAULT_CONFIG
Toni-SM/skrl/skrl/agents/torch/rpo/rpo.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] RPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "alpha": 0.5, # amount of uniform random perturbation on the mean actions: U(-alpha, alpha) "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class RPO(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Robust Policy Optimization (RPO) https://arxiv.org/abs/2212.07536 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(RPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._ratio_clip = self.cfg["ratio_clip"] self._value_clip = self.cfg["value_clip"] self._clip_predicted_values = self.cfg["clip_predicted_values"] self._value_loss_scale = self.cfg["value_loss_scale"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._kl_threshold = self.cfg["kl_threshold"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._alpha = self.cfg["alpha"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: if self.policy is self.value: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) else: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> 
None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), "alpha": self._alpha}, role="policy") self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values values, _, _ = self.value.act({"states": self._state_preprocessor(states), "alpha": self._alpha}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # storage transition in memory self.memory.add_samples(states=states, 
actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # compute returns and advantages with torch.no_grad(): self.value.train(False) last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), "alpha": self._alpha}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from 
memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): kl_divergences = [] # mini-batches loop for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches: sampled_states = self._state_preprocessor(sampled_states, train=not epoch) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions, "alpha": self._alpha}, role="policy") # compute approximate KL divergence with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # early stopping with KL divergence if self._kl_threshold and kl_divergence > self._kl_threshold: break # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss ratio = torch.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip) policy_loss = -torch.min(surrogate, surrogate_clipped).mean() # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states, "alpha": self._alpha}, role="value") if self._clip_predicted_values: predicted_values = sampled_values + torch.clip(predicted_values - sampled_values, min=-self._value_clip, max=self._value_clip) value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip > 0: if self.policy is self.value: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) else: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(torch.tensor(kl_divergences).mean()) else: self.scheduler.step() # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
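Editor's note: the RPO agent above forwards cfg["alpha"] to the policy and value models as an extra "alpha" entry in the inputs dictionary (see `act`, `record_transition` and `_update`). A minimal sketch of a policy model that consumes it, assuming the usual skrl Model/GaussianMixin API; the layer sizes and the fallback alpha value are arbitrary choices for illustration, not part of this file:

import torch
import torch.nn as nn
from skrl.models.torch import Model, GaussianMixin

class RPOPolicy(GaussianMixin, Model):
    """Toy Gaussian policy that perturbs the mean action with U(-alpha, alpha) noise (RPO)"""
    def __init__(self, observation_space, action_space, device=None):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions=False)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ELU(),
                                 nn.Linear(64, self.num_actions))
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        mean_actions = self.net(inputs["states"])
        # robustness perturbation: uniform noise whose range is the agent's cfg["alpha"]
        alpha = inputs.get("alpha", 0.5)
        mean_actions = mean_actions + torch.empty_like(mean_actions).uniform_(-alpha, alpha)
        return mean_actions, self.log_std_parameter, {}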
Toni-SM/skrl/skrl/agents/torch/rpo/__init__.py
from skrl.agents.torch.rpo.rpo import RPO, RPO_DEFAULT_CONFIG from skrl.agents.torch.rpo.rpo_rnn import RPO_RNN
Toni-SM/skrl/skrl/agents/torch/rpo/rpo_rnn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.agents.torch import Agent from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] RPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "alpha": 0.5, # amount of uniform random perturbation on the mean actions: U(-alpha, alpha) "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class RPO_RNN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Robust Policy Optimization (RPO) with support for Recurrent Neural Networks (RNN, GRU, LSTM, etc.) https://arxiv.org/abs/2212.07536 :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ _cfg = copy.deepcopy(RPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._ratio_clip = self.cfg["ratio_clip"] self._value_clip = self.cfg["value_clip"] self._clip_predicted_values = self.cfg["clip_predicted_values"] self._value_loss_scale = self.cfg["value_loss_scale"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._kl_threshold = self.cfg["kl_threshold"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._alpha = self.cfg["alpha"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: if self.policy is self.value: self.optimizer = torch.optim.Adam(self.policy.parameters(), lr=self._learning_rate) else: self.optimizer = torch.optim.Adam(itertools.chain(self.policy.parameters(), self.value.parameters()), lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> 
None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=torch.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=torch.float32) self.memory.create_tensor(name="rewards", size=1, dtype=torch.float32) self.memory.create_tensor(name="terminated", size=1, dtype=torch.bool) self.memory.create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memory.create_tensor(name="values", size=1, dtype=torch.float32) self.memory.create_tensor(name="returns", size=1, dtype=torch.float32) self.memory.create_tensor(name="advantages", size=1, dtype=torch.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "terminated", "log_prob", "values", "returns", "advantages"] # RNN specifications self._rnn = False # flag to indicate whether RNN is available self._rnn_tensors_names = [] # used for sampling during training self._rnn_final_states = {"policy": [], "value": []} self._rnn_initial_states = {"policy": [], "value": []} self._rnn_sequence_length = self.policy.get_specification().get("rnn", {}).get("sequence_length", 1) # policy for i, size in enumerate(self.policy.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_policy_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_policy_{i}") # default RNN states self._rnn_initial_states["policy"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # value if self.value is not None: if self.policy is self.value: self._rnn_initial_states["value"] = self._rnn_initial_states["policy"] else: for i, size in enumerate(self.value.get_specification().get("rnn", {}).get("sizes", [])): self._rnn = True # create tensors in memory if self.memory is not None: self.memory.create_tensor(name=f"rnn_value_{i}", size=(size[0], size[2]), dtype=torch.float32, keep_dimensions=True) self._rnn_tensors_names.append(f"rnn_value_{i}") # default RNN states self._rnn_initial_states["value"].append(torch.zeros(size, dtype=torch.float32, device=self.device)) # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ rnn = {"rnn": self._rnn_initial_states["policy"]} if self._rnn else {} # sample random actions # TODO: fix for stochasticity, rnn and log_prob if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states), **rnn}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), "alpha": self._alpha, **rnn}, role="policy") self._current_log_prob = log_prob if self._rnn: self._rnn_final_states["policy"] = outputs.get("rnn", []) return actions, log_prob, outputs def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, 
truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} values, _, outputs = self.value.act({"states": self._state_preprocessor(states), "alpha": self._alpha, **rnn}, role="value") values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # package RNN states rnn_states = {} if self._rnn: rnn_states.update({f"rnn_policy_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["policy"])}) if self.policy is not self.value: rnn_states.update({f"rnn_value_{i}": s.transpose(0, 1) for i, s in enumerate(self._rnn_initial_states["value"])}) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values, **rnn_states) # update RNN states if self._rnn: self._rnn_final_states["value"] = self._rnn_final_states["policy"] if self.policy is self.value else outputs.get("rnn", []) # reset states if the episodes have ended finished_episodes = terminated.nonzero(as_tuple=False) if finished_episodes.numel(): for rnn_state in self._rnn_final_states["policy"]: rnn_state[:, finished_episodes[:, 0]] = 0 if self.policy is not self.value: for rnn_state in self._rnn_final_states["value"]: rnn_state[:, finished_episodes[:, 0]] = 0 self._rnn_initial_states = self._rnn_final_states def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: 
self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # compute returns and advantages with torch.no_grad(): self.value.train(False) rnn = {"rnn": self._rnn_initial_states["value"]} if self._rnn else {} last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states.float()), "alpha": self._alpha, **rnn}, role="value") self.value.train(True) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) rnn_policy, rnn_value = {}, {} if self._rnn: sampled_rnn_batches = self.memory.sample_all(names=self._rnn_tensors_names, mini_batches=self._mini_batches, sequence_length=self._rnn_sequence_length) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): kl_divergences = [] # mini-batches loop for i, (sampled_states, sampled_actions, sampled_dones, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages) in enumerate(sampled_batches): if self._rnn: if self.policy is self.value: rnn_policy = {"rnn": [s.transpose(0, 1) for s in sampled_rnn_batches[i]], "terminated": sampled_dones} rnn_value = rnn_policy else: rnn_policy = {"rnn": 
[s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "policy" in n], "terminated": sampled_dones} rnn_value = {"rnn": [s.transpose(0, 1) for s, n in zip(sampled_rnn_batches[i], self._rnn_tensors_names) if "value" in n], "terminated": sampled_dones} sampled_states = self._state_preprocessor(sampled_states, train=not epoch) _, next_log_prob, _ = self.policy.act({"states": sampled_states, "taken_actions": sampled_actions, "alpha": self._alpha, **rnn_policy}, role="policy") # compute approximate KL divergence with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # early stopping with KL divergence if self._kl_threshold and kl_divergence > self._kl_threshold: break # compute entropy loss if self._entropy_loss_scale: entropy_loss = -self._entropy_loss_scale * self.policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss ratio = torch.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip, 1.0 + self._ratio_clip) policy_loss = -torch.min(surrogate, surrogate_clipped).mean() # compute value loss predicted_values, _, _ = self.value.act({"states": sampled_states, "alpha": self._alpha, **rnn_value}, role="value") if self._clip_predicted_values: predicted_values = sampled_values + torch.clip(predicted_values - sampled_values, min=-self._value_clip, max=self._value_clip) value_loss = self._value_loss_scale * F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizer.zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip > 0: if self.policy is self.value: nn.utils.clip_grad_norm_(self.policy.parameters(), self._grad_norm_clip) else: nn.utils.clip_grad_norm_(itertools.chain(self.policy.parameters(), self.value.parameters()), self._grad_norm_clip) self.optimizer.step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(torch.tensor(kl_divergences).mean()) else: self.scheduler.step() # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", self.policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
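Editor's note: both `record_transition` and `_update` above repeatedly call `.transpose(0, 1)` on the RNN states. The reason is a shape-convention mismatch: torch recurrent layers keep hidden states as (num_layers, num_envs, hidden_size), while the memory indexes dimension 0 by environment. A minimal illustration of that round trip (sizes are arbitrary placeholders):

import torch

num_layers, num_envs, hidden_size = 1, 4, 32

# hidden state as produced by torch.nn.GRU/LSTM: (num_layers, num_envs, hidden_size)
hidden = torch.zeros(num_layers, num_envs, hidden_size)

# stored with the environment dimension first, as done before add_samples(...)
stored = hidden.transpose(0, 1)        # (num_envs, num_layers, hidden_size)

# transposed back before being fed to the model again, as done on sampled mini-batches
restored = stored.transpose(0, 1)      # (num_layers, num_envs, hidden_size)
assert torch.equal(hidden, restored)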
Toni-SM/skrl/skrl/agents/jax/base.py
from typing import Any, Mapping, Optional, Tuple, Union import collections import copy import datetime import os import pickle import gym import gymnasium import flax import jax import numpy as np from skrl import config, logger from skrl.memories.jax import Memory from skrl.models.jax import Model class Agent: def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Base class that represent a RL agent :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict """ self._jax = config.jax.backend == "jax" self.models = models self.observation_space = observation_space self.action_space = action_space self.cfg = cfg if cfg is not None else {} if device is None: self.device = jax.devices()[0] else: self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0] if type(memory) is list: self.memory = memory[0] self.secondary_memories = memory[1:] else: self.memory = memory self.secondary_memories = [] # convert the models to their respective device for model in self.models.values(): if model is not None: pass self.tracking_data = collections.defaultdict(list) self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000) self._track_rewards = collections.deque(maxlen=100) self._track_timesteps = collections.deque(maxlen=100) self._cumulative_rewards = None self._cumulative_timesteps = None self.training = True # checkpoint self.checkpoint_modules = {} self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000) self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False) self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": False, "modules": {}} # experiment directory directory = self.cfg.get("experiment", {}).get("directory", "") experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "") if not directory: directory = os.path.join(os.getcwd(), "runs") if not experiment_name: experiment_name = "{}_{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), self.__class__.__name__) self.experiment_dir = os.path.join(directory, experiment_name) def __str__(self) -> str: """Generate a representation of the agent as string :return: Representation of the agent as string :rtype: str """ string = f"Agent: {repr(self)}" for k, v in self.cfg.items(): if type(v) is dict: string += f"\n |-- {k}" for 
k1, v1 in v.items(): string += f"\n | |-- {k1}: {v1}" else: string += f"\n |-- {k}: {v}" return string def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any: """Empty preprocess method This method is defined because PyTorch multiprocessing can't pickle lambdas :param _input: Input to preprocess :type _input: Any :return: Preprocessed input :rtype: Any """ return _input def _get_internal_value(self, _module: Any) -> Any: """Get internal module/variable state/value :param _module: Module or variable :type _module: Any :return: Module/variable state/value :rtype: Any """ return _module.state_dict.params if hasattr(_module, "state_dict") else _module def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent This method should be called before the agent is used. It will initialize the TensoBoard writer (and optionally Weights & Biases) and create the checkpoints directory :param trainer_cfg: Trainer configuration :type trainer_cfg: dict, optional """ # setup Weights & Biases if self.cfg.get("experiment", {}).get("wandb", False): # save experiment config trainer_cfg = trainer_cfg if trainer_cfg is not None else {} try: models_cfg = {k: v.net._modules for (k, v) in self.models.items()} except AttributeError: models_cfg = {k: v._modules for (k, v) in self.models.items()} config={**self.cfg, **trainer_cfg, **models_cfg} # set default values wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {})) wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1]) wandb_kwargs.setdefault("sync_tensorboard", True) wandb_kwargs.setdefault("config", {}) wandb_kwargs["config"].update(config) # init Weights & Biases import wandb wandb.init(**wandb_kwargs) # main entry to log data for consumption and visualization by TensorBoard if self.write_interval > 0: self.writer = None # tensorboard via torch SummaryWriter try: from torch.utils.tensorboard import SummaryWriter self.writer = SummaryWriter(log_dir=self.experiment_dir) except ImportError as e: pass # tensorboard via tensorflow if self.writer is None: try: import tensorflow class _SummaryWriter: def __init__(self, log_dir): self.writer = tensorflow.summary.create_file_writer(logdir=log_dir) def add_scalar(self, tag, value, step): with self.writer.as_default(): tensorflow.summary.scalar(tag, value, step=step) self.writer = _SummaryWriter(log_dir=self.experiment_dir) except ImportError as e: pass # tensorboard via tensorboardX if self.writer is None: try: import tensorboardX self.writer = tensorboardX.SummaryWriter(log_dir=self.experiment_dir) except ImportError as e: pass # show warnings and exit if self.writer is None: logger.warning("No package found to write events to Tensorboard.") logger.warning("Set agent's `write_interval` setting to 0 to disable writing") logger.warning("or install one of the following packages:") logger.warning(" - PyTorch: https://pytorch.org/get-started/locally") logger.warning(" - TensorFlow: https://www.tensorflow.org/install") logger.warning(" - TensorboardX: https://github.com/lanpa/tensorboardX#install") logger.warning("The current running process will be terminated.") exit() if self.checkpoint_interval > 0: os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True) def track_data(self, tag: str, value: float) -> None: """Track data to TensorBoard Currently only scalar data are supported :param tag: Data identifier (e.g. 
'Loss / policy loss') :type tag: str :param value: Value to track :type value: float """ self.tracking_data[tag].append(value) def write_tracking_data(self, timestep: int, timesteps: int) -> None: """Write tracking data to TensorBoard :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ for k, v in self.tracking_data.items(): if k.endswith("(min)"): self.writer.add_scalar(k, np.min(v), timestep) elif k.endswith("(max)"): self.writer.add_scalar(k, np.max(v), timestep) else: self.writer.add_scalar(k, np.mean(v), timestep) # reset data containers for next iteration self._track_rewards.clear() self._track_timesteps.clear() self.tracking_data.clear() def write_checkpoint(self, timestep: int, timesteps: int) -> None: """Write checkpoint (modules) to disk The checkpoints are saved in the directory 'checkpoints' in the experiment directory. The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time. :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f")) # separated modules if self.checkpoint_store_separately: for name, module in self.checkpoint_modules.items(): with open(os.path.join(self.experiment_dir, "checkpoints", f"{name}_{tag}.pickle"), "wb") as file: pickle.dump(flax.serialization.to_bytes(self._get_internal_value(module)), file, protocol=4) # whole agent else: modules = {} for name, module in self.checkpoint_modules.items(): modules[name] = flax.serialization.to_bytes(self._get_internal_value(module)) with open(os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pickle"), "wb") as file: pickle.dump(modules, file, protocol=4) # best modules if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]: # separated modules if self.checkpoint_store_separately: for name, module in self.checkpoint_modules.items(): with open(os.path.join(self.experiment_dir, "checkpoints", f"best_{name}.pickle"), "wb") as file: pickle.dump(flax.serialization.to_bytes(self.checkpoint_best_modules["modules"][name]), file, protocol=4) # whole agent else: modules = {} for name, module in self.checkpoint_modules.items(): modules[name] = flax.serialization.to_bytes(self.checkpoint_best_modules["modules"][name]) with open(os.path.join(self.experiment_dir, "checkpoints", "best_agent.pickle"), "wb") as file: pickle.dump(modules, file, protocol=4) self.checkpoint_best_modules["saved"] = True def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes :return: Actions :rtype: np.ndarray or jax.Array """ raise NotImplementedError def record_transition(self, states: Union[np.ndarray, jax.Array], actions: Union[np.ndarray, jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory (to be 
implemented by the inheriting classes) Inheriting classes must call this method to record episode information (rewards, timesteps, etc.). In addition to recording environment transition (such as states, rewards, etc.), agent information can be recorded. :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if self.write_interval > 0: # compute the cumulative sum of the rewards and timesteps if self._cumulative_rewards is None: self._cumulative_rewards = np.zeros_like(rewards, dtype=np.float32) self._cumulative_timesteps = np.zeros_like(rewards, dtype=np.int32) # TODO: find a better way to avoid https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError if self._jax: rewards = jax.device_get(rewards) terminated = jax.device_get(terminated) truncated = jax.device_get(truncated) self._cumulative_rewards += rewards self._cumulative_timesteps += 1 # check ended episodes finished_episodes = (terminated + truncated).nonzero()[0] if finished_episodes.size: # storage cumulative rewards and timesteps self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist()) self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist()) # reset the cumulative rewards and timesteps self._cumulative_rewards[finished_episodes] = 0 self._cumulative_timesteps[finished_episodes] = 0 # record data self.tracking_data["Reward / Instantaneous reward (max)"].append(np.max(rewards).item()) self.tracking_data["Reward / Instantaneous reward (min)"].append(np.min(rewards).item()) self.tracking_data["Reward / Instantaneous reward (mean)"].append(np.mean(rewards).item()) if len(self._track_rewards): track_rewards = np.array(self._track_rewards) track_timesteps = np.array(self._track_timesteps) self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards)) self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards)) self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards)) self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps)) self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps)) self.tracking_data["Episode / Total timesteps (mean)"].append(np.mean(track_timesteps)) def set_mode(self, mode: str) -> None: """Set the model mode (training or evaluation) :param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ for model in self.models.values(): if model is not None: model.set_mode(mode) def set_running_mode(self, mode: str) -> None: """Set the current running mode (training or evaluation) This method sets the value of the ``training`` property (boolean). 
This property can be used to know if the agent is running in training or evaluation mode. :param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ self.training = mode == "train" def save(self, path: str) -> None: """Save the agent to the specified path :param path: Path to save the model to :type path: str """ modules = {} for name, module in self.checkpoint_modules.items(): modules[name] = flax.serialization.to_bytes(self._get_internal_value(module)) # HACK: Does it make sense to use https://github.com/google/orbax # file.write(flax.serialization.to_bytes(modules)) with open(path, "wb") as file: pickle.dump(modules, file, protocol=4) def load(self, path: str) -> None: """Load the model from the specified path :param path: Path to load the model from :type path: str """ with open(path, "rb") as file: modules = pickle.load(file) if type(modules) is dict: for name, data in modules.items(): module = self.checkpoint_modules.get(name, None) if module is not None: if hasattr(module, "state_dict"): params = flax.serialization.from_bytes(module.state_dict.params, data) module.state_dict = module.state_dict.replace(params=params) else: pass # TODO: raise NotImplementedError else: logger.warning(f"Cannot load the {name} module. The agent doesn't have such an instance") def migrate(self, path: str, name_map: Mapping[str, Mapping[str, str]] = {}, auto_mapping: bool = True, verbose: bool = False) -> bool: """Migrate the specified external checkpoint to the current agent :raises NotImplementedError: Not yet implemented """ raise NotImplementedError def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ timestep += 1 # update best models and write checkpoints if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval: # update best models reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31)) if reward > self.checkpoint_best_modules["reward"]: self.checkpoint_best_modules["timestep"] = timestep self.checkpoint_best_modules["reward"] = reward self.checkpoint_best_modules["saved"] = False self.checkpoint_best_modules["modules"] = {k: copy.deepcopy(self._get_internal_value(v)) for k, v in self.checkpoint_modules.items()} # write checkpoints self.write_checkpoint(timestep, timesteps) # write to tensorboard if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval: self.write_tracking_data(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes """ raise NotImplementedError
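The save/load and checkpointing methods above serialize each module's parameters to msgpack bytes with flax.serialization and store them in a pickle file. A minimal, self-contained sketch of that round-trip (not part of skrl; the parameter pytree below is hypothetical):

import pickle
import flax
import jax.numpy as jnp

# hypothetical parameter pytree standing in for a model's state_dict.params
params = flax.core.FrozenDict({"params": {"layer": {"kernel": jnp.ones((2, 2)), "bias": jnp.zeros((2,))}}})

# save: convert the parameters to bytes and pickle them (mirrors write_checkpoint / save)
with open("/tmp/agent.pickle", "wb") as file:
    pickle.dump({"policy": flax.serialization.to_bytes(params)}, file, protocol=4)

# load: unpickle and restore, using the current parameters as the template (mirrors load)
with open("/tmp/agent.pickle", "rb") as file:
    modules = pickle.load(file)
restored_params = flax.serialization.from_bytes(params, modules["policy"])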
Toni-SM/skrl/skrl/agents/jax/__init__.py
from skrl.agents.jax.base import Agent
Toni-SM/skrl/skrl/agents/jax/cem/cem.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import gym import gymnasium import jax import jax.numpy as jnp import numpy as np import optax from skrl import logger from skrl.agents.jax import Agent from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam # [start-config-dict-jax] CEM_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "percentile": 0.70, # percentile to compute the reward bound [0, 1] "discount_factor": 0.99, # discount factor (gamma) "learning_rate": 1e-2, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] class CEM(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Cross-Entropy Method (CEM) https://ieeexplore.ieee.org/abstract/document/6796865/ :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ # _cfg = copy.deepcopy(CEM_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = CEM_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy # configuration self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._percentile = self.cfg["percentile"] self._discount_factor = self.cfg["discount_factor"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._episode_tracking = [] # set up optimizer and learning rate scheduler if self.policy is not None: self.optimizer = Adam(model=self.policy, lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.int32) self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8) self.tensors_names = ["states", "actions", "rewards"] # set up models for just-in-time compilation with XLA self.policy.apply = jax.jit(self.policy.apply, static_argnums=2) def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") if not self._jax: # numpy backend actions = jax.device_get(actions) return actions, None, outputs def record_transition(self, states: Union[np.ndarray, jax.Array], actions: Union[np.ndarray, 
jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) # track episodes internally if self._rollout: indexes = (terminated + truncated).nonzero()[0] if indexes.size: for i in indexes: self._episode_tracking[i.item()].append(self._rollout + 1) else: self._episode_tracking = [[0] for _ in range(rewards.shape[-1])] def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self._rollout = 0 self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample all memory sampled_states, sampled_actions, sampled_rewards = self.memory.sample_all(names=self.tensors_names)[0] sampled_states = self._state_preprocessor(sampled_states, train=True) if self._jax: # move to numpy backend sampled_states = jax.device_get(sampled_states) sampled_actions = jax.device_get(sampled_actions) sampled_rewards = jax.device_get(sampled_rewards) # compute discounted return threshold limits = [] returns = [] for e in range(sampled_rewards.shape[-1]): for i, j in zip(self._episode_tracking[e][:-1], self._episode_tracking[e][1:]): 
limits.append([e + i, e + j]) rewards = sampled_rewards[e + i: e + j] returns.append(np.sum(rewards * self._discount_factor ** \ np.flip(np.arange(rewards.shape[0]), axis=-1).reshape(rewards.shape))) if not len(returns): logger.warning("No returns to update. Consider increasing the number of rollouts") return returns = np.array(returns) return_threshold = np.quantile(returns, self._percentile, axis=-1) # get elite states and actions indexes = (returns >= return_threshold).nonzero()[0] elite_states = np.concatenate([sampled_states[limits[i][0]:limits[i][1]] for i in indexes], axis=0) elite_actions = np.concatenate([sampled_actions[limits[i][0]:limits[i][1]] for i in indexes], axis=0).reshape(-1) # compute policy loss def _policy_loss(params): # compute scores for the elite states _, _, outputs = self.policy.act({"states": elite_states}, "policy", params) scores = outputs["net_output"] # HACK: return optax.softmax_cross_entropy_with_integer_labels(scores, elite_actions).mean() labels = jax.nn.one_hot(elite_actions, self.action_space.n) return optax.softmax_cross_entropy(scores, labels).mean() policy_loss, grad = jax.value_and_grad(_policy_loss, has_aux=False)(self.policy.state_dict.params) # optimization step (policy) self.optimizer = self.optimizer.step(grad, self.policy) # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Coefficient / Return threshold", return_threshold.item()) self.track_data("Coefficient / Mean discounted returns", returns.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler.get_last_lr()[0])
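The core of CEM's _update is the elite selection: per-episode discounted returns are compared against a percentile threshold, and only transitions from the episodes that reach it are used in the cross-entropy (classification) loss. A small NumPy sketch of that idea (not part of skrl; it uses conventional forward discounting and made-up episode rewards):

import numpy as np

def select_elite_episodes(episode_rewards, percentile=0.70, discount_factor=0.99):
    # discounted return of each episode (reward at step t weighted by gamma**t)
    returns = np.array([np.sum(r * discount_factor ** np.arange(len(r))) for r in episode_rewards])
    # reward bound: episodes at or above this percentile are kept as elites
    return_threshold = np.quantile(returns, percentile)
    elite_indexes = (returns >= return_threshold).nonzero()[0]
    return elite_indexes, return_threshold

episodes = [np.ones(5), np.full(3, 0.5), np.full(4, 2.0)]  # hypothetical per-episode rewards
elites, threshold = select_elite_episodes(episodes)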
Toni-SM/skrl/skrl/agents/jax/cem/__init__.py
from skrl.agents.jax.cem.cem import CEM, CEM_DEFAULT_CONFIG
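A hedged usage sketch (not taken from the skrl documentation) showing how the exported default config is typically copied and overridden before instantiating the agent; models, memory and a wrapped env are assumed to be created elsewhere:

from skrl.agents.jax.cem import CEM, CEM_DEFAULT_CONFIG

cfg = CEM_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 32        # update after more rollouts
cfg["percentile"] = 0.80    # keep only the top 20% of episodes
cfg["learning_rate"] = 5e-3

# models, memory and env are assumed to exist (see the skrl examples)
agent = CEM(models=models, memory=memory,
            observation_space=env.observation_space,
            action_space=env.action_space,
            cfg=cfg)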
Toni-SM/skrl/skrl/agents/jax/sac/__init__.py
from skrl.agents.jax.sac.sac import SAC, SAC_DEFAULT_CONFIG
Toni-SM/skrl/skrl/agents/jax/sac/sac.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import functools import gym import gymnasium import flax import jax import jax.numpy as jnp import numpy as np from skrl.agents.jax import Agent from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam # [start-config-dict-jax] SAC_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "learn_entropy": True, # learn entropy "entropy_learning_rate": 1e-3, # entropy learning rate "initial_entropy_value": 0.2, # initial entropy value "target_entropy": None, # target entropy "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @functools.partial(jax.jit, static_argnames=("critic_1_act", "critic_2_act")) def _update_critic(critic_1_act, critic_1_state_dict, critic_2_act, critic_2_state_dict, target_q1_values: jax.Array, target_q2_values: jax.Array, entropy_coefficient, next_log_prob, sampled_states: Union[np.ndarray, jax.Array], sampled_actions: Union[np.ndarray, jax.Array], sampled_rewards: Union[np.ndarray, jax.Array], sampled_dones: Union[np.ndarray, jax.Array], discount_factor: float): # compute target values target_q_values = jnp.minimum(target_q1_values, target_q2_values) - entropy_coefficient * next_log_prob target_values = sampled_rewards + discount_factor * jnp.logical_not(sampled_dones) * target_q_values # compute critic loss def _critic_loss(params, critic_act, role): critic_values, _, _ = critic_act({"states": sampled_states, "taken_actions": sampled_actions}, role, params) critic_loss = ((critic_values - target_values) ** 2).mean() return critic_loss, critic_values (critic_1_loss, critic_1_values), grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_1_state_dict.params, critic_1_act, "critic_1") (critic_2_loss, critic_2_values), grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_2_state_dict.params, critic_2_act, "critic_2") return grad, (critic_1_loss + critic_2_loss) / 2, critic_1_values, critic_2_values, target_values @functools.partial(jax.jit, static_argnames=("policy_act", "critic_1_act", "critic_2_act")) def _update_policy(policy_act, 
critic_1_act, critic_2_act, policy_state_dict, critic_1_state_dict, critic_2_state_dict, entropy_coefficient, sampled_states): # compute policy (actor) loss def _policy_loss(policy_params, critic_1_params, critic_2_params): actions, log_prob, _ = policy_act({"states": sampled_states}, "policy", policy_params) critic_1_values, _, _ = critic_1_act({"states": sampled_states, "taken_actions": actions}, "critic_1", critic_1_params) critic_2_values, _, _ = critic_2_act({"states": sampled_states, "taken_actions": actions}, "critic_2", critic_2_params) return (entropy_coefficient * log_prob - jnp.minimum(critic_1_values, critic_2_values)).mean(), log_prob (policy_loss, log_prob), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params, critic_1_state_dict.params, critic_2_state_dict.params) return grad, policy_loss, log_prob @jax.jit def _update_entropy(log_entropy_coefficient_state_dict, target_entropy, log_prob): # compute entropy loss def _entropy_loss(params): return -(params["params"] * (log_prob + target_entropy)).mean() entropy_loss, grad = jax.value_and_grad(_entropy_loss, has_aux=False)(log_entropy_coefficient_state_dict.params) return grad, entropy_loss class SAC(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Soft Actor-Critic (SAC) https://arxiv.org/abs/1801.01290 :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ # _cfg = copy.deepcopy(SAC_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = SAC_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_learning_rate = self.cfg["entropy_learning_rate"] self._learn_entropy = self.cfg["learn_entropy"] self._entropy_coefficient = self.cfg["initial_entropy_value"] self._rewards_shaper = self.cfg["rewards_shaper"] # entropy if self._learn_entropy: self._target_entropy = self.cfg["target_entropy"] if self._target_entropy is None: if issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box): self._target_entropy = -np.prod(self.action_space.shape).astype(np.float32) elif issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete): self._target_entropy = -self.action_space.n else: self._target_entropy = 0 class _LogEntropyCoefficient: def __init__(self, entropy_coefficient: float) -> None: class StateDict(flax.struct.PyTreeNode): params: flax.core.FrozenDict[str, Any] = flax.struct.field(pytree_node=True) self.state_dict = StateDict(flax.core.FrozenDict({"params": jnp.array([jnp.log(entropy_coefficient)])})) @property def value(self): return self.state_dict.params["params"] self.log_entropy_coefficient = _LogEntropyCoefficient(self._entropy_coefficient) self.entropy_optimizer = Adam(model=self.log_entropy_coefficient, lr=self._entropy_learning_rate) self.checkpoint_modules["entropy_optimizer"] = self.entropy_optimizer # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = Adam(model=self.policy, lr=self._actor_learning_rate, grad_norm_clip=self._grad_norm_clip) self.critic_1_optimizer = Adam(model=self.critic_1, lr=self._critic_learning_rate, grad_norm_clip=self._grad_norm_clip) self.critic_2_optimizer = Adam(model=self.critic_2, lr=self._critic_learning_rate, 
grad_norm_clip=self._grad_norm_clip) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_1_scheduler = self._learning_rate_scheduler(self.critic_1_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_2_scheduler = self._learning_rate_scheduler(self.critic_2_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_1_optimizer"] = self.critic_1_optimizer self.checkpoint_modules["critic_2_optimizer"] = self.critic_2_optimizer # set up target networks if self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32) self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # set up models for just-in-time compilation with XLA self.policy.apply = jax.jit(self.policy.apply, static_argnums=2) if self.critic_1 is not None and self.critic_2 is not None: self.critic_1.apply = jax.jit(self.critic_1.apply, static_argnums=2) self.critic_2.apply = jax.jit(self.critic_2.apply, static_argnums=2) if self.target_critic_1 is not None and self.target_critic_2 is not None: self.target_critic_1.apply = jax.jit(self.target_critic_1.apply, static_argnums=2) self.target_critic_2.apply = jax.jit(self.target_critic_2.apply, static_argnums=2) def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") if not self._jax: # numpy backend actions = jax.device_get(actions) return actions, None, outputs def record_transition(self, states: 
Union[np.ndarray, jax.Array], actions: Union[np.ndarray, jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) next_actions, next_log_prob, _ = self.policy.act({"states": sampled_next_states}, role="policy") # compute target values target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_2") # compute critic loss grad, 
critic_loss, critic_1_values, critic_2_values, target_values = _update_critic(self.critic_1.act, self.critic_1.state_dict, self.critic_2.act, self.critic_2.state_dict, target_q1_values, target_q2_values, self._entropy_coefficient, next_log_prob, sampled_states, sampled_actions, sampled_rewards, sampled_dones, self._discount_factor) # optimization step (critic) self.critic_1_optimizer = self.critic_1_optimizer.step(grad, self.critic_1) self.critic_2_optimizer = self.critic_2_optimizer.step(grad, self.critic_2) # compute policy (actor) loss grad, policy_loss, log_prob = _update_policy(self.policy.act, self.critic_1.act, self.critic_2.act, self.policy.state_dict, self.critic_1.state_dict, self.critic_2.state_dict, self._entropy_coefficient, sampled_states) # optimization step (policy) self.policy_optimizer = self.policy_optimizer.step(grad, self.policy) # entropy learning if self._learn_entropy: # compute entropy loss grad, entropy_loss = _update_entropy(self.log_entropy_coefficient.state_dict, self._target_entropy, log_prob) # optimization step (entropy) self.entropy_optimizer = self.entropy_optimizer.step(grad, self.log_entropy_coefficient) # compute entropy coefficient self._entropy_coefficient = jnp.exp(self.log_entropy_coefficient.value) # update target networks self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_1_scheduler.step() self.critic_2_scheduler.step() # record data if self.write_interval > 0: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", critic_1_values.max().item()) self.track_data("Q-network / Q1 (min)", critic_1_values.min().item()) self.track_data("Q-network / Q1 (mean)", critic_1_values.mean().item()) self.track_data("Q-network / Q2 (max)", critic_2_values.max().item()) self.track_data("Q-network / Q2 (min)", critic_2_values.min().item()) self.track_data("Q-network / Q2 (mean)", critic_2_values.mean().item()) self.track_data("Target / Target (max)", target_values.max().item()) self.track_data("Target / Target (min)", target_values.min().item()) self.track_data("Target / Target (mean)", target_values.mean().item()) if self._learn_entropy: self.track_data("Loss / Entropy loss", entropy_loss.item()) self.track_data("Coefficient / Entropy coefficient", self._entropy_coefficient.item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic 1 learning rate", self.critic_1_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic 2 learning rate", self.critic_2_scheduler.get_last_lr()[0])
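For reference, the entropy-regularized TD target that _update_critic builds can be written compactly. A standalone sketch (not part of skrl) with jax.numpy, assuming already-computed target critic values and next-action log-probabilities:

import jax.numpy as jnp

def soft_q_target(rewards, dones, target_q1, target_q2, next_log_prob,
                  entropy_coefficient=0.2, discount_factor=0.99):
    # y = r + gamma * (1 - done) * (min(Q1', Q2') - alpha * log pi(a'|s'))
    target_q = jnp.minimum(target_q1, target_q2) - entropy_coefficient * next_log_prob
    return rewards + discount_factor * jnp.logical_not(dones) * target_q

# hypothetical batch of size 2
y = soft_q_target(jnp.array([1.0, 0.5]), jnp.array([False, True]),
                  jnp.array([2.0, 1.0]), jnp.array([1.8, 1.2]), jnp.array([-0.3, -0.1]))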
Toni-SM/skrl/skrl/agents/jax/td3/__init__.py
from skrl.agents.jax.td3.td3 import TD3, TD3_DEFAULT_CONFIG
Toni-SM/skrl/skrl/agents/jax/td3/td3.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl import logger from skrl.agents.jax import Agent from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam # [start-config-dict-jax] TD3_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "policy_delay": 2, # policy delay update with respect to critic update "smooth_regularization_noise": None, # smooth noise for regularization "smooth_regularization_clip": 0.5, # clip for smooth regularization "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _apply_exploration_noise(actions: jax.Array, noises: jax.Array, clip_actions_min: jax.Array, clip_actions_max: jax.Array, scale: float) -> jax.Array: noises = noises.at[:].multiply(scale) return jnp.clip(actions + noises, a_min=clip_actions_min, a_max=clip_actions_max), noises @jax.jit def _apply_smooth_regularization_noise(actions: jax.Array, noises: jax.Array, clip_actions_min: jax.Array, clip_actions_max: jax.Array, smooth_regularization_clip: float) -> jax.Array: noises = jnp.clip(noises, a_min=-smooth_regularization_clip, a_max=smooth_regularization_clip) return jnp.clip(actions + noises, a_min=clip_actions_min, a_max=clip_actions_max) @functools.partial(jax.jit, static_argnames=("critic_1_act", "critic_2_act")) def _update_critic(critic_1_act, critic_1_state_dict, critic_2_act, critic_2_state_dict, target_q1_values: jax.Array, target_q2_values: jax.Array, sampled_states: Union[np.ndarray, jax.Array], sampled_actions: Union[np.ndarray, jax.Array], sampled_rewards: Union[np.ndarray, jax.Array], sampled_dones: Union[np.ndarray, jax.Array], discount_factor: float): # compute target values target_q_values = jnp.minimum(target_q1_values, target_q2_values) target_values = sampled_rewards + 
discount_factor * jnp.logical_not(sampled_dones) * target_q_values # compute critic loss def _critic_loss(params, critic_act, role): critic_values, _, _ = critic_act({"states": sampled_states, "taken_actions": sampled_actions}, role, params) critic_loss = ((critic_values - target_values) ** 2).mean() return critic_loss, critic_values (critic_1_loss, critic_1_values), grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_1_state_dict.params, critic_1_act, "critic_1") (critic_2_loss, critic_2_values), grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_2_state_dict.params, critic_2_act, "critic_2") return grad, critic_1_loss + critic_2_loss, critic_1_values, critic_2_values, target_values @functools.partial(jax.jit, static_argnames=("policy_act", "critic_1_act")) def _update_policy(policy_act, critic_1_act, policy_state_dict, critic_1_state_dict, sampled_states): # compute policy (actor) loss def _policy_loss(policy_params, critic_1_params): actions, _, _ = policy_act({"states": sampled_states}, "policy", policy_params) critic_values, _, _ = critic_1_act({"states": sampled_states, "taken_actions": actions}, "critic_1", critic_1_params) return -critic_values.mean() policy_loss, grad = jax.value_and_grad(_policy_loss, has_aux=False)(policy_state_dict.params, critic_1_state_dict.params) return grad, policy_loss class TD3(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Twin Delayed DDPG (TD3) https://arxiv.org/abs/1802.09477 :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ # _cfg = copy.deepcopy(TD3_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = TD3_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic_1 = self.models.get("critic_1", None) self.critic_2 = self.models.get("critic_2", None) self.target_critic_1 = self.models.get("target_critic_1", None) self.target_critic_2 = self.models.get("target_critic_2", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic_1"] = self.critic_1 self.checkpoint_modules["critic_2"] = self.critic_2 self.checkpoint_modules["target_critic_1"] = self.target_critic_1 self.checkpoint_modules["target_critic_2"] = self.target_critic_2 # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._policy_delay = self.cfg["policy_delay"] self._critic_update_counter = 0 self._smooth_regularization_noise = self.cfg["smooth_regularization_noise"] self._smooth_regularization_clip = self.cfg["smooth_regularization_clip"] if self._smooth_regularization_noise is None: logger.warning("agents:TD3: No smooth regularization noise specified to reduce variance during training") self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if self.policy is not None and self.critic_1 is not None and self.critic_2 is not None: self.policy_optimizer = Adam(model=self.policy, lr=self._actor_learning_rate, grad_norm_clip=self._grad_norm_clip) self.critic_1_optimizer = Adam(model=self.critic_1, lr=self._critic_learning_rate, grad_norm_clip=self._grad_norm_clip) self.critic_2_optimizer = Adam(model=self.critic_2, lr=self._critic_learning_rate, grad_norm_clip=self._grad_norm_clip) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_1_scheduler = self._learning_rate_scheduler(self.critic_1_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_2_scheduler = self._learning_rate_scheduler(self.critic_2_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer 
self.checkpoint_modules["critic_1_optimizer"] = self.critic_1_optimizer self.checkpoint_modules["critic_2_optimizer"] = self.critic_2_optimizer # set up target networks if self.target_policy is not None and self.target_critic_1 is not None and self.target_critic_2 is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic_1.freeze_parameters(True) self.target_critic_2.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic_1.update_parameters(self.critic_1, polyak=1) self.target_critic_2.update_parameters(self.critic_2, polyak=1) # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32) self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # clip noise bounds if self.action_space is not None: if self._jax: self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32) self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32) else: self.clip_actions_min = np.array(self.action_space.low, dtype=np.float32) self.clip_actions_max = np.array(self.action_space.high, dtype=np.float32) # set up models for just-in-time compilation with XLA self.policy.apply = jax.jit(self.policy.apply, static_argnums=2) if self.critic_1 is not None and self.critic_2 is not None: self.critic_1.apply = jax.jit(self.critic_1.apply, static_argnums=2) self.critic_2.apply = jax.jit(self.critic_2.apply, static_argnums=2) if self.target_policy is not None and self.target_critic_1 is not None and self.target_critic_2 is not None: self.target_policy.apply = jax.jit(self.target_policy.apply, static_argnums=2) self.target_critic_1.apply = jax.jit(self.target_critic_1.apply, static_argnums=2) self.target_critic_2.apply = jax.jit(self.target_critic_2.apply, static_argnums=2) def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample deterministic actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") if not self._jax: # numpy backend actions = jax.device_get(actions) # add 
exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * (self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale # modify actions if self._jax: actions, noises = _apply_exploration_noise(actions, noises, self.clip_actions_min, self.clip_actions_max, scale) else: noises *= scale actions = np.clip(actions + noises, a_min=self.clip_actions_min, a_max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", noises.max().item()) self.track_data("Exploration / Exploration noise (min)", noises.min().item()) self.track_data("Exploration / Exploration noise (mean)", noises.mean().item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: Union[np.ndarray, jax.Array], actions: Union[np.ndarray, jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of 
timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # target policy smoothing next_actions, _, _ = self.target_policy.act({"states": sampled_next_states}, role="target_policy") if self._smooth_regularization_noise is not None: noises = self._smooth_regularization_noise.sample(next_actions.shape) if self._jax: next_actions = _apply_smooth_regularization_noise(next_actions, noises, self.clip_actions_min, self.clip_actions_max, self._smooth_regularization_clip) else: noises = np.clip(noises, a_min=-self._smooth_regularization_clip, a_max=self._smooth_regularization_clip) next_actions = np.clip(next_actions + noises, a_min=self.clip_actions_min, a_max=self.clip_actions_max) # compute target values target_q1_values, _, _ = self.target_critic_1.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_1") target_q2_values, _, _ = self.target_critic_2.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic_2") # compute critic loss grad, critic_loss, critic_1_values, critic_2_values, target_values = _update_critic(self.critic_1.act, self.critic_1.state_dict, self.critic_2.act, self.critic_2.state_dict, target_q1_values, target_q2_values, sampled_states, sampled_actions, sampled_rewards, sampled_dones, self._discount_factor) # optimization step (critic) self.critic_1_optimizer = self.critic_1_optimizer.step(grad, self.critic_1) self.critic_2_optimizer = self.critic_2_optimizer.step(grad, self.critic_2) # delayed update self._critic_update_counter += 1 if not self._critic_update_counter % self._policy_delay: # compute policy (actor) loss grad, policy_loss = _update_policy(self.policy.act, self.critic_1.act, self.policy.state_dict, self.critic_1.state_dict, sampled_states) # optimization step (policy) self.policy_optimizer = self.policy_optimizer.step(grad, self.policy) # update target networks self.target_policy.update_parameters(self.policy, polyak=self._polyak) self.target_critic_1.update_parameters(self.critic_1, polyak=self._polyak) self.target_critic_2.update_parameters(self.critic_2, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_1_scheduler.step() self.critic_2_scheduler.step() # record data if not self._critic_update_counter % self._policy_delay: self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", critic_1_values.max().item()) self.track_data("Q-network / Q1 (min)", critic_1_values.min().item()) self.track_data("Q-network / Q1 (mean)", critic_1_values.mean().item()) self.track_data("Q-network / Q2 (max)", critic_2_values.max().item()) self.track_data("Q-network / Q2 
(min)", critic_2_values.min().item()) self.track_data("Q-network / Q2 (mean)", critic_2_values.mean().item()) self.track_data("Target / Target (max)", target_values.max().item()) self.track_data("Target / Target (min)", target_values.min().item()) self.track_data("Target / Target (mean)", target_values.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic 1 learning rate", self.critic_1_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic 2 learning rate", self.critic_2_scheduler.get_last_lr()[0])
Toni-SM/skrl/skrl/agents/jax/ddpg/ddpg.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl.agents.jax import Agent from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam # [start-config-dict-jax] DDPG_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "actor_learning_rate": 1e-3, # actor learning rate "critic_learning_rate": 1e-3, # critic learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0, # clipping coefficient for the norm of the gradients "exploration": { "noise": None, # exploration noise "initial_scale": 1.0, # initial scale for the noise "final_scale": 1e-3, # final scale for the noise "timesteps": None, # timesteps for the noise decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _apply_exploration_noise(actions: jax.Array, noises: jax.Array, clip_actions_min: jax.Array, clip_actions_max: jax.Array, scale: float) -> jax.Array: noises = noises.at[:].multiply(scale) return jnp.clip(actions + noises, a_min=clip_actions_min, a_max=clip_actions_max), noises @functools.partial(jax.jit, static_argnames=("critic_act")) def _update_critic(critic_act, critic_state_dict, target_q_values: jax.Array, sampled_states: Union[np.ndarray, jax.Array], sampled_actions: Union[np.ndarray, jax.Array], sampled_rewards: Union[np.ndarray, jax.Array], sampled_dones: Union[np.ndarray, jax.Array], discount_factor: float): # compute target values target_values = sampled_rewards + discount_factor * jnp.logical_not(sampled_dones) * target_q_values # compute critic loss def _critic_loss(params): critic_values, _, _ = critic_act({"states": sampled_states, "taken_actions": sampled_actions}, "critic", params) critic_loss = ((critic_values - target_values) ** 2).mean() return critic_loss, critic_values (critic_loss, critic_values), grad = jax.value_and_grad(_critic_loss, has_aux=True)(critic_state_dict.params) return grad, critic_loss, critic_values, target_values @functools.partial(jax.jit, static_argnames=("policy_act", "critic_act")) def _update_policy(policy_act, critic_act, policy_state_dict, critic_state_dict, sampled_states): # compute policy (actor) loss def _policy_loss(policy_params, critic_params): actions, _, _ = 
policy_act({"states": sampled_states}, "policy", policy_params) critic_values, _, _ = critic_act({"states": sampled_states, "taken_actions": actions}, "critic", critic_params) return -critic_values.mean() policy_loss, grad = jax.value_and_grad(_policy_loss, has_aux=False)(policy_state_dict.params, critic_state_dict.params) return grad, policy_loss class DDPG(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Deep Deterministic Policy Gradient (DDPG) https://arxiv.org/abs/1509.02971 :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ # _cfg = copy.deepcopy(DDPG_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = DDPG_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.target_policy = self.models.get("target_policy", None) self.critic = self.models.get("critic", None) self.target_critic = self.models.get("target_critic", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["target_policy"] = self.target_policy self.checkpoint_modules["critic"] = self.critic self.checkpoint_modules["target_critic"] = self.target_critic # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._actor_learning_rate = self.cfg["actor_learning_rate"] self._critic_learning_rate = self.cfg["critic_learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._grad_norm_clip = self.cfg["grad_norm_clip"] self._exploration_noise = self.cfg["exploration"]["noise"] self._exploration_initial_scale = self.cfg["exploration"]["initial_scale"] self._exploration_final_scale = self.cfg["exploration"]["final_scale"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizers and learning rate schedulers if 
self.policy is not None and self.critic is not None: self.policy_optimizer = Adam(model=self.policy, lr=self._actor_learning_rate, grad_norm_clip=self._grad_norm_clip) self.critic_optimizer = Adam(model=self.critic, lr=self._critic_learning_rate, grad_norm_clip=self._grad_norm_clip) if self._learning_rate_scheduler is not None: self.policy_scheduler = self._learning_rate_scheduler(self.policy_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.critic_scheduler = self._learning_rate_scheduler(self.critic_optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["critic_optimizer"] = self.critic_optimizer # set up target networks if self.target_policy is not None and self.target_critic is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_policy.freeze_parameters(True) self.target_critic.freeze_parameters(True) # update target networks (hard update) self.target_policy.update_parameters(self.policy, polyak=1) self.target_critic.update_parameters(self.critic, polyak=1) # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32) self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8) self._tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # clip noise bounds if self.action_space is not None: if self._jax: self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32) self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32) else: self.clip_actions_min = np.array(self.action_space.low, dtype=np.float32) self.clip_actions_max = np.array(self.action_space.high, dtype=np.float32) # set up models for just-in-time compilation with XLA self.policy.apply = jax.jit(self.policy.apply, static_argnums=2) if self.critic is not None: self.critic.apply = jax.jit(self.critic.apply, static_argnums=2) if self.target_policy is not None and self.target_critic is not None: self.target_policy.apply = jax.jit(self.target_policy.apply, static_argnums=2) self.target_critic.apply = jax.jit(self.target_critic.apply, static_argnums=2) def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ # sample random actions if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample deterministic 
actions actions, _, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") if not self._jax: # numpy backend actions = jax.device_get(actions) # add exloration noise if self._exploration_noise is not None: # sample noises noises = self._exploration_noise.sample(actions.shape) # define exploration timesteps scale = self._exploration_final_scale if self._exploration_timesteps is None: self._exploration_timesteps = timesteps # apply exploration noise if timestep <= self._exploration_timesteps: scale = (1 - timestep / self._exploration_timesteps) \ * (self._exploration_initial_scale - self._exploration_final_scale) \ + self._exploration_final_scale # modify actions if self._jax: actions, noises = _apply_exploration_noise(actions, noises, self.clip_actions_min, self.clip_actions_max, scale) else: noises *= scale actions = np.clip(actions + noises, a_min=self.clip_actions_min, a_max=self.clip_actions_max) # record noises self.track_data("Exploration / Exploration noise (max)", noises.max().item()) self.track_data("Exploration / Exploration noise (min)", noises.min().item()) self.track_data("Exploration / Exploration noise (mean)", noises.mean().item()) else: # record noises self.track_data("Exploration / Exploration noise (max)", 0) self.track_data("Exploration / Exploration noise (min)", 0) self.track_data("Exploration / Exploration noise (mean)", 0) return actions, None, outputs def record_transition(self, states: Union[np.ndarray, jax.Array], actions: Union[np.ndarray, jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: 
int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self._tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values next_actions, _, _ = self.target_policy.act({"states": sampled_next_states}, role="target_policy") target_q_values, _, _ = self.target_critic.act({"states": sampled_next_states, "taken_actions": next_actions}, role="target_critic") # compute critic loss grad, critic_loss, critic_values, target_values = _update_critic(self.critic.act, self.critic.state_dict, target_q_values, sampled_states, sampled_actions, sampled_rewards, sampled_dones, self._discount_factor) # optimization step (critic) self.critic_optimizer = self.critic_optimizer.step(grad, self.critic) # compute policy (actor) loss grad, policy_loss = _update_policy(self.policy.act, self.critic.act, self.policy.state_dict, self.critic.state_dict, sampled_states) # optimization step (policy) self.policy_optimizer = self.policy_optimizer.step(grad, self.policy) # update target networks self.target_policy.update_parameters(self.policy, polyak=self._polyak) self.target_critic.update_parameters(self.critic, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.policy_scheduler.step() self.critic_scheduler.step() # record data self.track_data("Loss / Policy loss", policy_loss.item()) self.track_data("Loss / Critic loss", critic_loss.item()) self.track_data("Q-network / Q1 (max)", critic_values.max().item()) self.track_data("Q-network / Q1 (min)", critic_values.min().item()) self.track_data("Q-network / Q1 (mean)", critic_values.mean().item()) self.track_data("Target / Target (max)", target_values.max().item()) self.track_data("Target / Target (min)", target_values.min().item()) self.track_data("Target / Target (mean)", target_values.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Policy learning rate", self.policy_scheduler.get_last_lr()[0]) self.track_data("Learning / Critic learning rate", self.critic_scheduler.get_last_lr()[0])
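Illustrative sketch (not part of the skrl sources): both the hard update at initialization (polyak=1) and the soft updates inside _update go through Model.update_parameters. The minimal pytree version below shows what such a soft update amounts to, under the assumption that target <- polyak * source + (1 - polyak) * target; the parameter dictionaries are toy placeholders.

import jax
import jax.numpy as jnp

# toy parameter pytrees standing in for a model and its target copy
params = {"w": jnp.ones((3, 3)), "b": jnp.zeros((3,))}
target_params = {"w": jnp.zeros((3, 3)), "b": jnp.ones((3,))}

def soft_update(params, target_params, polyak=0.005):
    # target <- polyak * source + (1 - polyak) * target (hard copy when polyak=1)
    return jax.tree_util.tree_map(lambda p, tp: polyak * p + (1.0 - polyak) * tp,
                                  params, target_params)

target_params = soft_update(params, target_params, polyak=0.005)
print(jax.tree_util.tree_map(jnp.shape, target_params))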
Toni-SM/skrl/skrl/agents/jax/ddpg/__init__.py
from skrl.agents.jax.ddpg.ddpg import DDPG, DDPG_DEFAULT_CONFIG
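Illustrative sketch (not part of the skrl sources): the DDPG agent scales its exploration noise linearly from the configured initial scale down to the final scale over the exploration timesteps. The small stand-alone reproduction of that schedule below uses the defaults from DDPG_DEFAULT_CONFIG["exploration"].

# defaults from DDPG_DEFAULT_CONFIG["exploration"] (timesteps is assumed here;
# in the agent it falls back to the total number of training timesteps when None)
initial_scale, final_scale = 1.0, 1e-3
exploration_timesteps = 1000

def noise_scale(timestep):
    # linear decay applied in DDPG.act: full scale at t=0, final scale at/after the last step
    if timestep > exploration_timesteps:
        return final_scale
    return (1 - timestep / exploration_timesteps) * (initial_scale - final_scale) + final_scale

for t in (0, 250, 500, 1000):
    print(t, round(noise_scale(t), 4))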
Toni-SM/skrl/skrl/agents/jax/dqn/dqn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl.agents.jax import Agent from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam # [start-config-dict-jax] DQN_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "update_interval": 1, # agent update interval "target_update_interval": 10, # target network update interval "exploration": { "initial_epsilon": 1.0, # initial epsilon for epsilon-greedy exploration "final_epsilon": 0.05, # final epsilon for epsilon-greedy exploration "timesteps": 1000, # timesteps for epsilon-greedy decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @functools.partial(jax.jit, static_argnames=("q_network_act")) def _update_q_network(q_network_act, q_network_state_dict, next_q_values, sampled_states, sampled_actions, sampled_rewards, sampled_dones, discount_factor): # compute target values target_q_values = jnp.max(next_q_values, axis=-1, keepdims=True) target_values = sampled_rewards + discount_factor * jnp.logical_not(sampled_dones) * target_q_values # compute Q-network loss def _q_network_loss(params): q_values = q_network_act({"states": sampled_states}, "q_network", params)[0] q_values = q_values[jnp.arange(q_values.shape[0]), sampled_actions.reshape(-1)] return ((q_values - target_values.reshape(-1)) ** 2).mean() q_network_loss, grad = jax.value_and_grad(_q_network_loss, has_aux=False)(q_network_state_dict.params) return grad, q_network_loss, target_values class DQN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Deep Q-Network (DQN) https://arxiv.org/abs/1312.5602 :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. 
If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ # _cfg = copy.deepcopy(DQN_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = DQN_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.q_network = self.models.get("q_network", None) self.target_q_network = self.models.get("target_q_network", None) # checkpoint models self.checkpoint_modules["q_network"] = self.q_network self.checkpoint_modules["target_q_network"] = self.target_q_network # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._update_interval = self.cfg["update_interval"] self._target_update_interval = self.cfg["target_update_interval"] self._exploration_initial_epsilon = self.cfg["exploration"]["initial_epsilon"] self._exploration_final_epsilon = self.cfg["exploration"]["final_epsilon"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizer and learning rate scheduler if self.q_network is not None: self.optimizer = Adam(model=self.q_network, lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up target networks if self.target_q_network is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_q_network.freeze_parameters(True) # update target networks (hard update) self.target_q_network.update_parameters(self.q_network, polyak=1) # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32) 
self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.int32) self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8) self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # set up models for just-in-time compilation with XLA self.q_network.apply = jax.jit(self.q_network.apply, static_argnums=2) if self.target_q_network is not None: self.target_q_network.apply = jax.jit(self.target_q_network.apply, static_argnums=2) def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ states = self._state_preprocessor(states) if not self._exploration_timesteps: q_values, _, outputs = self.q_network.act({"states": states}, role="q_network") actions = jnp.argmax(q_values, axis=1, keepdims=True) if not self._jax: # numpy backend actions = jax.device_get(actions) return actions, None, outputs # sample random actions actions, _, outputs = self.q_network.random_act({"states": states}, role="q_network") if timestep < self._random_timesteps: raise NotImplementedError # if not self._jax: # numpy backend # actions = jax.device_get(actions) return actions, None, outputs # sample actions with epsilon-greedy policy epsilon = self._exploration_final_epsilon + (self._exploration_initial_epsilon - self._exploration_final_epsilon) \ * np.exp(-1.0 * timestep / self._exploration_timesteps) indexes = (np.random.random(states.shape[0]) >= epsilon).nonzero()[0] if indexes.size: q_values, _, outputs = self.q_network.act({"states": states[indexes]}, role="q_network") if self._jax: raise NotImplementedError actions[indexes] = jnp.argmax(q_values, axis=1, keepdims=True) else: q_values = jax.device_get(q_values) actions[indexes] = np.argmax(q_values, axis=1, keepdims=True) # record epsilon self.track_data("Exploration / Exploration epsilon", epsilon) return actions, None, outputs def record_transition(self, states: Union[np.ndarray, jax.Array], actions: Union[np.ndarray, jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int 
:param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts and not timestep % self._update_interval: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self.tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values next_q_values, _, _ = self.target_q_network.act({"states": sampled_next_states}, role="target_q_network") grad, q_network_loss, target_values = _update_q_network(self.q_network.act, self.q_network.state_dict, next_q_values, sampled_states, sampled_actions, sampled_rewards, sampled_dones, self._discount_factor) # optimization step (Q-network) self.optimizer = self.optimizer.step(grad, self.q_network) # update target network if not timestep % self._target_update_interval: self.target_q_network.update_parameters(self.q_network, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # record data self.track_data("Loss / Q-network loss", q_network_loss.item()) self.track_data("Target / Target (max)", target_values.max().item()) self.track_data("Target / Target (min)", target_values.min().item()) self.track_data("Target / Target (mean)", target_values.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler._lr)
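Illustrative sketch (not part of the skrl sources): the JAX DQN agent decays epsilon exponentially and mixes random and greedy actions per environment. The stand-alone NumPy sketch below reproduces that schedule and selection rule with placeholder Q-values.

import numpy as np

rng = np.random.default_rng(0)

# defaults from DQN_DEFAULT_CONFIG["exploration"]
initial_epsilon, final_epsilon, decay_timesteps = 1.0, 0.05, 1000

def epsilon_at(timestep):
    # exponential decay used in DQN.act
    return final_epsilon + (initial_epsilon - final_epsilon) * np.exp(-1.0 * timestep / decay_timesteps)

# toy epsilon-greedy selection over a batch of Q-value rows (placeholder values)
q_values = rng.normal(size=(5, 3))                          # 5 environments, 3 discrete actions
epsilon = epsilon_at(timestep=500)
actions = rng.integers(0, q_values.shape[1], size=(5, 1))   # start from random actions
greedy = (rng.random(5) >= epsilon).nonzero()[0]            # environments acting greedily
actions[greedy] = np.argmax(q_values[greedy], axis=1).reshape(-1, 1)
print(epsilon, actions.ravel())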
Toni-SM/skrl/skrl/agents/jax/dqn/__init__.py
from skrl.agents.jax.dqn.ddqn import DDQN, DDQN_DEFAULT_CONFIG
from skrl.agents.jax.dqn.dqn import DQN, DQN_DEFAULT_CONFIG
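Illustrative sketch (not part of the skrl sources): this module exposes both DQN and DDQN, and the only difference in their updates is how the bootstrap value is chosen. The JAX sketch below contrasts the two target computations on placeholder Q-values.

import jax.numpy as jnp

# placeholder Q-values for a batch of 3 next states and 4 discrete actions
next_q_online = jnp.array([[1., 3., 2., 0.],
                           [0., 1., 5., 2.],
                           [2., 2., 1., 4.]])   # online network Q(s', .)
next_q_target = jnp.array([[1., 2., 4., 0.],
                           [3., 1., 2., 2.],
                           [0., 5., 1., 1.]])   # target network Q(s', .)

# DQN: bootstrap with the maximum of the target network
dqn_bootstrap = jnp.max(next_q_target, axis=-1, keepdims=True)

# DDQN: select the action with the online network, evaluate it with the target network
best_actions = jnp.argmax(next_q_online, axis=-1)
ddqn_bootstrap = next_q_target[jnp.arange(next_q_target.shape[0]), best_actions].reshape(-1, 1)

rewards = jnp.ones((3, 1))
dones = jnp.zeros((3, 1))
gamma = 0.99
print(rewards + gamma * (1 - dones) * dqn_bootstrap)    # DQN targets
print(rewards + gamma * (1 - dones) * ddqn_bootstrap)   # DDQN targets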
Toni-SM/skrl/skrl/agents/jax/dqn/ddqn.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl.agents.jax import Agent from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam # [start-config-dict-jax] DDQN_DEFAULT_CONFIG = { "gradient_steps": 1, # gradient steps "batch_size": 64, # training batch size "discount_factor": 0.99, # discount factor (gamma) "polyak": 0.005, # soft update hyperparameter (tau) "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "update_interval": 1, # agent update interval "target_update_interval": 10, # target network update interval "exploration": { "initial_epsilon": 1.0, # initial epsilon for epsilon-greedy exploration "final_epsilon": 0.05, # final epsilon for epsilon-greedy exploration "timesteps": 1000, # timesteps for epsilon-greedy decay }, "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @functools.partial(jax.jit, static_argnames=("q_network_act")) def _update_q_network(q_network_act, q_network_state_dict, next_q_values, sampled_states, sampled_next_states, sampled_actions, sampled_rewards, sampled_dones, discount_factor): # compute target values q_values = q_network_act({"states": sampled_next_states}, "q_network")[0] actions = jnp.argmax(q_values, axis=-1, keepdims=True) target_q_values = next_q_values[jnp.arange(q_values.shape[0]), actions.reshape(-1)].reshape(-1, 1) target_values = sampled_rewards + discount_factor * jnp.logical_not(sampled_dones) * target_q_values # compute Q-network loss def _q_network_loss(params): q_values = q_network_act({"states": sampled_states}, "q_network", params)[0] q_values = q_values[jnp.arange(q_values.shape[0]), sampled_actions.reshape(-1)] return ((q_values - target_values.reshape(-1)) ** 2).mean() q_network_loss, grad = jax.value_and_grad(_q_network_loss, has_aux=False)(q_network_state_dict.params) return grad, q_network_loss, target_values class DDQN(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Double Deep Q-Network (DDQN) https://ojs.aaai.org/index.php/AAAI/article/view/10295 :param 
models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ # _cfg = copy.deepcopy(DDQN_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = DDQN_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.q_network = self.models.get("q_network", None) self.target_q_network = self.models.get("target_q_network", None) # checkpoint models self.checkpoint_modules["q_network"] = self.q_network self.checkpoint_modules["target_q_network"] = self.target_q_network # configuration self._gradient_steps = self.cfg["gradient_steps"] self._batch_size = self.cfg["batch_size"] self._discount_factor = self.cfg["discount_factor"] self._polyak = self.cfg["polyak"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._update_interval = self.cfg["update_interval"] self._target_update_interval = self.cfg["target_update_interval"] self._exploration_initial_epsilon = self.cfg["exploration"]["initial_epsilon"] self._exploration_final_epsilon = self.cfg["exploration"]["final_epsilon"] self._exploration_timesteps = self.cfg["exploration"]["timesteps"] self._rewards_shaper = self.cfg["rewards_shaper"] # set up optimizer and learning rate scheduler if self.q_network is not None: self.optimizer = Adam(model=self.q_network, lr=self._learning_rate) if self._learning_rate_scheduler is not None: self.scheduler = self._learning_rate_scheduler(self.optimizer, **self.cfg["learning_rate_scheduler_kwargs"]) self.checkpoint_modules["optimizer"] = self.optimizer # set up target networks if self.target_q_network is not None: # freeze target networks with respect to optimizers (update via .update_parameters()) self.target_q_network.freeze_parameters(True) # update target networks (hard update) self.target_q_network.update_parameters(self.q_network, polyak=1) # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) # create tensors in memory if self.memory is not 
None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="next_states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.int32) self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8) self.tensors_names = ["states", "actions", "rewards", "next_states", "terminated"] # set up models for just-in-time compilation with XLA self.q_network.apply = jax.jit(self.q_network.apply, static_argnums=2) if self.target_q_network is not None: self.target_q_network.apply = jax.jit(self.target_q_network.apply, static_argnums=2) def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ states = self._state_preprocessor(states) if not self._exploration_timesteps: q_values, _, outputs = self.q_network.act({"states": states}, role="q_network") actions = jnp.argmax(q_values, axis=1, keepdims=True) if not self._jax: # numpy backend actions = jax.device_get(actions) return actions, None, outputs # sample random actions actions, _, outputs = self.q_network.random_act({"states": states}, role="q_network") if timestep < self._random_timesteps: raise NotImplementedError # if not self._jax: # numpy backend # actions = jax.device_get(actions) return actions, None, outputs # sample actions with epsilon-greedy policy epsilon = self._exploration_final_epsilon + (self._exploration_initial_epsilon - self._exploration_final_epsilon) \ * np.exp(-1.0 * timestep / self._exploration_timesteps) indexes = (np.random.random(states.shape[0]) >= epsilon).nonzero()[0] if indexes.size: q_values, _, outputs = self.q_network.act({"states": states[indexes]}, role="q_network") if self._jax: raise NotImplementedError actions[indexes] = jnp.argmax(q_values, axis=1, keepdims=True) else: q_values = jax.device_get(q_values) actions[indexes] = np.argmax(q_values, axis=1, keepdims=True) # record epsilon self.track_data("Exploration / Exploration epsilon", epsilon) return actions, None, outputs def record_transition(self, states: Union[np.ndarray, jax.Array], actions: Union[np.ndarray, jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: 
Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if timestep >= self._learning_starts and not timestep % self._update_interval: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # sample a batch from memory sampled_states, sampled_actions, sampled_rewards, sampled_next_states, sampled_dones = \ self.memory.sample(names=self.tensors_names, batch_size=self._batch_size)[0] # gradient steps for gradient_step in range(self._gradient_steps): sampled_states = self._state_preprocessor(sampled_states, train=True) sampled_next_states = self._state_preprocessor(sampled_next_states, train=True) # compute target values next_q_values, _, _ = self.target_q_network.act({"states": sampled_next_states}, role="target_q_network") grad, q_network_loss, target_values = _update_q_network(self.q_network.act, self.q_network.state_dict, next_q_values, sampled_states, sampled_next_states, sampled_actions, sampled_rewards, sampled_dones, self._discount_factor) # optimization step (Q-network) self.optimizer = self.optimizer.step(grad, self.q_network) # update target network if not timestep % self._target_update_interval: self.target_q_network.update_parameters(self.q_network, polyak=self._polyak) # update learning rate if self._learning_rate_scheduler: self.scheduler.step() # record data self.track_data("Loss / Q-network loss", q_network_loss.item()) self.track_data("Target / Target (max)", target_values.max().item()) self.track_data("Target / Target (min)", target_values.min().item()) self.track_data("Target / Target (mean)", target_values.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler._lr)
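Illustrative sketch (not part of the skrl sources): the jitted helpers above differentiate a mean-squared TD loss with respect to the parameter pytree via jax.value_and_grad. The minimal sketch below shows the same pattern with a toy linear Q-function instead of a skrl Model.

import jax
import jax.numpy as jnp

# toy linear Q-network: Q(s) = s @ W + b, parameters stored as a pytree
params = {"W": jnp.zeros((4, 3)), "b": jnp.zeros((3,))}
states = jnp.ones((8, 4))
actions = jnp.zeros((8,), dtype=jnp.int32)     # taken discrete actions
target_values = jnp.ones((8,))                 # placeholder TD targets

def q_loss(params):
    q_values = states @ params["W"] + params["b"]               # (8, 3)
    taken = q_values[jnp.arange(q_values.shape[0]), actions]    # Q(s, a) for the taken actions
    return ((taken - target_values) ** 2).mean()

loss, grads = jax.value_and_grad(q_loss)(params)
print(loss, jax.tree_util.tree_map(jnp.shape, grads))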
Toni-SM/skrl/skrl/agents/jax/a2c/a2c.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl.agents.jax import Agent from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam from skrl.resources.schedulers.jax import KLAdaptiveLR # [start-config-dict-jax] A2C_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "mini_batches": 1, # number of mini batches to use for updating "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "entropy_loss_scale": 0.0, # entropy loss scaling factor "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] def compute_gae(rewards: np.ndarray, dones: np.ndarray, values: np.ndarray, next_values: np.ndarray, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> np.ndarray: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: np.ndarray :param dones: Signals to indicate that episodes have ended :type dones: np.ndarray :param values: Values obtained by the agent :type values: np.ndarray :param next_values: Next values obtained by the agent :type next_values: np.ndarray :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: np.ndarray """ advantage = 0 advantages = np.zeros_like(rewards) not_dones = np.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # 
https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _compute_gae(rewards: jax.Array, dones: jax.Array, values: jax.Array, next_values: jax.Array, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> jax.Array: advantage = 0 advantages = jnp.zeros_like(rewards) not_dones = jnp.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages = advantages.at[i].set(advantage) # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages @functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale")) def _update_policy(policy_act, policy_state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, get_entropy, entropy_loss_scale): # compute policy loss def _policy_loss(params): _, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions}, "policy", params) # compute approximate KL divergence ratio = next_log_prob - sampled_log_prob kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean() # compute entropy loss entropy_loss = 0 if entropy_loss_scale: entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean() return -(sampled_advantages * next_log_prob).mean(), (entropy_loss, kl_divergence, outputs["stddev"]) (policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params) return grad, policy_loss, entropy_loss, kl_divergence, stddev @functools.partial(jax.jit, static_argnames=("value_act")) def _update_value(value_act, value_state_dict, sampled_states, sampled_returns): # compute value loss def _value_loss(params): predicted_values, _, _ = value_act({"states": sampled_states}, "value", params) return ((sampled_returns - predicted_values) ** 2).mean() value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params) return grad, value_loss class A2C(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Advantage Actor Critic (A2C) https://arxiv.org/abs/1602.01783 :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ # _cfg = copy.deepcopy(A2C_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = A2C_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: # scheduler scale = True self.scheduler = None if self._learning_rate_scheduler is not None: if self._learning_rate_scheduler == KLAdaptiveLR: scale = False self.scheduler = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"]) else: self._learning_rate = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"]) # optimizer self.policy_optimizer = Adam(model=self.policy, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale) self.value_optimizer = Adam(model=self.value, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["value_optimizer"] = self.value_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32) self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8) self.memory.create_tensor(name="log_prob", size=1, dtype=jnp.float32) 
self.memory.create_tensor(name="values", size=1, dtype=jnp.float32) self.memory.create_tensor(name="returns", size=1, dtype=jnp.float32) self.memory.create_tensor(name="advantages", size=1, dtype=jnp.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "log_prob", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None # set up models for just-in-time compilation with XLA self.policy.apply = jax.jit(self.policy.apply, static_argnums=2) if self.value is not None: self.value.apply = jax.jit(self.value.apply, static_argnums=2) def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") if not self._jax: # numpy backend actions = jax.device_get(actions) log_prob = jax.device_get(log_prob) self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: Union[np.ndarray, jax.Array], actions: Union[np.ndarray, jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value") if not self._jax: # numpy backend values = jax.device_get(values) values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # storage transition in memory self.memory.add_samples(states=states, 
actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # compute returns and advantages self.value.training = False last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states)}, role="value") # TODO: .float() self.value.training = True if not self._jax: # numpy backend last_values = jax.device_get(last_values) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") if self._jax: returns, advantages = _compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) else: returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 kl_divergences = [] # mini-batches loop for sampled_states, sampled_actions, sampled_log_prob, sampled_returns, sampled_advantages in sampled_batches: sampled_states = self._state_preprocessor(sampled_states, train=True) # compute policy loss grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(self.policy.act, self.policy.state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, self.policy.get_entropy, self._entropy_loss_scale) kl_divergences.append(kl_divergence.item()) # optimization step (policy) self.policy_optimizer = self.policy_optimizer.step(grad, self.policy, self.scheduler._lr if self.scheduler else None) # compute value loss grad, value_loss = _update_value(self.value.act, self.value.state_dict, sampled_states, sampled_returns) # optimization step (value) 
self.value_optimizer = self.value_optimizer.step(grad, self.value, self.scheduler._lr if self.scheduler else None) # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(np.mean(kl_divergences)) # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / len(sampled_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / len(sampled_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / len(sampled_batches)) self.track_data("Policy / Standard deviation", stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler._lr)
Toni-SM/skrl/skrl/agents/jax/a2c/__init__.py
from skrl.agents.jax.a2c.a2c import A2C, A2C_DEFAULT_CONFIG
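A minimal usage sketch for the jax A2C agent, under stated assumptions: `policy`, `value`, `memory` and `env` are hypothetical placeholders (skrl jax Model/Memory instances and a wrapped environment) created elsewhere, not part of these files. The default configuration dictionary is copied before overriding so the module-level defaults stay untouched.

from skrl.agents.jax.a2c import A2C, A2C_DEFAULT_CONFIG

# copy the defaults before overriding (the module-level dict is shared)
cfg = A2C_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 32            # environment steps collected before each update
cfg["mini_batches"] = 4
cfg["learning_rate"] = 5e-4

# `policy`, `value`, `memory` and `env` are assumed to exist (hypothetical placeholders)
agent = A2C(models={"policy": policy, "value": value},
            memory=memory,
            observation_space=env.observation_space,
            action_space=env.action_space,
            cfg=cfg)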
Toni-SM/skrl/skrl/agents/jax/ppo/__init__.py
from skrl.agents.jax.ppo.ppo import PPO, PPO_DEFAULT_CONFIG
Toni-SM/skrl/skrl/agents/jax/ppo/ppo.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl.agents.jax import Agent from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam from skrl.resources.schedulers.jax import KLAdaptiveLR # [start-config-dict-jax] PPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] def compute_gae(rewards: np.ndarray, dones: np.ndarray, values: np.ndarray, next_values: np.ndarray, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> np.ndarray: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: np.ndarray :param dones: Signals to indicate that episodes have ended :type dones: np.ndarray :param values: Values obtained by the agent :type values: np.ndarray :param next_values: Next values obtained by the agent :type next_values: np.ndarray :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: np.ndarray """ advantage = 0 advantages = np.zeros_like(rewards) not_dones = np.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in 
reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _compute_gae(rewards: jax.Array, dones: jax.Array, values: jax.Array, next_values: jax.Array, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> jax.Array: advantage = 0 advantages = jnp.zeros_like(rewards) not_dones = jnp.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages = advantages.at[i].set(advantage) # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages @functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale")) def _update_policy(policy_act, policy_state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, ratio_clip, get_entropy, entropy_loss_scale): # compute policy loss def _policy_loss(params): _, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions}, "policy", params) # compute approximate KL divergence ratio = next_log_prob - sampled_log_prob kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean() # compute policy loss ratio = jnp.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip) # compute entropy loss entropy_loss = 0 if entropy_loss_scale: entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean() return -jnp.minimum(surrogate, surrogate_clipped).mean(), (entropy_loss, kl_divergence, outputs["stddev"]) (policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params) return grad, policy_loss, entropy_loss, kl_divergence, stddev @functools.partial(jax.jit, static_argnames=("value_act", "clip_predicted_values")) def _update_value(value_act, value_state_dict, sampled_states, sampled_values, sampled_returns, value_loss_scale, clip_predicted_values, value_clip): # compute value loss def _value_loss(params): predicted_values, _, _ = value_act({"states": sampled_states}, "value", params) if clip_predicted_values: predicted_values = sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip) return value_loss_scale * ((sampled_returns - predicted_values) ** 2).mean() value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params) return grad, value_loss class PPO(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> 
None: """Proximal Policy Optimization (PPO) https://arxiv.org/abs/1707.06347 :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ # _cfg = copy.deepcopy(PPO_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = PPO_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._ratio_clip = self.cfg["ratio_clip"] self._value_clip = self.cfg["value_clip"] self._clip_predicted_values = self.cfg["clip_predicted_values"] self._value_loss_scale = self.cfg["value_loss_scale"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._kl_threshold = self.cfg["kl_threshold"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: # scheduler scale = True self.scheduler = None if self._learning_rate_scheduler is not None: if self._learning_rate_scheduler == KLAdaptiveLR: scale = False self.scheduler = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"]) else: self._learning_rate = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"]) # optimizer self.policy_optimizer = Adam(model=self.policy, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale) self.value_optimizer = Adam(model=self.value, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["value_optimizer"] = self.value_optimizer # set up 
preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32) self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8) self.memory.create_tensor(name="log_prob", size=1, dtype=jnp.float32) self.memory.create_tensor(name="values", size=1, dtype=jnp.float32) self.memory.create_tensor(name="returns", size=1, dtype=jnp.float32) self.memory.create_tensor(name="advantages", size=1, dtype=jnp.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None # set up models for just-in-time compilation with XLA self.policy.apply = jax.jit(self.policy.apply, static_argnums=2) if self.value is not None: self.value.apply = jax.jit(self.value.apply, static_argnums=2) def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states)}, role="policy") if not self._jax: # numpy backend actions = jax.device_get(actions) log_prob = jax.device_get(log_prob) self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: Union[np.ndarray, jax.Array], actions: Union[np.ndarray, jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes 
have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values values, _, _ = self.value.act({"states": self._state_preprocessor(states)}, role="value") if not self._jax: # numpy backend values = jax.device_get(values) values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # compute returns and advantages self.value.training = False last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states)}, role="value") # TODO: .float() self.value.training = True if not self._jax: # numpy backend last_values = jax.device_get(last_values) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") if self._jax: returns, advantages = _compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) else: returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, 
train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): kl_divergences = [] # mini-batches loop for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches: sampled_states = self._state_preprocessor(sampled_states, train=not epoch) # compute policy loss grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(self.policy.act, self.policy.state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, self._ratio_clip, self.policy.get_entropy, self._entropy_loss_scale) kl_divergences.append(kl_divergence.item()) # early stopping with KL divergence if self._kl_threshold and kl_divergence > self._kl_threshold: break # optimization step (policy) self.policy_optimizer = self.policy_optimizer.step(grad, self.policy, self.scheduler._lr if self.scheduler else None) # compute value loss grad, value_loss = _update_value(self.value.act, self.value.state_dict, sampled_states, sampled_values, sampled_returns, self._value_loss_scale, self._clip_predicted_values, self._value_clip) # optimization step (value) self.value_optimizer = self.value_optimizer.step(grad, self.value, self.scheduler._lr if self.scheduler else None) # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(np.mean(kl_divergences)) # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler._lr)
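The NumPy ``compute_gae`` helper defined in this file can be exercised in isolation. The following is a small, runnable sketch on a toy 4-step rollout for a single environment (each per-step quantity stored with a trailing dimension of 1, as in the agent's memory).

import numpy as np

from skrl.agents.jax.ppo.ppo import compute_gae

rewards = np.array([[1.0], [1.0], [1.0], [1.0]])
dones = np.array([[False], [False], [False], [True]])   # episode ends at the last step
values = np.array([[0.5], [0.6], [0.7], [0.8]])         # V(s_t) predicted during the rollout
next_values = np.array([0.0])                           # bootstrap value for the state after the rollout

returns, advantages = compute_gae(rewards=rewards,
                                  dones=dones,
                                  values=values,
                                  next_values=next_values,
                                  discount_factor=0.99,
                                  lambda_coefficient=0.95)

# `returns` are the value-function targets (advantages + values);
# `advantages` come back normalized to zero mean and unit variance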
Toni-SM/skrl/skrl/agents/jax/rpo/rpo.py
from typing import Any, Mapping, Optional, Tuple, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl.agents.jax import Agent from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam from skrl.resources.schedulers.jax import KLAdaptiveLR # [start-config-dict-jax] RPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "alpha": 0.5, # amount of uniform random perturbation on the mean actions: U(-alpha, alpha) "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] def compute_gae(rewards: np.ndarray, dones: np.ndarray, values: np.ndarray, next_values: np.ndarray, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> np.ndarray: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: np.ndarray :param dones: Signals to indicate that episodes have ended :type dones: np.ndarray :param values: Values obtained by the agent :type values: np.ndarray :param next_values: Next values obtained by the agent :type next_values: np.ndarray :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: np.ndarray """ advantage = 0 advantages = np.zeros_like(rewards) not_dones = 
np.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _compute_gae(rewards: jax.Array, dones: jax.Array, values: jax.Array, next_values: jax.Array, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> jax.Array: advantage = 0 advantages = jnp.zeros_like(rewards) not_dones = jnp.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages = advantages.at[i].set(advantage) # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages @functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale")) def _update_policy(policy_act, policy_state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, ratio_clip, get_entropy, entropy_loss_scale, alpha): # compute policy loss def _policy_loss(params): _, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions, "alpha": alpha}, "policy", params) # compute approximate KL divergence ratio = next_log_prob - sampled_log_prob kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean() # compute policy loss ratio = jnp.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip) # compute entropy loss entropy_loss = 0 if entropy_loss_scale: entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean() return -jnp.minimum(surrogate, surrogate_clipped).mean(), (entropy_loss, kl_divergence, outputs["stddev"]) (policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params) return grad, policy_loss, entropy_loss, kl_divergence, stddev @functools.partial(jax.jit, static_argnames=("value_act", "clip_predicted_values")) def _update_value(value_act, value_state_dict, sampled_states, sampled_values, sampled_returns, value_loss_scale, clip_predicted_values, value_clip, alpha): # compute value loss def _value_loss(params): predicted_values, _, _ = value_act({"states": sampled_states, "alpha": alpha}, "value", params) if clip_predicted_values: predicted_values = sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip) return value_loss_scale * ((sampled_returns - predicted_values) ** 2).mean() value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params) return grad, value_loss class RPO(Agent): def __init__(self, models: Mapping[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: 
Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Robust Policy Optimization (RPO) https://arxiv.org/abs/2212.07536 :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: ``None``) :type observation_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: ``None``) :type action_space: int, tuple or list of int, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :raises KeyError: If the models dictionary is missing a required key """ # _cfg = copy.deepcopy(PPO_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = RPO_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # models self.policy = self.models.get("policy", None) self.value = self.models.get("value", None) # checkpoint models self.checkpoint_modules["policy"] = self.policy self.checkpoint_modules["value"] = self.value # configuration self._learning_epochs = self.cfg["learning_epochs"] self._mini_batches = self.cfg["mini_batches"] self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self.cfg["grad_norm_clip"] self._ratio_clip = self.cfg["ratio_clip"] self._value_clip = self.cfg["value_clip"] self._clip_predicted_values = self.cfg["clip_predicted_values"] self._value_loss_scale = self.cfg["value_loss_scale"] self._entropy_loss_scale = self.cfg["entropy_loss_scale"] self._kl_threshold = self.cfg["kl_threshold"] self._learning_rate = self.cfg["learning_rate"] self._learning_rate_scheduler = self.cfg["learning_rate_scheduler"] self._state_preprocessor = self.cfg["state_preprocessor"] self._value_preprocessor = self.cfg["value_preprocessor"] self._alpha = self.cfg["alpha"] self._discount_factor = self.cfg["discount_factor"] self._lambda = self.cfg["lambda"] self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self.cfg["time_limit_bootstrap"] # set up optimizer and learning rate scheduler if self.policy is not None and self.value is not None: # scheduler scale = True self.scheduler = None if self._learning_rate_scheduler is not None: if self._learning_rate_scheduler == KLAdaptiveLR: scale = False self.scheduler = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"]) else: self._learning_rate = self._learning_rate_scheduler(self._learning_rate, **self.cfg["learning_rate_scheduler_kwargs"]) # optimizer self.policy_optimizer = Adam(model=self.policy, lr=self._learning_rate, grad_norm_clip=self._grad_norm_clip, scale=scale) self.value_optimizer = Adam(model=self.value, lr=self._learning_rate, 
grad_norm_clip=self._grad_norm_clip, scale=scale) self.checkpoint_modules["policy_optimizer"] = self.policy_optimizer self.checkpoint_modules["value_optimizer"] = self.value_optimizer # set up preprocessors if self._state_preprocessor: self._state_preprocessor = self._state_preprocessor(**self.cfg["state_preprocessor_kwargs"]) self.checkpoint_modules["state_preprocessor"] = self._state_preprocessor else: self._state_preprocessor = self._empty_preprocessor if self._value_preprocessor: self._value_preprocessor = self._value_preprocessor(**self.cfg["value_preprocessor_kwargs"]) self.checkpoint_modules["value_preprocessor"] = self._value_preprocessor else: self._value_preprocessor = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memory if self.memory is not None: self.memory.create_tensor(name="states", size=self.observation_space, dtype=jnp.float32) self.memory.create_tensor(name="actions", size=self.action_space, dtype=jnp.float32) self.memory.create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memory.create_tensor(name="terminated", size=1, dtype=jnp.int8) self.memory.create_tensor(name="log_prob", size=1, dtype=jnp.float32) self.memory.create_tensor(name="values", size=1, dtype=jnp.float32) self.memory.create_tensor(name="returns", size=1, dtype=jnp.float32) self.memory.create_tensor(name="advantages", size=1, dtype=jnp.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = None self._current_next_states = None # set up models for just-in-time compilation with XLA self.policy.apply = jax.jit(self.policy.apply, static_argnums=2) if self.value is not None: self.value.apply = jax.jit(self.value.apply, static_argnums=2) def act(self, states: Union[np.ndarray, jax.Array], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ # sample random actions # TODO, check for stochasticity if timestep < self._random_timesteps: return self.policy.random_act({"states": self._state_preprocessor(states)}, role="policy") # sample stochastic actions actions, log_prob, outputs = self.policy.act({"states": self._state_preprocessor(states), "alpha": self._alpha}, role="policy") if not self._jax: # numpy backend actions = jax.device_get(actions) log_prob = jax.device_get(log_prob) self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: Union[np.ndarray, jax.Array], actions: Union[np.ndarray, jax.Array], rewards: Union[np.ndarray, jax.Array], next_states: Union[np.ndarray, jax.Array], terminated: Union[np.ndarray, jax.Array], truncated: Union[np.ndarray, jax.Array], infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: np.ndarray or jax.Array :param rewards: Instant rewards achieved by the 
current actions :type rewards: np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memory is not None: self._current_next_states = next_states # reward shaping if self._rewards_shaper is not None: rewards = self._rewards_shaper(rewards, timestep, timesteps) # compute values values, _, _ = self.value.act({"states": self._state_preprocessor(states), "alpha": self._alpha}, role="value") if not self._jax: # numpy backend values = jax.device_get(values) values = self._value_preprocessor(values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap: rewards += self._discount_factor * values * truncated # storage transition in memory self.memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) for memory in self.secondary_memories: memory.add_samples(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, log_prob=self._current_log_prob, values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # compute returns and advantages self.value.training = False last_values, _, _ = self.value.act({"states": self._state_preprocessor(self._current_next_states), "alpha": self._alpha}, role="value") # TODO: .float() self.value.training = True if not self._jax: # numpy backend last_values = jax.device_get(last_values) last_values = self._value_preprocessor(last_values, inverse=True) values = self.memory.get_tensor_by_name("values") if self._jax: returns, advantages = _compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) else: returns, advantages = compute_gae(rewards=self.memory.get_tensor_by_name("rewards"), dones=self.memory.get_tensor_by_name("terminated"), values=values, 
next_values=last_values, discount_factor=self._discount_factor, lambda_coefficient=self._lambda) self.memory.set_tensor_by_name("values", self._value_preprocessor(values, train=True)) self.memory.set_tensor_by_name("returns", self._value_preprocessor(returns, train=True)) self.memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = self.memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs): kl_divergences = [] # mini-batches loop for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches: sampled_states = self._state_preprocessor(sampled_states, train=not epoch) # compute policy loss grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(self.policy.act, self.policy.state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, self._ratio_clip, self.policy.get_entropy, self._entropy_loss_scale, self._alpha) kl_divergences.append(kl_divergence.item()) # early stopping with KL divergence if self._kl_threshold and kl_divergence > self._kl_threshold: break # optimization step (policy) self.policy_optimizer = self.policy_optimizer.step(grad, self.policy, self.scheduler._lr if self.scheduler else None) # compute value loss grad, value_loss = _update_value(self.value.act, self.value.state_dict, sampled_states, sampled_values, sampled_returns, self._value_loss_scale, self._clip_predicted_values, self._value_clip, self._alpha) # optimization step (value) self.value_optimizer = self.value_optimizer.step(grad, self.value, self.scheduler._lr if self.scheduler else None) # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler: if isinstance(self.scheduler, KLAdaptiveLR): self.scheduler.step(np.mean(kl_divergences)) # record data self.track_data("Loss / Policy loss", cumulative_policy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Loss / Value loss", cumulative_value_loss / (self._learning_epochs * self._mini_batches)) if self._entropy_loss_scale: self.track_data("Loss / Entropy loss", cumulative_entropy_loss / (self._learning_epochs * self._mini_batches)) self.track_data("Policy / Standard deviation", stddev.mean().item()) if self._learning_rate_scheduler: self.track_data("Learning / Learning rate", self.scheduler._lr)
Toni-SM/skrl/skrl/agents/jax/rpo/__init__.py
from skrl.agents.jax.rpo.rpo import RPO, RPO_DEFAULT_CONFIG
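RPO is configured like PPO plus the extra ``alpha`` entry, which controls the uniform perturbation U(-alpha, alpha) applied to the mean actions; as the calls above show, the policy and value models are expected to consume this extra "alpha" input. A minimal sketch, assuming `policy`, `value`, `memory` and `env` are hypothetical placeholders built elsewhere with skrl's jax Model and Memory classes.

from skrl.agents.jax.rpo import RPO, RPO_DEFAULT_CONFIG

cfg = RPO_DEFAULT_CONFIG.copy()     # copy the defaults before overriding
cfg["alpha"] = 0.1                  # amount of random perturbation on the mean actions
cfg["rollouts"] = 16
cfg["learning_epochs"] = 8

# `policy`, `value`, `memory` and `env` are assumed to exist (hypothetical placeholders)
agent = RPO(models={"policy": policy, "value": value},
            memory=memory,
            observation_space=env.observation_space,
            action_space=env.action_space,
            cfg=cfg)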
Toni-SM/skrl/skrl/resources/__init__.py
Toni-SM/skrl/skrl/resources/schedulers/__init__.py
Toni-SM/skrl/skrl/resources/schedulers/torch/kl_adaptive.py
from typing import Optional, Union import torch from torch.optim.lr_scheduler import _LRScheduler class KLAdaptiveLR(_LRScheduler): def __init__(self, optimizer: torch.optim.Optimizer, kl_threshold: float = 0.008, min_lr: float = 1e-6, max_lr: float = 1e-2, kl_factor: float = 2, lr_factor: float = 1.5, last_epoch: int = -1, verbose: bool = False) -> None: """Adaptive KL scheduler Adjusts the learning rate according to the KL divergence. The implementation is adapted from the rl_games library (https://github.com/Denys88/rl_games/blob/master/rl_games/common/schedulers.py) .. note:: This scheduler is only available for PPO at the moment. Applying it to other agents will not change the learning rate Example:: >>> scheduler = KLAdaptiveLR(optimizer, kl_threshold=0.01) >>> for epoch in range(100): >>> # ... >>> kl_divergence = ... >>> scheduler.step(kl_divergence) :param optimizer: Wrapped optimizer :type optimizer: torch.optim.Optimizer :param kl_threshold: Threshold for KL divergence (default: ``0.008``) :type kl_threshold: float, optional :param min_lr: Lower bound for learning rate (default: ``1e-6``) :type min_lr: float, optional :param max_lr: Upper bound for learning rate (default: ``1e-2``) :type max_lr: float, optional :param kl_factor: The number used to modify the KL divergence threshold (default: ``2``) :type kl_factor: float, optional :param lr_factor: The number used to modify the learning rate (default: ``1.5``) :type lr_factor: float, optional :param last_epoch: The index of last epoch (default: ``-1``) :type last_epoch: int, optional :param verbose: Verbose mode (default: ``False``) :type verbose: bool, optional """ super().__init__(optimizer, last_epoch, verbose) self.kl_threshold = kl_threshold self.min_lr = min_lr self.max_lr = max_lr self._kl_factor = kl_factor self._lr_factor = lr_factor self._last_lr = [group['lr'] for group in self.optimizer.param_groups] def step(self, kl: Optional[Union[torch.Tensor, float]] = None, epoch: Optional[int] = None) -> None: """ Step scheduler Example:: >>> kl = torch.distributions.kl_divergence(p, q) >>> kl tensor([0.0332, 0.0500, 0.0383, ..., 0.0076, 0.0240, 0.0164]) >>> scheduler.step(kl.mean()) >>> kl = 0.0046 >>> scheduler.step(kl) :param kl: KL divergence (default: ``None``) If None, no adjustment is made. If tensor, the number of elements must be 1 :type kl: torch.Tensor, float or None, optional :param epoch: Epoch (default: ``None``) :type epoch: int, optional """ if kl is not None: for group in self.optimizer.param_groups: if kl > self.kl_threshold * self._kl_factor: group['lr'] = max(group['lr'] / self._lr_factor, self.min_lr) elif kl < self.kl_threshold / self._kl_factor: group['lr'] = min(group['lr'] * self._lr_factor, self.max_lr) self._last_lr = [group['lr'] for group in self.optimizer.param_groups]
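A short, runnable sketch of the scheduler on its own: attach it to a standard PyTorch optimizer and drive it with KL values, as in the docstring example above. With the defaults (``kl_threshold=0.008``, ``kl_factor=2``, ``lr_factor=1.5``), a KL above 0.016 divides the learning rate by 1.5 and a KL below 0.004 multiplies it by 1.5, clamped to ``[min_lr, max_lr]``.

import torch

from skrl.resources.schedulers.torch import KLAdaptiveLR

params = [torch.nn.Parameter(torch.zeros(3))]
optimizer = torch.optim.Adam(params, lr=1e-3)
scheduler = KLAdaptiveLR(optimizer, kl_threshold=0.008)

scheduler.step(0.05)                      # KL too large -> lr shrinks (1e-3 / 1.5)
print(optimizer.param_groups[0]["lr"])    # ~6.67e-04
scheduler.step(0.001)                     # KL small -> lr grows again (* 1.5)
print(optimizer.param_groups[0]["lr"])    # ~1e-03

In an agent configuration it is typically passed as the scheduler class together with its kwargs, e.g. ``cfg["learning_rate_scheduler"] = KLAdaptiveLR`` and ``cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}`` (PPO only, per the note above).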
Toni-SM/skrl/skrl/resources/schedulers/torch/__init__.py
from skrl.resources.schedulers.torch.kl_adaptive import KLAdaptiveLR KLAdaptiveRL = KLAdaptiveLR # known typo (compatibility with versions prior to 1.0.0)
Toni-SM/skrl/skrl/resources/schedulers/jax/kl_adaptive.py
from typing import Optional, Union import numpy as np class KLAdaptiveLR: def __init__(self, init_value: float, kl_threshold: float = 0.008, min_lr: float = 1e-6, max_lr: float = 1e-2, kl_factor: float = 2, lr_factor: float = 1.5) -> None: """Adaptive KL scheduler Adjusts the learning rate according to the KL divergence. The implementation is adapted from the rl_games library (https://github.com/Denys88/rl_games/blob/master/rl_games/common/schedulers.py) .. note:: This scheduler is only available for PPO at the moment. Applying it to other agents will not change the learning rate Example:: >>> scheduler = KLAdaptiveLR(init_value=1e-3, kl_threshold=0.01) >>> for epoch in range(100): >>> # ... >>> kl_divergence = ... >>> scheduler.step(kl_divergence) >>> scheduler.lr # get the updated learning rate :param init_value: Initial learning rate :type init_value: float :param kl_threshold: Threshold for KL divergence (default: ``0.008``) :type kl_threshold: float, optional :param min_lr: Lower bound for learning rate (default: ``1e-6``) :type min_lr: float, optional :param max_lr: Upper bound for learning rate (default: ``1e-2``) :type max_lr: float, optional :param kl_factor: The number used to modify the KL divergence threshold (default: ``2``) :type kl_factor: float, optional :param lr_factor: The number used to modify the learning rate (default: ``1.5``) :type lr_factor: float, optional """ self.kl_threshold = kl_threshold self.min_lr = min_lr self.max_lr = max_lr self._kl_factor = kl_factor self._lr_factor = lr_factor self._lr = init_value @property def lr(self) -> float: """Learning rate """ return self._lr def step(self, kl: Optional[Union[np.ndarray, float]] = None) -> None: """ Step scheduler Example:: >>> kl = [0.0332, 0.0500, 0.0383, 0.0456, 0.0076, 0.0240, 0.0164] >>> kl [0.0332, 0.05, 0.0383, 0.0456, 0.0076, 0.024, 0.0164] >>> scheduler.step(np.mean(kl)) >>> kl = 0.0046 >>> scheduler.step(kl) :param kl: KL divergence (default: ``None``) If None, no adjustment is made. If array, the number of elements must be 1 :type kl: np.ndarray, float or None, optional """ if kl is not None: if kl > self.kl_threshold * self._kl_factor: self._lr = max(self._lr / self._lr_factor, self.min_lr) elif kl < self.kl_threshold / self._kl_factor: self._lr = min(self._lr * self._lr_factor, self.max_lr) # Alias to maintain naming compatibility with Optax schedulers # https://optax.readthedocs.io/en/latest/api.html#schedules kl_adaptive = KLAdaptiveLR
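The jax/numpy variant is a plain Python object that only tracks a scalar learning rate, exposed through the ``lr`` property. A runnable sketch:

from skrl.resources.schedulers.jax import KLAdaptiveLR

scheduler = KLAdaptiveLR(init_value=1e-3, kl_threshold=0.008)

scheduler.step(0.05)      # KL above kl_threshold * kl_factor -> lr is divided by lr_factor
print(scheduler.lr)       # ~6.67e-04
scheduler.step(0.001)     # KL below kl_threshold / kl_factor -> lr is multiplied by lr_factor
print(scheduler.lr)       # ~1e-03

To enable it in one of the jax agents, the class is passed through the configuration, e.g. ``cfg["learning_rate_scheduler"] = KLAdaptiveLR`` with ``cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008}``; as shown in the agents' constructors above, ``KLAdaptiveLR`` is detected, instantiated with the initial learning rate, and the Adam optimizers are built with ``scale=False`` so the scheduler's current learning rate can be supplied on each optimization step.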
Toni-SM/skrl/skrl/resources/schedulers/jax/__init__.py
from skrl.resources.schedulers.jax.kl_adaptive import KLAdaptiveLR, kl_adaptive KLAdaptiveRL = KLAdaptiveLR # known typo (compatibility with versions prior to 1.0.0)
Toni-SM/skrl/skrl/resources/preprocessors/__init__.py
Toni-SM/skrl/skrl/resources/preprocessors/torch/running_standard_scaler.py
from typing import Optional, Tuple, Union import gym import gymnasium import numpy as np import torch import torch.nn as nn class RunningStandardScaler(nn.Module): def __init__(self, size: Union[int, Tuple[int], gym.Space, gymnasium.Space], epsilon: float = 1e-8, clip_threshold: float = 5.0, device: Optional[Union[str, torch.device]] = None) -> None: """Standardize the input data by removing the mean and scaling by the standard deviation The implementation is adapted from the rl_games library (https://github.com/Denys88/rl_games/blob/master/rl_games/algos_torch/running_mean_std.py) Example:: >>> running_standard_scaler = RunningStandardScaler(size=2) >>> data = torch.rand(3, 2) # tensor of shape (N, 2) >>> running_standard_scaler(data) tensor([[0.1954, 0.3356], [0.9719, 0.4163], [0.8540, 0.1982]]) :param size: Size of the input space :type size: int, tuple or list of integers, gym.Space, or gymnasium.Space :param epsilon: Small number to avoid division by zero (default: ``1e-8``) :type epsilon: float :param clip_threshold: Threshold to clip the data (default: ``5.0``) :type clip_threshold: float :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional """ super().__init__() self.epsilon = epsilon self.clip_threshold = clip_threshold if device is None: self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") else: self.device = torch.device(device) size = self._get_space_size(size) self.register_buffer("running_mean", torch.zeros(size, dtype=torch.float64, device=self.device)) self.register_buffer("running_variance", torch.ones(size, dtype=torch.float64, device=self.device)) self.register_buffer("current_count", torch.ones((), dtype=torch.float64, device=self.device)) def _get_space_size(self, space: Union[int, Tuple[int], gym.Space, gymnasium.Space]) -> int: """Get the size (number of elements) of a space :param space: Space or shape from which to obtain the number of elements :type space: int, tuple or list of integers, gym.Space, or gymnasium.Space :raises ValueError: If the space is not supported :return: Size of the space data :rtype: Space size (number of elements) """ if type(space) in [int, float]: return int(space) elif type(space) in [tuple, list]: return np.prod(space) elif issubclass(type(space), gym.Space): if issubclass(type(space), gym.spaces.Discrete): return 1 elif issubclass(type(space), gym.spaces.Box): return np.prod(space.shape) elif issubclass(type(space), gym.spaces.Dict): return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) elif issubclass(type(space), gymnasium.Space): if issubclass(type(space), gymnasium.spaces.Discrete): return 1 elif issubclass(type(space), gymnasium.spaces.Box): return np.prod(space.shape) elif issubclass(type(space), gymnasium.spaces.Dict): return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) raise ValueError(f"Space type {type(space)} not supported") def _parallel_variance(self, input_mean: torch.Tensor, input_var: torch.Tensor, input_count: int) -> None: """Update internal variables using the parallel algorithm for computing variance https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm :param input_mean: Mean of the input data :type input_mean: torch.Tensor :param input_var: Variance of the input data :type input_var: torch.Tensor :param input_count: Batch size of the input data :type 
input_count: int """ delta = input_mean - self.running_mean total_count = self.current_count + input_count M2 = (self.running_variance * self.current_count) + (input_var * input_count) \ + delta ** 2 * self.current_count * input_count / total_count # update internal variables self.running_mean = self.running_mean + delta * input_count / total_count self.running_variance = M2 / total_count self.current_count = total_count def _compute(self, x: torch.Tensor, train: bool = False, inverse: bool = False) -> torch.Tensor: """Compute the standardization of the input data :param x: Input tensor :type x: torch.Tensor :param train: Whether to train the standardizer (default: ``False``) :type train: bool, optional :param inverse: Whether to inverse the standardizer to scale back the data (default: ``False``) :type inverse: bool, optional :return: Standardized tensor :rtype: torch.Tensor """ if train: if x.dim() == 3: self._parallel_variance(torch.mean(x, dim=(0, 1)), torch.var(x, dim=(0, 1)), x.shape[0] * x.shape[1]) else: self._parallel_variance(torch.mean(x, dim=0), torch.var(x, dim=0), x.shape[0]) # scale back the data to the original representation if inverse: return torch.sqrt(self.running_variance.float()) \ * torch.clamp(x, min=-self.clip_threshold, max=self.clip_threshold) + self.running_mean.float() # standardization by centering and scaling return torch.clamp((x - self.running_mean.float()) / (torch.sqrt(self.running_variance.float()) + self.epsilon), min=-self.clip_threshold, max=self.clip_threshold) def forward(self, x: torch.Tensor, train: bool = False, inverse: bool = False, no_grad: bool = True) -> torch.Tensor: """Forward pass of the standardizer Example:: >>> x = torch.rand(3, 2, device="cuda:0") >>> running_standard_scaler(x) tensor([[0.6933, 0.1905], [0.3806, 0.3162], [0.1140, 0.0272]], device='cuda:0') >>> running_standard_scaler(x, train=True) tensor([[ 0.8681, -0.6731], [ 0.0560, -0.3684], [-0.6360, -1.0690]], device='cuda:0') >>> running_standard_scaler(x, inverse=True) tensor([[0.6260, 0.5468], [0.5056, 0.5987], [0.4029, 0.4795]], device='cuda:0') :param x: Input tensor :type x: torch.Tensor :param train: Whether to train the standardizer (default: ``False``) :type train: bool, optional :param inverse: Whether to inverse the standardizer to scale back the data (default: ``False``) :type inverse: bool, optional :param no_grad: Whether to disable the gradient computation (default: ``True``) :type no_grad: bool, optional :return: Standardized tensor :rtype: torch.Tensor """ if no_grad: with torch.no_grad(): return self._compute(x, train, inverse) return self._compute(x, train, inverse)
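A short, runnable sketch of the scaler on its own: update the running statistics with ``train=True``, then standardize new data, and map standardized data back with ``inverse=True``.

import torch

from skrl.resources.preprocessors.torch import RunningStandardScaler

scaler = RunningStandardScaler(size=2, device="cpu")

batch = torch.randn(64, 2) * 3.0 + 5.0    # data with non-zero mean and non-unit std
scaler(batch, train=True)                 # update running mean / variance

x = torch.randn(4, 2) * 3.0 + 5.0
z = scaler(x)                             # standardized and clipped to +/- clip_threshold
x_back = scaler(z, inverse=True)          # approximately recovers x

As an agent preprocessor it is usually passed as a class plus kwargs in the configuration, e.g. ``cfg["state_preprocessor"] = RunningStandardScaler`` and ``cfg["state_preprocessor_kwargs"] = {"size": env.observation_space}``, so the agent instantiates and checkpoints it itself.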
Toni-SM/skrl/skrl/resources/preprocessors/torch/__init__.py
from skrl.resources.preprocessors.torch.running_standard_scaler import RunningStandardScaler
Toni-SM/skrl/skrl/resources/preprocessors/jax/running_standard_scaler.py
from typing import Mapping, Optional, Tuple, Union import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl import config # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _copyto(dst, src): """NumPy function copyto not yet implemented """ return dst.at[:].set(src) @jax.jit def _parallel_variance(running_mean: jax.Array, running_variance: jax.Array, current_count: jax.Array, array: jax.Array) -> Tuple[jax.Array, jax.Array, jax.Array]: # yapf: disable # ddof = 1: https://github.com/pytorch/pytorch/issues/50010 if array.ndim == 3: input_mean = jnp.mean(array, axis=(0, 1)) input_var = jnp.var(array, axis=(0, 1), ddof=1) input_count = array.shape[0] * array.shape[1] else: input_mean = jnp.mean(array, axis=0) input_var = jnp.var(array, axis=0, ddof=1) input_count = array.shape[0] delta = input_mean - running_mean total_count = current_count + input_count M2 = (running_variance * current_count) + (input_var * input_count) \ + delta ** 2 * current_count * input_count / total_count return running_mean + delta * input_count / total_count, M2 / total_count, total_count @jax.jit def _inverse(running_mean: jax.Array, running_variance: jax.Array, clip_threshold: float, array: jax.Array) -> jax.Array: # yapf: disable return jnp.sqrt(running_variance) * jnp.clip(array, -clip_threshold, clip_threshold) + running_mean @jax.jit def _standardization(running_mean: jax.Array, running_variance: jax.Array, clip_threshold: float, epsilon: float, array: jax.Array) -> jax.Array: return jnp.clip((array - running_mean) / (jnp.sqrt(running_variance) + epsilon), -clip_threshold, clip_threshold) class RunningStandardScaler: def __init__(self, size: Union[int, Tuple[int], gym.Space, gymnasium.Space], epsilon: float = 1e-8, clip_threshold: float = 5.0, device: Optional[Union[str, jax.Device]] = None) -> None: """Standardize the input data by removing the mean and scaling by the standard deviation The implementation is adapted from the rl_games library (https://github.com/Denys88/rl_games/blob/master/rl_games/algos_torch/running_mean_std.py) Example:: >>> running_standard_scaler = RunningStandardScaler(size=2) >>> data = jax.random.uniform(jax.random.PRNGKey(0), (3,2)) # tensor of shape (N, 2) >>> running_standard_scaler(data) Array([[0.57450044, 0.09968603], [0.7419659 , 0.8941783 ], [0.59656656, 0.45325184]], dtype=float32) :param size: Size of the input space :type size: int, tuple or list of integers, gym.Space, or gymnasium.Space :param epsilon: Small number to avoid division by zero (default: ``1e-8``) :type epsilon: float :param clip_threshold: Threshold to clip the data (default: ``5.0``) :type clip_threshold: float :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional """ self._jax = config.jax.backend == "jax" self.epsilon = epsilon self.clip_threshold = clip_threshold if device is None: self.device = jax.devices()[0] else: self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0] size = self._get_space_size(size) if self._jax: self.running_mean = jnp.zeros(size, dtype=jnp.float32) self.running_variance = jnp.ones(size, dtype=jnp.float32) self.current_count = jnp.ones((1,), dtype=jnp.float32) else: self.running_mean = np.zeros(size, dtype=np.float32) self.running_variance = np.ones(size, dtype=np.float32) self.current_count = np.ones((1,), dtype=np.float32) @property def state_dict(self) -> Mapping[str, Union[np.ndarray, jax.Array]]: """Dictionary containing references to the whole state of the module """ class _StateDict: def __init__(self, params): self.params = params def replace(self, params): return params return _StateDict({ "running_mean": self.running_mean, "running_variance": self.running_variance, "current_count": self.current_count }) @state_dict.setter def state_dict(self, value: Mapping[str, Union[np.ndarray, jax.Array]]) -> None: if self._jax: self.running_mean = _copyto(self.running_mean, value["running_mean"]) self.running_variance = _copyto(self.running_variance, value["running_variance"]) self.current_count = _copyto(self.current_count, value["current_count"]) else: np.copyto(self.running_mean, value["running_mean"]) np.copyto(self.running_variance, value["running_variance"]) np.copyto(self.current_count, value["current_count"]) def _get_space_size(self, space: Union[int, Tuple[int], gym.Space, gymnasium.Space]) -> int: """Get the size (number of elements) of a space :param space: Space or shape from which to obtain the number of elements :type space: int, tuple or list of integers, gym.Space, or gymnasium.Space :raises ValueError: If the space is not supported :return: Size of the space data :rtype: Space size (number of elements) """ if type(space) in [int, float]: return int(space) elif type(space) in [tuple, list]: return np.prod(space) elif issubclass(type(space), gym.Space): if issubclass(type(space), gym.spaces.Discrete): return 1 elif issubclass(type(space), gym.spaces.Box): return np.prod(space.shape) elif issubclass(type(space), gym.spaces.Dict): return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) elif issubclass(type(space), gymnasium.Space): if issubclass(type(space), gymnasium.spaces.Discrete): return 1 elif issubclass(type(space), gymnasium.spaces.Box): return np.prod(space.shape) elif issubclass(type(space), gymnasium.spaces.Dict): return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) raise ValueError(f"Space type {type(space)} not supported") def _parallel_variance(self, input_mean: Union[np.ndarray, jax.Array], input_var: Union[np.ndarray, jax.Array], input_count: int) -> None: """Update internal variables using the parallel algorithm for computing variance https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm :param input_mean: Mean of the input data :type input_mean: np.ndarray or jax.Array :param input_var: Variance of the input data :type input_var: np.ndarray or jax.Array :param input_count: Batch size of the input data :type input_count: int """ delta = input_mean - self.running_mean total_count = self.current_count + input_count M2 = (self.running_variance * self.current_count) + (input_var * 
input_count) \ + delta ** 2 * self.current_count * input_count / total_count # update internal variables self.running_mean = self.running_mean + delta * input_count / total_count self.running_variance = M2 / total_count self.current_count = total_count def __call__(self, x: Union[np.ndarray, jax.Array], train: bool = False, inverse: bool = False) -> Union[np.ndarray, jax.Array]: """Forward pass of the standardizer Example:: >>> x = jax.random.uniform(jax.random.PRNGKey(0), (3,2)) >>> running_standard_scaler(x) Array([[0.57450044, 0.09968603], [0.7419659 , 0.8941783 ], [0.59656656, 0.45325184]], dtype=float32) >>> running_standard_scaler(x, train=True) Array([[ 0.167439 , -0.4292293 ], [ 0.45878986, 0.8719094 ], [ 0.20582889, 0.14980486]], dtype=float32) >>> running_standard_scaler(x, inverse=True) Array([[0.80847514, 0.4226486 ], [0.9047325 , 0.90777594], [0.8211585 , 0.6385405 ]], dtype=float32) :param x: Input tensor :type x: np.ndarray or jax.Array :param train: Whether to train the standardizer (default: ``False``) :type train: bool, optional :param inverse: Whether to inverse the standardizer to scale back the data (default: ``False``) :type inverse: bool, optional :return: Standardized tensor :rtype: np.ndarray or jax.Array """ if train: if self._jax: self.running_mean, self.running_variance, self.current_count = \ _parallel_variance(self.running_mean, self.running_variance, self.current_count, x) else: # ddof = 1: https://github.com/pytorch/pytorch/issues/50010 if x.ndim == 3: self._parallel_variance(np.mean(x, axis=(0, 1)), np.var(x, axis=(0, 1), ddof=1), x.shape[0] * x.shape[1]) else: self._parallel_variance(np.mean(x, axis=0), np.var(x, axis=0, ddof=1), x.shape[0]) # scale back the data to the original representation if inverse: if self._jax: return _inverse(self.running_mean, self.running_variance, self.clip_threshold, x) return np.sqrt(self.running_variance) * np.clip(x, -self.clip_threshold, self.clip_threshold) + self.running_mean # standardization by centering and scaling if self._jax: return _standardization(self.running_mean, self.running_variance, self.clip_threshold, self.epsilon, x) return np.clip((x - self.running_mean) / (np.sqrt(self.running_variance) + self.epsilon), a_min=-self.clip_threshold, a_max=self.clip_threshold)
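# --- Usage sketch (illustrative, not part of the skrl sources) ---
# Same interface as the PyTorch scaler above, but for the JAX/NumPy backends:
# the instance is called directly rather than through forward(). The input
# shape is an arbitrary example; the returned type depends on the configured
# skrl backend (jax.Array or np.ndarray).
import jax

from skrl.resources.preprocessors.jax import RunningStandardScaler

scaler = RunningStandardScaler(size=2)

x = jax.random.uniform(jax.random.PRNGKey(0), (3, 2))
y = scaler(x, train=True)    # update running statistics and standardize
z = scaler(y, inverse=True)  # scale standardized data back to the original representation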
Toni-SM/skrl/skrl/resources/preprocessors/jax/__init__.py
from skrl.resources.preprocessors.jax.running_standard_scaler import RunningStandardScaler
Toni-SM/skrl/skrl/resources/noises/__init__.py
Toni-SM/skrl/skrl/resources/noises/torch/base.py
from typing import Optional, Tuple, Union import torch class Noise(): def __init__(self, device: Optional[Union[str, torch.device]] = None) -> None: """Base class representing a noise :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional Custom noises should override the ``sample`` method:: import torch from skrl.resources.noises.torch import Noise class CustomNoise(Noise): def __init__(self, device=None): super().__init__(device) def sample(self, size): return torch.rand(size, device=self.device) """ if device is None: self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") else: self.device = torch.device(device) def sample_like(self, tensor: torch.Tensor) -> torch.Tensor: """Sample a noise with the same size (shape) as the input tensor This method will call the sampling method as follows ``.sample(tensor.shape)`` :param tensor: Input tensor used to determine output tensor size (shape) :type tensor: torch.Tensor :return: Sampled noise :rtype: torch.Tensor Example:: >>> x = torch.rand(3, 2, device="cuda:0") >>> noise.sample_like(x) tensor([[-0.0423, -0.1325], [-0.0639, -0.0957], [-0.1367, 0.1031]], device='cuda:0') """ return self.sample(tensor.shape) def sample(self, size: Union[Tuple[int], torch.Size]) -> torch.Tensor: """Noise sampling method to be implemented by the inheriting classes :param size: Shape of the sampled tensor :type size: tuple or list of int, or torch.Size :raises NotImplementedError: The method is not implemented by the inheriting classes :return: Sampled noise :rtype: torch.Tensor """ raise NotImplementedError("The sampling method (.sample()) is not implemented")
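# --- Usage sketch (illustrative, not part of the skrl sources) ---
# Instantiates the CustomNoise subclass from the docstring above and samples
# noise matching the shape of an existing tensor via sample_like().
import torch

from skrl.resources.noises.torch import Noise


class CustomNoise(Noise):
    def __init__(self, device=None):
        super().__init__(device)

    def sample(self, size):
        return torch.rand(size, device=self.device)


noise = CustomNoise(device="cpu")
x = torch.rand(3, 2)
sample = noise.sample_like(x)  # noise with the same shape as x: torch.Size([3, 2])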
Toni-SM/skrl/skrl/resources/noises/torch/ornstein_uhlenbeck.py
from typing import Optional, Tuple, Union import torch from torch.distributions import Normal from skrl.resources.noises.torch import Noise class OrnsteinUhlenbeckNoise(Noise): def __init__(self, theta: float, sigma: float, base_scale: float, mean: float = 0, std: float = 1, device: Optional[Union[str, torch.device]] = None) -> None: """Class representing an Ornstein-Uhlenbeck noise :param theta: Factor to apply to current internal state :type theta: float :param sigma: Factor to apply to the normal distribution :type sigma: float :param base_scale: Factor to apply to returned noise :type base_scale: float :param mean: Mean of the normal distribution (default: ``0.0``) :type mean: float, optional :param std: Standard deviation of the normal distribution (default: ``1.0``) :type std: float, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional Example:: >>> noise = OrnsteinUhlenbeckNoise(theta=0.1, sigma=0.2, base_scale=0.5) """ super().__init__(device) self.state = 0 self.theta = theta self.sigma = sigma self.base_scale = base_scale self.distribution = Normal(loc=torch.tensor(mean, device=self.device, dtype=torch.float32), scale=torch.tensor(std, device=self.device, dtype=torch.float32)) def sample(self, size: Union[Tuple[int], torch.Size]) -> torch.Tensor: """Sample an Ornstein-Uhlenbeck noise :param size: Shape of the sampled tensor :type size: tuple or list of int, or torch.Size :return: Sampled noise :rtype: torch.Tensor Example:: >>> noise.sample((3, 2)) tensor([[-0.0452, 0.0162], [ 0.0649, -0.0708], [-0.0211, 0.0066]], device='cuda:0') >>> x = torch.rand(3, 2, device="cuda:0") >>> noise.sample(x.shape) tensor([[-0.0540, 0.0461], [ 0.1117, -0.1157], [-0.0074, 0.0420]], device='cuda:0') """ if hasattr(self.state, "shape") and self.state.shape != torch.Size(size): self.state = 0 self.state += -self.state * self.theta + self.sigma * self.distribution.sample(size) return self.base_scale * self.state
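# --- Usage sketch (illustrative, not part of the skrl sources) ---
# The Ornstein-Uhlenbeck noise above keeps an internal state, so successive
# samples are temporally correlated; a typical use is perturbing deterministic
# actions for exploration. Shapes and hyperparameters are arbitrary examples.
import torch

from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise

noise = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.2, base_scale=1.0, device="cpu")

actions = torch.zeros(8, 2)
for _ in range(5):
    noisy_actions = actions + noise.sample_like(actions)  # correlated across iterations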
Toni-SM/skrl/skrl/resources/noises/torch/__init__.py
from skrl.resources.noises.torch.base import Noise # isort:skip from skrl.resources.noises.torch.gaussian import GaussianNoise from skrl.resources.noises.torch.ornstein_uhlenbeck import OrnsteinUhlenbeckNoise
Toni-SM/skrl/skrl/resources/noises/torch/gaussian.py
from typing import Optional, Tuple, Union import torch from torch.distributions import Normal from skrl.resources.noises.torch import Noise class GaussianNoise(Noise): def __init__(self, mean: float, std: float, device: Optional[Union[str, torch.device]] = None) -> None: """Class representing a Gaussian noise :param mean: Mean of the normal distribution :type mean: float :param std: Standard deviation of the normal distribution :type std: float :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional Example:: >>> noise = GaussianNoise(mean=0, std=1) """ super().__init__(device) self.distribution = Normal(loc=torch.tensor(mean, device=self.device, dtype=torch.float32), scale=torch.tensor(std, device=self.device, dtype=torch.float32)) def sample(self, size: Union[Tuple[int], torch.Size]) -> torch.Tensor: """Sample a Gaussian noise :param size: Shape of the sampled tensor :type size: tuple or list of int, or torch.Size :return: Sampled noise :rtype: torch.Tensor Example:: >>> noise.sample((3, 2)) tensor([[-0.4901, 1.3357], [-1.2141, 0.3323], [-0.0889, -1.1651]], device='cuda:0') >>> x = torch.rand(3, 2, device="cuda:0") >>> noise.sample(x.shape) tensor([[0.5398, 1.2009], [0.0307, 1.3065], [0.2082, 0.6116]], device='cuda:0') """ return self.distribution.sample(size)
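# --- Usage sketch (illustrative, not part of the skrl sources) ---
# Gaussian noise is stateless: each call draws independent samples from the
# configured normal distribution. Shapes and parameters are arbitrary examples.
import torch

from skrl.resources.noises.torch import GaussianNoise

noise = GaussianNoise(mean=0.0, std=0.1, device="cpu")

actions = torch.zeros(32, 4)
noisy_actions = actions + noise.sample(actions.shape)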
Toni-SM/skrl/skrl/resources/noises/jax/base.py
from typing import Optional, Tuple, Union import jax import numpy as np from skrl import config class Noise(): def __init__(self, device: Optional[Union[str, jax.Device]] = None) -> None: """Base class representing a noise :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional Custom noises should override the ``sample`` method:: import jax from skrl.resources.noises.jax import Noise class CustomNoise(Noise): def __init__(self, device=None): super().__init__(device) def sample(self, size): return jax.random.uniform(jax.random.PRNGKey(0), size) """ self._jax = config.jax.backend == "jax" if device is None: self.device = jax.devices()[0] else: self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0] def sample_like(self, tensor: Union[np.ndarray, jax.Array]) -> Union[np.ndarray, jax.Array]: """Sample a noise with the same size (shape) as the input tensor This method will call the sampling method as follows ``.sample(tensor.shape)`` :param tensor: Input tensor used to determine output tensor size (shape) :type tensor: np.ndarray or jax.Array :return: Sampled noise :rtype: np.ndarray or jax.Array Example:: >>> x = jax.random.uniform(jax.random.PRNGKey(0), (3, 2)) >>> noise.sample_like(x) Array([[0.57450044, 0.09968603], [0.7419659 , 0.8941783 ], [0.59656656, 0.45325184]], dtype=float32) """ return self.sample(tensor.shape) def sample(self, size: Tuple[int]) -> Union[np.ndarray, jax.Array]: """Noise sampling method to be implemented by the inheriting classes :param size: Shape of the sampled tensor :type size: tuple or list of int :raises NotImplementedError: The method is not implemented by the inheriting classes :return: Sampled noise :rtype: np.ndarray or jax.Array """ raise NotImplementedError("The sampling method (.sample()) is not implemented")
Toni-SM/skrl/skrl/resources/noises/jax/ornstein_uhlenbeck.py
from typing import Optional, Tuple, Union from functools import partial import jax import jax.numpy as jnp import numpy as np from skrl import config from skrl.resources.noises.jax import Noise # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @partial(jax.jit, static_argnames=("shape")) def _sample(theta, sigma, state, mean, std, key, iterator, shape): subkey = jax.random.fold_in(key, iterator) return state * theta + sigma * (jax.random.normal(subkey, shape) * std + mean) class OrnsteinUhlenbeckNoise(Noise): def __init__(self, theta: float, sigma: float, base_scale: float, mean: float = 0, std: float = 1, device: Optional[Union[str, jax.Device]] = None) -> None: """Class representing an Ornstein-Uhlenbeck noise :param theta: Factor to apply to current internal state :type theta: float :param sigma: Factor to apply to the normal distribution :type sigma: float :param base_scale: Factor to apply to returned noise :type base_scale: float :param mean: Mean of the normal distribution (default: ``0.0``) :type mean: float, optional :param std: Standard deviation of the normal distribution (default: ``1.0``) :type std: float, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional Example:: >>> noise = OrnsteinUhlenbeckNoise(theta=0.1, sigma=0.2, base_scale=0.5) """ super().__init__(device) self.state = 0 self.theta = theta self.sigma = sigma self.base_scale = base_scale if self._jax: self.mean = jnp.array(mean) self.std = jnp.array(std) self._i = 0 self._key = config.jax.key else: self.mean = np.array(mean) self.std = np.array(std) def sample(self, size: Tuple[int]) -> Union[np.ndarray, jax.Array]: """Sample an Ornstein-Uhlenbeck noise :param size: Shape of the sampled tensor :type size: tuple or list of int :return: Sampled noise :rtype: np.ndarray or jax.Array Example:: >>> noise.sample((3, 2)) Array([[ 0.01878439, -0.12833427], [ 0.06494182, 0.12490594], [ 0.024447 , -0.01174496]], dtype=float32) >>> x = jax.random.uniform(jax.random.PRNGKey(0), (3, 2)) >>> noise.sample(x.shape) Array([[ 0.17988093, -1.2289404 ], [ 0.6218886 , 1.1961104 ], [ 0.23410667, -0.11247082]], dtype=float32) """ if hasattr(self.state, "shape") and self.state.shape != size: self.state = 0 if self._jax: self._i += 1 self.state = _sample(self.theta, self.sigma, self.state, self.mean, self.std, self._key, self._i, size) else: self.state += -self.state * self.theta + self.sigma * np.random.normal(self.mean, self.std, size) return self.base_scale * self.state
Toni-SM/skrl/skrl/resources/noises/jax/__init__.py
from skrl.resources.noises.jax.base import Noise # isort:skip from skrl.resources.noises.jax.gaussian import GaussianNoise from skrl.resources.noises.jax.ornstein_uhlenbeck import OrnsteinUhlenbeckNoise
Toni-SM/skrl/skrl/resources/noises/jax/gaussian.py
from typing import Optional, Tuple, Union from functools import partial import jax import jax.numpy as jnp import numpy as np from skrl import config from skrl.resources.noises.jax import Noise # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @partial(jax.jit, static_argnames=("shape")) def _sample(mean, std, key, iterator, shape): subkey = jax.random.fold_in(key, iterator) return jax.random.normal(subkey, shape) * std + mean class GaussianNoise(Noise): def __init__(self, mean: float, std: float, device: Optional[Union[str, jax.Device]] = None) -> None: """Class representing a Gaussian noise :param mean: Mean of the normal distribution :type mean: float :param std: Standard deviation of the normal distribution :type std: float :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional Example:: >>> noise = GaussianNoise(mean=0, std=1) """ super().__init__(device) if self._jax: self._i = 0 self._key = config.jax.key self.mean = jnp.array(mean) self.std = jnp.array(std) else: self.mean = np.array(mean) self.std = np.array(std) def sample(self, size: Tuple[int]) -> Union[np.ndarray, jax.Array]: """Sample a Gaussian noise :param size: Shape of the sampled tensor :type size: tuple or list of int :return: Sampled noise :rtype: np.ndarray or jax.Array Example:: >>> noise.sample((3, 2)) Array([[ 0.01878439, -0.12833427], [ 0.06494182, 0.12490594], [ 0.024447 , -0.01174496]], dtype=float32) >>> x = jax.random.uniform(jax.random.PRNGKey(0), (3, 2)) >>> noise.sample(x.shape) Array([[ 0.17988093, -1.2289404 ], [ 0.6218886 , 1.1961104 ], [ 0.23410667, -0.11247082]], dtype=float32) """ if self._jax: self._i += 1 return _sample(self.mean, self.std, self._key, self._i, size) return np.random.normal(self.mean, self.std, size)
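# --- Usage sketch (illustrative, not part of the skrl sources) ---
# The JAX noise classes share the interface of their PyTorch counterparts;
# depending on skrl's configured backend the samples are jax.Array or np.ndarray.
import jax

from skrl.resources.noises.jax import GaussianNoise

noise = GaussianNoise(mean=0.0, std=0.1)

x = jax.random.uniform(jax.random.PRNGKey(0), (3, 2))
sample = noise.sample_like(x)  # noise with the same shape as x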
Toni-SM/skrl/skrl/resources/optimizers/__init__.py
Toni-SM/skrl/skrl/resources/optimizers/jax/__init__.py
from skrl.resources.optimizers.jax.adam import Adam
Toni-SM/skrl/skrl/resources/optimizers/jax/adam.py
from typing import Optional import functools import flax import jax import optax from skrl.models.jax import Model # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @functools.partial(jax.jit, static_argnames=("transformation")) def _step(transformation, grad, state, state_dict): # optax transform params, optimizer_state = transformation.update(grad, state, state_dict.params) # apply transformation params = optax.apply_updates(state_dict.params, params) return optimizer_state, state_dict.replace(params=params) @functools.partial(jax.jit, static_argnames=("transformation")) def _step_with_scale(transformation, grad, state, state_dict, scale): # optax transform params, optimizer_state = transformation.update(grad, state, state_dict.params) # custom scale # https://optax.readthedocs.io/en/latest/api.html?#optax.scale params = jax.tree_util.tree_map(lambda params: scale * params, params) # apply transformation params = optax.apply_updates(state_dict.params, params) return optimizer_state, state_dict.replace(params=params) class Adam: def __new__(cls, model: Model, lr: float = 1e-3, grad_norm_clip: float = 0, scale: bool = True) -> "Optimizer": """Adam optimizer Adapted from `Optax's Adam <https://optax.readthedocs.io/en/latest/api.html?#adam>`_ to support custom scale (learning rate) :param model: Model :type model: skrl.models.jax.Model :param lr: Learning rate (default: ``1e-3``) :type lr: float, optional :param grad_norm_clip: Clipping coefficient for the norm of the gradients (default: ``0``). Disabled if less than or equal to zero :type grad_norm_clip: float, optional :param scale: Whether to instantiate the optimizer as-is or remove the scaling step (default: ``True``). Remove the scaling step if a custom learning rate is to be applied during optimization steps :type scale: bool, optional :return: Adam optimizer :rtype: flax.struct.PyTreeNode Example:: >>> optimizer = Adam(model=policy, lr=5e-4) >>> # step the optimizer given a computed gradiend (grad) >>> optimizer = optimizer.step(grad, policy) # apply custom learning rate during optimization steps >>> optimizer = Adam(model=policy, lr=5e-4, scale=False) >>> # step the optimizer given a computed gradiend and an updated learning rate (lr) >>> optimizer = optimizer.step(grad, policy, lr) """ class Optimizer(flax.struct.PyTreeNode): """Optimizer This class is the result of isolating the Optax optimizer, which is mixed with the model parameters, from Flax's TrainState class https://flax.readthedocs.io/en/latest/api_reference/flax.training.html#train-state """ transformation: optax.GradientTransformation = flax.struct.field(pytree_node=False) state: optax.OptState = flax.struct.field(pytree_node=True) @classmethod def _create(cls, *, transformation, state, **kwargs): return cls(transformation=transformation, state=state, **kwargs) def step(self, grad: jax.Array, model: Model, lr: Optional[float] = None) -> "Optimizer": """Performs a single optimization step :param grad: Gradients :type grad: jax.Array :param model: Model :type model: skrl.models.jax.Model :param lr: Learning rate. 
If given, a scale optimization step will be performed :type lr: float, optional :return: Optimizer :rtype: flax.struct.PyTreeNode """ if lr is None: optimizer_state, model.state_dict = _step(self.transformation, grad, self.state, model.state_dict) else: optimizer_state, model.state_dict = _step_with_scale(self.transformation, grad, self.state, model.state_dict, -lr) return self.replace(state=optimizer_state) # default optax transformation if scale: transformation = optax.adam(learning_rate=lr) # optax transformation without scaling step else: transformation = optax.scale_by_adam() # clip updates using their global norm if grad_norm_clip > 0: transformation = optax.chain(optax.clip_by_global_norm(grad_norm_clip), transformation) return Optimizer._create(transformation=transformation, state=transformation.init(model.state_dict.params))
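# --- Usage sketch (illustrative, not part of the skrl sources) ---
# Mirrors the examples in the docstring above. `policy` is assumed to be an
# already-instantiated skrl.models.jax.Model and `grad` a pytree of gradients
# with the same structure as policy.state_dict.params (both are placeholders).
from skrl.resources.optimizers.jax import Adam

optimizer = Adam(model=policy, lr=5e-4, grad_norm_clip=1.0)
optimizer = optimizer.step(grad, policy)  # each step returns a new optimizer instance

# variant with an externally controlled learning rate (scaling step removed)
optimizer = Adam(model=policy, lr=5e-4, scale=False)
optimizer = optimizer.step(grad, policy, lr=2.5e-4)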
Toni-SM/skrl/skrl/trainers/__init__.py
Toni-SM/skrl/skrl/trainers/torch/base.py
from typing import List, Optional, Union import atexit import sys import tqdm import torch from skrl import logger from skrl.agents.torch import Agent from skrl.envs.wrappers.torch import Wrapper def generate_equally_spaced_scopes(num_envs: int, num_simultaneous_agents: int) -> List[int]: """Generate a list of equally spaced scopes for the agents :param num_envs: Number of environments :type num_envs: int :param num_simultaneous_agents: Number of simultaneous agents :type num_simultaneous_agents: int :raises ValueError: If the number of simultaneous agents is greater than the number of environments :return: List of equally spaced scopes :rtype: List[int] """ scopes = [int(num_envs / num_simultaneous_agents)] * num_simultaneous_agents if sum(scopes): scopes[-1] += num_envs - sum(scopes) else: raise ValueError(f"The number of simultaneous agents ({num_simultaneous_agents}) is greater than the number of environments ({num_envs})") return scopes class Trainer: def __init__(self, env: Wrapper, agents: Union[Agent, List[Agent]], agents_scope: Optional[List[int]] = None, cfg: Optional[dict] = None) -> None: """Base class for trainers :param env: Environment to train on :type env: skrl.envs.wrappers.torch.Wrapper :param agents: Agents to train :type agents: Union[Agent, List[Agent]] :param agents_scope: Number of environments for each agent to train on (default: ``None``) :type agents_scope: tuple or list of int, optional :param cfg: Configuration dictionary (default: ``None``) :type cfg: dict, optional """ self.cfg = cfg if cfg is not None else {} self.env = env self.agents = agents self.agents_scope = agents_scope if agents_scope is not None else [] # get configuration self.timesteps = self.cfg.get("timesteps", 0) self.headless = self.cfg.get("headless", False) self.disable_progressbar = self.cfg.get("disable_progressbar", False) self.close_environment_at_exit = self.cfg.get("close_environment_at_exit", True) self.initial_timestep = 0 # setup agents self.num_simultaneous_agents = 0 self._setup_agents() # register environment closing if configured if self.close_environment_at_exit: @atexit.register def close_env(): logger.info("Closing environment") self.env.close() logger.info("Environment closed") def __str__(self) -> str: """Generate a string representation of the trainer :return: Representation of the trainer as string :rtype: str """ string = f"Trainer: {self}" string += f"\n |-- Number of parallelizable environments: {self.env.num_envs}" string += f"\n |-- Number of simultaneous agents: {self.num_simultaneous_agents}" string += "\n |-- Agents and scopes:" if self.num_simultaneous_agents > 1: for agent, scope in zip(self.agents, self.agents_scope): string += f"\n | |-- agent: {type(agent)}" string += f"\n | | |-- scope: {scope[1] - scope[0]} environments ({scope[0]}:{scope[1]})" else: string += f"\n | |-- agent: {type(self.agents)}" string += f"\n | | |-- scope: {self.env.num_envs} environment(s)" return string def _setup_agents(self) -> None: """Setup agents for training :raises ValueError: Invalid setup """ # validate agents and their scopes if type(self.agents) in [tuple, list]: # single agent if len(self.agents) == 1: self.num_simultaneous_agents = 1 self.agents = self.agents[0] self.agents_scope = [1] # parallel agents elif len(self.agents) > 1: self.num_simultaneous_agents = len(self.agents) # check scopes if not len(self.agents_scope): logger.warning("The agents' scopes are empty, they will be generated as equal as possible") self.agents_scope = [int(self.env.num_envs / 
len(self.agents))] * len(self.agents) if sum(self.agents_scope): self.agents_scope[-1] += self.env.num_envs - sum(self.agents_scope) else: raise ValueError(f"The number of agents ({len(self.agents)}) is greater than the number of parallelizable environments ({self.env.num_envs})") elif len(self.agents_scope) != len(self.agents): raise ValueError(f"The number of agents ({len(self.agents)}) doesn't match the number of scopes ({len(self.agents_scope)})") elif sum(self.agents_scope) != self.env.num_envs: raise ValueError(f"The scopes ({sum(self.agents_scope)}) don't cover the number of parallelizable environments ({self.env.num_envs})") # generate agents' scopes index = 0 for i in range(len(self.agents_scope)): index += self.agents_scope[i] self.agents_scope[i] = (index - self.agents_scope[i], index) else: raise ValueError("A list of agents is expected") else: self.num_simultaneous_agents = 1 def train(self) -> None: """Train the agents :raises NotImplementedError: Not implemented """ raise NotImplementedError def eval(self) -> None: """Evaluate the agents :raises NotImplementedError: Not implemented """ raise NotImplementedError def single_agent_train(self) -> None: """Train agent This method executes the following steps in loop: - Pre-interaction - Compute actions - Interact with the environments - Render scene - Record transitions - Post-interaction - Reset environments """ assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents" assert self.env.num_agents == 1, "This method is not allowed for multi-agents" # reset env states, infos = self.env.reset() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # pre-interaction self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps) # compute actions with torch.no_grad(): actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0] # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() # record the environments' transitions self.agents.record_transition(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=self.timesteps) # post-interaction self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments if self.env.num_envs > 1: states = next_states else: if terminated.any() or truncated.any(): with torch.no_grad(): states, infos = self.env.reset() else: states = next_states def single_agent_eval(self) -> None: """Evaluate agent This method executes the following steps in loop: - Compute actions (sequentially) - Interact with the environments - Render scene - Reset environments """ assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents" assert self.env.num_agents == 1, "This method is not allowed for multi-agents" # reset env states, infos = self.env.reset() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # compute actions with torch.no_grad(): actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0] # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() # write data to TensorBoard self.agents.record_transition(states=states, 
actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=self.timesteps) super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments if self.env.num_envs > 1: states = next_states else: if terminated.any() or truncated.any(): with torch.no_grad(): states, infos = self.env.reset() else: states = next_states def multi_agent_train(self) -> None: """Train multi-agents This method executes the following steps in loop: - Pre-interaction - Compute actions - Interact with the environments - Render scene - Record transitions - Post-interaction - Reset environments """ assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents" assert self.env.num_agents > 1, "This method is not allowed for single-agent" # reset env states, infos = self.env.reset() shared_states = infos.get("shared_states", None) for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # pre-interaction self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps) # compute actions with torch.no_grad(): actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0] # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) shared_next_states = infos.get("shared_states", None) infos["shared_states"] = shared_states infos["shared_next_states"] = shared_next_states # render scene if not self.headless: self.env.render() # record the environments' transitions self.agents.record_transition(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=self.timesteps) # post-interaction self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments with torch.no_grad(): if not self.env.agents: states, infos = self.env.reset() shared_states = infos.get("shared_states", None) else: states = next_states shared_states = shared_next_states def multi_agent_eval(self) -> None: """Evaluate multi-agents This method executes the following steps in loop: - Compute actions (sequentially) - Interact with the environments - Render scene - Reset environments """ assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents" assert self.env.num_agents > 1, "This method is not allowed for single-agent" # reset env states, infos = self.env.reset() shared_states = infos.get("shared_states", None) for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # compute actions with torch.no_grad(): actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0] # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) shared_next_states = infos.get("shared_states", None) infos["shared_states"] = shared_states infos["shared_next_states"] = shared_next_states # render scene if not self.headless: self.env.render() # write data to TensorBoard self.agents.record_transition(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=self.timesteps) super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments if not 
self.env.agents: states, infos = self.env.reset() shared_states = infos.get("shared_states", None) else: states = next_states shared_states = shared_next_states
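# --- Usage sketch (illustrative, not part of the skrl sources) ---
# generate_equally_spaced_scopes() from the base trainer module above splits
# the available environments among simultaneous agents; the last scope absorbs
# any remainder.
from skrl.trainers.torch import generate_equally_spaced_scopes

scopes = generate_equally_spaced_scopes(num_envs=10, num_simultaneous_agents=3)
# scopes == [3, 3, 4]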
Toni-SM/skrl/skrl/trainers/torch/__init__.py
from skrl.trainers.torch.base import Trainer, generate_equally_spaced_scopes # isort:skip from skrl.trainers.torch.parallel import ParallelTrainer from skrl.trainers.torch.sequential import SequentialTrainer from skrl.trainers.torch.step import StepTrainer
Toni-SM/skrl/skrl/trainers/torch/step.py
from typing import Any, List, Optional, Tuple, Union import copy import sys import tqdm import torch from skrl.agents.torch import Agent from skrl.envs.wrappers.torch import Wrapper from skrl.trainers.torch import Trainer # [start-config-dict-torch] STEP_TRAINER_DEFAULT_CONFIG = { "timesteps": 100000, # number of timesteps to train for "headless": False, # whether to use headless mode (no rendering) "disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY "close_environment_at_exit": True, # whether to close the environment on normal program termination } # [end-config-dict-torch] class StepTrainer(Trainer): def __init__(self, env: Wrapper, agents: Union[Agent, List[Agent]], agents_scope: Optional[List[int]] = None, cfg: Optional[dict] = None) -> None: """Step-by-step trainer Train agents by controlling the training/evaluation loop step by step :param env: Environment to train on :type env: skrl.envs.wrappers.torch.Wrapper :param agents: Agents to train :type agents: Union[Agent, List[Agent]] :param agents_scope: Number of environments for each agent to train on (default: ``None``) :type agents_scope: tuple or list of int, optional :param cfg: Configuration dictionary (default: ``None``). See STEP_TRAINER_DEFAULT_CONFIG for default values :type cfg: dict, optional """ _cfg = copy.deepcopy(STEP_TRAINER_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) agents_scope = agents_scope if agents_scope is not None else [] super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg) # init agents if self.num_simultaneous_agents > 1: for agent in self.agents: agent.init(trainer_cfg=self.cfg) else: self.agents.init(trainer_cfg=self.cfg) self._timestep = 0 self._progress = None self.states = None def train(self, timestep: Optional[int] = None, timesteps: Optional[int] = None) -> \ Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Execute a training iteration This method executes the following steps once: - Pre-interaction (sequentially if num_simultaneous_agents > 1) - Compute actions (sequentially if num_simultaneous_agents > 1) - Interact with the environments - Render scene - Record transitions (sequentially if num_simultaneous_agents > 1) - Post-interaction (sequentially if num_simultaneous_agents > 1) - Reset environments :param timestep: Current timestep (default: ``None``). If None, the current timestep will be carried by an internal variable :type timestep: int, optional :param timesteps: Total number of timesteps (default: ``None``). 
If None, the total number of timesteps is obtained from the trainer's config :type timesteps: int, optional :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ if timestep is None: self._timestep += 1 timestep = self._timestep timesteps = self.timesteps if timesteps is None else timesteps if self._progress is None: self._progress = tqdm.tqdm(total=timesteps, disable=self.disable_progressbar, file=sys.stdout) self._progress.update(n=1) # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("train") else: self.agents.set_running_mode("train") # reset env if self.states is None: self.states, infos = self.env.reset() if self.num_simultaneous_agents == 1: # pre-interaction self.agents.pre_interaction(timestep=timestep, timesteps=timesteps) # compute actions with torch.no_grad(): actions = self.agents.act(self.states, timestep=timestep, timesteps=timesteps)[0] else: # pre-interaction for agent in self.agents: agent.pre_interaction(timestep=timestep, timesteps=timesteps) # compute actions with torch.no_grad(): actions = torch.vstack([agent.act(self.states[scope[0]:scope[1]], timestep=timestep, timesteps=timesteps)[0] \ for agent, scope in zip(self.agents, self.agents_scope)]) with torch.no_grad(): # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() if self.num_simultaneous_agents == 1: # record the environments' transitions with torch.no_grad(): self.agents.record_transition(states=self.states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=timesteps) # post-interaction self.agents.post_interaction(timestep=timestep, timesteps=timesteps) else: # record the environments' transitions with torch.no_grad(): for agent, scope in zip(self.agents, self.agents_scope): agent.record_transition(states=self.states[scope[0]:scope[1]], actions=actions[scope[0]:scope[1]], rewards=rewards[scope[0]:scope[1]], next_states=next_states[scope[0]:scope[1]], terminated=terminated[scope[0]:scope[1]], truncated=truncated[scope[0]:scope[1]], infos=infos, timestep=timestep, timesteps=timesteps) # post-interaction for agent in self.agents: agent.post_interaction(timestep=timestep, timesteps=timesteps) # reset environments with torch.no_grad(): if terminated.any() or truncated.any(): self.states, infos = self.env.reset() else: self.states = next_states return next_states, rewards, terminated, truncated, infos def eval(self, timestep: Optional[int] = None, timesteps: Optional[int] = None) -> \ Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, Any]: """Evaluate the agents sequentially This method executes the following steps in loop: - Compute actions (sequentially if num_simultaneous_agents > 1) - Interact with the environments - Render scene - Reset environments :param timestep: Current timestep (default: ``None``). If None, the current timestep will be carried by an internal variable :type timestep: int, optional :param timesteps: Total number of timesteps (default: ``None``). 
If None, the total number of timesteps is obtained from the trainer's config :type timesteps: int, optional :return: Observation, reward, terminated, truncated, info :rtype: tuple of torch.Tensor and any other info """ if timestep is None: self._timestep += 1 timestep = self._timestep timesteps = self.timesteps if timesteps is None else timesteps if self._progress is None: self._progress = tqdm.tqdm(total=timesteps, disable=self.disable_progressbar, file=sys.stdout) self._progress.update(n=1) # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("eval") else: self.agents.set_running_mode("eval") # reset env if self.states is None: self.states, infos = self.env.reset() with torch.no_grad(): if self.num_simultaneous_agents == 1: # compute actions actions = self.agents.act(self.states, timestep=timestep, timesteps=timesteps)[0] else: # compute actions actions = torch.vstack([agent.act(self.states[scope[0]:scope[1]], timestep=timestep, timesteps=timesteps)[0] \ for agent, scope in zip(self.agents, self.agents_scope)]) # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() if self.num_simultaneous_agents == 1: # write data to TensorBoard self.agents.record_transition(states=self.states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=timesteps) super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=timesteps) else: # write data to TensorBoard for agent, scope in zip(self.agents, self.agents_scope): agent.record_transition(states=self.states[scope[0]:scope[1]], actions=actions[scope[0]:scope[1]], rewards=rewards[scope[0]:scope[1]], next_states=next_states[scope[0]:scope[1]], terminated=terminated[scope[0]:scope[1]], truncated=truncated[scope[0]:scope[1]], infos=infos, timestep=timestep, timesteps=timesteps) super(type(agent), agent).post_interaction(timestep=timestep, timesteps=timesteps) # reset environments if terminated.any() or truncated.any(): self.states, infos = self.env.reset() else: self.states = next_states return next_states, rewards, terminated, truncated, infos
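# --- Usage sketch (illustrative, not part of the skrl sources) ---
# Driving the training loop manually with the StepTrainer above. `env` is
# assumed to be an skrl-wrapped environment and `agent` an already configured
# skrl agent (both are placeholders).
from skrl.trainers.torch import StepTrainer

cfg = {"timesteps": 10000, "headless": True}
trainer = StepTrainer(env=env, agents=agent, cfg=cfg)

for timestep in range(cfg["timesteps"]):
    # one environment step per call; the transition is returned for custom logic
    states, rewards, terminated, truncated, infos = trainer.train(timestep=timestep)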
Toni-SM/skrl/skrl/trainers/torch/parallel.py
from typing import List, Optional, Union import copy import sys import tqdm import torch import torch.multiprocessing as mp from skrl.agents.torch import Agent from skrl.envs.wrappers.torch import Wrapper from skrl.trainers.torch import Trainer # [start-config-dict-torch] PARALLEL_TRAINER_DEFAULT_CONFIG = { "timesteps": 100000, # number of timesteps to train for "headless": False, # whether to use headless mode (no rendering) "disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY "close_environment_at_exit": True, # whether to close the environment on normal program termination } # [end-config-dict-torch] def fn_processor(process_index, *args): print(f"[INFO] Processor {process_index}: started") pipe = args[0][process_index] queue = args[1][process_index] barrier = args[2] scope = args[3][process_index] trainer_cfg = args[4] agent = None _states = None _actions = None # wait for the main process to start all the workers barrier.wait() while True: msg = pipe.recv() task = msg['task'] # terminate process if task == 'terminate': break # initialize agent elif task == 'init': agent = queue.get() agent.init(trainer_cfg=trainer_cfg) print(f"[INFO] Processor {process_index}: init agent {type(agent).__name__} with scope {scope}") barrier.wait() # execute agent's pre-interaction step elif task == "pre_interaction": agent.pre_interaction(timestep=msg['timestep'], timesteps=msg['timesteps']) barrier.wait() # get agent's actions elif task == "act": _states = queue.get()[scope[0]:scope[1]] with torch.no_grad(): _actions = agent.act(_states, timestep=msg['timestep'], timesteps=msg['timesteps'])[0] if not _actions.is_cuda: _actions.share_memory_() queue.put(_actions) barrier.wait() # record agent's experience elif task == "record_transition": with torch.no_grad(): agent.record_transition(states=_states, actions=_actions, rewards=queue.get()[scope[0]:scope[1]], next_states=queue.get()[scope[0]:scope[1]], terminated=queue.get()[scope[0]:scope[1]], truncated=queue.get()[scope[0]:scope[1]], infos=queue.get(), timestep=msg['timestep'], timesteps=msg['timesteps']) barrier.wait() # execute agent's post-interaction step elif task == "post_interaction": agent.post_interaction(timestep=msg['timestep'], timesteps=msg['timesteps']) barrier.wait() # write data to TensorBoard (evaluation) elif task == "eval-record_transition-post_interaction": with torch.no_grad(): agent.record_transition(states=_states, actions=_actions, rewards=queue.get()[scope[0]:scope[1]], next_states=queue.get()[scope[0]:scope[1]], terminated=queue.get()[scope[0]:scope[1]], truncated=queue.get()[scope[0]:scope[1]], infos=queue.get(), timestep=msg['timestep'], timesteps=msg['timesteps']) super(type(agent), agent).post_interaction(timestep=msg['timestep'], timesteps=msg['timesteps']) barrier.wait() class ParallelTrainer(Trainer): def __init__(self, env: Wrapper, agents: Union[Agent, List[Agent]], agents_scope: Optional[List[int]] = None, cfg: Optional[dict] = None) -> None: """Parallel trainer Train agents in parallel using multiple processes :param env: Environment to train on :type env: skrl.envs.wrappers.torch.Wrapper :param agents: Agents to train :type agents: Union[Agent, List[Agent]] :param agents_scope: Number of environments for each agent to train on (default: ``None``) :type agents_scope: tuple or list of int, optional :param cfg: Configuration dictionary (default: ``None``). 
See PARALLEL_TRAINER_DEFAULT_CONFIG for default values :type cfg: dict, optional """ _cfg = copy.deepcopy(PARALLEL_TRAINER_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) agents_scope = agents_scope if agents_scope is not None else [] super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg) mp.set_start_method(method='spawn', force=True) def train(self) -> None: """Train the agents in parallel This method executes the following steps in loop: - Pre-interaction (parallel) - Compute actions (in parallel) - Interact with the environments - Render scene - Record transitions (in parallel) - Post-interaction (in parallel) - Reset environments """ # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("train") else: self.agents.set_running_mode("train") # non-simultaneous agents if self.num_simultaneous_agents == 1: self.agents.init(trainer_cfg=self.cfg) # single-agent if self.env.num_agents == 1: self.single_agent_train() # multi-agent else: self.multi_agent_train() return # initialize multiprocessing variables queues = [] producer_pipes = [] consumer_pipes = [] barrier = mp.Barrier(self.num_simultaneous_agents + 1) processes = [] for i in range(self.num_simultaneous_agents): pipe_read, pipe_write = mp.Pipe(duplex=False) producer_pipes.append(pipe_write) consumer_pipes.append(pipe_read) queues.append(mp.Queue()) # move tensors to shared memory for agent in self.agents: if agent.memory is not None: agent.memory.share_memory() for model in agent.models.values(): try: model.share_memory() except RuntimeError: pass # spawn and wait for all processes to start for i in range(self.num_simultaneous_agents): process = mp.Process(target=fn_processor, args=(i, consumer_pipes, queues, barrier, self.agents_scope, self.cfg), daemon=True) processes.append(process) process.start() barrier.wait() # initialize agents for pipe, queue, agent in zip(producer_pipes, queues, self.agents): pipe.send({'task': 'init'}) queue.put(agent) barrier.wait() # reset env states, infos = self.env.reset() if not states.is_cuda: states.share_memory_() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # pre-interaction for pipe in producer_pipes: pipe.send({"task": "pre_interaction", "timestep": timestep, "timesteps": self.timesteps}) barrier.wait() # compute actions with torch.no_grad(): for pipe, queue in zip(producer_pipes, queues): pipe.send({"task": "act", "timestep": timestep, "timesteps": self.timesteps}) queue.put(states) barrier.wait() actions = torch.vstack([queue.get() for queue in queues]) # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() # record the environments' transitions if not rewards.is_cuda: rewards.share_memory_() if not next_states.is_cuda: next_states.share_memory_() if not terminated.is_cuda: terminated.share_memory_() if not truncated.is_cuda: truncated.share_memory_() for pipe, queue in zip(producer_pipes, queues): pipe.send({"task": "record_transition", "timestep": timestep, "timesteps": self.timesteps}) queue.put(rewards) queue.put(next_states) queue.put(terminated) queue.put(truncated) queue.put(infos) barrier.wait() # post-interaction for pipe in producer_pipes: pipe.send({"task": "post_interaction", "timestep": timestep, "timesteps": self.timesteps}) barrier.wait() # reset environments with torch.no_grad(): if terminated.any() or 
truncated.any(): states, infos = self.env.reset() if not states.is_cuda: states.share_memory_() else: states.copy_(next_states) # terminate processes for pipe in producer_pipes: pipe.send({"task": "terminate"}) # join processes for process in processes: process.join() def eval(self) -> None: """Evaluate the agents sequentially This method executes the following steps in loop: - Compute actions (in parallel) - Interact with the environments - Render scene - Reset environments """ # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("eval") else: self.agents.set_running_mode("eval") # non-simultaneous agents if self.num_simultaneous_agents == 1: self.agents.init(trainer_cfg=self.cfg) # single-agent if self.env.num_agents == 1: self.single_agent_eval() # multi-agent else: self.multi_agent_eval() return # initialize multiprocessing variables queues = [] producer_pipes = [] consumer_pipes = [] barrier = mp.Barrier(self.num_simultaneous_agents + 1) processes = [] for i in range(self.num_simultaneous_agents): pipe_read, pipe_write = mp.Pipe(duplex=False) producer_pipes.append(pipe_write) consumer_pipes.append(pipe_read) queues.append(mp.Queue()) # move tensors to shared memory for agent in self.agents: if agent.memory is not None: agent.memory.share_memory() for model in agent.models.values(): if model is not None: try: model.share_memory() except RuntimeError: pass # spawn and wait for all processes to start for i in range(self.num_simultaneous_agents): process = mp.Process(target=fn_processor, args=(i, consumer_pipes, queues, barrier, self.agents_scope, self.cfg), daemon=True) processes.append(process) process.start() barrier.wait() # initialize agents for pipe, queue, agent in zip(producer_pipes, queues, self.agents): pipe.send({'task': 'init'}) queue.put(agent) barrier.wait() # reset env states, infos = self.env.reset() if not states.is_cuda: states.share_memory_() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # compute actions with torch.no_grad(): for pipe, queue in zip(producer_pipes, queues): pipe.send({"task": "act", "timestep": timestep, "timesteps": self.timesteps}) queue.put(states) barrier.wait() actions = torch.vstack([queue.get() for queue in queues]) # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() # write data to TensorBoard if not rewards.is_cuda: rewards.share_memory_() if not next_states.is_cuda: next_states.share_memory_() if not terminated.is_cuda: terminated.share_memory_() if not truncated.is_cuda: truncated.share_memory_() for pipe, queue in zip(producer_pipes, queues): pipe.send({"task": "eval-record_transition-post_interaction", "timestep": timestep, "timesteps": self.timesteps}) queue.put(rewards) queue.put(next_states) queue.put(terminated) queue.put(truncated) queue.put(infos) barrier.wait() # reset environments if terminated.any() or truncated.any(): states, infos = self.env.reset() if not states.is_cuda: states.share_memory_() else: states.copy_(next_states) # terminate processes for pipe in producer_pipes: pipe.send({"task": "terminate"}) # join processes for process in processes: process.join()
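# --- Usage sketch (illustrative, not part of the skrl sources) ---
# Simultaneous (multi-process) training with the ParallelTrainer above. Since
# the trainer forces the "spawn" start method, the script entry point should be
# guarded. `env`, `agent_0` and `agent_1` are placeholders for an skrl-wrapped
# environment and two configured agents; the scopes must sum to env.num_envs.
from skrl.trainers.torch import ParallelTrainer

if __name__ == "__main__":
    cfg = {"timesteps": 50000, "headless": True}
    trainer = ParallelTrainer(env=env, agents=[agent_0, agent_1], agents_scope=[512, 512], cfg=cfg)
    trainer.train()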
Toni-SM/skrl/skrl/trainers/torch/sequential.py
from typing import List, Optional, Union import copy import sys import tqdm import torch from skrl.agents.torch import Agent from skrl.envs.wrappers.torch import Wrapper from skrl.trainers.torch import Trainer # [start-config-dict-torch] SEQUENTIAL_TRAINER_DEFAULT_CONFIG = { "timesteps": 100000, # number of timesteps to train for "headless": False, # whether to use headless mode (no rendering) "disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY "close_environment_at_exit": True, # whether to close the environment on normal program termination } # [end-config-dict-torch] class SequentialTrainer(Trainer): def __init__(self, env: Wrapper, agents: Union[Agent, List[Agent]], agents_scope: Optional[List[int]] = None, cfg: Optional[dict] = None) -> None: """Sequential trainer Train agents sequentially (i.e., one after the other in each interaction with the environment) :param env: Environment to train on :type env: skrl.envs.wrappers.torch.Wrapper :param agents: Agents to train :type agents: Union[Agent, List[Agent]] :param agents_scope: Number of environments for each agent to train on (default: ``None``) :type agents_scope: tuple or list of int, optional :param cfg: Configuration dictionary (default: ``None``). See SEQUENTIAL_TRAINER_DEFAULT_CONFIG for default values :type cfg: dict, optional """ _cfg = copy.deepcopy(SEQUENTIAL_TRAINER_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) agents_scope = agents_scope if agents_scope is not None else [] super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg) # init agents if self.num_simultaneous_agents > 1: for agent in self.agents: agent.init(trainer_cfg=self.cfg) else: self.agents.init(trainer_cfg=self.cfg) def train(self) -> None: """Train the agents sequentially This method executes the following steps in loop: - Pre-interaction (sequentially) - Compute actions (sequentially) - Interact with the environments - Render scene - Record transitions (sequentially) - Post-interaction (sequentially) - Reset environments """ # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("train") else: self.agents.set_running_mode("train") # non-simultaneous agents if self.num_simultaneous_agents == 1: # single-agent if self.env.num_agents == 1: self.single_agent_train() # multi-agent else: self.multi_agent_train() return # reset env states, infos = self.env.reset() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # pre-interaction for agent in self.agents: agent.pre_interaction(timestep=timestep, timesteps=self.timesteps) # compute actions with torch.no_grad(): actions = torch.vstack([agent.act(states[scope[0]:scope[1]], timestep=timestep, timesteps=self.timesteps)[0] \ for agent, scope in zip(self.agents, self.agents_scope)]) # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() # record the environments' transitions for agent, scope in zip(self.agents, self.agents_scope): agent.record_transition(states=states[scope[0]:scope[1]], actions=actions[scope[0]:scope[1]], rewards=rewards[scope[0]:scope[1]], next_states=next_states[scope[0]:scope[1]], terminated=terminated[scope[0]:scope[1]], truncated=truncated[scope[0]:scope[1]], infos=infos, timestep=timestep, timesteps=self.timesteps) # post-interaction for agent in self.agents: 
agent.post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments with torch.no_grad(): if terminated.any() or truncated.any(): states, infos = self.env.reset() else: states = next_states def eval(self) -> None: """Evaluate the agents sequentially This method executes the following steps in loop: - Compute actions (sequentially) - Interact with the environments - Render scene - Reset environments """ # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("eval") else: self.agents.set_running_mode("eval") # non-simultaneous agents if self.num_simultaneous_agents == 1: # single-agent if self.env.num_agents == 1: self.single_agent_eval() # multi-agent else: self.multi_agent_eval() return # reset env states, infos = self.env.reset() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # compute actions with torch.no_grad(): actions = torch.vstack([agent.act(states[scope[0]:scope[1]], timestep=timestep, timesteps=self.timesteps)[0] \ for agent, scope in zip(self.agents, self.agents_scope)]) # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() # write data to TensorBoard for agent, scope in zip(self.agents, self.agents_scope): agent.record_transition(states=states[scope[0]:scope[1]], actions=actions[scope[0]:scope[1]], rewards=rewards[scope[0]:scope[1]], next_states=next_states[scope[0]:scope[1]], terminated=terminated[scope[0]:scope[1]], truncated=truncated[scope[0]:scope[1]], infos=infos, timestep=timestep, timesteps=self.timesteps) super(type(agent), agent).post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments if terminated.any() or truncated.any(): states, infos = self.env.reset() else: states = next_states
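A minimal usage sketch for the trainer above (added for illustration, not part of the source file). It assumes a Gymnasium environment and an already-configured skrl agent; the agent placeholder must be replaced by a real agent (e.g., DQN or PPO) whose observation/action spaces match the wrapped environment, and the wrap_env import location is assumed from this repository's layout. Note that train() calls each agent's own post_interaction (which triggers the learning update), while eval() calls the base Agent.post_interaction so only tracking/checkpoint bookkeeping runs.

import gymnasium as gym

from skrl.envs.wrappers.torch import wrap_env
from skrl.trainers.torch import SequentialTrainer

env = wrap_env(gym.make("CartPole-v1"))   # wrap a single (or vectorized) environment
agent = ...                               # placeholder: an already-configured skrl Agent

cfg = {"timesteps": 10000, "headless": True}
trainer = SequentialTrainer(env=env, agents=agent, cfg=cfg)

trainer.train()   # run the training loop
# trainer.eval()  # or run the evaluation loop instead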
Toni-SM/skrl/skrl/trainers/jax/base.py
from typing import List, Optional, Union import atexit import contextlib import sys import tqdm from skrl import logger from skrl.agents.jax import Agent from skrl.envs.wrappers.jax import Wrapper def generate_equally_spaced_scopes(num_envs: int, num_simultaneous_agents: int) -> List[int]: """Generate a list of equally spaced scopes for the agents :param num_envs: Number of environments :type num_envs: int :param num_simultaneous_agents: Number of simultaneous agents :type num_simultaneous_agents: int :raises ValueError: If the number of simultaneous agents is greater than the number of environments :return: List of equally spaced scopes :rtype: List[int] """ scopes = [int(num_envs / num_simultaneous_agents)] * num_simultaneous_agents if sum(scopes): scopes[-1] += num_envs - sum(scopes) else: raise ValueError(f"The number of simultaneous agents ({num_simultaneous_agents}) is greater than the number of environments ({num_envs})") return scopes class Trainer: def __init__(self, env: Wrapper, agents: Union[Agent, List[Agent]], agents_scope: Optional[List[int]] = None, cfg: Optional[dict] = None) -> None: """Base class for trainers :param env: Environment to train on :type env: skrl.envs.wrappers.jax.Wrapper :param agents: Agents to train :type agents: Union[Agent, List[Agent]] :param agents_scope: Number of environments for each agent to train on (default: ``None``) :type agents_scope: tuple or list of int, optional :param cfg: Configuration dictionary (default: ``None``) :type cfg: dict, optional """ self.cfg = cfg if cfg is not None else {} self.env = env self.agents = agents self.agents_scope = agents_scope if agents_scope is not None else [] # get configuration self.timesteps = self.cfg.get("timesteps", 0) self.headless = self.cfg.get("headless", False) self.disable_progressbar = self.cfg.get("disable_progressbar", False) self.close_environment_at_exit = self.cfg.get("close_environment_at_exit", True) self.initial_timestep = 0 # setup agents self.num_simultaneous_agents = 0 self._setup_agents() # register environment closing if configured if self.close_environment_at_exit: @atexit.register def close_env(): logger.info("Closing environment") self.env.close() logger.info("Environment closed") def __str__(self) -> str: """Generate a string representation of the trainer :return: Representation of the trainer as string :rtype: str """ string = f"Trainer: {self}" string += f"\n |-- Number of parallelizable environments: {self.env.num_envs}" string += f"\n |-- Number of simultaneous agents: {self.num_simultaneous_agents}" string += "\n |-- Agents and scopes:" if self.num_simultaneous_agents > 1: for agent, scope in zip(self.agents, self.agents_scope): string += f"\n | |-- agent: {type(agent)}" string += f"\n | | |-- scope: {scope[1] - scope[0]} environments ({scope[0]}:{scope[1]})" else: string += f"\n | |-- agent: {type(self.agents)}" string += f"\n | | |-- scope: {self.env.num_envs} environment(s)" return string def _setup_agents(self) -> None: """Setup agents for training :raises ValueError: Invalid setup """ # validate agents and their scopes if type(self.agents) in [tuple, list]: # single agent if len(self.agents) == 1: self.num_simultaneous_agents = 1 self.agents = self.agents[0] self.agents_scope = [1] # parallel agents elif len(self.agents) > 1: self.num_simultaneous_agents = len(self.agents) # check scopes if not len(self.agents_scope): logger.warning("The agents' scopes are empty, they will be generated as equal as possible") self.agents_scope = [int(self.env.num_envs / 
len(self.agents))] * len(self.agents) if sum(self.agents_scope): self.agents_scope[-1] += self.env.num_envs - sum(self.agents_scope) else: raise ValueError(f"The number of agents ({len(self.agents)}) is greater than the number of parallelizable environments ({self.env.num_envs})") elif len(self.agents_scope) != len(self.agents): raise ValueError(f"The number of agents ({len(self.agents)}) doesn't match the number of scopes ({len(self.agents_scope)})") elif sum(self.agents_scope) != self.env.num_envs: raise ValueError(f"The scopes ({sum(self.agents_scope)}) don't cover the number of parallelizable environments ({self.env.num_envs})") # generate agents' scopes index = 0 for i in range(len(self.agents_scope)): index += self.agents_scope[i] self.agents_scope[i] = (index - self.agents_scope[i], index) else: raise ValueError("A list of agents is expected") else: self.num_simultaneous_agents = 1 def train(self) -> None: """Train the agents :raises NotImplementedError: Not implemented """ raise NotImplementedError def eval(self) -> None: """Evaluate the agents :raises NotImplementedError: Not implemented """ raise NotImplementedError def single_agent_train(self) -> None: """Train agent This method executes the following steps in loop: - Pre-interaction - Compute actions - Interact with the environments - Render scene - Record transitions - Post-interaction - Reset environments """ assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents" assert self.env.num_agents == 1, "This method is not allowed for multi-agents" # reset env states, infos = self.env.reset() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # pre-interaction self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps) # compute actions with contextlib.nullcontext(): actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0] # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() # record the environments' transitions with contextlib.nullcontext(): self.agents.record_transition(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=self.timesteps) # post-interaction self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments if self.env.num_envs > 1: states = next_states else: if terminated.any() or truncated.any(): with contextlib.nullcontext(): states, infos = self.env.reset() else: states = next_states def single_agent_eval(self) -> None: """Evaluate agent This method executes the following steps in loop: - Compute actions (sequentially) - Interact with the environments - Render scene - Reset environments """ assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents" assert self.env.num_agents == 1, "This method is not allowed for multi-agents" # reset env states, infos = self.env.reset() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # compute actions with contextlib.nullcontext(): actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0] # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() with 
contextlib.nullcontext(): # write data to TensorBoard self.agents.record_transition(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=self.timesteps) super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments if self.env.num_envs > 1: states = next_states else: if terminated.any() or truncated.any(): with contextlib.nullcontext(): states, infos = self.env.reset() else: states = next_states def multi_agent_train(self) -> None: """Train multi-agents This method executes the following steps in loop: - Pre-interaction - Compute actions - Interact with the environments - Render scene - Record transitions - Post-interaction - Reset environments """ assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents" assert self.env.num_agents > 1, "This method is not allowed for single-agent" # reset env states, infos = self.env.reset() shared_states = infos.get("shared_states", None) for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # pre-interaction self.agents.pre_interaction(timestep=timestep, timesteps=self.timesteps) # compute actions with contextlib.nullcontext(): actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0] # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) shared_next_states = infos.get("shared_states", None) infos["shared_states"] = shared_states infos["shared_next_states"] = shared_next_states # render scene if not self.headless: self.env.render() # record the environments' transitions with contextlib.nullcontext(): self.agents.record_transition(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=self.timesteps) # post-interaction self.agents.post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments with contextlib.nullcontext(): if not self.env.agents: states, infos = self.env.reset() shared_states = infos.get("shared_states", None) else: states = next_states shared_states = shared_next_states def multi_agent_eval(self) -> None: """Evaluate multi-agents This method executes the following steps in loop: - Compute actions (sequentially) - Interact with the environments - Render scene - Reset environments """ assert self.num_simultaneous_agents == 1, "This method is not allowed for simultaneous agents" assert self.env.num_agents > 1, "This method is not allowed for single-agent" # reset env states, infos = self.env.reset() shared_states = infos.get("shared_states", None) for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # compute actions with contextlib.nullcontext(): actions = self.agents.act(states, timestep=timestep, timesteps=self.timesteps)[0] # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) shared_next_states = infos.get("shared_states", None) infos["shared_states"] = shared_states infos["shared_next_states"] = shared_next_states # render scene if not self.headless: self.env.render() with contextlib.nullcontext(): # write data to TensorBoard self.agents.record_transition(states=states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, 
truncated=truncated, infos=infos, timestep=timestep, timesteps=self.timesteps) super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments if not self.env.agents: states, infos = self.env.reset() shared_states = infos.get("shared_states", None) else: states = next_states shared_states = shared_next_states
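Two small illustrations of the scope handling above (added for clarity, not part of the source). generate_equally_spaced_scopes splits the environments among simultaneous agents and gives any remainder to the last agent; _setup_agents then converts those per-agent counts into (start, end) slice pairs. The second snippet reproduces that index arithmetic with plain Python.

from skrl.trainers.jax import generate_equally_spaced_scopes

# 10 environments shared by 3 simultaneous agents -> the last scope absorbs the remainder
print(generate_equally_spaced_scopes(num_envs=10, num_simultaneous_agents=3))  # [3, 3, 4]

# how the base trainer converts per-agent counts into (start, end) environment slices
scopes = [3, 3, 4]
index, slices = 0, []
for count in scopes:
    index += count
    slices.append((index - count, index))
print(slices)  # [(0, 3), (3, 6), (6, 10)]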
Toni-SM/skrl/skrl/trainers/jax/__init__.py
from skrl.trainers.jax.base import Trainer, generate_equally_spaced_scopes  # isort:skip
from skrl.trainers.jax.sequential import SequentialTrainer
from skrl.trainers.jax.step import StepTrainer
Toni-SM/skrl/skrl/trainers/jax/step.py
from typing import Any, List, Optional, Tuple, Union import contextlib import copy import sys import tqdm import jax import jax.numpy as jnp import numpy as np from skrl.agents.jax import Agent from skrl.envs.wrappers.jax import Wrapper from skrl.trainers.jax import Trainer # [start-config-dict-jax] STEP_TRAINER_DEFAULT_CONFIG = { "timesteps": 100000, # number of timesteps to train for "headless": False, # whether to use headless mode (no rendering) "disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY "close_environment_at_exit": True, # whether to close the environment on normal program termination } # [end-config-dict-jax] class StepTrainer(Trainer): def __init__(self, env: Wrapper, agents: Union[Agent, List[Agent]], agents_scope: Optional[List[int]] = None, cfg: Optional[dict] = None) -> None: """Step-by-step trainer Train agents by controlling the training/evaluation loop step by step :param env: Environment to train on :type env: skrl.envs.wrappers.jax.Wrapper :param agents: Agents to train :type agents: Union[Agent, List[Agent]] :param agents_scope: Number of environments for each agent to train on (default: ``None``) :type agents_scope: tuple or list of int, optional :param cfg: Configuration dictionary (default: ``None``). See STEP_TRAINER_DEFAULT_CONFIG for default values :type cfg: dict, optional """ _cfg = copy.deepcopy(STEP_TRAINER_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) agents_scope = agents_scope if agents_scope is not None else [] super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg) # init agents if self.num_simultaneous_agents > 1: for agent in self.agents: agent.init(trainer_cfg=self.cfg) else: self.agents.init(trainer_cfg=self.cfg) self._timestep = 0 self._progress = None self.states = None def train(self, timestep: Optional[int] = None, timesteps: Optional[int] = None) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Execute a training iteration This method executes the following steps once: - Pre-interaction (sequentially if num_simultaneous_agents > 1) - Compute actions (sequentially if num_simultaneous_agents > 1) - Interact with the environments - Render scene - Record transitions (sequentially if num_simultaneous_agents > 1) - Post-interaction (sequentially if num_simultaneous_agents > 1) - Reset environments :param timestep: Current timestep (default: ``None``). If None, the current timestep will be carried by an internal variable :type timestep: int, optional :param timesteps: Total number of timesteps (default: ``None``). 
If None, the total number of timesteps is obtained from the trainer's config :type timesteps: int, optional :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ if timestep is None: self._timestep += 1 timestep = self._timestep timesteps = self.timesteps if timesteps is None else timesteps if self._progress is None: self._progress = tqdm.tqdm(total=timesteps, disable=self.disable_progressbar, file=sys.stdout) self._progress.update(n=1) # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("train") else: self.agents.set_running_mode("train") # reset env if self.states is None: self.states, infos = self.env.reset() if self.num_simultaneous_agents == 1: # pre-interaction self.agents.pre_interaction(timestep=timestep, timesteps=timesteps) # compute actions with contextlib.nullcontext(): actions = self.agents.act(self.states, timestep=timestep, timesteps=timesteps)[0] else: # pre-interaction for agent in self.agents: agent.pre_interaction(timestep=timestep, timesteps=timesteps) # compute actions with contextlib.nullcontext(): actions = jnp.vstack([agent.act(self.states[scope[0]:scope[1]], timestep=timestep, timesteps=timesteps)[0] \ for agent, scope in zip(self.agents, self.agents_scope)]) # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() if self.num_simultaneous_agents == 1: # record the environments' transitions with contextlib.nullcontext(): self.agents.record_transition(states=self.states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=timesteps) # post-interaction self.agents.post_interaction(timestep=timestep, timesteps=timesteps) else: # record the environments' transitions with contextlib.nullcontext(): for agent, scope in zip(self.agents, self.agents_scope): agent.record_transition(states=self.states[scope[0]:scope[1]], actions=actions[scope[0]:scope[1]], rewards=rewards[scope[0]:scope[1]], next_states=next_states[scope[0]:scope[1]], terminated=terminated[scope[0]:scope[1]], truncated=truncated[scope[0]:scope[1]], infos=infos, timestep=timestep, timesteps=timesteps) # post-interaction for agent in self.agents: agent.post_interaction(timestep=timestep, timesteps=timesteps) # reset environments with contextlib.nullcontext(): if terminated.any() or truncated.any(): self.states, infos = self.env.reset() else: self.states = next_states return next_states, rewards, terminated, truncated, infos def eval(self, timestep: Optional[int] = None, timesteps: Optional[int] = None) -> \ Tuple[Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Union[np.ndarray, jax.Array], Any]: """Evaluate the agents sequentially This method executes the following steps in loop: - Compute actions (sequentially if num_simultaneous_agents > 1) - Interact with the environments - Render scene - Reset environments :param timestep: Current timestep (default: ``None``). If None, the current timestep will be carried by an internal variable :type timestep: int, optional :param timesteps: Total number of timesteps (default: ``None``). 
If None, the total number of timesteps is obtained from the trainer's config :type timesteps: int, optional :return: Observation, reward, terminated, truncated, info :rtype: tuple of np.ndarray or jax.Array and any other info """ if timestep is None: self._timestep += 1 timestep = self._timestep timesteps = self.timesteps if timesteps is None else timesteps if self._progress is None: self._progress = tqdm.tqdm(total=timesteps, disable=self.disable_progressbar, file=sys.stdout) self._progress.update(n=1) # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("eval") else: self.agents.set_running_mode("eval") # reset env if self.states is None: self.states, infos = self.env.reset() with contextlib.nullcontext(): if self.num_simultaneous_agents == 1: # compute actions actions = self.agents.act(self.states, timestep=timestep, timesteps=timesteps)[0] else: # compute actions actions = jnp.vstack([agent.act(self.states[scope[0]:scope[1]], timestep=timestep, timesteps=timesteps)[0] \ for agent, scope in zip(self.agents, self.agents_scope)]) # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() with contextlib.nullcontext(): if self.num_simultaneous_agents == 1: # write data to TensorBoard self.agents.record_transition(states=self.states, actions=actions, rewards=rewards, next_states=next_states, terminated=terminated, truncated=truncated, infos=infos, timestep=timestep, timesteps=timesteps) super(type(self.agents), self.agents).post_interaction(timestep=timestep, timesteps=timesteps) else: # write data to TensorBoard for agent, scope in zip(self.agents, self.agents_scope): agent.record_transition(states=self.states[scope[0]:scope[1]], actions=actions[scope[0]:scope[1]], rewards=rewards[scope[0]:scope[1]], next_states=next_states[scope[0]:scope[1]], terminated=terminated[scope[0]:scope[1]], truncated=truncated[scope[0]:scope[1]], infos=infos, timestep=timestep, timesteps=timesteps) super(type(agent), agent).post_interaction(timestep=timestep, timesteps=timesteps) # reset environments if terminated.any() or truncated.any(): self.states, infos = self.env.reset() else: self.states = next_states return next_states, rewards, terminated, truncated, infos
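A hedged sketch of how the step trainer is meant to be driven: the caller owns the loop and invokes train() (or eval()) once per timestep, receiving the resulting transition. The env, agent and reward_threshold names are placeholders introduced for this illustration only.

from skrl.trainers.jax import StepTrainer

cfg = {"timesteps": 5000, "headless": True}
trainer = StepTrainer(env=env, agents=agent, cfg=cfg)   # env / agent: placeholders (see base class)

reward_threshold = 475.0
for timestep in range(cfg["timesteps"]):
    # one full interaction per call: pre-interaction, act, env.step, record transition, post-interaction
    next_states, rewards, terminated, truncated, infos = trainer.train(timestep=timestep)
    # arbitrary user logic can run between calls (curriculum changes, custom logging, early stopping, ...)
    if rewards.mean() >= reward_threshold:
        break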
Toni-SM/skrl/skrl/trainers/jax/sequential.py
from typing import List, Optional, Union import contextlib import copy import sys import tqdm import jax.numpy as jnp from skrl.agents.jax import Agent from skrl.envs.wrappers.jax import Wrapper from skrl.trainers.jax import Trainer # [start-config-dict-jax] SEQUENTIAL_TRAINER_DEFAULT_CONFIG = { "timesteps": 100000, # number of timesteps to train for "headless": False, # whether to use headless mode (no rendering) "disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY "close_environment_at_exit": True, # whether to close the environment on normal program termination } # [end-config-dict-jax] class SequentialTrainer(Trainer): def __init__(self, env: Wrapper, agents: Union[Agent, List[Agent]], agents_scope: Optional[List[int]] = None, cfg: Optional[dict] = None) -> None: """Sequential trainer Train agents sequentially (i.e., one after the other in each interaction with the environment) :param env: Environment to train on :type env: skrl.envs.wrappers.jax.Wrapper :param agents: Agents to train :type agents: Union[Agent, List[Agent]] :param agents_scope: Number of environments for each agent to train on (default: ``None``) :type agents_scope: tuple or list of int, optional :param cfg: Configuration dictionary (default: ``None``). See SEQUENTIAL_TRAINER_DEFAULT_CONFIG for default values :type cfg: dict, optional """ _cfg = copy.deepcopy(SEQUENTIAL_TRAINER_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) agents_scope = agents_scope if agents_scope is not None else [] super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg) # init agents if self.num_simultaneous_agents > 1: for agent in self.agents: agent.init(trainer_cfg=self.cfg) else: self.agents.init(trainer_cfg=self.cfg) def train(self) -> None: """Train the agents sequentially This method executes the following steps in loop: - Pre-interaction (sequentially) - Compute actions (sequentially) - Interact with the environments - Render scene - Record transitions (sequentially) - Post-interaction (sequentially) - Reset environments """ # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("train") else: self.agents.set_running_mode("train") # non-simultaneous agents if self.num_simultaneous_agents == 1: # single-agent if self.env.num_agents == 1: self.single_agent_train() # multi-agent else: self.multi_agent_train() return # reset env states, infos = self.env.reset() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # pre-interaction for agent in self.agents: agent.pre_interaction(timestep=timestep, timesteps=self.timesteps) # compute actions with contextlib.nullcontext(): actions = jnp.vstack([agent.act(states[scope[0]:scope[1]], timestep=timestep, timesteps=self.timesteps)[0] \ for agent, scope in zip(self.agents, self.agents_scope)]) # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() # record the environments' transitions with contextlib.nullcontext(): for agent, scope in zip(self.agents, self.agents_scope): agent.record_transition(states=states[scope[0]:scope[1]], actions=actions[scope[0]:scope[1]], rewards=rewards[scope[0]:scope[1]], next_states=next_states[scope[0]:scope[1]], terminated=terminated[scope[0]:scope[1]], truncated=truncated[scope[0]:scope[1]], infos=infos, timestep=timestep, timesteps=self.timesteps) # post-interaction for agent 
in self.agents: agent.post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments with contextlib.nullcontext(): if terminated.any() or truncated.any(): states, infos = self.env.reset() else: states = next_states def eval(self) -> None: """Evaluate the agents sequentially This method executes the following steps in loop: - Compute actions (sequentially) - Interact with the environments - Render scene - Reset environments """ # set running mode if self.num_simultaneous_agents > 1: for agent in self.agents: agent.set_running_mode("eval") else: self.agents.set_running_mode("eval") # non-simultaneous agents if self.num_simultaneous_agents == 1: # single-agent if self.env.num_agents == 1: self.single_agent_eval() # multi-agent else: self.multi_agent_eval() return # reset env states, infos = self.env.reset() for timestep in tqdm.tqdm(range(self.initial_timestep, self.timesteps), disable=self.disable_progressbar, file=sys.stdout): # compute actions with contextlib.nullcontext(): actions = jnp.vstack([agent.act(states[scope[0]:scope[1]], timestep=timestep, timesteps=self.timesteps)[0] \ for agent, scope in zip(self.agents, self.agents_scope)]) # step the environments next_states, rewards, terminated, truncated, infos = self.env.step(actions) # render scene if not self.headless: self.env.render() with contextlib.nullcontext(): # write data to TensorBoard for agent, scope in zip(self.agents, self.agents_scope): agent.record_transition(states=states[scope[0]:scope[1]], actions=actions[scope[0]:scope[1]], rewards=rewards[scope[0]:scope[1]], next_states=next_states[scope[0]:scope[1]], terminated=terminated[scope[0]:scope[1]], truncated=truncated[scope[0]:scope[1]], infos=infos, timestep=timestep, timesteps=self.timesteps) super(type(agent), agent).post_interaction(timestep=timestep, timesteps=self.timesteps) # reset environments if terminated.any() or truncated.any(): states, infos = self.env.reset() else: states = next_states
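A short sketch (added for illustration) of the simultaneous-agent case handled by this trainer: two agents share one vectorized environment and agents_scope gives the number of sub-environments assigned to each agent; the counts must sum to env.num_envs or the base trainer raises a ValueError. env, agent_a and agent_b are placeholders.

from skrl.trainers.jax import SequentialTrainer

# two agents sharing a vectorized environment with 8 sub-environments:
# the first agent trains on environments 0-4, the second on environments 5-7
trainer = SequentialTrainer(env=env,                    # placeholder: wrapped env with num_envs == 8
                            agents=[agent_a, agent_b],  # placeholders: two configured skrl agents
                            agents_scope=[5, 3],
                            cfg={"timesteps": 20000, "headless": True})
trainer.train()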
Toni-SM/skrl/skrl/models/__init__.py
Toni-SM/skrl/skrl/models/torch/base.py
from typing import Any, Mapping, Optional, Sequence, Tuple, Union import collections import gym import gymnasium import numpy as np import torch from skrl import logger class Model(torch.nn.Module): def __init__(self, observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space], action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space], device: Optional[Union[str, torch.device]] = None) -> None: """Base class representing a function approximator The following properties are defined: - ``device`` (torch.device): Device to be used for the computations - ``observation_space`` (int, sequence of int, gym.Space, gymnasium.Space): Observation/state space - ``action_space`` (int, sequence of int, gym.Space, gymnasium.Space): Action space - ``num_observations`` (int): Number of elements in the observation/state space - ``num_actions`` (int): Number of elements in the action space :param observation_space: Observation/state space or shape. The ``num_observations`` property will contain the size of that space :type observation_space: int, sequence of int, gym.Space, gymnasium.Space :param action_space: Action space or shape. The ``num_actions`` property will contain the size of that space :type action_space: int, sequence of int, gym.Space, gymnasium.Space :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional Custom models should override the ``act`` method:: import torch from skrl.models.torch import Model class CustomModel(Model): def __init__(self, observation_space, action_space, device="cuda:0"): Model.__init__(self, observation_space, action_space, device) self.layer_1 = nn.Linear(self.num_observations, 64) self.layer_2 = nn.Linear(64, self.num_actions) def act(self, inputs, role=""): x = F.relu(self.layer_1(inputs["states"])) x = F.relu(self.layer_2(x)) return x, None, {} """ super(Model, self).__init__() self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device) self.observation_space = observation_space self.action_space = action_space self.num_observations = None if observation_space is None else self._get_space_size(observation_space) self.num_actions = None if action_space is None else self._get_space_size(action_space) self._random_distribution = None def _get_space_size(self, space: Union[int, Sequence[int], gym.Space, gymnasium.Space], number_of_elements: bool = True) -> int: """Get the size (number of elements) of a space :param space: Space or shape from which to obtain the number of elements :type space: int, sequence of int, gym.Space, or gymnasium.Space :param number_of_elements: Whether the number of elements occupied by the space is returned (default: ``True``). If ``False``, the shape of the space is returned. 
It only affects Discrete and MultiDiscrete spaces :type number_of_elements: bool, optional :raises ValueError: If the space is not supported :return: Size of the space (number of elements) :rtype: int Example:: # from int >>> model._get_space_size(2) 2 # from sequence of int >>> model._get_space_size([2, 3]) 6 # Box space >>> space = gym.spaces.Box(low=-1, high=1, shape=(2, 3)) >>> model._get_space_size(space) 6 # Discrete space >>> space = gym.spaces.Discrete(4) >>> model._get_space_size(space) 4 >>> model._get_space_size(space, number_of_elements=False) 1 # MultiDiscrete space >>> space = gym.spaces.MultiDiscrete([5, 3, 2]) >>> model._get_space_size(space) 10 >>> model._get_space_size(space, number_of_elements=False) 3 # Dict space >>> space = gym.spaces.Dict({'a': gym.spaces.Box(low=-1, high=1, shape=(2, 3)), ... 'b': gym.spaces.Discrete(4)}) >>> model._get_space_size(space) 10 >>> model._get_space_size(space, number_of_elements=False) 7 """ size = None if type(space) in [int, float]: size = space elif type(space) in [tuple, list]: size = np.prod(space) elif issubclass(type(space), gym.Space): if issubclass(type(space), gym.spaces.Discrete): if number_of_elements: size = space.n else: size = 1 elif issubclass(type(space), gym.spaces.MultiDiscrete): if number_of_elements: size = np.sum(space.nvec) else: size = space.nvec.shape[0] elif issubclass(type(space), gym.spaces.Box): size = np.prod(space.shape) elif issubclass(type(space), gym.spaces.Dict): size = sum([self._get_space_size(space.spaces[key], number_of_elements) for key in space.spaces]) elif issubclass(type(space), gymnasium.Space): if issubclass(type(space), gymnasium.spaces.Discrete): if number_of_elements: size = space.n else: size = 1 elif issubclass(type(space), gymnasium.spaces.MultiDiscrete): if number_of_elements: size = np.sum(space.nvec) else: size = space.nvec.shape[0] elif issubclass(type(space), gymnasium.spaces.Box): size = np.prod(space.shape) elif issubclass(type(space), gymnasium.spaces.Dict): size = sum([self._get_space_size(space.spaces[key], number_of_elements) for key in space.spaces]) if size is None: raise ValueError(f"Space type {type(space)} not supported") return int(size) def tensor_to_space(self, tensor: torch.Tensor, space: Union[gym.Space, gymnasium.Space], start: int = 0) -> Union[torch.Tensor, dict]: """Map a flat tensor to a Gym/Gymnasium space The mapping is done in the following way: - Tensors belonging to Discrete spaces are returned without modification - Tensors belonging to Box spaces are reshaped to the corresponding space shape keeping the first dimension (number of samples) as they are - Tensors belonging to Dict spaces are mapped into a dictionary with the same keys as the original space :param tensor: Tensor to map from :type tensor: torch.Tensor :param space: Space to map the tensor to :type space: gym.Space or gymnasium.Space :param start: Index of the first element of the tensor to map (default: ``0``) :type start: int, optional :raises ValueError: If the space is not supported :return: Mapped tensor or dictionary :rtype: torch.Tensor or dict Example:: >>> space = gym.spaces.Dict({'a': gym.spaces.Box(low=-1, high=1, shape=(2, 3)), ... 
'b': gym.spaces.Discrete(4)}) >>> tensor = torch.tensor([[-0.3, -0.2, -0.1, 0.1, 0.2, 0.3, 2]]) >>> >>> model.tensor_to_space(tensor, space) {'a': tensor([[[-0.3000, -0.2000, -0.1000], [ 0.1000, 0.2000, 0.3000]]]), 'b': tensor([[2.]])} """ if issubclass(type(space), gym.Space): if issubclass(type(space), gym.spaces.Discrete): return tensor elif issubclass(type(space), gym.spaces.Box): return tensor.view(tensor.shape[0], *space.shape) elif issubclass(type(space), gym.spaces.Dict): output = {} for k in sorted(space.keys()): end = start + self._get_space_size(space[k], number_of_elements=False) output[k] = self.tensor_to_space(tensor[:, start:end], space[k], end) start = end return output else: if issubclass(type(space), gymnasium.spaces.Discrete): return tensor elif issubclass(type(space), gymnasium.spaces.Box): return tensor.view(tensor.shape[0], *space.shape) elif issubclass(type(space), gymnasium.spaces.Dict): output = {} for k in sorted(space.keys()): end = start + self._get_space_size(space[k], number_of_elements=False) output[k] = self.tensor_to_space(tensor[:, start:end], space[k], end) start = end return output raise ValueError(f"Space type {type(space)} not supported") def random_act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, None, Mapping[str, Union[torch.Tensor, Any]]]: """Act randomly according to the action space :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :raises NotImplementedError: Unsupported action space :return: Model output. The first component is the action to be taken by the agent :rtype: tuple of torch.Tensor, None, and dict """ # discrete action space (Discrete) if issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete): return torch.randint(self.action_space.n, (inputs["states"].shape[0], 1), device=self.device), None, {} # continuous action space (Box) elif issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box): if self._random_distribution is None: self._random_distribution = torch.distributions.uniform.Uniform( low=torch.tensor(self.action_space.low[0], device=self.device, dtype=torch.float32), high=torch.tensor(self.action_space.high[0], device=self.device, dtype=torch.float32)) return self._random_distribution.sample(sample_shape=(inputs["states"].shape[0], self.num_actions)), None, {} else: raise NotImplementedError(f"Action space type ({type(self.action_space)}) not supported") def init_parameters(self, method_name: str = "normal_", *args, **kwargs) -> None: """Initialize the model parameters according to the specified method name Method names are from the `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ module. Allowed method names are *uniform_*, *normal_*, *constant_*, etc. 
:param method_name: `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ method name (default: ``"normal_"``) :type method_name: str, optional :param args: Positional arguments of the method to be called :type args: tuple, optional :param kwargs: Key-value arguments of the method to be called :type kwargs: dict, optional Example:: # initialize all parameters with an orthogonal distribution with a gain of 0.5 >>> model.init_parameters("orthogonal_", gain=0.5) # initialize all parameters as a sparse matrix with a sparsity of 0.1 >>> model.init_parameters("sparse_", sparsity=0.1) """ for parameters in self.parameters(): exec(f"torch.nn.init.{method_name}(parameters, *args, **kwargs)") def init_weights(self, method_name: str = "orthogonal_", *args, **kwargs) -> None: """Initialize the model weights according to the specified method name Method names are from the `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ module. Allowed method names are *uniform_*, *normal_*, *constant_*, etc. The following layers will be initialized: - torch.nn.Linear :param method_name: `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ method name (default: ``"orthogonal_"``) :type method_name: str, optional :param args: Positional arguments of the method to be called :type args: tuple, optional :param kwargs: Key-value arguments of the method to be called :type kwargs: dict, optional Example:: # initialize all weights with uniform distribution in range [-0.1, 0.1] >>> model.init_weights(method_name="uniform_", a=-0.1, b=0.1) # initialize all weights with normal distribution with mean 0 and standard deviation 0.25 >>> model.init_weights(method_name="normal_", mean=0.0, std=0.25) """ def _update_weights(module, method_name, args, kwargs): for layer in module: if isinstance(layer, torch.nn.Sequential): _update_weights(layer, method_name, args, kwargs) elif isinstance(layer, torch.nn.Linear): exec(f"torch.nn.init.{method_name}(layer.weight, *args, **kwargs)") _update_weights(self.children(), method_name, args, kwargs) def init_biases(self, method_name: str = "constant_", *args, **kwargs) -> None: """Initialize the model biases according to the specified method name Method names are from the `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ module. Allowed method names are *uniform_*, *normal_*, *constant_*, etc. 
The following layers will be initialized: - torch.nn.Linear :param method_name: `torch.nn.init <https://pytorch.org/docs/stable/nn.init.html>`_ method name (default: ``"constant_"``) :type method_name: str, optional :param args: Positional arguments of the method to be called :type args: tuple, optional :param kwargs: Key-value arguments of the method to be called :type kwargs: dict, optional Example:: # initialize all biases with a constant value (0) >>> model.init_biases(method_name="constant_", val=0) # initialize all biases with normal distribution with mean 0 and standard deviation 0.25 >>> model.init_biases(method_name="normal_", mean=0.0, std=0.25) """ def _update_biases(module, method_name, args, kwargs): for layer in module: if isinstance(layer, torch.nn.Sequential): _update_biases(layer, method_name, args, kwargs) elif isinstance(layer, torch.nn.Linear): exec(f"torch.nn.init.{method_name}(layer.bias, *args, **kwargs)") _update_biases(self.children(), method_name, args, kwargs) def get_specification(self) -> Mapping[str, Any]: """Returns the specification of the model The following keys are used by the agents for initialization: - ``"rnn"``: Recurrent Neural Network (RNN) specification for RNN, LSTM and GRU layers/cells - ``"sizes"``: List of RNN shapes (number of layers, number of environments, number of features in the RNN state). There must be as many tuples as there are states in the recurrent layer/cell. E.g., LSTM has 2 states (hidden and cell). :return: Dictionary containing advanced specification of the model :rtype: dict Example:: # model with a LSTM layer. # - number of layers: 1 # - number of environments: 4 # - number of features in the RNN state: 64 >>> model.get_specification() {'rnn': {'sizes': [(1, 4, 64), (1, 4, 64)]}} """ return {} def forward(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Forward pass of the model This method calls the ``.act()`` method and returns its outputs :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function for stochastic models or None for deterministic models. The third component is a dictionary containing extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dict """ return self.act(inputs, role) def compute(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[Union[torch.Tensor, Mapping[str, Union[torch.Tensor, Any]]]]: """Define the computation performed (to be implemented by the inheriting classes) by the models :param inputs: Model inputs. 
The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :raises NotImplementedError: Child class must implement this method :return: Computation performed by the models :rtype: tuple of torch.Tensor and dict """ raise NotImplementedError("The computation performed by the models (.compute()) is not implemented") def act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Act according to the specified behavior (to be implemented by the inheriting classes) Agents will call this method to obtain the decision to be taken given the state of the environment. This method is currently implemented by the helper models (**GaussianModel**, etc.). The classes that inherit from the latter must only implement the ``.compute()`` method :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :raises NotImplementedError: Child class must implement this method :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function for stochastic models or None for deterministic models. The third component is a dictionary containing extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dict """ logger.warning("Make sure to place Mixins before Model during model definition") raise NotImplementedError("The action to be taken by the agent (.act()) is not implemented") def set_mode(self, mode: str) -> None: """Set the model mode (training or evaluation) :param mode: Mode: ``"train"`` for training or ``"eval"`` for evaluation. See `torch.nn.Module.train <https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.train>`_ :type mode: str :raises ValueError: If the mode is not ``"train"`` or ``"eval"`` """ if mode == "train": self.train(True) elif mode == "eval": self.train(False) else: raise ValueError("Invalid mode. Use 'train' for training or 'eval' for evaluation") def save(self, path: str, state_dict: Optional[dict] = None) -> None: """Save the model to the specified path :param path: Path to save the model to :type path: str :param state_dict: State dictionary to save (default: ``None``). If None, the model's state_dict will be saved :type state_dict: dict, optional Example:: # save the current model to the specified path >>> model.save("/tmp/model.pt") # save an older version of the model to the specified path >>> old_state_dict = copy.deepcopy(model.state_dict()) >>> # ... 
>>> model.save("/tmp/model.pt", old_state_dict) """ torch.save(self.state_dict() if state_dict is None else state_dict, path) def load(self, path: str) -> None: """Load the model from the specified path The final storage device is determined by the constructor of the model :param path: Path to load the model from :type path: str Example:: # load the model onto the CPU >>> model = Model(observation_space, action_space, device="cpu") >>> model.load("model.pt") # load the model onto the GPU 1 >>> model = Model(observation_space, action_space, device="cuda:1") >>> model.load("model.pt") """ self.load_state_dict(torch.load(path, map_location=self.device)) self.eval() def migrate(self, state_dict: Optional[Mapping[str, torch.Tensor]] = None, path: Optional[str] = None, name_map: Mapping[str, str] = {}, auto_mapping: bool = True, verbose: bool = False) -> bool: """Migrate the specified extrernal model's state dict to the current model The final storage device is determined by the constructor of the model Only one of ``state_dict`` or ``path`` can be specified. The ``path`` parameter allows automatic loading the ``state_dict`` only from files generated by the *rl_games* and *stable-baselines3* libraries at the moment For ambiguous models (where 2 or more parameters, for source or current model, have equal shape) it is necessary to define the ``name_map``, at least for those parameters, to perform the migration successfully :param state_dict: External model's state dict to migrate from (default: ``None``) :type state_dict: Mapping[str, torch.Tensor], optional :param path: Path to the external checkpoint to migrate from (default: ``None``) :type path: str, optional :param name_map: Name map to use for the migration (default: ``{}``). Keys are the current parameter names and values are the external parameter names :type name_map: Mapping[str, str], optional :param auto_mapping: Automatically map the external state dict to the current state dict (default: ``True``) :type auto_mapping: bool, optional :param verbose: Show model names and migration (default: ``False``) :type verbose: bool, optional :raises ValueError: If neither or both of ``state_dict`` and ``path`` parameters have been set :raises ValueError: If the correct file type cannot be identified from the ``path`` parameter :return: True if the migration was successful, False otherwise. Migration is successful if all parameters of the current model are found in the external model :rtype: bool Example:: # migrate a rl_games checkpoint with unambiguous state_dict >>> model.migrate(path="./runs/Ant/nn/Ant.pth") True # migrate a rl_games checkpoint with ambiguous state_dict >>> model.migrate(path="./runs/Cartpole/nn/Cartpole.pth", verbose=False) [skrl:WARNING] Ambiguous match for log_std_parameter <- [value_mean_std.running_mean, value_mean_std.running_var, a2c_network.sigma] [skrl:WARNING] Ambiguous match for net.0.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.2.bias <- [a2c_network.actor_mlp.0.bias, a2c_network.actor_mlp.2.bias] [skrl:WARNING] Ambiguous match for net.4.weight <- [a2c_network.value.weight, a2c_network.mu.weight] [skrl:WARNING] Ambiguous match for net.4.bias <- [a2c_network.value.bias, a2c_network.mu.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.0.bias -> [net.0.bias, net.2.bias] [skrl:WARNING] Multiple use of a2c_network.actor_mlp.2.bias -> [net.0.bias, net.2.bias] False >>> name_map = {"log_std_parameter": "a2c_network.sigma", ... 
"net.0.bias": "a2c_network.actor_mlp.0.bias", ... "net.2.bias": "a2c_network.actor_mlp.2.bias", ... "net.4.weight": "a2c_network.mu.weight", ... "net.4.bias": "a2c_network.mu.bias"} >>> model.migrate(path="./runs/Cartpole/nn/Cartpole.pth", name_map=name_map, verbose=True) [skrl:INFO] Models [skrl:INFO] |-- current: 7 items [skrl:INFO] | |-- log_std_parameter : torch.Size([1]) [skrl:INFO] | |-- net.0.weight : torch.Size([32, 4]) [skrl:INFO] | |-- net.0.bias : torch.Size([32]) [skrl:INFO] | |-- net.2.weight : torch.Size([32, 32]) [skrl:INFO] | |-- net.2.bias : torch.Size([32]) [skrl:INFO] | |-- net.4.weight : torch.Size([1, 32]) [skrl:INFO] | |-- net.4.bias : torch.Size([1]) [skrl:INFO] |-- source: 15 items [skrl:INFO] | |-- value_mean_std.running_mean : torch.Size([1]) [skrl:INFO] | |-- value_mean_std.running_var : torch.Size([1]) [skrl:INFO] | |-- value_mean_std.count : torch.Size([]) [skrl:INFO] | |-- running_mean_std.running_mean : torch.Size([4]) [skrl:INFO] | |-- running_mean_std.running_var : torch.Size([4]) [skrl:INFO] | |-- running_mean_std.count : torch.Size([]) [skrl:INFO] | |-- a2c_network.sigma : torch.Size([1]) [skrl:INFO] | |-- a2c_network.actor_mlp.0.weight : torch.Size([32, 4]) [skrl:INFO] | |-- a2c_network.actor_mlp.0.bias : torch.Size([32]) [skrl:INFO] | |-- a2c_network.actor_mlp.2.weight : torch.Size([32, 32]) [skrl:INFO] | |-- a2c_network.actor_mlp.2.bias : torch.Size([32]) [skrl:INFO] | |-- a2c_network.value.weight : torch.Size([1, 32]) [skrl:INFO] | |-- a2c_network.value.bias : torch.Size([1]) [skrl:INFO] | |-- a2c_network.mu.weight : torch.Size([1, 32]) [skrl:INFO] | |-- a2c_network.mu.bias : torch.Size([1]) [skrl:INFO] Migration [skrl:INFO] |-- map: log_std_parameter <- a2c_network.sigma [skrl:INFO] |-- auto: net.0.weight <- a2c_network.actor_mlp.0.weight [skrl:INFO] |-- map: net.0.bias <- a2c_network.actor_mlp.0.bias [skrl:INFO] |-- auto: net.2.weight <- a2c_network.actor_mlp.2.weight [skrl:INFO] |-- map: net.2.bias <- a2c_network.actor_mlp.2.bias [skrl:INFO] |-- map: net.4.weight <- a2c_network.mu.weight [skrl:INFO] |-- map: net.4.bias <- a2c_network.mu.bias False # migrate a stable-baselines3 checkpoint with unambiguous state_dict >>> model.migrate(path="./ddpg_pendulum.zip") True # migrate from any exported model by loading its state_dict (unambiguous state_dict) >>> state_dict = torch.load("./external_model.pt") >>> model.migrate(state_dict=state_dict) True """ if (state_dict is not None) + (path is not None) != 1: raise ValueError("Exactly one of state_dict or path may be specified") # load state_dict from path if path is not None: state_dict = {} # rl_games checkpoint if path.endswith(".pt") or path.endswith(".pth"): checkpoint = torch.load(path, map_location=self.device) if type(checkpoint) is dict: state_dict = checkpoint.get("model", {}) # stable-baselines3 elif path.endswith(".zip"): import zipfile try: archive = zipfile.ZipFile(path, 'r') with archive.open('policy.pth', mode="r") as file: state_dict = torch.load(file, map_location=self.device) except KeyError as e: logger.warning(str(e)) state_dict = {} else: raise ValueError("Cannot identify file type") # show state_dict if verbose: logger.info("Models") logger.info(f" |-- current: {len(self.state_dict().keys())} items") for name, tensor in self.state_dict().items(): logger.info(f" | |-- {name} : {list(tensor.shape)}") logger.info(f" |-- source: {len(state_dict.keys())} items") for name, tensor in state_dict.items(): logger.info(f" | |-- {name} : {list(tensor.shape)}") logger.info("Migration") # migrate 
the state_dict to current model new_state_dict = collections.OrderedDict() match_counter = collections.defaultdict(list) used_counter = collections.defaultdict(list) for name, tensor in self.state_dict().items(): for external_name, external_tensor in state_dict.items(): # mapped names if name_map.get(name, "") == external_name: if tensor.shape == external_tensor.shape: new_state_dict[name] = external_tensor match_counter[name].append(external_name) used_counter[external_name].append(name) if verbose: logger.info(f" |-- map: {name} <- {external_name}") break else: logger.warning(f"Shape mismatch for {name} <- {external_name} : {tensor.shape} != {external_tensor.shape}") # auto-mapped names if auto_mapping and name not in name_map: if tensor.shape == external_tensor.shape: if name.endswith(".weight"): if external_name.endswith(".weight"): new_state_dict[name] = external_tensor match_counter[name].append(external_name) used_counter[external_name].append(name) if verbose: logger.info(f" |-- auto: {name} <- {external_name}") elif name.endswith(".bias"): if external_name.endswith(".bias"): new_state_dict[name] = external_tensor match_counter[name].append(external_name) used_counter[external_name].append(name) if verbose: logger.info(f" |-- auto: {name} <- {external_name}") else: if not external_name.endswith(".weight") and not external_name.endswith(".bias"): new_state_dict[name] = external_tensor match_counter[name].append(external_name) used_counter[external_name].append(name) if verbose: logger.info(f" |-- auto: {name} <- {external_name}") # show ambiguous matches status = True for name, tensor in self.state_dict().items(): if len(match_counter.get(name, [])) > 1: logger.warning("Ambiguous match for {} <- [{}]".format(name, ", ".join(match_counter.get(name, [])))) status = False # show missing matches for name, tensor in self.state_dict().items(): if not match_counter.get(name, []): logger.warning(f"Missing match for {name}") status = False # show multiple uses for name, tensor in state_dict.items(): if len(used_counter.get(name, [])) > 1: logger.warning("Multiple use of {} -> [{}]".format(name, ", ".join(used_counter.get(name, [])))) status = False # load new state dict self.load_state_dict(new_state_dict, strict=False) self.eval() return status def freeze_parameters(self, freeze: bool = True) -> None: """Freeze or unfreeze internal parameters - Freeze: disable gradient computation (``parameters.requires_grad = False``) - Unfreeze: enable gradient computation (``parameters.requires_grad = True``) :param freeze: Freeze the internal parameters if True, otherwise unfreeze them (default: ``True``) :type freeze: bool, optional Example:: # freeze model parameters >>> model.freeze_parameters(True) # unfreeze model parameters >>> model.freeze_parameters(False) """ for parameters in self.parameters(): parameters.requires_grad = not freeze def update_parameters(self, model: torch.nn.Module, polyak: float = 1) -> None: """Update internal parameters by hard or soft (polyak averaging) update - Hard update: :math:`\\theta = \\theta_{net}` - Soft (polyak averaging) update: :math:`\\theta = (1 - \\rho) \\theta + \\rho \\theta_{net}` :param model: Model used to update the internal parameters :type model: torch.nn.Module (skrl.models.torch.Model) :param polyak: Polyak hyperparameter between 0 and 1 (default: ``1``). 
A hard update is performed when its value is 1 :type polyak: float, optional Example:: # hard update (from source model) >>> model.update_parameters(source_model) # soft update (from source model) >>> model.update_parameters(source_model, polyak=0.005) """ with torch.no_grad(): # hard update if polyak == 1: for parameters, model_parameters in zip(self.parameters(), model.parameters()): parameters.data.copy_(model_parameters.data) # soft update (use in-place operations to avoid creating new parameters) else: for parameters, model_parameters in zip(self.parameters(), model.parameters()): parameters.data.mul_(1 - polyak) parameters.data.add_(polyak * model_parameters.data)
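A minimal sketch (not part of the source) tying the pieces above together: a concrete model implements only compute(), here assuming skrl's DeterministicMixin helper is available to provide act(), and the target-network pattern is built from freeze_parameters() and update_parameters() — a hard copy at initialization followed by soft (polyak) updates during training. Spaces are passed as plain integers for brevity.

import torch
import torch.nn as nn

from skrl.models.torch import DeterministicMixin, Model


class QNetwork(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device=None):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self)
        self.net = nn.Sequential(nn.Linear(self.num_observations, 64),
                                 nn.ReLU(),
                                 nn.Linear(64, self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


q_network = QNetwork(observation_space=4, action_space=2, device="cpu")
target_q_network = QNetwork(observation_space=4, action_space=2, device="cpu")

target_q_network.freeze_parameters(True)                     # exclude the target from optimization
target_q_network.update_parameters(q_network)                # polyak=1 -> hard update
target_q_network.update_parameters(q_network, polyak=0.005)  # soft update, typically once per step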
Toni-SM/skrl/skrl/models/torch/tabular.py
from typing import Any, Mapping, Optional, Sequence, Tuple, Union import torch from skrl.models.torch import Model class TabularMixin: def __init__(self, num_envs: int = 1, role: str = "") -> None: """Tabular mixin model :param num_envs: Number of environments (default: 1) :type num_envs: int, optional :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: # define the model >>> import torch >>> from skrl.models.torch import Model, TabularMixin >>> >>> class GreedyPolicy(TabularMixin, Model): ... def __init__(self, observation_space, action_space, device="cuda:0", num_envs=1): ... Model.__init__(self, observation_space, action_space, device) ... TabularMixin.__init__(self, num_envs) ... ... self.table = torch.ones((num_envs, self.num_observations, self.num_actions), ... dtype=torch.float32, device=self.device) ... ... def compute(self, inputs, role): ... actions = torch.argmax(self.table[torch.arange(self.num_envs).view(-1, 1), inputs["states"]], ... dim=-1, keepdim=True).view(-1,1) ... return actions, {} ... >>> # given an observation_space: gym.spaces.Discrete with n=100 >>> # and an action_space: gym.spaces.Discrete with n=5 >>> model = GreedyPolicy(observation_space, action_space, num_envs=1) >>> >>> print(model) GreedyPolicy( (table): Tensor(shape=[1, 100, 5]) ) """ self.num_envs = num_envs def __repr__(self) -> str: """String representation of an object as torch.nn.Module """ lines = [] for name in self._get_tensor_names(): tensor = getattr(self, name) lines.append(f"({name}): {tensor.__class__.__name__}(shape={list(tensor.shape)})") main_str = self.__class__.__name__ + '(' if lines: main_str += "\n {}\n".format("\n ".join(lines)) main_str += ')' return main_str def _get_tensor_names(self) -> Sequence[str]: """Get the names of the tensors that the model is using :return: Tensor names :rtype: sequence of str """ tensors = [] for attr in dir(self): if not attr.startswith("__") and issubclass(type(getattr(self, attr)), torch.Tensor): tensors.append(attr) return sorted(tensors) def act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Act in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Model output. The first component is the action to be taken by the agent. The second component is ``None``. 
The third component is a dictionary containing extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dict Example:: >>> # given a batch of sample states with shape (1, 100) >>> actions, _, outputs = model.act({"states": states}) >>> print(actions[0], outputs) tensor([[3]], device='cuda:0') {} """ actions, outputs = self.compute(inputs, role) return actions, None, outputs def table(self) -> torch.Tensor: """Return the Q-table :return: Q-table :rtype: torch.Tensor Example:: >>> output = model.table() >>> print(output.shape) torch.Size([1, 100, 5]) """ return self.q_table def to(self, *args, **kwargs) -> Model: """Move the model to a different device :param args: Arguments to pass to the method :type args: tuple :param kwargs: Keyword arguments to pass to the method :type kwargs: dict :return: Model moved to the specified device :rtype: Model """ Model.to(self, *args, **kwargs) for name in self._get_tensor_names(): setattr(self, name, getattr(self, name).to(*args, **kwargs)) return self def state_dict(self, *args, **kwargs) -> Mapping: """Returns a dictionary containing a whole state of the module :return: A dictionary containing a whole state of the module :rtype: dict """ _state_dict = {name: getattr(self, name) for name in self._get_tensor_names()} Model.state_dict(self, destination=_state_dict) return _state_dict def load_state_dict(self, state_dict: Mapping, strict: bool = True) -> None: """Copies parameters and buffers from state_dict into this module and its descendants :param state_dict: A dict containing parameters and persistent buffers :type state_dict: dict :param strict: Whether to strictly enforce that the keys in state_dict match the keys returned by this module's state_dict() function (default: ``True``) :type strict: bool, optional """ Model.load_state_dict(self, state_dict, strict=False) for name, tensor in state_dict.items(): if hasattr(self, name) and isinstance(getattr(self, name), torch.Tensor): _tensor = getattr(self, name) if isinstance(_tensor, torch.Tensor): if _tensor.shape == tensor.shape and _tensor.dtype == tensor.dtype: setattr(self, name, tensor) else: raise ValueError(f"Tensor shape ({_tensor.shape} vs {tensor.shape}) or dtype ({_tensor.dtype} vs {tensor.dtype}) mismatch") else: raise ValueError(f"{name} is not a tensor of {self.__class__.__name__}") def save(self, path: str, state_dict: Optional[dict] = None) -> None: """Save the model to the specified path :param path: Path to save the model to :type path: str :param state_dict: State dictionary to save (default: ``None``). 
If None, the model's state_dict will be saved :type state_dict: dict, optional Example:: # save the current model to the specified path >>> model.save("/tmp/model.pt") """ # TODO: save state_dict torch.save({name: getattr(self, name) for name in self._get_tensor_names()}, path) def load(self, path: str) -> None: """Load the model from the specified path The final storage device is determined by the constructor of the model :param path: Path to load the model from :type path: str :raises ValueError: If the models are not compatible Example:: # load the model onto the CPU >>> model = Model(observation_space, action_space, device="cpu") >>> model.load("model.pt") # load the model onto the GPU 1 >>> model = Model(observation_space, action_space, device="cuda:1") >>> model.load("model.pt") """ tensors = torch.load(path) for name, tensor in tensors.items(): if hasattr(self, name) and isinstance(getattr(self, name), torch.Tensor): _tensor = getattr(self, name) if isinstance(_tensor, torch.Tensor): if _tensor.shape == tensor.shape and _tensor.dtype == tensor.dtype: setattr(self, name, tensor) else: raise ValueError(f"Tensor shape ({_tensor.shape} vs {tensor.shape}) or dtype ({_tensor.dtype} vs {tensor.dtype}) mismatch") else: raise ValueError(f"{name} is not a tensor of {self.__class__.__name__}")
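A short usage sketch for the tabular model above; GreedyPolicy mirrors the docstring example, and the state batch, space sizes and file path are illustrative assumptions:

import gym
import torch

from skrl.models.torch import Model, TabularMixin


class GreedyPolicy(TabularMixin, Model):
    def __init__(self, observation_space, action_space, device="cpu", num_envs=1):
        Model.__init__(self, observation_space, action_space, device)
        TabularMixin.__init__(self, num_envs)
        # one Q-value per (environment, observation, action)
        self.table = torch.ones((num_envs, self.num_observations, self.num_actions),
                                dtype=torch.float32, device=self.device)

    def compute(self, inputs, role):
        actions = torch.argmax(self.table[torch.arange(self.num_envs).view(-1, 1), inputs["states"]],
                               dim=-1, keepdim=True).view(-1, 1)
        return actions, {}


model = GreedyPolicy(gym.spaces.Discrete(100), gym.spaces.Discrete(5))

# greedy action for a batch with one (integer) state per environment
states = torch.tensor([[7]])
actions, _, _ = model.act({"states": states})

# save()/load() serialize the tracked tensors (here: "table")
model.save("/tmp/greedy_policy.pt")
model.load("/tmp/greedy_policy.pt")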
Toni-SM/skrl/skrl/models/torch/__init__.py
from skrl.models.torch.base import Model  # isort:skip

from skrl.models.torch.categorical import CategoricalMixin
from skrl.models.torch.deterministic import DeterministicMixin
from skrl.models.torch.gaussian import GaussianMixin
from skrl.models.torch.multicategorical import MultiCategoricalMixin
from skrl.models.torch.multivariate_gaussian import MultivariateGaussianMixin
from skrl.models.torch.tabular import TabularMixin
Toni-SM/skrl/skrl/models/torch/deterministic.py
from typing import Any, Mapping, Tuple, Union import gym import gymnasium import torch class DeterministicMixin: def __init__(self, clip_actions: bool = False, role: str = "") -> None: """Deterministic mixin model (deterministic model) :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``) :type clip_actions: bool, optional :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: # define the model >>> import torch >>> import torch.nn as nn >>> from skrl.models.torch import Model, DeterministicMixin >>> >>> class Value(DeterministicMixin, Model): ... def __init__(self, observation_space, action_space, device="cuda:0", clip_actions=False): ... Model.__init__(self, observation_space, action_space, device) ... DeterministicMixin.__init__(self, clip_actions) ... ... self.net = nn.Sequential(nn.Linear(self.num_observations, 32), ... nn.ELU(), ... nn.Linear(32, 32), ... nn.ELU(), ... nn.Linear(32, 1)) ... ... def compute(self, inputs, role): ... return self.net(inputs["states"]), {} ... >>> # given an observation_space: gym.spaces.Box with shape (60,) >>> # and an action_space: gym.spaces.Box with shape (8,) >>> model = Value(observation_space, action_space) >>> >>> print(model) Value( (net): Sequential( (0): Linear(in_features=60, out_features=32, bias=True) (1): ELU(alpha=1.0) (2): Linear(in_features=32, out_features=32, bias=True) (3): ELU(alpha=1.0) (4): Linear(in_features=32, out_features=1, bias=True) ) ) """ self._clip_actions = clip_actions and (issubclass(type(self.action_space), gym.Space) or \ issubclass(type(self.action_space), gymnasium.Space)) if self._clip_actions: self._clip_actions_min = torch.tensor(self.action_space.low, device=self.device, dtype=torch.float32) self._clip_actions_max = torch.tensor(self.action_space.high, device=self.device, dtype=torch.float32) def act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Act deterministically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Model output. The first component is the action to be taken by the agent. The second component is ``None``. The third component is a dictionary containing extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 60) >>> actions, _, outputs = model.act({"states": states}) >>> print(actions.shape, outputs) torch.Size([4096, 1]) {} """ # map from observations/states to actions actions, outputs = self.compute(inputs, role) # clip actions if self._clip_actions: actions = torch.clamp(actions, min=self._clip_actions_min, max=self._clip_actions_max) return actions, None, outputs
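A brief sketch of the action-clipping path implemented above, assuming a Box action space; the DeterministicActor class, its deliberate output scaling and the space shapes are illustrative only:

import gym
import torch
import torch.nn as nn

from skrl.models.torch import Model, DeterministicMixin


class DeterministicActor(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device="cpu", clip_actions=True):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)
        self.net = nn.Linear(self.num_observations, self.num_actions)

    def compute(self, inputs, role):
        # scale the output on purpose so that clipping to the action space is visible
        return 10 * self.net(inputs["states"]), {}


observation_space = gym.spaces.Box(low=-1, high=1, shape=(3,))
action_space = gym.spaces.Box(low=-1, high=1, shape=(2,))
model = DeterministicActor(observation_space, action_space)

actions, _, _ = model.act({"states": torch.randn(8, 3)})
print(actions.min().item(), actions.max().item())  # both within the Box bounds [-1, 1]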
Toni-SM/skrl/skrl/models/torch/gaussian.py
from typing import Any, Mapping, Tuple, Union import gym import gymnasium import torch from torch.distributions import Normal class GaussianMixin: def __init__(self, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, reduction: str = "sum", role: str = "") -> None: """Gaussian mixin model (stochastic model) :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: ``True``) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation if ``clip_log_std`` is True (default: ``-20``) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation if ``clip_log_std`` is True (default: ``2``) :type max_log_std: float, optional :param reduction: Reduction method for returning the log probability density function: (default: ``"sum"``). Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If "``none"``, the log probability density function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)`` :type reduction: str, optional :param role: Role play by the model (default: ``""``) :type role: str, optional :raises ValueError: If the reduction method is not valid Example:: # define the model >>> import torch >>> import torch.nn as nn >>> from skrl.models.torch import Model, GaussianMixin >>> >>> class Policy(GaussianMixin, Model): ... def __init__(self, observation_space, action_space, device="cuda:0", ... clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): ... Model.__init__(self, observation_space, action_space, device) ... GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) ... ... self.net = nn.Sequential(nn.Linear(self.num_observations, 32), ... nn.ELU(), ... nn.Linear(32, 32), ... nn.ELU(), ... nn.Linear(32, self.num_actions)) ... self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) ... ... def compute(self, inputs, role): ... return self.net(inputs["states"]), self.log_std_parameter, {} ... 
>>> # given an observation_space: gym.spaces.Box with shape (60,) >>> # and an action_space: gym.spaces.Box with shape (8,) >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( (net): Sequential( (0): Linear(in_features=60, out_features=32, bias=True) (1): ELU(alpha=1.0) (2): Linear(in_features=32, out_features=32, bias=True) (3): ELU(alpha=1.0) (4): Linear(in_features=32, out_features=8, bias=True) ) ) """ self._clip_actions = clip_actions and (issubclass(type(self.action_space), gym.Space) or \ issubclass(type(self.action_space), gymnasium.Space)) if self._clip_actions: self._clip_actions_min = torch.tensor(self.action_space.low, device=self.device, dtype=torch.float32) self._clip_actions_max = torch.tensor(self.action_space.high, device=self.device, dtype=torch.float32) self._clip_log_std = clip_log_std self._log_std_min = min_log_std self._log_std_max = max_log_std self._log_std = None self._num_samples = None self._distribution = None if reduction not in ["mean", "sum", "prod", "none"]: raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'") self._reduction = torch.mean if reduction == "mean" else torch.sum if reduction == "sum" \ else torch.prod if reduction == "prod" else None def act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. 
The third component is a dictionary containing the mean actions ``"mean_actions"`` and extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 60) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["mean_actions"].shape) torch.Size([4096, 8]) torch.Size([4096, 1]) torch.Size([4096, 8]) """ # map from states/observations to mean actions and log standard deviations mean_actions, log_std, outputs = self.compute(inputs, role) # clamp log standard deviations if self._clip_log_std: log_std = torch.clamp(log_std, self._log_std_min, self._log_std_max) self._log_std = log_std self._num_samples = mean_actions.shape[0] # distribution self._distribution = Normal(mean_actions, log_std.exp()) # sample using the reparameterization trick actions = self._distribution.rsample() # clip actions if self._clip_actions: actions = torch.clamp(actions, min=self._clip_actions_min, max=self._clip_actions_max) # log of the probability density function log_prob = self._distribution.log_prob(inputs.get("taken_actions", actions)) if self._reduction is not None: log_prob = self._reduction(log_prob, dim=-1) if log_prob.dim() != actions.dim(): log_prob = log_prob.unsqueeze(-1) outputs["mean_actions"] = mean_actions return actions, log_prob, outputs def get_entropy(self, role: str = "") -> torch.Tensor: """Compute and return the entropy of the model :return: Entropy of the model :rtype: torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> entropy = model.get_entropy() >>> print(entropy.shape) torch.Size([4096, 8]) """ if self._distribution is None: return torch.tensor(0.0, device=self.device) return self._distribution.entropy().to(self.device) def get_log_std(self, role: str = "") -> torch.Tensor: """Return the log standard deviation of the model :return: Log standard deviation of the model :rtype: torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> log_std = model.get_log_std() >>> print(log_std.shape) torch.Size([4096, 8]) """ return self._log_std.repeat(self._num_samples, 1) def distribution(self, role: str = "") -> torch.distributions.Normal: """Get the current distribution of the model :return: Distribution of the model :rtype: torch.distributions.Normal :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> distribution = model.distribution() >>> print(distribution) Normal(loc: torch.Size([4096, 8]), scale: torch.Size([4096, 8])) """ return self._distribution
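A small sketch of how the log-probability reduction documented above behaves, and how previously taken actions can be re-evaluated through the "taken_actions" input; the Policy class and space shapes are illustrative assumptions:

import gym
import torch
import torch.nn as nn

from skrl.models.torch import Model, GaussianMixin


class Policy(GaussianMixin, Model):
    def __init__(self, observation_space, action_space, device="cpu", reduction="sum"):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, reduction=reduction)
        self.net = nn.Linear(self.num_observations, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), self.log_std_parameter, {}


observation_space = gym.spaces.Box(low=-1, high=1, shape=(3,))
action_space = gym.spaces.Box(low=-1, high=1, shape=(2,))
states = torch.randn(16, 3)

# reduction="sum" (default): one log-probability per sample -> shape (16, 1)
policy = Policy(observation_space, action_space)
actions, log_prob, _ = policy.act({"states": states})

# re-evaluate the same actions (e.g. to build an importance-sampling ratio)
_, new_log_prob, _ = policy.act({"states": states, "taken_actions": actions})

# reduction="none": per-dimension log-probabilities -> shape (16, 2)
policy_none = Policy(observation_space, action_space, reduction="none")
_, log_prob_none, _ = policy_none.act({"states": states})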
Toni-SM/skrl/skrl/models/torch/multicategorical.py
from typing import Any, Mapping, Sequence, Tuple, Union import torch from torch.distributions import Categorical class MultiCategoricalMixin: def __init__(self, unnormalized_log_prob: bool = True, reduction: str = "sum", role: str = "") -> None: """MultiCategorical mixin model (stochastic model) :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: ``True``). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param reduction: Reduction method for returning the log probability density function: (default: ``"sum"``). Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If "``none"``, the log probability density function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)`` :type reduction: str, optional :param role: Role play by the model (default: ``""``) :type role: str, optional :raises ValueError: If the reduction method is not valid Example:: # define the model >>> import torch >>> import torch.nn as nn >>> from skrl.models.torch import Model, MultiCategoricalMixin >>> >>> class Policy(MultiCategoricalMixin, Model): ... def __init__(self, observation_space, action_space, device="cuda:0", unnormalized_log_prob=True, reduction="sum"): ... Model.__init__(self, observation_space, action_space, device) ... MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction) ... ... self.net = nn.Sequential(nn.Linear(self.num_observations, 32), ... nn.ELU(), ... nn.Linear(32, 32), ... nn.ELU(), ... nn.Linear(32, self.num_actions)) ... ... def compute(self, inputs, role): ... return self.net(inputs["states"]), {} ... >>> # given an observation_space: gym.spaces.Box with shape (4,) >>> # and an action_space: gym.spaces.MultiDiscrete with nvec = [3, 2] >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( (net): Sequential( (0): Linear(in_features=4, out_features=32, bias=True) (1): ELU(alpha=1.0) (2): Linear(in_features=32, out_features=32, bias=True) (3): ELU(alpha=1.0) (4): Linear(in_features=32, out_features=5, bias=True) ) ) """ self._unnormalized_log_prob = unnormalized_log_prob self._distributions = [] if reduction not in ["mean", "sum", "prod", "none"]: raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'") self._reduction = torch.mean if reduction == "mean" else torch.sum if reduction == "sum" \ else torch.prod if reduction == "prod" else None def act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. 
The third component is a dictionary containing the network output ``"net_output"`` and extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 4) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["net_output"].shape) torch.Size([4096, 2]) torch.Size([4096, 1]) torch.Size([4096, 5]) """ # map from states/observations to normalized probabilities or unnormalized log probabilities net_output, outputs = self.compute(inputs, role) # unnormalized log probabilities if self._unnormalized_log_prob: self._distributions = [Categorical(logits=logits) for logits in torch.split(net_output, self.action_space.nvec.tolist(), dim=-1)] # normalized probabilities else: self._distributions = [Categorical(probs=probs) for probs in torch.split(net_output, self.action_space.nvec.tolist(), dim=-1)] # actions actions = torch.stack([distribution.sample() for distribution in self._distributions], dim=-1) # log of the probability density function log_prob = torch.stack([distribution.log_prob(_actions.view(-1)) for _actions, distribution \ in zip(torch.unbind(inputs.get("taken_actions", actions), dim=-1), self._distributions)], dim=-1) if self._reduction is not None: log_prob = self._reduction(log_prob, dim=-1) if log_prob.dim() != actions.dim(): log_prob = log_prob.unsqueeze(-1) outputs["net_output"] = net_output return actions, log_prob, outputs def get_entropy(self, role: str = "") -> torch.Tensor: """Compute and return the entropy of the model :return: Entropy of the model :rtype: torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> entropy = model.get_entropy() >>> print(entropy.shape) torch.Size([4096, 1]) """ if self._distributions: entropy = torch.stack([distribution.entropy().to(self.device) for distribution in self._distributions], dim=-1) if self._reduction is not None: return self._reduction(entropy, dim=-1).unsqueeze(-1) return entropy return torch.tensor(0.0, device=self.device) def distribution(self, role: str = "") -> torch.distributions.Categorical: """Get the current distribution of the model :return: First distributions of the model :rtype: torch.distributions.Categorical :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> distribution = model.distribution() >>> print(distribution) Categorical(probs: torch.Size([10, 3]), logits: torch.Size([10, 3])) """ # TODO: find a way to integrate in the class the distribution functions (e.g.: stddev) return self._distributions[0]
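A quick sketch of the MultiDiscrete flow above: the network emits sum(nvec) logits, which are split into one Categorical distribution per sub-action; the Policy class and spaces below are illustrative:

import gym
import torch
import torch.nn as nn

from skrl.models.torch import Model, MultiCategoricalMixin


class Policy(MultiCategoricalMixin, Model):
    def __init__(self, observation_space, action_space, device="cpu"):
        Model.__init__(self, observation_space, action_space, device)
        MultiCategoricalMixin.__init__(self, unnormalized_log_prob=True)
        # self.num_actions == sum(nvec) == 5 for the space below
        self.net = nn.Linear(self.num_observations, self.num_actions)

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


observation_space = gym.spaces.Box(low=-1, high=1, shape=(4,))
action_space = gym.spaces.MultiDiscrete([3, 2])
model = Policy(observation_space, action_space)

actions, log_prob, outputs = model.act({"states": torch.randn(10, 4)})
# actions: (10, 2) with one integer per sub-action; log_prob: (10, 1); outputs["net_output"]: (10, 5)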
Toni-SM/skrl/skrl/models/torch/multivariate_gaussian.py
from typing import Any, Mapping, Tuple, Union import gym import gymnasium import torch from torch.distributions import MultivariateNormal class MultivariateGaussianMixin: def __init__(self, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, role: str = "") -> None: """Multivariate Gaussian mixin model (stochastic model) :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: ``True``) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation if ``clip_log_std`` is True (default: ``-20``) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation if ``clip_log_std`` is True (default: ``2``) :type max_log_std: float, optional :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: # define the model >>> import torch >>> import torch.nn as nn >>> from skrl.models.torch import Model, MultivariateGaussianMixin >>> >>> class Policy(MultivariateGaussianMixin, Model): ... def __init__(self, observation_space, action_space, device="cuda:0", ... clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2): ... Model.__init__(self, observation_space, action_space, device) ... MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) ... ... self.net = nn.Sequential(nn.Linear(self.num_observations, 32), ... nn.ELU(), ... nn.Linear(32, 32), ... nn.ELU(), ... nn.Linear(32, self.num_actions)) ... self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) ... ... def compute(self, inputs, role): ... return self.net(inputs["states"]), self.log_std_parameter, {} ... >>> # given an observation_space: gym.spaces.Box with shape (60,) >>> # and an action_space: gym.spaces.Box with shape (8,) >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( (net): Sequential( (0): Linear(in_features=60, out_features=32, bias=True) (1): ELU(alpha=1.0) (2): Linear(in_features=32, out_features=32, bias=True) (3): ELU(alpha=1.0) (4): Linear(in_features=32, out_features=8, bias=True) ) ) """ self._clip_actions = clip_actions and (issubclass(type(self.action_space), gym.Space) or \ issubclass(type(self.action_space), gymnasium.Space)) if self._clip_actions: self._clip_actions_min = torch.tensor(self.action_space.low, device=self.device, dtype=torch.float32) self._clip_actions_max = torch.tensor(self.action_space.high, device=self.device, dtype=torch.float32) self._clip_log_std = clip_log_std self._log_std_min = min_log_std self._log_std_max = max_log_std self._log_std = None self._num_samples = None self._distribution = None def act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Model output. The first component is the action to be taken by the agent. 
The second component is the log of the probability density function. The third component is a dictionary containing the mean actions ``"mean_actions"`` and extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 60) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["mean_actions"].shape) torch.Size([4096, 8]) torch.Size([4096, 1]) torch.Size([4096, 8]) """ # map from states/observations to mean actions and log standard deviations mean_actions, log_std, outputs = self.compute(inputs, role) # clamp log standard deviations if self._clip_log_std: log_std = torch.clamp(log_std, self._log_std_min, self._log_std_max) self._log_std = log_std self._num_samples = mean_actions.shape[0] # distribution covariance = torch.diag(log_std.exp() * log_std.exp()) self._distribution = MultivariateNormal(mean_actions, scale_tril=covariance) # sample using the reparameterization trick actions = self._distribution.rsample() # clip actions if self._clip_actions: actions = torch.clamp(actions, min=self._clip_actions_min, max=self._clip_actions_max) # log of the probability density function log_prob = self._distribution.log_prob(inputs.get("taken_actions", actions)) if log_prob.dim() != actions.dim(): log_prob = log_prob.unsqueeze(-1) outputs["mean_actions"] = mean_actions return actions, log_prob, outputs def get_entropy(self, role: str = "") -> torch.Tensor: """Compute and return the entropy of the model :return: Entropy of the model :rtype: torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> entropy = model.get_entropy() >>> print(entropy.shape) torch.Size([4096]) """ if self._distribution is None: return torch.tensor(0.0, device=self.device) return self._distribution.entropy().to(self.device) def get_log_std(self, role: str = "") -> torch.Tensor: """Return the log standard deviation of the model :return: Log standard deviation of the model :rtype: torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> log_std = model.get_log_std() >>> print(log_std.shape) torch.Size([4096, 8]) """ return self._log_std.repeat(self._num_samples, 1) def distribution(self, role: str = "") -> torch.distributions.MultivariateNormal: """Get the current distribution of the model :return: Distribution of the model :rtype: torch.distributions.MultivariateNormal :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> distribution = model.distribution() >>> print(distribution) MultivariateNormal(loc: torch.Size([4096, 8]), scale_tril: torch.Size([4096, 8, 8])) """ return self._distribution
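A compact sketch of the multivariate Gaussian policy above, in which the joint log-probability and the entropy come directly from the MultivariateNormal distribution; the Policy class and spaces are illustrative assumptions:

import gym
import torch
import torch.nn as nn

from skrl.models.torch import Model, MultivariateGaussianMixin


class Policy(MultivariateGaussianMixin, Model):
    def __init__(self, observation_space, action_space, device="cpu"):
        Model.__init__(self, observation_space, action_space, device)
        MultivariateGaussianMixin.__init__(self, clip_actions=False)
        self.net = nn.Linear(self.num_observations, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), self.log_std_parameter, {}


observation_space = gym.spaces.Box(low=-1, high=1, shape=(3,))
action_space = gym.spaces.Box(low=-1, high=1, shape=(2,))
model = Policy(observation_space, action_space)

actions, log_prob, outputs = model.act({"states": torch.randn(16, 3)})
entropy = model.get_entropy()
# actions: (16, 2); log_prob: (16, 1) joint density; entropy: (16,)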
Toni-SM/skrl/skrl/models/torch/categorical.py
from typing import Any, Mapping, Tuple, Union import torch from torch.distributions import Categorical class CategoricalMixin: def __init__(self, unnormalized_log_prob: bool = True, role: str = "") -> None: """Categorical mixin model (stochastic model) :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: ``True``). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: # define the model >>> import torch >>> import torch.nn as nn >>> from skrl.models.torch import Model, CategoricalMixin >>> >>> class Policy(CategoricalMixin, Model): ... def __init__(self, observation_space, action_space, device="cuda:0", unnormalized_log_prob=True): ... Model.__init__(self, observation_space, action_space, device) ... CategoricalMixin.__init__(self, unnormalized_log_prob) ... ... self.net = nn.Sequential(nn.Linear(self.num_observations, 32), ... nn.ELU(), ... nn.Linear(32, 32), ... nn.ELU(), ... nn.Linear(32, self.num_actions)) ... ... def compute(self, inputs, role): ... return self.net(inputs["states"]), {} ... >>> # given an observation_space: gym.spaces.Box with shape (4,) >>> # and an action_space: gym.spaces.Discrete with n = 2 >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( (net): Sequential( (0): Linear(in_features=4, out_features=32, bias=True) (1): ELU(alpha=1.0) (2): Linear(in_features=32, out_features=32, bias=True) (3): ELU(alpha=1.0) (4): Linear(in_features=32, out_features=2, bias=True) ) ) """ self._unnormalized_log_prob = unnormalized_log_prob self._distribution = None def act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. 
The third component is a dictionary containing the network output ``"net_output"`` and extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 4) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["net_output"].shape) torch.Size([4096, 1]) torch.Size([4096, 1]) torch.Size([4096, 2]) """ # map from states/observations to normalized probabilities or unnormalized log probabilities net_output, outputs = self.compute(inputs, role) # unnormalized log probabilities if self._unnormalized_log_prob: self._distribution = Categorical(logits=net_output) # normalized probabilities else: self._distribution = Categorical(probs=net_output) # actions and log of the probability density function actions = self._distribution.sample() log_prob = self._distribution.log_prob(inputs.get("taken_actions", actions).view(-1)) outputs["net_output"] = net_output return actions.unsqueeze(-1), log_prob.unsqueeze(-1), outputs def get_entropy(self, role: str = "") -> torch.Tensor: """Compute and return the entropy of the model :return: Entropy of the model :rtype: torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> entropy = model.get_entropy() >>> print(entropy.shape) torch.Size([4096, 1]) """ if self._distribution is None: return torch.tensor(0.0, device=self.device) return self._distribution.entropy().to(self.device) def distribution(self, role: str = "") -> torch.distributions.Categorical: """Get the current distribution of the model :return: Distribution of the model :rtype: torch.distributions.Categorical :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: >>> distribution = model.distribution() >>> print(distribution) Categorical(probs: torch.Size([4096, 2]), logits: torch.Size([4096, 2])) """ return self._distribution
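A short sketch of the categorical (discrete-action) policy above; the Policy class, spaces and batch size are illustrative only:

import gym
import torch
import torch.nn as nn

from skrl.models.torch import Model, CategoricalMixin


class Policy(CategoricalMixin, Model):
    def __init__(self, observation_space, action_space, device="cpu"):
        Model.__init__(self, observation_space, action_space, device)
        CategoricalMixin.__init__(self, unnormalized_log_prob=True)  # the network outputs logits
        self.net = nn.Linear(self.num_observations, self.num_actions)

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


model = Policy(gym.spaces.Box(low=-1, high=1, shape=(4,)), gym.spaces.Discrete(2))

actions, log_prob, outputs = model.act({"states": torch.randn(32, 4)})
# actions: (32, 1) integer indices; log_prob: (32, 1); outputs["net_output"]: (32, 2)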
Toni-SM/skrl/skrl/models/jax/base.py
from typing import Any, Callable, Mapping, Optional, Sequence, Tuple, Union import gym import gymnasium import flax import jax import numpy as np from skrl import config class StateDict(flax.struct.PyTreeNode): apply_fn: Callable = flax.struct.field(pytree_node=False) params: flax.core.FrozenDict[str, Any] = flax.struct.field(pytree_node=True) @classmethod def create(cls, *, apply_fn, params, **kwargs): return cls(apply_fn=apply_fn, params=params, **kwargs) class Model(flax.linen.Module): observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space] action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space] device: Optional[Union[str, jax.Device]] = None def __init__(self, observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space], action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space], device: Optional[Union[str, jax.Device]] = None, parent: Optional[Any] = None, name: Optional[str] = None) -> None: """Base class representing a function approximator The following properties are defined: - ``device`` (jax.Device): Device to be used for the computations - ``observation_space`` (int, sequence of int, gym.Space, gymnasium.Space): Observation/state space - ``action_space`` (int, sequence of int, gym.Space, gymnasium.Space): Action space - ``num_observations`` (int): Number of elements in the observation/state space - ``num_actions`` (int): Number of elements in the action space :param observation_space: Observation/state space or shape. The ``num_observations`` property will contain the size of that space :type observation_space: int, sequence of int, gym.Space, gymnasium.Space :param action_space: Action space or shape. The ``num_actions`` property will contain the size of that space :type action_space: int, sequence of int, gym.Space, gymnasium.Space :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param parent: The parent Module of this Module (default: ``None``). It is a Flax reserved attribute :type parent: str, optional :param name: The name of this Module (default: ``None``). 
It is a Flax reserved attribute :type name: str, optional Custom models should override the ``act`` method:: import flax.linen as nn from skrl.models.jax import Model class CustomModel(Model): def __init__(self, observation_space, action_space, device=None, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError flax.linen.Module.__post_init__(self) @nn.compact def __call__(self, inputs, role): x = nn.relu(nn.Dense(64)(inputs["states"])) x = nn.relu(nn.Dense(self.num_actions)(x)) return x, None, {} """ self._jax = config.jax.backend == "jax" if device is None: self.device = jax.devices()[0] else: self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0] self.observation_space = observation_space self.action_space = action_space self.num_observations = None if observation_space is None else self._get_space_size(observation_space) self.num_actions = None if action_space is None else self._get_space_size(action_space) self.state_dict: StateDict self.training = False # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.ReservedModuleAttributeError self.parent = parent self.name = name def init_state_dict(self, role: str, inputs: Mapping[str, Union[np.ndarray, jax.Array]] = {}, key: Optional[jax.Array] = None) -> None: """Initialize state dictionary :param role: Role play by the model :type role: str :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states If not specified, the keys will be populated with observation and action space samples :type inputs: dict of np.ndarray or jax.Array, optional :param key: Pseudo-random number generator (PRNG) key (default: ``None``). If not provided, the skrl's PRNG key (``config.jax.key``) will be used :type key: jax.Array, optional """ if not inputs: inputs = {"states": self.observation_space.sample(), "taken_actions": self.action_space.sample()} if key is None: key = config.jax.key if isinstance(inputs["states"], (int, np.int32, np.int64)): inputs["states"] = np.array(inputs["states"]).reshape(-1,1) # init internal state dict self.state_dict = StateDict.create(apply_fn=self.apply, params=self.init(key, inputs, role)) def _get_space_size(self, space: Union[int, Sequence[int], gym.Space, gymnasium.Space], number_of_elements: bool = True) -> int: """Get the size (number of elements) of a space :param space: Space or shape from which to obtain the number of elements :type space: int, sequence of int, gym.Space, or gymnasium.Space :param number_of_elements: Whether the number of elements occupied by the space is returned (default: ``True``). If ``False``, the shape of the space is returned. 
It only affects Discrete and MultiDiscrete spaces :type number_of_elements: bool, optional :raises ValueError: If the space is not supported :return: Size of the space (number of elements) :rtype: int Example:: # from int >>> model._get_space_size(2) 2 # from sequence of int >>> model._get_space_size([2, 3]) 6 # Box space >>> space = gym.spaces.Box(low=-1, high=1, shape=(2, 3)) >>> model._get_space_size(space) 6 # Discrete space >>> space = gym.spaces.Discrete(4) >>> model._get_space_size(space) 4 >>> model._get_space_size(space, number_of_elements=False) 1 # MultiDiscrete space >>> space = gym.spaces.MultiDiscrete([5, 3, 2]) >>> model._get_space_size(space) 10 >>> model._get_space_size(space, number_of_elements=False) 3 # Dict space >>> space = gym.spaces.Dict({'a': gym.spaces.Box(low=-1, high=1, shape=(2, 3)), ... 'b': gym.spaces.Discrete(4)}) >>> model._get_space_size(space) 10 >>> model._get_space_size(space, number_of_elements=False) 7 """ size = None if type(space) in [int, float]: size = space elif type(space) in [tuple, list]: size = np.prod(space) elif issubclass(type(space), gym.Space): if issubclass(type(space), gym.spaces.Discrete): if number_of_elements: size = space.n else: size = 1 elif issubclass(type(space), gym.spaces.MultiDiscrete): if number_of_elements: size = np.sum(space.nvec) else: size = space.nvec.shape[0] elif issubclass(type(space), gym.spaces.Box): size = np.prod(space.shape) elif issubclass(type(space), gym.spaces.Dict): size = sum([self._get_space_size(space.spaces[key], number_of_elements) for key in space.spaces]) elif issubclass(type(space), gymnasium.Space): if issubclass(type(space), gymnasium.spaces.Discrete): if number_of_elements: size = space.n else: size = 1 elif issubclass(type(space), gymnasium.spaces.MultiDiscrete): if number_of_elements: size = np.sum(space.nvec) else: size = space.nvec.shape[0] elif issubclass(type(space), gymnasium.spaces.Box): size = np.prod(space.shape) elif issubclass(type(space), gymnasium.spaces.Dict): size = sum([self._get_space_size(space.spaces[key], number_of_elements) for key in space.spaces]) if size is None: raise ValueError(f"Space type {type(space)} not supported") return int(size) def tensor_to_space(self, tensor: Union[np.ndarray, jax.Array], space: Union[gym.Space, gymnasium.Space], start: int = 0) -> Union[Union[np.ndarray, jax.Array], dict]: """Map a flat tensor to a Gym/Gymnasium space The mapping is done in the following way: - Tensors belonging to Discrete spaces are returned without modification - Tensors belonging to Box spaces are reshaped to the corresponding space shape keeping the first dimension (number of samples) as they are - Tensors belonging to Dict spaces are mapped into a dictionary with the same keys as the original space :param tensor: Tensor to map from :type tensor: np.ndarray or jax.Array :param space: Space to map the tensor to :type space: gym.Space or gymnasium.Space :param start: Index of the first element of the tensor to map (default: ``0``) :type start: int, optional :raises ValueError: If the space is not supported :return: Mapped tensor or dictionary :rtype: np.ndarray or jax.Array, or dict Example:: >>> space = gym.spaces.Dict({'a': gym.spaces.Box(low=-1, high=1, shape=(2, 3)), ... 
'b': gym.spaces.Discrete(4)}) >>> tensor = jnp.array([[-0.3, -0.2, -0.1, 0.1, 0.2, 0.3, 2]]) >>> >>> model.tensor_to_space(tensor, space) {'a': Array([[[-0.3, -0.2, -0.1], [ 0.1, 0.2, 0.3]]], dtype=float32), 'b': Array([[2.]], dtype=float32)} """ if issubclass(type(space), gym.Space): if issubclass(type(space), gym.spaces.Discrete): return tensor elif issubclass(type(space), gym.spaces.Box): return tensor.reshape(tensor.shape[0], *space.shape) elif issubclass(type(space), gym.spaces.Dict): output = {} for k in sorted(space.keys()): end = start + self._get_space_size(space[k], number_of_elements=False) output[k] = self.tensor_to_space(tensor[:, start:end], space[k], end) start = end return output else: if issubclass(type(space), gymnasium.spaces.Discrete): return tensor elif issubclass(type(space), gymnasium.spaces.Box): return tensor.reshape(tensor.shape[0], *space.shape) elif issubclass(type(space), gymnasium.spaces.Dict): output = {} for k in sorted(space.keys()): end = start + self._get_space_size(space[k], number_of_elements=False) output[k] = self.tensor_to_space(tensor[:, start:end], space[k], end) start = end return output raise ValueError(f"Space type {type(space)} not supported") def random_act(self, inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]], role: str = "", params: Optional[jax.Array] = None) -> Tuple[Union[np.ndarray, jax.Array], Union[Union[np.ndarray, jax.Array], None], Mapping[str, Union[Union[np.ndarray, jax.Array], Any]]]: """Act randomly according to the action space :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically np.ndarray or jax.Array :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :raises NotImplementedError: Unsupported action space :return: Model output. The first component is the action to be taken by the agent :rtype: tuple of np.ndarray or jax.Array, None, and dict """ # discrete action space (Discrete) if issubclass(type(self.action_space), gym.spaces.Discrete) or issubclass(type(self.action_space), gymnasium.spaces.Discrete): actions = np.random.randint(self.action_space.n, size=(inputs["states"].shape[0], 1)) # continuous action space (Box) elif issubclass(type(self.action_space), gym.spaces.Box) or issubclass(type(self.action_space), gymnasium.spaces.Box): actions = np.random.uniform(low=self.action_space.low[0], high=self.action_space.high[0], size=(inputs["states"].shape[0], self.num_actions)) else: raise NotImplementedError(f"Action space type ({type(self.action_space)}) not supported") if self._jax: return jax.device_put(actions), None, {} return actions, None, {} def init_parameters(self, method_name: str = "normal", *args, **kwargs) -> None: """Initialize the model parameters according to the specified method name Method names are from the `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ module. Allowed method names are *uniform*, *normal*, *constant*, etc. 
:param method_name: `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ method name (default: ``"normal"``) :type method_name: str, optional :param args: Positional arguments of the method to be called :type args: tuple, optional :param kwargs: Key-value arguments of the method to be called :type kwargs: dict, optional Example:: # initialize all parameters with an orthogonal distribution with a scale of 0.5 >>> model.init_parameters("orthogonal", scale=0.5) # initialize all parameters as a normal distribution with a standard deviation of 0.1 >>> model.init_parameters("normal", stddev=0.1) """ if method_name in ["ones", "zeros"]: method = eval(f"flax.linen.initializers.{method_name}") else: method = eval(f"flax.linen.initializers.{method_name}(*args, **kwargs)") params = jax.tree_util.tree_map(lambda param: method(config.jax.key, param.shape), self.state_dict.params) self.state_dict = self.state_dict.replace(params=params) def init_weights(self, method_name: str = "normal", *args, **kwargs) -> None: """Initialize the model weights according to the specified method name Method names are from the `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ module. Allowed method names are *uniform*, *normal*, *constant*, etc. :param method_name: `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ method name (default: ``"normal"``) :type method_name: str, optional :param args: Positional arguments of the method to be called :type args: tuple, optional :param kwargs: Key-value arguments of the method to be called :type kwargs: dict, optional Example:: # initialize all weights with uniform distribution in range [-0.1, 0.1] >>> model.init_weights(method_name="uniform_", a=-0.1, b=0.1) # initialize all weights with normal distribution with mean 0 and standard deviation 0.25 >>> model.init_weights(method_name="normal_", mean=0.0, std=0.25) """ if method_name in ["ones", "zeros"]: method = eval(f"flax.linen.initializers.{method_name}") else: method = eval(f"flax.linen.initializers.{method_name}(*args, **kwargs)") params = jax.tree_util.tree_map_with_path(lambda path, param: method(config.jax.key, param.shape) if path[-1].key == "kernel" else param, self.state_dict.params) self.state_dict = self.state_dict.replace(params=params) def init_biases(self, method_name: str = "constant_", *args, **kwargs) -> None: """Initialize the model biases according to the specified method name Method names are from the `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ module. Allowed method names are *uniform*, *normal*, *constant*, etc. 
:param method_name: `flax.linen.initializers <https://flax.readthedocs.io/en/latest/api_reference/flax.linen/initializers.html>`_ method name (default: ``"normal"``) :type method_name: str, optional :param args: Positional arguments of the method to be called :type args: tuple, optional :param kwargs: Key-value arguments of the method to be called :type kwargs: dict, optional Example:: # initialize all biases with a constant value (0) >>> model.init_biases(method_name="constant_", val=0) # initialize all biases with normal distribution with mean 0 and standard deviation 0.25 >>> model.init_biases(method_name="normal_", mean=0.0, std=0.25) """ if method_name in ["ones", "zeros"]: method = eval(f"flax.linen.initializers.{method_name}") else: method = eval(f"flax.linen.initializers.{method_name}(*args, **kwargs)") params = jax.tree_util.tree_map_with_path(lambda path, param: method(config.jax.key, param.shape) if path[-1].key == "bias" else param, self.state_dict.params) self.state_dict = self.state_dict.replace(params=params) def get_specification(self) -> Mapping[str, Any]: """Returns the specification of the model The following keys are used by the agents for initialization: - ``"rnn"``: Recurrent Neural Network (RNN) specification for RNN, LSTM and GRU layers/cells - ``"sizes"``: List of RNN shapes (number of layers, number of environments, number of features in the RNN state). There must be as many tuples as there are states in the recurrent layer/cell. E.g., LSTM has 2 states (hidden and cell). :return: Dictionary containing advanced specification of the model :rtype: dict Example:: # model with a LSTM layer. # - number of layers: 1 # - number of environments: 4 # - number of features in the RNN state: 64 >>> model.get_specification() {'rnn': {'sizes': [(1, 4, 64), (1, 4, 64)]}} """ return {} def act(self, inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]], role: str = "", params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]: """Act according to the specified behavior (to be implemented by the inheriting classes) Agents will call this method to obtain the decision to be taken given the state of the environment. The classes that inherit from the latter must only implement the ``.__call__()`` method :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically np.ndarray or jax.Array :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :raises NotImplementedError: Child class must implement this method :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function for stochastic models or None for deterministic models. 
The third component is a dictionary containing extra output values :rtype: tuple of jax.Array, jax.Array or None, and dict """ raise NotImplementedError def set_mode(self, mode: str) -> None: """Set the model mode (training or evaluation) :param mode: Mode: ``"train"`` for training or ``"eval"`` for evaluation :type mode: str :raises ValueError: If the mode is not ``"train"`` or ``"eval"`` """ if mode == "train": self.training = True elif mode == "eval": self.training = False else: raise ValueError("Invalid mode. Use 'train' for training or 'eval' for evaluation") def save(self, path: str, state_dict: Optional[dict] = None) -> None: """Save the model to the specified path :param path: Path to save the model to :type path: str :param state_dict: State dictionary to save (default: ``None``). If None, the model's state_dict will be saved :type state_dict: dict, optional Example:: # save the current model to the specified path >>> model.save("/tmp/model.flax") # TODO: save an older version of the model to the specified path """ # HACK: Does it make sense to use https://github.com/google/orbax with open(path, "wb") as file: file.write(flax.serialization.to_bytes(self.state_dict.params if state_dict is None else state_dict.params)) def load(self, path: str) -> None: """Load the model from the specified path :param path: Path to load the model from :type path: str Example:: # load the model >>> model = Model(observation_space, action_space) >>> model.load("model.flax") """ # HACK: Does it make sense to use https://github.com/google/orbax with open(path, "rb") as file: params = flax.serialization.from_bytes(self.state_dict.params, file.read()) self.state_dict = self.state_dict.replace(params=params) self.set_mode("eval") def migrate(self, state_dict: Optional[Mapping[str, Any]] = None, path: Optional[str] = None, name_map: Mapping[str, str] = {}, auto_mapping: bool = True, verbose: bool = False) -> bool: """Migrate the specified extrernal model's state dict to the current model .. warning:: This method is not implemented yet, just maintains compatibility with other ML frameworks :raises NotImplementedError: Not implemented """ raise NotImplementedError def freeze_parameters(self, freeze: bool = True) -> None: """Freeze or unfreeze internal parameters .. note:: This method does nothing, just maintains compatibility with other ML frameworks :param freeze: Freeze the internal parameters if True, otherwise unfreeze them (default: ``True``) :type freeze: bool, optional Example:: # freeze model parameters >>> model.freeze_parameters(True) # unfreeze model parameters >>> model.freeze_parameters(False) """ pass def update_parameters(self, model: flax.linen.Module, polyak: float = 1) -> None: """Update internal parameters by hard or soft (polyak averaging) update - Hard update: :math:`\\theta = \\theta_{net}` - Soft (polyak averaging) update: :math:`\\theta = (1 - \\rho) \\theta + \\rho \\theta_{net}` :param model: Model used to update the internal parameters :type model: flax.linen.Module (skrl.models.jax.Model) :param polyak: Polyak hyperparameter between 0 and 1 (default: ``1``). 
A hard update is performed when its value is 1 :type polyak: float, optional Example:: # hard update (from source model) >>> model.update_parameters(source_model) # soft update (from source model) >>> model.update_parameters(source_model, polyak=0.005) """ # hard update if polyak == 1: self.state_dict = self.state_dict.replace(params=model.state_dict.params) # soft update else: # HACK: Does it make sense to use https://optax.readthedocs.io/en/latest/api.html?#optax.incremental_update params = jax.tree_util.tree_map(lambda params, model_params: polyak * model_params + (1 - polyak) * params, self.state_dict.params, model.state_dict.params) self.state_dict = self.state_dict.replace(params=params)
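A minimal JAX sketch of the base-model workflow above (custom subclass, explicit state-dict initialization, hard and soft parameter updates); the CustomModel class and spaces are illustrative assumptions, not part of the library:

import gym
import flax
import flax.linen as nn

from skrl.models.jax import Model


class CustomModel(Model):
    def __init__(self, observation_space, action_space, device=None, **kwargs):
        Model.__init__(self, observation_space, action_space, device, **kwargs)
        flax.linen.Module.__post_init__(self)

    @nn.compact
    def __call__(self, inputs, role):
        x = nn.relu(nn.Dense(32)(inputs["states"]))
        return nn.Dense(self.num_actions)(x), None, {}


observation_space = gym.spaces.Box(low=-1, high=1, shape=(4,))
action_space = gym.spaces.Box(low=-1, high=1, shape=(2,))

model = CustomModel(observation_space, action_space)
target = CustomModel(observation_space, action_space)

# parameters live in an immutable state dict that must be initialized explicitly
model.init_state_dict("policy")
target.init_state_dict("target_policy")

target.update_parameters(model)                # hard update: replace the parameter tree
target.update_parameters(model, polyak=0.005)  # soft update: polyak averaging via tree_map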
Toni-SM/skrl/skrl/models/jax/__init__.py
from skrl.models.jax.base import Model  # isort:skip

from skrl.models.jax.categorical import CategoricalMixin
from skrl.models.jax.deterministic import DeterministicMixin
from skrl.models.jax.gaussian import GaussianMixin
from skrl.models.jax.multicategorical import MultiCategoricalMixin
Toni-SM/skrl/skrl/models/jax/deterministic.py
from typing import Any, Mapping, Optional, Tuple, Union import gym import gymnasium import flax import jax import jax.numpy as jnp import numpy as np class DeterministicMixin: def __init__(self, clip_actions: bool = False, role: str = "") -> None: """Deterministic mixin model (deterministic model) :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``) :type clip_actions: bool, optional :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: # define the model >>> import flax.linen as nn >>> from skrl.models.jax import Model, DeterministicMixin >>> >>> class Value(DeterministicMixin, Model): ... def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): ... Model.__init__(self, observation_space, action_space, device, **kwargs) ... DeterministicMixin.__init__(self, clip_actions) ... ... @nn.compact # marks the given module method allowing inlined submodules ... def __call__(self, inputs, role): ... x = nn.elu(nn.Dense(32)([inputs["states"])) ... x = nn.elu(nn.Dense(32)(x)) ... x = nn.Dense(1)(x) ... return x, {} ... >>> # given an observation_space: gym.spaces.Box with shape (60,) >>> # and an action_space: gym.spaces.Box with shape (8,) >>> model = Value(observation_space, action_space) >>> >>> print(model) Value( # attributes observation_space = Box(-1.0, 1.0, (60,), float32) action_space = Box(-1.0, 1.0, (8,), float32) device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0) ) """ if not hasattr(self, "_d_clip_actions"): self._d_clip_actions = {} self._d_clip_actions[role] = clip_actions and (issubclass(type(self.action_space), gym.Space) or \ issubclass(type(self.action_space), gymnasium.Space)) if self._d_clip_actions[role]: self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32) self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32) # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError flax.linen.Module.__post_init__(self) def act(self, inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]], role: str = "", params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]: """Act deterministically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically np.ndarray or jax.Array :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :return: Model output. The first component is the action to be taken by the agent. The second component is ``None``. 
The third component is a dictionary containing extra output values :rtype: tuple of jax.Array, jax.Array or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 60) >>> actions, _, outputs = model.act({"states": states}) >>> print(actions.shape, outputs) (4096, 1) {} """ # map from observations/states to actions actions, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role) # clip actions if self._d_clip_actions[role] if role in self._d_clip_actions else self._d_clip_actions[""]: actions = jnp.clip(actions, a_min=self.clip_actions_min, a_max=self.clip_actions_max) return actions, None, outputs
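A small usage sketch for the JAX deterministic mixin above; the Value class mirrors the docstring example, and the spaces and batch size are illustrative assumptions:

import gym
import flax.linen as nn
import numpy as np

from skrl.models.jax import Model, DeterministicMixin


class Value(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
        Model.__init__(self, observation_space, action_space, device, **kwargs)
        DeterministicMixin.__init__(self, clip_actions)  # also runs flax's __post_init__

    @nn.compact
    def __call__(self, inputs, role):
        x = nn.elu(nn.Dense(32)(inputs["states"]))
        x = nn.elu(nn.Dense(32)(x))
        return nn.Dense(1)(x), {}


observation_space = gym.spaces.Box(low=-1, high=1, shape=(60,))
action_space = gym.spaces.Box(low=-1, high=1, shape=(8,))
model = Value(observation_space, action_space)

model.init_state_dict("value")
states = np.random.standard_normal((4096, 60)).astype(np.float32)
values, _, _ = model.act({"states": states})  # values: (4096, 1)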
Toni-SM/skrl/skrl/models/jax/gaussian.py
from typing import Any, Mapping, Optional, Tuple, Union from functools import partial import gym import gymnasium import flax import jax import jax.numpy as jnp import numpy as np from skrl import config # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @partial(jax.jit, static_argnames=("reduction")) def _gaussian(loc, log_std, log_std_min, log_std_max, clip_actions_min, clip_actions_max, taken_actions, key, reduction): # clamp log standard deviations log_std = jnp.clip(log_std, a_min=log_std_min, a_max=log_std_max) # distribution scale = jnp.exp(log_std) # sample actions actions = jax.random.normal(key, loc.shape) * scale + loc # clip actions actions = jnp.clip(actions, a_min=clip_actions_min, a_max=clip_actions_max) # log of the probability density function taken_actions = actions if taken_actions is None else taken_actions log_prob = -jnp.square(taken_actions - loc) / (2 * jnp.square(scale)) - jnp.log(scale) - 0.5 * jnp.log(2 * jnp.pi) if reduction is not None: log_prob = reduction(log_prob, axis=-1) if log_prob.ndim != actions.ndim: log_prob = jnp.expand_dims(log_prob, -1) return actions, log_prob, log_std, scale @jax.jit def _entropy(scale): return 0.5 + 0.5 * jnp.log(2 * jnp.pi) + jnp.log(scale) class GaussianMixin: def __init__(self, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, reduction: str = "sum", role: str = "") -> None: """Gaussian mixin model (stochastic model) :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: ``True``) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation if ``clip_log_std`` is True (default: ``-20``) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation if ``clip_log_std`` is True (default: ``2``) :type max_log_std: float, optional :param reduction: Reduction method for returning the log probability density function: (default: ``"sum"``). Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If "``none"``, the log probability density function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)`` :type reduction: str, optional :param role: Role play by the model (default: ``""``) :type role: str, optional :raises ValueError: If the reduction method is not valid Example:: # define the model >>> import flax.linen as nn >>> from skrl.models.jax import Model, GaussianMixin >>> >>> class Policy(GaussianMixin, Model): ... def __init__(self, observation_space, action_space, device=None, ... clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): ... Model.__init__(self, observation_space, action_space, device, **kwargs) ... GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) ... ... def setup(self): ... self.layer_1 = nn.Dense(32) ... self.layer_2 = nn.Dense(32) ... self.layer_3 = nn.Dense(self.num_actions) ... ... self.log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions)) ... ... def __call__(self, inputs, role): ... x = nn.elu(self.layer_1(inputs["states"])) ... x = nn.elu(self.layer_2(x)) ... return self.layer_3(x), self.log_std_parameter, {} ... 
>>> # given an observation_space: gym.spaces.Box with shape (60,) >>> # and an action_space: gym.spaces.Box with shape (8,) >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( # attributes observation_space = Box(-1.0, 1.0, (60,), float32) action_space = Box(-1.0, 1.0, (8,), float32) device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0) ) """ self._clip_actions = clip_actions and (issubclass(type(self.action_space), gym.Space) or \ issubclass(type(self.action_space), gymnasium.Space)) if self._clip_actions: self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32) self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32) else: self.clip_actions_min = -jnp.inf self.clip_actions_max = jnp.inf self._clip_log_std = clip_log_std if self._clip_log_std: self._log_std_min = min_log_std self._log_std_max = max_log_std else: self._log_std_min = -jnp.inf self._log_std_max = jnp.inf if reduction not in ["mean", "sum", "prod", "none"]: raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'") self._reduction = jnp.mean if reduction == "mean" else jnp.sum if reduction == "sum" \ else jnp.prod if reduction == "prod" else None self._i = 0 self._key = config.jax.key # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError flax.linen.Module.__post_init__(self) def act(self, inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]], role: str = "", params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically np.ndarray or jax.Array :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. 
The third component is a dictionary containing the mean actions ``"mean_actions"`` and extra output values :rtype: tuple of jax.Array, jax.Array or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 60) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["mean_actions"].shape) (4096, 8) (4096, 1) (4096, 8) """ self._i += 1 subkey = jax.random.fold_in(self._key, self._i) inputs["key"] = subkey # map from states/observations to mean actions and log standard deviations mean_actions, log_std, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role) actions, log_prob, log_std, stddev = _gaussian(mean_actions, log_std, self._log_std_min, self._log_std_max, self.clip_actions_min, self.clip_actions_max, inputs.get("taken_actions", None), subkey, self._reduction) outputs["mean_actions"] = mean_actions # avoid jax.errors.UnexpectedTracerError outputs["log_std"] = log_std outputs["stddev"] = stddev return actions, log_prob, outputs def get_entropy(self, stddev: jax.Array, role: str = "") -> jax.Array: """Compute and return the entropy of the model :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Entropy of the model :rtype: jax.Array Example:: # given a standard deviation array: stddev >>> entropy = model.get_entropy(stddev) >>> print(entropy.shape) (4096, 8) """ return _entropy(stddev)
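The jitted `_gaussian` helper above evaluates the closed-form Gaussian log-density, and `_entropy` the closed-form differential entropy 0.5 + 0.5 log(2*pi) + log(sigma). A small self-contained sketch (placeholder values, independent of the mixin itself) checking those expressions against jax.scipy.stats and showing the "sum" reduction:

import jax
import jax.numpy as jnp
from jax.scipy.stats import norm

key = jax.random.PRNGKey(0)
loc = jnp.zeros((4, 8))            # mean actions for a batch of 4 samples, 8 action dimensions
log_std = jnp.full((8,), -1.0)     # log standard deviation shared across the batch
scale = jnp.exp(log_std)

# sample as in _gaussian: a standard normal sample scaled and shifted
actions = jax.random.normal(key, loc.shape) * scale + loc

# per-dimension log-density, same expression as in _gaussian
log_prob = -jnp.square(actions - loc) / (2 * jnp.square(scale)) - jnp.log(scale) - 0.5 * jnp.log(2 * jnp.pi)
assert jnp.allclose(log_prob, norm.logpdf(actions, loc, scale), atol=1e-5)

# the "sum" reduction collapses the action dimensions to a single log-probability per sample
log_prob = log_prob.sum(axis=-1, keepdims=True)    # shape (4, 1)

# closed-form differential entropy of a Gaussian, as returned by _entropy
entropy = 0.5 + 0.5 * jnp.log(2 * jnp.pi) + jnp.log(scale)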
Toni-SM/skrl/skrl/models/jax/multicategorical.py
from typing import Any, Mapping, Optional, Tuple, Union from functools import partial import flax import jax import jax.numpy as jnp import numpy as np from skrl import config # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @partial(jax.jit, static_argnames=("unnormalized_log_prob")) def _categorical(net_output, unnormalized_log_prob, taken_actions, key): # normalize if unnormalized_log_prob: logits = net_output - jax.scipy.special.logsumexp(net_output, axis=-1, keepdims=True) # probs = jax.nn.softmax(logits) else: probs = net_output / net_output.sum(-1, keepdims=True) eps = jnp.finfo(probs.dtype).eps logits = jnp.log(probs.clip(min=eps, max=1 - eps)) # sample actions actions = jax.random.categorical(key, logits, axis=-1, shape=None) # log of the probability density function taken_actions = actions if taken_actions is None else taken_actions.astype(jnp.int32).reshape(-1) log_prob = jax.nn.log_softmax(logits)[jnp.arange(taken_actions.shape[0]), taken_actions] return actions.reshape(-1, 1), log_prob.reshape(-1, 1) @jax.jit def _entropy(logits): logits = logits - jax.scipy.special.logsumexp(logits, axis=-1, keepdims=True) logits = logits.clip(min=jnp.finfo(logits.dtype).min) p_log_p = logits * jax.nn.softmax(logits) return -p_log_p.sum(-1) class MultiCategoricalMixin: def __init__(self, unnormalized_log_prob: bool = True, reduction: str = "sum", role: str = "") -> None: """MultiCategorical mixin model (stochastic model) :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: ``True``). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param reduction: Reduction method for returning the log probability density function: (default: ``"sum"``). Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If "``none"``, the log probability density function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)`` :type reduction: str, optional :param role: Role play by the model (default: ``""``) :type role: str, optional :raises ValueError: If the reduction method is not valid Example:: # define the model >>> import flax.linen as nn >>> from skrl.models.jax import Model, MultiCategoricalMixin >>> >>> class Policy(MultiCategoricalMixin, Model): ... def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, reduction="sum", **kwargs): ... Model.__init__(self, observation_space, action_space, device, **kwargs) ... MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction) ... ... @nn.compact # marks the given module method allowing inlined submodules ... def __call__(self, inputs, role): ... x = nn.elu(nn.Dense(32)(inputs["states"])) ... x = nn.elu(nn.Dense(32)(x)) ... x = nn.Dense(self.num_actions)(x) ... return x, {} ... 
>>> # given an observation_space: gym.spaces.Box with shape (4,) >>> # and an action_space: gym.spaces.MultiDiscrete with nvec = [3, 2] >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( # attributes observation_space = Box(-1.0, 1.0, (4,), float32) action_space = MultiDiscrete([3 2]) device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0) ) """ self._unnormalized_log_prob = unnormalized_log_prob if reduction not in ["mean", "sum", "prod", "none"]: raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'") self._reduction = jnp.mean if reduction == "mean" else jnp.sum if reduction == "sum" \ else jnp.prod if reduction == "prod" else None self._i = 0 self._key = config.jax.key self._action_space_nvec = np.cumsum(self.action_space.nvec).tolist() self._action_space_shape = self._get_space_size(self.action_space, number_of_elements=False) # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError flax.linen.Module.__post_init__(self) def act(self, inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]], role: str = "", params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically np.ndarray or jax.Array :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. 
The third component is a dictionary containing the network output ``"net_output"`` and extra output values :rtype: tuple of jax.Array, jax.Array or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 4) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["net_output"].shape) (4096, 2) (4096, 1) (4096, 5) """ self._i += 1 subkey = jax.random.fold_in(self._key, self._i) inputs["key"] = subkey # map from states/observations to normalized probabilities or unnormalized log probabilities net_output, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role) # split inputs net_outputs = jnp.split(net_output, self._action_space_nvec, axis=-1) if "taken_actions" in inputs: taken_actions = jnp.split(inputs["taken_actions"], self._action_space_shape, axis=-1) else: taken_actions = [None] * self._action_space_shape # compute actions and log_prob actions, log_prob = [], [] for _net_output, _taken_actions in zip(net_outputs, taken_actions): _actions, _log_prob = _categorical(_net_output, self._unnormalized_log_prob, _taken_actions, subkey) actions.append(_actions) log_prob.append(_log_prob) actions = jnp.concatenate(actions, axis=-1) log_prob = jnp.concatenate(log_prob, axis=-1) if self._reduction is not None: log_prob = self._reduction(log_prob, axis=-1) if log_prob.ndim != actions.ndim: log_prob = jnp.expand_dims(log_prob, -1) outputs["net_output"] = net_output # avoid jax.errors.UnexpectedTracerError outputs["stddev"] = jnp.full_like(log_prob, jnp.nan) return actions, log_prob, outputs def get_entropy(self, logits: jax.Array, role: str = "") -> jax.Array: """Compute and return the entropy of the model :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Entropy of the model :rtype: jax.Array Example:: # given a standard deviation array: stddev >>> entropy = model.get_entropy(stddev) >>> print(entropy.shape) (4096, 8) """ return _entropy(logits)
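For MultiDiscrete action spaces, the flat network output is split at the cumulative sizes of `action_space.nvec` and each chunk is treated as an independent categorical distribution. A small sketch of that splitting step, using nvec = [3, 2] as in the docstring above (the arrays are placeholders):

import numpy as np
import jax.numpy as jnp

nvec = np.array([3, 2])                   # e.g. gym.spaces.MultiDiscrete([3, 2])
split_indices = np.cumsum(nvec).tolist()  # [3, 5], as stored in self._action_space_nvec

net_output = jnp.arange(4096 * 5, dtype=jnp.float32).reshape(4096, 5)  # flat logits, one column per category
chunks = jnp.split(net_output, split_indices, axis=-1)

# chunks[0].shape == (4096, 3) -> logits for the first sub-action
# chunks[1].shape == (4096, 2) -> logits for the second sub-action
# chunks[2] has shape (4096, 0); this empty trailing chunk is dropped when act() zips the chunks
# with the per-sub-action taken_actions list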
Toni-SM/skrl/skrl/models/jax/categorical.py
from typing import Any, Mapping, Optional, Tuple, Union from functools import partial import flax import jax import jax.numpy as jnp import numpy as np from skrl import config # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @partial(jax.jit, static_argnames=("unnormalized_log_prob")) def _categorical(net_output, unnormalized_log_prob, taken_actions, key): # normalize if unnormalized_log_prob: logits = net_output - jax.scipy.special.logsumexp(net_output, axis=-1, keepdims=True) # probs = jax.nn.softmax(logits) else: probs = net_output / net_output.sum(-1, keepdims=True) eps = jnp.finfo(probs.dtype).eps logits = jnp.log(probs.clip(min=eps, max=1 - eps)) # sample actions actions = jax.random.categorical(key, logits, axis=-1, shape=None) # log of the probability density function taken_actions = actions if taken_actions is None else taken_actions.astype(jnp.int32).reshape(-1) log_prob = jax.nn.log_softmax(logits)[jnp.arange(taken_actions.shape[0]), taken_actions] return actions.reshape(-1, 1), log_prob.reshape(-1, 1) @jax.jit def _entropy(logits): logits = logits - jax.scipy.special.logsumexp(logits, axis=-1, keepdims=True) logits = logits.clip(min=jnp.finfo(logits.dtype).min) p_log_p = logits * jax.nn.softmax(logits) return -p_log_p.sum(-1) class CategoricalMixin: def __init__(self, unnormalized_log_prob: bool = True, role: str = "") -> None: """Categorical mixin model (stochastic model) :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: ``True``). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: # define the model >>> import flax.linen as nn >>> from skrl.models.jax import Model, CategoricalMixin >>> >>> class Policy(CategoricalMixin, Model): ... def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs): ... Model.__init__(self, observation_space, action_space, device, **kwargs) ... CategoricalMixin.__init__(self, unnormalized_log_prob) ... ... @nn.compact # marks the given module method allowing inlined submodules ... def __call__(self, inputs, role): ... x = nn.elu(nn.Dense(32)(inputs["states"])) ... x = nn.elu(nn.Dense(32)(x)) ... x = nn.Dense(self.num_actions)(x) ... return x, {} ... >>> # given an observation_space: gym.spaces.Box with shape (4,) >>> # and an action_space: gym.spaces.Discrete with n = 2 >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( # attributes observation_space = Box(-1.0, 1.0, (4,), float32) action_space = Discrete(2) device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0) ) """ self._unnormalized_log_prob = unnormalized_log_prob self._i = 0 self._key = config.jax.key # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError flax.linen.Module.__post_init__(self) def act(self, inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]], role: str = "", params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. 
The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically np.ndarray or jax.Array :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. The third component is a dictionary containing the network output ``"net_output"`` and extra output values :rtype: tuple of jax.Array, jax.Array or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 4) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["net_output"].shape) (4096, 1) (4096, 1) (4096, 2) """ self._i += 1 subkey = jax.random.fold_in(self._key, self._i) inputs["key"] = subkey # map from states/observations to normalized probabilities or unnormalized log probabilities net_output, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role) actions, log_prob = _categorical(net_output, self._unnormalized_log_prob, inputs.get("taken_actions", None), subkey) outputs["net_output"] = net_output # avoid jax.errors.UnexpectedTracerError outputs["stddev"] = jnp.full_like(log_prob, jnp.nan) return actions, log_prob, outputs def get_entropy(self, logits: jax.Array, role: str = "") -> jax.Array: """Compute and return the entropy of the model :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Entropy of the model :rtype: jax.Array Example:: # given a standard deviation array: stddev >>> entropy = model.get_entropy(stddev) >>> print(entropy.shape) (4096, 8) """ return _entropy(logits)
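The `_categorical` helper accepts either unnormalized log-probabilities or already-normalized probabilities; both branches lead to the same normalized logits (the eps clipping only guards against log(0)). A small self-contained sketch with placeholder values illustrating the two normalization paths and the sampling step:

import jax
import jax.numpy as jnp

net_output = jnp.array([[2.0, 0.5, -1.0]])  # unnormalized log-probabilities for 3 classes

# unnormalized_log_prob=True: subtract the log-sum-exp so that exp(logits) sums to 1 per row
logits_from_logits = net_output - jax.scipy.special.logsumexp(net_output, axis=-1, keepdims=True)

# unnormalized_log_prob=False: the model already outputs probabilities (here built via softmax)
probs = jax.nn.softmax(net_output)
eps = jnp.finfo(probs.dtype).eps
logits_from_probs = jnp.log(jnp.clip(probs, eps, 1 - eps))

# both paths yield the same normalized log-probabilities
assert jnp.allclose(logits_from_logits, logits_from_probs, atol=1e-5)

# sampling and log-probability of the sampled action, as in _categorical
key = jax.random.PRNGKey(0)
actions = jax.random.categorical(key, logits_from_logits, axis=-1)          # shape (1,)
log_prob = jax.nn.log_softmax(logits_from_logits)[jnp.arange(1), actions]   # shape (1,)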
Toni-SM/skrl/skrl/multi_agents/__init__.py
Toni-SM/skrl/skrl/multi_agents/torch/base.py
from typing import Any, Mapping, Optional, Sequence, Union import collections import copy import datetime import os import gym import gymnasium import numpy as np import torch from torch.utils.tensorboard import SummaryWriter from skrl import logger from skrl.memories.torch import Memory from skrl.models.torch import Model class MultiAgent: def __init__(self, possible_agents: Sequence[str], models: Mapping[str, Mapping[str, Model]], memories: Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None, action_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Base class that represent a RL multi-agent :param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.torch.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.torch.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict """ self.possible_agents = possible_agents self.num_agents = len(self.possible_agents) self.models = models self.memories = memories self.observation_spaces = observation_spaces self.action_spaces = action_spaces self.cfg = cfg if cfg is not None else {} self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device) # convert the models to their respective device for _models in self.models.values(): for model in _models.values(): if model is not None: model.to(model.device) self.tracking_data = collections.defaultdict(list) self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000) self._track_rewards = collections.deque(maxlen=100) self._track_timesteps = collections.deque(maxlen=100) self._cumulative_rewards = None self._cumulative_timesteps = None self.training = True # checkpoint self.checkpoint_modules = {uid: {} for uid in self.possible_agents} self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000) self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False) self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": True, "modules": {}} # experiment directory directory = self.cfg.get("experiment", {}).get("directory", "") experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "") if not directory: directory = os.path.join(os.getcwd(), "runs") if not experiment_name: experiment_name = f"{datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S-%f')}_{self.__class__.__name__}" self.experiment_dir = os.path.join(directory, experiment_name) def __str__(self) -> str: 
"""Generate a representation of the agent as string :return: Representation of the agent as string :rtype: str """ string = f"Multi-agent: {repr(self)}" for k, v in self.cfg.items(): if type(v) is dict: string += f"\n |-- {k}" for k1, v1 in v.items(): string += f"\n | |-- {k1}: {v1}" else: string += f"\n |-- {k}: {v}" return string def _as_dict(self, _input: Any) -> Mapping[str, Any]: """Convert a configuration value into a dictionary according to the number of agents :param _input: Configuration value :type _input: Any :raises ValueError: The configuration value is a dictionary different from the number of agents :return: Configuration value as a dictionary :rtype: list of any configuration value """ if _input and isinstance(_input, collections.abc.Mapping): if set(_input) < set(self.possible_agents): logger.error("The configuration value does not match possible agents") raise ValueError("The configuration value does not match possible agents") elif set(_input) >= set(self.possible_agents): return _input return {name: copy.deepcopy(_input) for name in self.possible_agents} def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any: """Empty preprocess method This method is defined because PyTorch multiprocessing can't pickle lambdas :param _input: Input to preprocess :type _input: Any :return: Preprocessed input :rtype: Any """ return _input def _get_internal_value(self, _module: Any) -> Any: """Get internal module/variable state/value :param _module: Module or variable :type _module: Any :return: Module/variable state/value :rtype: Any """ return _module.state_dict() if hasattr(_module, "state_dict") else _module def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent This method should be called before the agent is used. It will initialize the TensoBoard writer (and optionally Weights & Biases) and create the checkpoints directory :param trainer_cfg: Trainer configuration :type trainer_cfg: dict, optional """ # setup Weights & Biases if self.cfg.get("experiment", {}).get("wandb", False): # save experiment config trainer_cfg = trainer_cfg if trainer_cfg is not None else {} try: models_cfg = {uid: {k: v.net._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents} except AttributeError: models_cfg = {uid: {k: v._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents} config={**self.cfg, **trainer_cfg, **models_cfg} # set default values wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {})) wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1]) wandb_kwargs.setdefault("sync_tensorboard", True) wandb_kwargs.setdefault("config", {}) wandb_kwargs["config"].update(config) # init Weights & Biases import wandb wandb.init(**wandb_kwargs) # main entry to log data for consumption and visualization by TensorBoard if self.write_interval > 0: self.writer = SummaryWriter(log_dir=self.experiment_dir) if self.checkpoint_interval > 0: os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True) def track_data(self, tag: str, value: float) -> None: """Track data to TensorBoard Currently only scalar data are supported :param tag: Data identifier (e.g. 
'Loss / policy loss') :type tag: str :param value: Value to track :type value: float """ self.tracking_data[tag].append(value) def write_tracking_data(self, timestep: int, timesteps: int) -> None: """Write tracking data to TensorBoard :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ for k, v in self.tracking_data.items(): if k.endswith("(min)"): self.writer.add_scalar(k, np.min(v), timestep) elif k.endswith("(max)"): self.writer.add_scalar(k, np.max(v), timestep) else: self.writer.add_scalar(k, np.mean(v), timestep) # reset data containers for next iteration self._track_rewards.clear() self._track_timesteps.clear() self.tracking_data.clear() def write_checkpoint(self, timestep: int, timesteps: int) -> None: """Write checkpoint (modules) to disk The checkpoints are saved in the directory 'checkpoints' in the experiment directory. The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time. :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f")) # separated modules if self.checkpoint_store_separately: for uid in self.possible_agents: for name, module in self.checkpoint_modules[uid].items(): torch.save(self._get_internal_value(module), os.path.join(self.experiment_dir, "checkpoints", f"{uid}_{name}_{tag}.pt")) # whole agent else: modules = {uid: {name: self._get_internal_value(module) for name, module in self.checkpoint_modules[uid].items()} \ for uid in self.possible_agents} torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pt")) # best modules if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]: # separated modules if self.checkpoint_store_separately: for uid in self.possible_agents: for name in self.checkpoint_modules[uid].keys(): torch.save(self.checkpoint_best_modules["modules"][uid][name], os.path.join(self.experiment_dir, "checkpoints", f"best_{uid}_{name}.pt")) # whole agent else: modules = {uid: {name: self.checkpoint_best_modules["modules"][uid][name] \ for name in self.checkpoint_modules[uid].keys()} for uid in self.possible_agents} torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", "best_agent.pt")) self.checkpoint_best_modules["saved"] = True def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: dictionary of torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes :return: Actions :rtype: torch.Tensor """ raise NotImplementedError def record_transition(self, states: Mapping[str, torch.Tensor], actions: Mapping[str, torch.Tensor], rewards: Mapping[str, torch.Tensor], next_states: Mapping[str, torch.Tensor], terminated: Mapping[str, torch.Tensor], truncated: Mapping[str, torch.Tensor], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory (to be implemented by the inheriting classes) Inheriting classes must call this method to record episode information (rewards, timesteps, etc.). 
In addition to recording environment transition (such as states, rewards, etc.), agent information can be recorded. :param states: Observations/states of the environment used to make the decision :type states: dictionary of torch.Tensor :param actions: Actions taken by the agent :type actions: dictionary of torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of torch.Tensor :param next_states: Next observations/states of the environment :type next_states: dictionary of torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of torch.Tensor :param infos: Additional information about the environment :type infos: dictionary of any supported type :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ _rewards = next(iter(rewards.values())) # compute the cumulative sum of the rewards and timesteps if self._cumulative_rewards is None: self._cumulative_rewards = torch.zeros_like(_rewards, dtype=torch.float32) self._cumulative_timesteps = torch.zeros_like(_rewards, dtype=torch.int32) self._cumulative_rewards.add_(_rewards) self._cumulative_timesteps.add_(1) # check ended episodes finished_episodes = (next(iter(terminated.values())) + next(iter(truncated.values()))).nonzero(as_tuple=False) if finished_episodes.numel(): # storage cumulative rewards and timesteps self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist()) self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist()) # reset the cumulative rewards and timesteps self._cumulative_rewards[finished_episodes] = 0 self._cumulative_timesteps[finished_episodes] = 0 # record data if self.write_interval > 0: self.tracking_data["Reward / Instantaneous reward (max)"].append(torch.max(_rewards).item()) self.tracking_data["Reward / Instantaneous reward (min)"].append(torch.min(_rewards).item()) self.tracking_data["Reward / Instantaneous reward (mean)"].append(torch.mean(_rewards).item()) if len(self._track_rewards): track_rewards = np.array(self._track_rewards) track_timesteps = np.array(self._track_timesteps) self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards)) self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards)) self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards)) self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps)) self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps)) self.tracking_data["Episode / Total timesteps (mean)"].append(np.mean(track_timesteps)) def set_mode(self, mode: str) -> None: """Set the model mode (training or evaluation) :param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ for _models in self.models.values(): for model in _models.values(): if model is not None: model.set_mode(mode) def set_running_mode(self, mode: str) -> None: """Set the current running mode (training or evaluation) This method sets the value of the ``training`` property (boolean). This property can be used to know if the agent is running in training or evaluation mode. 
:param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ self.training = mode == "train" def save(self, path: str) -> None: """Save the agent to the specified path :param path: Path to save the model to :type path: str """ modules = {uid: {name: self._get_internal_value(module) for name, module in self.checkpoint_modules[uid].items()} \ for uid in self.possible_agents} torch.save(modules, path) def load(self, path: str) -> None: """Load the model from the specified path The final storage device is determined by the constructor of the model :param path: Path to load the model from :type path: str """ modules = torch.load(path, map_location=self.device) if type(modules) is dict: for uid in self.possible_agents: if uid not in modules: logger.warning(f"Cannot load modules for {uid}. The agent doesn't have such an instance") continue for name, data in modules[uid].items(): module = self.checkpoint_modules[uid].get(name, None) if module is not None: if hasattr(module, "load_state_dict"): module.load_state_dict(data) if hasattr(module, "eval"): module.eval() else: raise NotImplementedError else: logger.warning(f"Cannot load the {uid}:{name} module. The agent doesn't have such an instance") def migrate(self, path: str, name_map: Mapping[str, Mapping[str, str]] = {}, auto_mapping: bool = True, verbose: bool = False) -> bool: """Migrate the specified extrernal checkpoint to the current agent The final storage device is determined by the constructor of the agent. For ambiguous models (where 2 or more parameters, for source or current model, have equal shape) it is necessary to define the ``name_map``, at least for those parameters, to perform the migration successfully :param path: Path to the external checkpoint to migrate from :type path: str :param name_map: Name map to use for the migration (default: ``{}``). Keys are the current parameter names and values are the external parameter names :type name_map: Mapping[str, Mapping[str, str]], optional :param auto_mapping: Automatically map the external state dict to the current state dict (default: ``True``) :type auto_mapping: bool, optional :param verbose: Show model names and migration (default: ``False``) :type verbose: bool, optional :raises ValueError: If the correct file type cannot be identified from the ``path`` parameter :return: True if the migration was successful, False otherwise. 
Migration is successful if all parameters of the current model are found in the external model :rtype: bool """ raise NotImplementedError def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ timestep += 1 # update best models and write checkpoints if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval: # update best models reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31)) if reward > self.checkpoint_best_modules["reward"]: self.checkpoint_best_modules["timestep"] = timestep self.checkpoint_best_modules["reward"] = reward self.checkpoint_best_modules["saved"] = False self.checkpoint_best_modules["modules"] = {uid: {k: copy.deepcopy(self._get_internal_value(v)) \ for k, v in self.checkpoint_modules[uid].items()} for uid in self.possible_agents} # write checkpoints self.write_checkpoint(timestep, timesteps) # write to tensorboard if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval: self.write_tracking_data(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes """ raise NotImplementedError
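Per-agent configuration handling in this base class relies on `_as_dict`, which broadcasts a scalar value to every possible agent and validates an explicit per-agent mapping. A small standalone sketch of that behavior (hypothetical agent names, not part of the library):

import collections.abc
import copy

possible_agents = ["agent_0", "agent_1"]  # hypothetical agent names

def as_dict(value):
    # mirrors MultiAgent._as_dict: mappings must cover all possible agents, scalars are broadcast
    if value and isinstance(value, collections.abc.Mapping):
        if set(value) < set(possible_agents):
            raise ValueError("The configuration value does not match possible agents")
        return value
    return {name: copy.deepcopy(value) for name in possible_agents}

print(as_dict(1e-3))                                # {'agent_0': 0.001, 'agent_1': 0.001}
print(as_dict({"agent_0": 1e-3, "agent_1": 5e-4}))  # complete mapping is passed through unchanged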
Toni-SM/skrl/skrl/multi_agents/torch/__init__.py
from skrl.multi_agents.torch.base import MultiAgent
Toni-SM/skrl/skrl/multi_agents/torch/mappo/mappo.py
from typing import Any, Mapping, Optional, Sequence, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.multi_agents.torch import MultiAgent from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] MAPPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "shared_state_preprocessor": None, # shared state preprocessor class (see skrl.resources.preprocessors) "shared_state_preprocessor_kwargs": {}, # shared state preprocessor's kwargs (e.g. {"size": env.shared_observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class MAPPO(MultiAgent): def __init__(self, possible_agents: Sequence[str], models: Mapping[str, Model], memories: Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None, shared_observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None) -> None: """Multi-Agent Proximal Policy Optimization (MAPPO) https://arxiv.org/abs/2103.01955 
:param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.torch.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.torch.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :param shared_observation_spaces: Shared observation/state space or shape (default: ``None``) :type shared_observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional """ _cfg = copy.deepcopy(MAPPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(possible_agents=possible_agents, models=models, memories=memories, observation_spaces=observation_spaces, action_spaces=action_spaces, device=device, cfg=_cfg) self.shared_observation_spaces = shared_observation_spaces # models self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents} self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents} for uid in self.possible_agents: self.checkpoint_modules[uid]["policy"] = self.policies[uid] self.checkpoint_modules[uid]["value"] = self.values[uid] # configuration self._learning_epochs = self._as_dict(self.cfg["learning_epochs"]) self._mini_batches = self._as_dict(self.cfg["mini_batches"]) self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"]) self._ratio_clip = self._as_dict(self.cfg["ratio_clip"]) self._value_clip = self._as_dict(self.cfg["value_clip"]) self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"]) self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"]) self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"]) self._kl_threshold = self._as_dict(self.cfg["kl_threshold"]) self._learning_rate = self._as_dict(self.cfg["learning_rate"]) self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"]) self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"]) self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"]) self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"]) self._shared_state_preprocessor = self._as_dict(self.cfg["shared_state_preprocessor"]) self._shared_state_preprocessor_kwargs = self._as_dict(self.cfg["shared_state_preprocessor_kwargs"]) self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"]) self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"]) self._discount_factor = self._as_dict(self.cfg["discount_factor"]) self._lambda = self._as_dict(self.cfg["lambda"]) self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] 
self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"]) # set up optimizer and learning rate scheduler self.optimizers = {} self.schedulers = {} for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] if policy is not None and value is not None: if policy is value: optimizer = torch.optim.Adam(policy.parameters(), lr=self._learning_rate[uid]) else: optimizer = torch.optim.Adam(itertools.chain(policy.parameters(), value.parameters()), lr=self._learning_rate[uid]) self.optimizers[uid] = optimizer if self._learning_rate_scheduler[uid] is not None: self.schedulers[uid] = self._learning_rate_scheduler[uid](optimizer, **self._learning_rate_scheduler_kwargs[uid]) self.checkpoint_modules[uid]["optimizer"] = self.optimizers[uid] # set up preprocessors if self._state_preprocessor[uid] is not None: self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid] else: self._state_preprocessor[uid] = self._empty_preprocessor if self._shared_state_preprocessor[uid] is not None: self._shared_state_preprocessor[uid] = self._shared_state_preprocessor[uid](**self._shared_state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["shared_state_preprocessor"] = self._shared_state_preprocessor[uid] else: self._shared_state_preprocessor[uid] = self._empty_preprocessor if self._value_preprocessor[uid] is not None: self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid] else: self._value_preprocessor[uid] = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memories for uid in self.possible_agents: self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=torch.float32) self.memories[uid].create_tensor(name="shared_states", size=self.shared_observation_spaces[uid], dtype=torch.float32) self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=torch.float32) self.memories[uid].create_tensor(name="rewards", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="terminated", size=1, dtype=torch.bool) self.memories[uid].create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="values", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="returns", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="advantages", size=1, dtype=torch.float32) # tensors sampled during training self._tensors_names = ["states", "shared_states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = [] self._current_shared_next_states = [] def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policies :param states: Environment's states :type states: dictionary of torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # # sample random actions # # TODO: fix for stochasticity, rnn 
and log_prob # if timestep < self._random_timesteps: # return self.policy.random_act({"states": states}, role="policy") # sample stochastic actions data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents] actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)} log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)} outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)} self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: Mapping[str, torch.Tensor], actions: Mapping[str, torch.Tensor], rewards: Mapping[str, torch.Tensor], next_states: Mapping[str, torch.Tensor], terminated: Mapping[str, torch.Tensor], truncated: Mapping[str, torch.Tensor], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: dictionary of torch.Tensor :param actions: Actions taken by the agent :type actions: dictionary of torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of torch.Tensor :param next_states: Next observations/states of the environment :type next_states: dictionary of torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of torch.Tensor :param infos: Additional information about the environment :type infos: dictionary of any supported type :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memories: shared_states = infos["shared_states"] self._current_shared_next_states = infos["shared_next_states"] for uid in self.possible_agents: # reward shaping if self._rewards_shaper is not None: rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps) # compute values values, _, _ = self.values[uid].act({"states": self._shared_state_preprocessor[uid](shared_states[uid])}, role="value") values = self._value_preprocessor[uid](values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap[uid]: rewards[uid] += self._discount_factor[uid] * values * truncated[uid] # storage transition in memory self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid], terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values, shared_states=shared_states[uid]) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints 
super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] memory = self.memories[uid] # compute returns and advantages with torch.no_grad(): value.train(False) last_values, _, _ = value.act({"states": self._shared_state_preprocessor[uid](self._current_shared_next_states[uid].float())}, role="value") value.train(True) last_values = self._value_preprocessor[uid](last_values, inverse=True) values = memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"), dones=memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor[uid], lambda_coefficient=self._lambda[uid]) memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True)) memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True)) memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid]) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs[uid]): kl_divergences = [] # mini-batches loop for sampled_states, sampled_shared_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages \ in sampled_batches: sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch) sampled_shared_states = self._shared_state_preprocessor[uid](sampled_shared_states, train=not epoch) _, next_log_prob, _ = policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy") # compute approximate KL divergence with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # early stopping with KL divergence if self._kl_threshold[uid] 
and kl_divergence > self._kl_threshold[uid]: break # compute entropy loss if self._entropy_loss_scale[uid]: entropy_loss = -self._entropy_loss_scale[uid] * policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss ratio = torch.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip[uid], 1.0 + self._ratio_clip[uid]) policy_loss = -torch.min(surrogate, surrogate_clipped).mean() # compute value loss predicted_values, _, _ = value.act({"states": sampled_shared_states}, role="value") if self._clip_predicted_values: predicted_values = sampled_values + torch.clip(predicted_values - sampled_values, min=-self._value_clip[uid], max=self._value_clip[uid]) value_loss = self._value_loss_scale[uid] * F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizers[uid].zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip[uid] > 0: if policy is value: nn.utils.clip_grad_norm_(policy.parameters(), self._grad_norm_clip[uid]) else: nn.utils.clip_grad_norm_(itertools.chain(policy.parameters(), value.parameters()), self._grad_norm_clip[uid]) self.optimizers[uid].step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale[uid]: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler[uid]: if isinstance(self.schedulers[uid], KLAdaptiveLR): self.schedulers[uid].step(torch.tensor(kl_divergences).mean()) else: self.schedulers[uid].step() # record data self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) if self._entropy_loss_scale: self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Policy / Standard deviation ({uid})", policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler[uid]: self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid].get_last_lr()[0])
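A minimal construction sketch for the MAPPO agent above. It assumes an already-wrapped multi-agent environment `env` exposing possible_agents, observation_spaces, action_spaces, shared_observation_spaces, num_envs and device (as skrl's multi-agent wrappers typically do), and a nested `models` dict with a "policy" and a "value" model per agent; scalar configuration values are broadcast to every agent via `_as_dict`:

import copy

from skrl.memories.torch import RandomMemory
from skrl.multi_agents.torch.mappo import MAPPO, MAPPO_DEFAULT_CONFIG

# override a few defaults; unspecified keys keep the values from MAPPO_DEFAULT_CONFIG
cfg = copy.deepcopy(MAPPO_DEFAULT_CONFIG)
cfg["rollouts"] = 1024
cfg["learning_epochs"] = 5
cfg["entropy_loss_scale"] = 0.01

# one rollout memory per agent (RandomMemory is skrl's basic rollout/replay buffer)
memories = {name: RandomMemory(memory_size=cfg["rollouts"], num_envs=env.num_envs, device=env.device)
            for name in env.possible_agents}

agent = MAPPO(possible_agents=env.possible_agents,
              models=models,                          # {agent: {"policy": ..., "value": ...}}
              memories=memories,
              observation_spaces=env.observation_spaces,
              action_spaces=env.action_spaces,
              device=env.device,
              cfg=cfg,
              shared_observation_spaces=env.shared_observation_spaces)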
Toni-SM/skrl/skrl/multi_agents/torch/mappo/__init__.py
from skrl.multi_agents.torch.mappo.mappo import MAPPO, MAPPO_DEFAULT_CONFIG
Toni-SM/skrl/skrl/multi_agents/torch/ippo/__init__.py
from skrl.multi_agents.torch.ippo.ippo import IPPO, IPPO_DEFAULT_CONFIG
Toni-SM/skrl/skrl/multi_agents/torch/ippo/ippo.py
from typing import Any, Mapping, Optional, Sequence, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.multi_agents.torch import MultiAgent from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] IPPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class IPPO(MultiAgent): def __init__(self, possible_agents: Sequence[str], models: Mapping[str, Model], memories: Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Independent Proximal Policy Optimization (IPPO) https://arxiv.org/abs/2011.09533 :param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. 
Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.torch.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.torch.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict """ _cfg = copy.deepcopy(IPPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(possible_agents=possible_agents, models=models, memories=memories, observation_spaces=observation_spaces, action_spaces=action_spaces, device=device, cfg=_cfg) # models self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents} self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents} for uid in self.possible_agents: self.checkpoint_modules[uid]["policy"] = self.policies[uid] self.checkpoint_modules[uid]["value"] = self.values[uid] # configuration self._learning_epochs = self._as_dict(self.cfg["learning_epochs"]) self._mini_batches = self._as_dict(self.cfg["mini_batches"]) self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"]) self._ratio_clip = self._as_dict(self.cfg["ratio_clip"]) self._value_clip = self._as_dict(self.cfg["value_clip"]) self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"]) self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"]) self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"]) self._kl_threshold = self._as_dict(self.cfg["kl_threshold"]) self._learning_rate = self._as_dict(self.cfg["learning_rate"]) self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"]) self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"]) self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"]) self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"]) self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"]) self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"]) self._discount_factor = self._as_dict(self.cfg["discount_factor"]) self._lambda = self._as_dict(self.cfg["lambda"]) self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"]) # set up optimizer and learning rate scheduler self.optimizers = {} self.schedulers = {} for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] if policy is not None and value is not None: if policy is value: optimizer = torch.optim.Adam(policy.parameters(), lr=self._learning_rate[uid]) else: optimizer = torch.optim.Adam(itertools.chain(policy.parameters(), value.parameters()), lr=self._learning_rate[uid]) self.optimizers[uid] = optimizer if self._learning_rate_scheduler[uid] is not None: 
self.schedulers[uid] = self._learning_rate_scheduler[uid](optimizer, **self._learning_rate_scheduler_kwargs[uid]) self.checkpoint_modules[uid]["optimizer"] = self.optimizers[uid] # set up preprocessors if self._state_preprocessor[uid] is not None: self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid] else: self._state_preprocessor[uid] = self._empty_preprocessor if self._value_preprocessor[uid] is not None: self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid] else: self._value_preprocessor[uid] = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memories for uid in self.possible_agents: self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=torch.float32) self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=torch.float32) self.memories[uid].create_tensor(name="rewards", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="terminated", size=1, dtype=torch.bool) self.memories[uid].create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="values", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="returns", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="advantages", size=1, dtype=torch.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = [] self._current_next_states = [] def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policies :param states: Environment's states :type states: dictionary of torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # # sample random actions # # TODO: fix for stochasticity, rnn and log_prob # if timestep < self._random_timesteps: # return self.policy.random_act({"states": states}, role="policy") # sample stochastic actions data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents] actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)} log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)} outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)} self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: Mapping[str, torch.Tensor], actions: Mapping[str, torch.Tensor], rewards: Mapping[str, torch.Tensor], next_states: Mapping[str, torch.Tensor], terminated: Mapping[str, torch.Tensor], truncated: Mapping[str, torch.Tensor], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: dictionary of torch.Tensor :param actions: Actions taken by the agent :type actions: dictionary of torch.Tensor 
:param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of torch.Tensor :param next_states: Next observations/states of the environment :type next_states: dictionary of torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of torch.Tensor :param infos: Additional information about the environment :type infos: dictionary of any supported type :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memories: self._current_next_states = next_states for uid in self.possible_agents: # reward shaping if self._rewards_shaper is not None: rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps) # compute values values, _, _ = self.values[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="value") values = self._value_preprocessor[uid](values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap[uid]: rewards[uid] += self._discount_factor[uid] * values * truncated[uid] # storage transition in memory self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid], terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size 
- 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] memory = self.memories[uid] # compute returns and advantages with torch.no_grad(): value.train(False) last_values, _, _ = value.act({"states": self._state_preprocessor[uid](self._current_next_states[uid].float())}, role="value") value.train(True) last_values = self._value_preprocessor[uid](last_values, inverse=True) values = memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"), dones=memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor[uid], lambda_coefficient=self._lambda[uid]) memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True)) memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True)) memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid]) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs[uid]): kl_divergences = [] # mini-batches loop for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches: sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch) _, next_log_prob, _ = policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy") # compute approximate KL divergence with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # early stopping with KL divergence if self._kl_threshold[uid] and kl_divergence > self._kl_threshold[uid]: break # compute entropy loss if self._entropy_loss_scale[uid]: entropy_loss = -self._entropy_loss_scale[uid] * policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss ratio = torch.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip[uid], 1.0 + self._ratio_clip[uid]) policy_loss = -torch.min(surrogate, surrogate_clipped).mean() # compute value loss predicted_values, _, _ = value.act({"states": sampled_states}, role="value") if self._clip_predicted_values: predicted_values = sampled_values + torch.clip(predicted_values - sampled_values, min=-self._value_clip[uid], max=self._value_clip[uid]) value_loss = self._value_loss_scale[uid] * F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizers[uid].zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip[uid] > 0: if policy is value: nn.utils.clip_grad_norm_(policy.parameters(), self._grad_norm_clip[uid]) else: nn.utils.clip_grad_norm_(itertools.chain(policy.parameters(), value.parameters()), self._grad_norm_clip[uid]) self.optimizers[uid].step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale[uid]: 
cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler[uid]: if isinstance(self.schedulers[uid], KLAdaptiveLR): self.schedulers[uid].step(torch.tensor(kl_divergences).mean()) else: self.schedulers[uid].step() # record data self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) if self._entropy_loss_scale: self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Policy / Standard deviation ({uid})", policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler[uid]: self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid].get_last_lr()[0])
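# Illustrative instantiation sketch for the IPPO agent defined above. `env`, `Policy` and
# `Value` are assumptions (a multi-agent environment wrapper and user-defined skrl Model
# subclasses), not part of this file. Scalar config entries apply to every agent; a dict
# keyed by agent name provides per-agent values instead (see `_as_dict`).
import torch
from skrl.memories.torch import RandomMemory
from skrl.multi_agents.torch.ippo import IPPO, IPPO_DEFAULT_CONFIG

device = "cuda" if torch.cuda.is_available() else "cpu"

models = {uid: {"policy": Policy(env.observation_spaces[uid], env.action_spaces[uid], device),
                "value": Value(env.observation_spaces[uid], env.action_spaces[uid], device)}
          for uid in env.possible_agents}
memories = {uid: RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)
            for uid in env.possible_agents}

cfg = IPPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 16
cfg["learning_rate"] = {uid: 1e-3 for uid in env.possible_agents}  # per-agent values are also accepted

agent = IPPO(possible_agents=env.possible_agents,
             models=models,
             memories=memories,
             observation_spaces=env.observation_spaces,
             action_spaces=env.action_spaces,
             device=device,
             cfg=cfg)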
Toni-SM/skrl/skrl/multi_agents/jax/base.py
from typing import Any, Mapping, Optional, Sequence, Union import collections import copy import datetime import os import pickle import gym import gymnasium import flax import jax import numpy as np from skrl import config, logger from skrl.memories.jax import Memory from skrl.models.jax import Model class MultiAgent: def __init__(self, possible_agents: Sequence[str], models: Mapping[str, Mapping[str, Model]], memories: Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None, action_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Base class that represent a RL multi-agent :param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.jax.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.jax.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict """ self._jax = config.jax.backend == "jax" self.possible_agents = possible_agents self.num_agents = len(self.possible_agents) self.models = models self.memories = memories self.observation_spaces = observation_spaces self.action_spaces = action_spaces self.cfg = cfg if cfg is not None else {} if device is None: self.device = jax.devices()[0] else: self.device = device if isinstance(device, jax.Device) else jax.devices(device)[0] # convert the models to their respective device for _models in self.models.values(): for model in _models.values(): if model is not None: pass self.tracking_data = collections.defaultdict(list) self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000) self._track_rewards = collections.deque(maxlen=100) self._track_timesteps = collections.deque(maxlen=100) self._cumulative_rewards = None self._cumulative_timesteps = None self.training = True # checkpoint self.checkpoint_modules = {uid: {} for uid in self.possible_agents} self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000) self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False) self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": True, "modules": {}} # experiment directory directory = self.cfg.get("experiment", {}).get("directory", "") experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "") if not directory: directory = os.path.join(os.getcwd(), "runs") if not experiment_name: experiment_name = f"{datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S-%f')}_{self.__class__.__name__}" self.experiment_dir = os.path.join(directory, experiment_name) def __str__(self) -> str: 
"""Generate a representation of the agent as string :return: Representation of the agent as string :rtype: str """ string = f"Multi-agent: {repr(self)}" for k, v in self.cfg.items(): if type(v) is dict: string += f"\n |-- {k}" for k1, v1 in v.items(): string += f"\n | |-- {k1}: {v1}" else: string += f"\n |-- {k}: {v}" return string def _as_dict(self, _input: Any) -> Mapping[str, Any]: """Convert a configuration value into a dictionary according to the number of agents :param _input: Configuration value :type _input: Any :raises ValueError: The configuration value is a dictionary different from the number of agents :return: Configuration value as a dictionary :rtype: list of any configuration value """ if _input and isinstance(_input, collections.abc.Mapping): if set(_input) < set(self.possible_agents): logger.error("The configuration value does not match possible agents") raise ValueError("The configuration value does not match possible agents") elif set(_input) >= set(self.possible_agents): return _input try: return {name: copy.deepcopy(_input) for name in self.possible_agents} except TypeError: return {name: _input for name in self.possible_agents} def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any: """Empty preprocess method This method is defined because PyTorch multiprocessing can't pickle lambdas :param _input: Input to preprocess :type _input: Any :return: Preprocessed input :rtype: Any """ return _input def _get_internal_value(self, _module: Any) -> Any: """Get internal module/variable state/value :param _module: Module or variable :type _module: Any :return: Module/variable state/value :rtype: Any """ return _module.state_dict.params if hasattr(_module, "state_dict") else _module def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent This method should be called before the agent is used. 
It will initialize the TensoBoard writer (and optionally Weights & Biases) and create the checkpoints directory :param trainer_cfg: Trainer configuration :type trainer_cfg: dict, optional """ # setup Weights & Biases if self.cfg.get("experiment", {}).get("wandb", False): # save experiment config trainer_cfg = trainer_cfg if trainer_cfg is not None else {} try: models_cfg = {uid: {k: v.net._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents} except AttributeError: models_cfg = {uid: {k: v._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents} config={**self.cfg, **trainer_cfg, **models_cfg} # set default values wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {})) wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1]) wandb_kwargs.setdefault("sync_tensorboard", True) wandb_kwargs.setdefault("config", {}) wandb_kwargs["config"].update(config) # init Weights & Biases import wandb wandb.init(**wandb_kwargs) # main entry to log data for consumption and visualization by TensorBoard if self.write_interval > 0: self.writer = None # tensorboard via torch SummaryWriter try: from torch.utils.tensorboard import SummaryWriter self.writer = SummaryWriter(log_dir=self.experiment_dir) except ImportError as e: pass # tensorboard via tensorflow if self.writer is None: try: import tensorflow class _SummaryWriter: def __init__(self, log_dir): self.writer = tensorflow.summary.create_file_writer(logdir=log_dir) def add_scalar(self, tag, value, step): with self.writer.as_default(): tensorflow.summary.scalar(tag, value, step=step) self.writer = _SummaryWriter(log_dir=self.experiment_dir) except ImportError as e: pass # tensorboard via tensorboardX if self.writer is None: try: import tensorboardX self.writer = tensorboardX.SummaryWriter(log_dir=self.experiment_dir) except ImportError as e: pass # show warnings and exit if self.writer is None: logger.warning("No package found to write events to Tensorboard.") logger.warning("Set agent's `write_interval` setting to 0 to disable writing") logger.warning("or install one of the following packages:") logger.warning(" - PyTorch: https://pytorch.org/get-started/locally") logger.warning(" - TensorFlow: https://www.tensorflow.org/install") logger.warning(" - TensorboardX: https://github.com/lanpa/tensorboardX#install") logger.warning("The current running process will be terminated.") exit() if self.checkpoint_interval > 0: os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True) def track_data(self, tag: str, value: float) -> None: """Track data to TensorBoard Currently only scalar data are supported :param tag: Data identifier (e.g. 
'Loss / policy loss') :type tag: str :param value: Value to track :type value: float """ self.tracking_data[tag].append(value) def write_tracking_data(self, timestep: int, timesteps: int) -> None: """Write tracking data to TensorBoard :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ for k, v in self.tracking_data.items(): if k.endswith("(min)"): self.writer.add_scalar(k, np.min(v), timestep) elif k.endswith("(max)"): self.writer.add_scalar(k, np.max(v), timestep) else: self.writer.add_scalar(k, np.mean(v), timestep) # reset data containers for next iteration self._track_rewards.clear() self._track_timesteps.clear() self.tracking_data.clear() def write_checkpoint(self, timestep: int, timesteps: int) -> None: """Write checkpoint (modules) to disk The checkpoints are saved in the directory 'checkpoints' in the experiment directory. The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time. :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f")) # separated modules if self.checkpoint_store_separately: for uid in self.possible_agents: for name, module in self.checkpoint_modules[uid].items(): with open(os.path.join(self.experiment_dir, "checkpoints", f"{uid}_{name}_{tag}.pickle"), "wb") as file: pickle.dump(flax.serialization.to_bytes(self._get_internal_value(module)), file, protocol=4) # whole agent else: modules = {uid: {name: flax.serialization.to_bytes(self._get_internal_value(module)) for name, module in self.checkpoint_modules[uid].items()} \ for uid in self.possible_agents} with open(os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pickle"), "wb") as file: pickle.dump(modules, file, protocol=4) # best modules if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]: # separated modules if self.checkpoint_store_separately: for uid in self.possible_agents: for name, module in self.checkpoint_modules.items(): with open(os.path.join(self.experiment_dir, "checkpoints", f"best_{uid}_{name}.pickle"), "wb") as file: pickle.dump(flax.serialization.to_bytes(self.checkpoint_best_modules["modules"][uid][name]), file, protocol=4) # whole agent else: modules = {uid: {name: flax.serialization.to_bytes(self.checkpoint_best_modules["modules"][uid][name]) \ for name in self.checkpoint_modules[uid].keys()} for uid in self.possible_agents} with open(os.path.join(self.experiment_dir, "checkpoints", "best_agent.pickle"), "wb") as file: pickle.dump(modules, file, protocol=4) self.checkpoint_best_modules["saved"] = True def act(self, states: Mapping[str, Union[np.ndarray, jax.Array]], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: dictionary of np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes :return: Actions :rtype: np.ndarray or jax.Array """ raise NotImplementedError def record_transition(self, states: Mapping[str, Union[np.ndarray, jax.Array]], actions: Mapping[str, Union[np.ndarray, jax.Array]], rewards: Mapping[str, Union[np.ndarray, jax.Array]], next_states: 
Mapping[str, Union[np.ndarray, jax.Array]], terminated: Mapping[str, Union[np.ndarray, jax.Array]], truncated: Mapping[str, Union[np.ndarray, jax.Array]], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory (to be implemented by the inheriting classes) Inheriting classes must call this method to record episode information (rewards, timesteps, etc.). In addition to recording environment transition (such as states, rewards, etc.), agent information can be recorded. :param states: Observations/states of the environment used to make the decision :type states: dictionary of np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: dictionary of np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: dictionary of np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: dictionary of any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ if self.write_interval > 0: _rewards = next(iter(rewards.values())) _terminated = next(iter(terminated.values())) _truncated = next(iter(truncated.values())) # compute the cumulative sum of the rewards and timesteps if self._cumulative_rewards is None: self._cumulative_rewards = np.zeros_like(_rewards, dtype=np.float32) self._cumulative_timesteps = np.zeros_like(_rewards, dtype=np.int32) # TODO: find a better way to avoid https://jax.readthedocs.io/en/latest/errors.html#jax.errors.ConcretizationTypeError if self._jax: _rewards = jax.device_get(_rewards) _terminated = jax.device_get(_terminated) _truncated = jax.device_get(_truncated) self._cumulative_rewards += _rewards self._cumulative_timesteps += 1 # check ended episodes finished_episodes = (_terminated + _truncated).nonzero()[0] if finished_episodes.size: # storage cumulative rewards and timesteps self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist()) self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist()) # reset the cumulative rewards and timesteps self._cumulative_rewards[finished_episodes] = 0 self._cumulative_timesteps[finished_episodes] = 0 # record data self.tracking_data["Reward / Instantaneous reward (max)"].append(np.max(_rewards).item()) self.tracking_data["Reward / Instantaneous reward (min)"].append(np.min(_rewards).item()) self.tracking_data["Reward / Instantaneous reward (mean)"].append(np.mean(_rewards).item()) if len(self._track_rewards): track_rewards = np.array(self._track_rewards) track_timesteps = np.array(self._track_timesteps) self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards)) self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards)) self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards)) self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps)) self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps)) self.tracking_data["Episode / 
Total timesteps (mean)"].append(np.mean(track_timesteps)) def set_mode(self, mode: str) -> None: """Set the model mode (training or evaluation) :param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ for _models in self.models.values(): for model in _models.values(): if model is not None: model.set_mode(mode) def set_running_mode(self, mode: str) -> None: """Set the current running mode (training or evaluation) This method sets the value of the ``training`` property (boolean). This property can be used to know if the agent is running in training or evaluation mode. :param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ self.training = mode == "train" def save(self, path: str) -> None: """Save the agent to the specified path :param path: Path to save the model to :type path: str """ modules = {uid: {name: flax.serialization.to_bytes(self._get_internal_value(module)) \ for name, module in self.checkpoint_modules[uid].items()} for uid in self.possible_agents} # HACK: Does it make sense to use https://github.com/google/orbax # file.write(flax.serialization.to_bytes(modules)) with open(path, "wb") as file: pickle.dump(modules, file, protocol=4) def load(self, path: str) -> None: """Load the model from the specified path :param path: Path to load the model from :type path: str """ with open(path, "rb") as file: modules = pickle.load(file) if type(modules) is dict: for uid in self.possible_agents: if uid not in modules: logger.warning(f"Cannot load modules for {uid}. The agent doesn't have such an instance") continue for name, data in modules[uid].items(): module = self.checkpoint_modules[uid].get(name, None) if module is not None: if hasattr(module, "load_state_dict"): params = flax.serialization.from_bytes(module.state_dict.params, data) module.state_dict = module.state_dict.replace(params=params) else: pass # TODO:raise NotImplementedError else: logger.warning(f"Cannot load the {uid}:{name} module. 
The agent doesn't have such an instance") def migrate(self, path: str, name_map: Mapping[str, Mapping[str, str]] = {}, auto_mapping: bool = True, verbose: bool = False) -> bool: """Migrate the specified extrernal checkpoint to the current agent :raises NotImplementedError: Not yet implemented """ raise NotImplementedError def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ timestep += 1 # update best models and write checkpoints if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval: # update best models reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31)) if reward > self.checkpoint_best_modules["reward"]: self.checkpoint_best_modules["timestep"] = timestep self.checkpoint_best_modules["reward"] = reward self.checkpoint_best_modules["saved"] = False self.checkpoint_best_modules["modules"] = {uid: {k: copy.deepcopy(self._get_internal_value(v)) \ for k, v in self.checkpoint_modules[uid].items()} for uid in self.possible_agents} # write checkpoints self.write_checkpoint(timestep, timesteps) # write to tensorboard if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval: self.write_tracking_data(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes """ raise NotImplementedError
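# Standalone restatement of the `_as_dict` broadcast rule implemented above, assuming two
# agents named "agent_0" and "agent_1" (names are illustrative): a plain value is copied for
# every possible agent, while a mapping that already covers all agents is returned as-is.
# (In the implementation above, a mapping whose keys are only a subset of the agents raises
# a ValueError instead.)
possible_agents = ["agent_0", "agent_1"]

def as_dict(value):
    if value and isinstance(value, dict) and set(value) >= set(possible_agents):
        return value
    return {name: value for name in possible_agents}

print(as_dict(0.2))                               # {'agent_0': 0.2, 'agent_1': 0.2}
print(as_dict({"agent_0": 0.1, "agent_1": 0.3}))  # returned unchanged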
Toni-SM/skrl/skrl/multi_agents/jax/__init__.py
from skrl.multi_agents.jax.base import MultiAgent
Toni-SM/skrl/skrl/multi_agents/jax/mappo/mappo.py
from typing import Any, Mapping, Optional, Sequence, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.multi_agents.jax import MultiAgent from skrl.resources.optimizers.jax import Adam from skrl.resources.schedulers.jax import KLAdaptiveLR # [start-config-dict-jax] MAPPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "shared_state_preprocessor": None, # shared state preprocessor class (see skrl.resources.preprocessors) "shared_state_preprocessor_kwargs": {}, # shared state preprocessor's kwargs (e.g. {"size": env.shared_observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] def compute_gae(rewards: np.ndarray, dones: np.ndarray, values: np.ndarray, next_values: np.ndarray, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> np.ndarray: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: np.ndarray :param dones: Signals to indicate that episodes have ended :type dones: np.ndarray :param values: Values obtained by the agent :type values: np.ndarray :param next_values: Next values obtained by the agent :type next_values: np.ndarray :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type 
lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: np.ndarray """ advantage = 0 advantages = np.zeros_like(rewards) not_dones = np.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _compute_gae(rewards: jax.Array, dones: jax.Array, values: jax.Array, next_values: jax.Array, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> jax.Array: advantage = 0 advantages = jnp.zeros_like(rewards) not_dones = jnp.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages = advantages.at[i].set(advantage) # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages @functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale")) def _update_policy(policy_act, policy_state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, ratio_clip, get_entropy, entropy_loss_scale): # compute policy loss def _policy_loss(params): _, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions}, "policy", params) # compute approximate KL divergence ratio = next_log_prob - sampled_log_prob kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean() # compute policy loss ratio = jnp.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip) # compute entropy loss entropy_loss = 0 if entropy_loss_scale: entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean() return -jnp.minimum(surrogate, surrogate_clipped).mean(), (entropy_loss, kl_divergence, outputs["stddev"]) (policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params) return grad, policy_loss, entropy_loss, kl_divergence, stddev @functools.partial(jax.jit, static_argnames=("value_act", "clip_predicted_values")) def _update_value(value_act, value_state_dict, sampled_states, sampled_values, sampled_returns, value_loss_scale, clip_predicted_values, value_clip): # compute value loss def _value_loss(params): predicted_values, _, _ = value_act({"states": sampled_states}, "value", params) if clip_predicted_values: predicted_values = sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip) return value_loss_scale * ((sampled_returns - predicted_values) ** 2).mean() value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params) return grad, value_loss class MAPPO(MultiAgent): def __init__(self, possible_agents: Sequence[str], models: Mapping[str, Model], memories: 
Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None, shared_observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None) -> None: """Multi-Agent Proximal Policy Optimization (MAPPO) https://arxiv.org/abs/2103.01955 :param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.jax.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.jax.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :param shared_observation_spaces: Shared observation/state space or shape (default: ``None``) :type shared_observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional """ # _cfg = copy.deepcopy(IPPO_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = MAPPO_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(possible_agents=possible_agents, models=models, memories=memories, observation_spaces=observation_spaces, action_spaces=action_spaces, device=device, cfg=_cfg) self.shared_observation_spaces = shared_observation_spaces # models self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents} self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents} for uid in self.possible_agents: self.checkpoint_modules[uid]["policy"] = self.policies[uid] self.checkpoint_modules[uid]["value"] = self.values[uid] # configuration self._learning_epochs = self._as_dict(self.cfg["learning_epochs"]) self._mini_batches = self._as_dict(self.cfg["mini_batches"]) self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"]) self._ratio_clip = self._as_dict(self.cfg["ratio_clip"]) self._value_clip = self._as_dict(self.cfg["value_clip"]) self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"]) self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"]) self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"]) self._kl_threshold = self._as_dict(self.cfg["kl_threshold"]) self._learning_rate = self._as_dict(self.cfg["learning_rate"]) self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"]) self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"]) self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"]) 
self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"]) self._shared_state_preprocessor = self._as_dict(self.cfg["shared_state_preprocessor"]) self._shared_state_preprocessor_kwargs = self._as_dict(self.cfg["shared_state_preprocessor_kwargs"]) self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"]) self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"]) self._discount_factor = self._as_dict(self.cfg["discount_factor"]) self._lambda = self._as_dict(self.cfg["lambda"]) self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"]) # set up optimizer and learning rate scheduler self.policy_optimizer = {} self.value_optimizer = {} self.schedulers = {} for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] if policy is not None and value is not None: # scheduler scale = True self.schedulers[uid] = None if self._learning_rate_scheduler[uid] is not None: if self._learning_rate_scheduler[uid] == KLAdaptiveLR: scale = False self.schedulers[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid]) else: self._learning_rate[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid]) # optimizer self.policy_optimizer[uid] = Adam(model=policy, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale) self.value_optimizer[uid] = Adam(model=value, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale) self.checkpoint_modules[uid]["policy_optimizer"] = self.policy_optimizer[uid] self.checkpoint_modules[uid]["value_optimizer"] = self.value_optimizer[uid] # set up preprocessors if self._state_preprocessor[uid] is not None: self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid] else: self._state_preprocessor[uid] = self._empty_preprocessor if self._shared_state_preprocessor[uid] is not None: self._shared_state_preprocessor[uid] = self._shared_state_preprocessor[uid](**self._shared_state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["shared_state_preprocessor"] = self._shared_state_preprocessor[uid] else: self._shared_state_preprocessor[uid] = self._empty_preprocessor if self._value_preprocessor[uid] is not None: self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid] else: self._value_preprocessor[uid] = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memories for uid in self.possible_agents: self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=jnp.float32) self.memories[uid].create_tensor(name="shared_states", size=self.shared_observation_spaces[uid], dtype=jnp.float32) self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=jnp.float32) self.memories[uid].create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="terminated", size=1, dtype=jnp.int8) 
self.memories[uid].create_tensor(name="log_prob", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="values", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="returns", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="advantages", size=1, dtype=jnp.float32) # tensors sampled during training self._tensors_names = ["states", "shared_states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = [] self._current_shared_next_states = [] # set up models for just-in-time compilation with XLA for uid in self.possible_agents: self.policies[uid].apply = jax.jit(self.policies[uid].apply, static_argnums=2) if self.values[uid] is not None: self.values[uid].apply = jax.jit(self.values[uid].apply, static_argnums=2) def act(self, states: Mapping[str, Union[np.ndarray, jax.Array]], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policies :param states: Environment's states :type states: dictionary of np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ # # sample random actions # # TODO: fix for stochasticity, rnn and log_prob # if timestep < self._random_timesteps: # return self.policy.random_act({"states": states}, role="policy") # sample stochastic actions data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents] actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)} log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)} outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)} if not self._jax: # numpy backend actions = {jax.device_get(_actions) for _actions in actions} log_prob = {jax.device_get(_log_prob) for _log_prob in log_prob} self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: Mapping[str, Union[np.ndarray, jax.Array]], actions: Mapping[str, Union[np.ndarray, jax.Array]], rewards: Mapping[str, Union[np.ndarray, jax.Array]], next_states: Mapping[str, Union[np.ndarray, jax.Array]], terminated: Mapping[str, Union[np.ndarray, jax.Array]], truncated: Mapping[str, Union[np.ndarray, jax.Array]], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: dictionary of np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: dictionary of np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: dictionary of np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: dictionary of any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ 
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memories: shared_states = infos["shared_states"] self._current_shared_next_states = infos["shared_next_states"] for uid in self.possible_agents: # reward shaping if self._rewards_shaper is not None: rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps) # compute values values, _, _ = self.values[uid].act({"states": self._shared_state_preprocessor[uid](shared_states[uid])}, role="value") if not self._jax: # numpy backend values = jax.device_get(values) values = self._value_preprocessor[uid](values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap[uid]: rewards[uid] += self._discount_factor[uid] * values * truncated[uid] # storage transition in memory self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid], terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values, shared_states=shared_states[uid]) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] memory = self.memories[uid] # compute returns and advantages value.training = False last_values, _, _ = value.act({"states": self._shared_state_preprocessor[uid](self._current_shared_next_states[uid])}, role="value") # TODO: .float() value.training = True if not self._jax: # numpy backend last_values = jax.device_get(last_values) last_values = self._value_preprocessor[uid](last_values, inverse=True) values = memory.get_tensor_by_name("values") if self._jax: returns, advantages = _compute_gae(rewards=memory.get_tensor_by_name("rewards"), dones=memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor[uid], lambda_coefficient=self._lambda[uid]) else: returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"), dones=memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor[uid], lambda_coefficient=self._lambda[uid]) memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True)) memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True)) memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid]) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss 
= 0 # learning epochs for epoch in range(self._learning_epochs[uid]): kl_divergences = [] # mini-batches loop for sampled_states, sampled_shared_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages \ in sampled_batches: sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch) sampled_shared_states = self._shared_state_preprocessor[uid](sampled_shared_states, train=not epoch) # compute policy loss grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(policy.act, policy.state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, self._ratio_clip[uid], policy.get_entropy, self._entropy_loss_scale[uid]) kl_divergences.append(kl_divergence.item()) # early stopping with KL divergence if self._kl_threshold[uid] and kl_divergence > self._kl_threshold[uid]: break # optimization step (policy) self.policy_optimizer[uid] = self.policy_optimizer[uid].step(grad, policy, self.schedulers[uid]._lr if self.schedulers[uid] else None) # compute value loss grad, value_loss = _update_value(value.act, value.state_dict, sampled_shared_states, sampled_values, sampled_returns, self._value_loss_scale[uid], self._clip_predicted_values[uid], self._value_clip[uid]) # optimization step (value) self.value_optimizer[uid] = self.value_optimizer[uid].step(grad, value, self.schedulers[uid]._lr if self.schedulers[uid] else None) # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale[uid]: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler[uid]: if isinstance(self.schedulers[uid], KLAdaptiveLR): self.schedulers[uid].step(np.mean(kl_divergences)) # record data self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) if self._entropy_loss_scale: self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Policy / Standard deviation ({uid})", stddev.mean().item()) if self._learning_rate_scheduler[uid]: self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid]._lr)
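The per-agent update above finishes by stepping a KL-adaptive scheduler with the mean KL divergence measured over the epoch's mini-batches (`self.schedulers[uid].step(np.mean(kl_divergences))`). The sketch below illustrates the general idea behind such a rule; the target, factor and bounds are illustrative assumptions, not the actual `KLAdaptiveLR` parameters.

# minimal sketch of a KL-adaptive learning-rate rule (illustrative values only)
def kl_adaptive_lr(lr: float, kl: float, kl_target: float = 0.008,
                   factor: float = 1.5, min_lr: float = 1e-6, max_lr: float = 1e-2) -> float:
    if kl > 2.0 * kl_target:      # policy moved too far: slow down
        lr = max(lr / factor, min_lr)
    elif kl < 0.5 * kl_target:    # policy barely moved: speed up
        lr = min(lr * factor, max_lr)
    return lr

lr = 1e-3
for mean_kl in (0.002, 0.020, 0.008):   # e.g. mean KL per learning epoch
    lr = kl_adaptive_lr(lr, mean_kl)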
Toni-SM/skrl/skrl/multi_agents/jax/mappo/__init__.py
from skrl.multi_agents.jax.mappo.mappo import MAPPO, MAPPO_DEFAULT_CONFIG
Toni-SM/skrl/skrl/multi_agents/jax/ippo/__init__.py
from skrl.multi_agents.jax.ippo.ippo import IPPO, IPPO_DEFAULT_CONFIG
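Both JAX multi-agent algorithms are configured through a default dictionary that the constructor updates with any user-supplied keys. A minimal sketch of overriding a few IPPO hyperparameters (the chosen values are arbitrary examples, not recommendations):

import copy

from skrl.multi_agents.jax.ippo import IPPO_DEFAULT_CONFIG

cfg = copy.deepcopy(IPPO_DEFAULT_CONFIG)
cfg["rollouts"] = 32                       # collect more transitions before each update
cfg["learning_epochs"] = 4                 # fewer passes over each rollout
cfg["entropy_loss_scale"] = 0.01           # encourage exploration
cfg["experiment"]["write_interval"] = 500  # TensorBoard logging period (timesteps)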
Toni-SM/skrl/skrl/multi_agents/jax/ippo/ippo.py
from typing import Any, Mapping, Optional, Sequence, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.multi_agents.jax import MultiAgent from skrl.resources.optimizers.jax import Adam from skrl.resources.schedulers.jax import KLAdaptiveLR # [start-config-dict-jax] IPPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] def compute_gae(rewards: np.ndarray, dones: np.ndarray, values: np.ndarray, next_values: np.ndarray, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> np.ndarray: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: np.ndarray :param dones: Signals to indicate that episodes have ended :type dones: np.ndarray :param values: Values obtained by the agent :type values: np.ndarray :param next_values: Next values obtained by the agent :type next_values: np.ndarray :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: np.ndarray """ advantage = 0 advantages = np.zeros_like(rewards) not_dones = np.logical_not(dones) memory_size = rewards.shape[0] # advantages computation 
for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _compute_gae(rewards: jax.Array, dones: jax.Array, values: jax.Array, next_values: jax.Array, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> jax.Array: advantage = 0 advantages = jnp.zeros_like(rewards) not_dones = jnp.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages = advantages.at[i].set(advantage) # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages @functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale")) def _update_policy(policy_act, policy_state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, ratio_clip, get_entropy, entropy_loss_scale): # compute policy loss def _policy_loss(params): _, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions}, "policy", params) # compute approximate KL divergence ratio = next_log_prob - sampled_log_prob kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean() # compute policy loss ratio = jnp.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip) # compute entropy loss entropy_loss = 0 if entropy_loss_scale: entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean() return -jnp.minimum(surrogate, surrogate_clipped).mean(), (entropy_loss, kl_divergence, outputs["stddev"]) (policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params) return grad, policy_loss, entropy_loss, kl_divergence, stddev @functools.partial(jax.jit, static_argnames=("value_act", "clip_predicted_values")) def _update_value(value_act, value_state_dict, sampled_states, sampled_values, sampled_returns, value_loss_scale, clip_predicted_values, value_clip): # compute value loss def _value_loss(params): predicted_values, _, _ = value_act({"states": sampled_states}, "value", params) if clip_predicted_values: predicted_values = sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip) return value_loss_scale * ((sampled_returns - predicted_values) ** 2).mean() value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params) return grad, value_loss class IPPO(MultiAgent): def __init__(self, possible_agents: Sequence[str], models: Mapping[str, Model], memories: Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, 
gymnasium.Space]]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None) -> None: """Independent Proximal Policy Optimization (IPPO) https://arxiv.org/abs/2011.09533 :param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.jax.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.jax.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict """ # _cfg = copy.deepcopy(IPPO_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = IPPO_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(possible_agents=possible_agents, models=models, memories=memories, observation_spaces=observation_spaces, action_spaces=action_spaces, device=device, cfg=_cfg) # models self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents} self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents} for uid in self.possible_agents: self.checkpoint_modules[uid]["policy"] = self.policies[uid] self.checkpoint_modules[uid]["value"] = self.values[uid] # configuration self._learning_epochs = self._as_dict(self.cfg["learning_epochs"]) self._mini_batches = self._as_dict(self.cfg["mini_batches"]) self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"]) self._ratio_clip = self._as_dict(self.cfg["ratio_clip"]) self._value_clip = self._as_dict(self.cfg["value_clip"]) self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"]) self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"]) self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"]) self._kl_threshold = self._as_dict(self.cfg["kl_threshold"]) self._learning_rate = self._as_dict(self.cfg["learning_rate"]) self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"]) self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"]) self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"]) self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"]) self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"]) self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"]) self._discount_factor = self._as_dict(self.cfg["discount_factor"]) self._lambda = self._as_dict(self.cfg["lambda"]) self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"]) # set up optimizer and learning rate scheduler 
self.policy_optimizer = {} self.value_optimizer = {} self.schedulers = {} for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] if policy is not None and value is not None: # scheduler scale = True self.schedulers[uid] = None if self._learning_rate_scheduler[uid] is not None: if self._learning_rate_scheduler[uid] == KLAdaptiveLR: scale = False self.schedulers[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid]) else: self._learning_rate[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid]) # optimizer self.policy_optimizer[uid] = Adam(model=policy, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale) self.value_optimizer[uid] = Adam(model=value, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale) self.checkpoint_modules[uid]["policy_optimizer"] = self.policy_optimizer[uid] self.checkpoint_modules[uid]["value_optimizer"] = self.value_optimizer[uid] # set up preprocessors if self._state_preprocessor[uid] is not None: self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid] else: self._state_preprocessor[uid] = self._empty_preprocessor if self._value_preprocessor[uid] is not None: self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid] else: self._value_preprocessor[uid] = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memories for uid in self.possible_agents: self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=jnp.float32) self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=jnp.float32) self.memories[uid].create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="terminated", size=1, dtype=jnp.int8) self.memories[uid].create_tensor(name="log_prob", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="values", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="returns", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="advantages", size=1, dtype=jnp.float32) # tensors sampled during training self._tensors_names = ["states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = [] self._current_next_states = [] # set up models for just-in-time compilation with XLA for uid in self.possible_agents: self.policies[uid].apply = jax.jit(self.policies[uid].apply, static_argnums=2) if self.values[uid] is not None: self.values[uid].apply = jax.jit(self.values[uid].apply, static_argnums=2) def act(self, states: Mapping[str, Union[np.ndarray, jax.Array]], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policies :param states: Environment's states :type states: dictionary of np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or 
jax.Array """ # # sample random actions # # TODO: fix for stochasticity, rnn and log_prob # if timestep < self._random_timesteps: # return self.policy.random_act({"states": states}, role="policy") # sample stochastic actions data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents] actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)} log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)} outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)} if not self._jax: # numpy backend actions = {jax.device_get(_actions) for _actions in actions} log_prob = {jax.device_get(_log_prob) for _log_prob in log_prob} self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: Mapping[str, Union[np.ndarray, jax.Array]], actions: Mapping[str, Union[np.ndarray, jax.Array]], rewards: Mapping[str, Union[np.ndarray, jax.Array]], next_states: Mapping[str, Union[np.ndarray, jax.Array]], terminated: Mapping[str, Union[np.ndarray, jax.Array]], truncated: Mapping[str, Union[np.ndarray, jax.Array]], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: dictionary of np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: dictionary of np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: dictionary of np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: dictionary of any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memories: self._current_next_states = next_states for uid in self.possible_agents: # reward shaping if self._rewards_shaper is not None: rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps) # compute values values, _, _ = self.values[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="value") if not self._jax: # numpy backend values = jax.device_get(values) values = self._value_preprocessor[uid](values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap[uid]: rewards[uid] += self._discount_factor[uid] * values * truncated[uid] # storage transition in memory self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid], terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the 
interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] memory = self.memories[uid] # compute returns and advantages value.training = False last_values, _, _ = value.act({"states": self._state_preprocessor[uid](self._current_next_states[uid])}, role="value") # TODO: .float() value.training = True if not self._jax: # numpy backend last_values = jax.device_get(last_values) last_values = self._value_preprocessor[uid](last_values, inverse=True) values = memory.get_tensor_by_name("values") if self._jax: returns, advantages = _compute_gae(rewards=memory.get_tensor_by_name("rewards"), dones=memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor[uid], lambda_coefficient=self._lambda[uid]) else: returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"), dones=memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor[uid], lambda_coefficient=self._lambda[uid]) memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True)) memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True)) memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid]) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs[uid]): kl_divergences = [] # mini-batches loop for sampled_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages in sampled_batches: sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch) # compute policy loss grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(policy.act, policy.state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, self._ratio_clip[uid], policy.get_entropy, self._entropy_loss_scale[uid]) kl_divergences.append(kl_divergence.item()) # early stopping with KL divergence if self._kl_threshold[uid] and kl_divergence > self._kl_threshold[uid]: break # optimization step (policy) self.policy_optimizer[uid] = self.policy_optimizer[uid].step(grad, policy, self.schedulers[uid]._lr if self.schedulers[uid] else None) # compute value loss grad, value_loss = _update_value(value.act, value.state_dict, sampled_states, sampled_values, sampled_returns, self._value_loss_scale[uid], self._clip_predicted_values[uid], self._value_clip[uid]) # optimization step (value) self.value_optimizer[uid] = self.value_optimizer[uid].step(grad, value, self.schedulers[uid]._lr if self.schedulers[uid] else None) # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale[uid]: cumulative_entropy_loss 
+= entropy_loss.item() # update learning rate if self._learning_rate_scheduler[uid]: if isinstance(self.schedulers[uid], KLAdaptiveLR): self.schedulers[uid].step(np.mean(kl_divergences)) # record data self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) if self._entropy_loss_scale: self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Policy / Standard deviation ({uid})", stddev.mean().item()) if self._learning_rate_scheduler[uid]: self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid]._lr)
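Every per-agent argument of `IPPO` (models, memories, spaces) is a dictionary keyed by the names in `possible_agents`. A hedged wiring sketch, assuming a wrapped multi-agent environment `env` exposing per-agent spaces, and user-defined `Policy`/`Value` model classes (placeholders, not part of skrl):

from skrl.memories.jax import RandomMemory
from skrl.multi_agents.jax.ippo import IPPO

possible_agents = env.possible_agents  # e.g. ["agent_0", "agent_1"]
models = {uid: {"policy": Policy(env.observation_spaces[uid], env.action_spaces[uid], env.device),
                "value": Value(env.observation_spaces[uid], env.action_spaces[uid], env.device)}
          for uid in possible_agents}
memories = {uid: RandomMemory(memory_size=cfg["rollouts"], num_envs=env.num_envs, device=env.device)
            for uid in possible_agents}

agents = IPPO(possible_agents=possible_agents,
              models=models,
              memories=memories,
              observation_spaces=env.observation_spaces,
              action_spaces=env.action_spaces,
              device=env.device,
              cfg=cfg)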
Toni-SM/skrl/skrl/utils/control.py
import isaacgym.torch_utils as torch_utils

import torch


def ik(jacobian_end_effector,
       current_position, current_orientation,
       goal_position, goal_orientation,
       damping_factor=0.05):
    """
    Damped Least Squares method: https://www.math.ucsd.edu/~sbuss/ResearchWeb/ikmethods/iksurvey.pdf
    """
    # compute position and orientation error
    position_error = goal_position - current_position
    q_r = torch_utils.quat_mul(goal_orientation, torch_utils.quat_conjugate(current_orientation))
    orientation_error = q_r[:, 0:3] * torch.sign(q_r[:, 3]).unsqueeze(-1)

    dpose = torch.cat([position_error, orientation_error], -1).unsqueeze(-1)

    # solve damped least squares (dO = J.T * V)
    transpose = torch.transpose(jacobian_end_effector, 1, 2)
    lmbda = torch.eye(6).to(jacobian_end_effector.device) * (damping_factor ** 2)
    return (transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ dpose)


def osc(jacobian_end_effector, mass_matrix,
        current_position, current_orientation,
        goal_position, goal_orientation,
        current_dof_velocities,
        kp=5, kv=2):
    """
    https://studywolf.wordpress.com/2013/09/17/robot-control-4-operation-space-control/
    """
    mass_matrix_end_effector = torch.inverse(jacobian_end_effector @ torch.inverse(mass_matrix) @ torch.transpose(jacobian_end_effector, 1, 2))

    # compute position and orientation error
    position_error = kp * (goal_position - current_position)
    q_r = torch_utils.quat_mul(goal_orientation, torch_utils.quat_conjugate(current_orientation))
    orientation_error = q_r[:, 0:3] * torch.sign(q_r[:, 3]).unsqueeze(-1)

    dpose = torch.cat([position_error, orientation_error], -1)

    return torch.transpose(jacobian_end_effector, 1, 2) @ mass_matrix_end_effector @ (kp * dpose).unsqueeze(-1) - kv * mass_matrix @ current_dof_velocities
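A usage sketch for the damped least-squares solver above: given a batch of end-effector Jacobians and current/goal poses, `ik` returns a joint-space displacement that can be added to the current DOF position targets. The tensor shapes and the (x, y, z, w) quaternion layout are assumptions about how the simulator buffers are typically sliced.

import torch

from skrl.utils.control import ik

num_envs, num_dofs = 64, 7
jacobian_ee = torch.rand(num_envs, 6, num_dofs)  # end-effector rows of the Jacobian
dof_positions = torch.rand(num_envs, num_dofs)

current_position, goal_position = torch.rand(num_envs, 3), torch.rand(num_envs, 3)
current_orientation = torch.nn.functional.normalize(torch.rand(num_envs, 4), dim=-1)  # (x, y, z, w)
goal_orientation = torch.nn.functional.normalize(torch.rand(num_envs, 4), dim=-1)

delta_dof = ik(jacobian_ee, current_position, current_orientation,
               goal_position, goal_orientation).squeeze(-1)  # (num_envs, num_dofs)
dof_position_targets = dof_positions + delta_dof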
Toni-SM/skrl/skrl/utils/huggingface.py
from skrl import __version__, logger def download_model_from_huggingface(repo_id: str, filename: str = "agent.pt") -> str: """Download a model from Hugging Face Hub :param repo_id: Hugging Face user or organization name and a repo name separated by a ``/`` :type repo_id: str :param filename: The name of the model file in the repo (default: ``"agent.pt"``) :type filename: str, optional :raises ImportError: The Hugging Face Hub package (huggingface-hub) is not installed :raises huggingface_hub.utils._errors.HfHubHTTPError: Any HTTP error raised in Hugging Face Hub :return: Local path of file or if networking is off, last version of file cached on disk :rtype: str Example:: # download trained agent from the skrl organization (https://huggingface.co/skrl) >>> from skrl.utils.huggingface import download_model_from_huggingface >>> download_model_from_huggingface("skrl/OmniIsaacGymEnvs-Cartpole-PPO") '/home/user/.cache/huggingface/hub/models--skrl--OmniIsaacGymEnvs-Cartpole-PPO/snapshots/892e629903de6bf3ef102ae760406a5dd0f6f873/agent.pt' # download model (e.g. "policy.pth") from another user/organization (e.g. "org/ddpg-Pendulum-v1") >>> from skrl.utils.huggingface import download_model_from_huggingface >>> download_model_from_huggingface("org/ddpg-Pendulum-v1", "policy.pth") '/home/user/.cache/huggingface/hub/models--org--ddpg-Pendulum-v1/snapshots/b44ee96f93ff2e296156b002a2ca4646e197ba32/policy.pth' """ logger.info(f"Downloading model from Hugging Face Hub: {repo_id}/{filename}") try: import huggingface_hub except ImportError: logger.error("Hugging Face Hub package is not installed. Use 'pip install huggingface-hub' to install it") huggingface_hub = None if huggingface_hub is None: raise ImportError("Hugging Face Hub package is not installed. Use 'pip install huggingface-hub' to install it") # download and cache the model from Hugging Face Hub downloaded_model_file = huggingface_hub.hf_hub_download(repo_id=repo_id, filename=filename, library_name="skrl", library_version=__version__) return downloaded_model_file
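The returned path can be passed directly to an agent's checkpoint loader. A hedged sketch, assuming `agent` is an already-constructed skrl agent whose models and spaces match the downloaded checkpoint:

from skrl.utils.huggingface import download_model_from_huggingface

path = download_model_from_huggingface("skrl/OmniIsaacGymEnvs-Cartpole-PPO", "agent.pt")
agent.load(path)  # `agent` must be built with the same models/spaces the checkpoint was trained with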
Toni-SM/skrl/skrl/utils/postprocessing.py
from typing import List, Tuple, Union import collections import csv import glob import os import numpy as np import torch class MemoryFileIterator(): def __init__(self, pathname: str) -> None: """Python iterator for loading data from exported memories The iterator will load the next memory file in the list of path names. The output of the iterator is a tuple of the filename and the memory data where the memory data is a dictionary of torch.Tensor (PyTorch), numpy.ndarray (NumPy) or lists (CSV) depending on the format and the keys of the dictionary are the names of the variables Supported formats: - PyTorch (pt) - NumPy (npz) - Comma-separated values (csv) Expected output shapes: - PyTorch: (memory_size, num_envs, data_size) - NumPy: (memory_size, num_envs, data_size) - Comma-separated values: (memory_size * num_envs, data_size) :param pathname: String containing a path specification for the exported memories. Python `glob <https://docs.python.org/3/library/glob.html#glob.glob>`_ method is used to find all files matching the path specification :type pathname: str """ self.n = 0 self.file_paths = sorted(glob.glob(pathname)) def __iter__(self) -> 'MemoryFileIterator': """Return self to make iterable""" return self def __next__(self) -> Tuple[str, dict]: """Return next batch :return: Tuple of file name and data :rtype: tuple """ if self.n >= len(self.file_paths): raise StopIteration if self.file_paths[self.n].endswith(".pt"): return self._format_torch() elif self.file_paths[self.n].endswith(".npz"): return self._format_numpy() elif self.file_paths[self.n].endswith(".csv"): return self._format_csv() else: raise ValueError(f"Unsupported format for {self.file_paths[self.n]}. Available formats: .pt, .csv, .npz") def _format_numpy(self) -> Tuple[str, dict]: """Load numpy array from file :return: Tuple of file name and data :rtype: tuple """ filename = os.path.basename(self.file_paths[self.n]) data = np.load(self.file_paths[self.n]) self.n += 1 return filename, data def _format_torch(self) -> Tuple[str, dict]: """Load PyTorch tensor from file :return: Tuple of file name and data :rtype: tuple """ filename = os.path.basename(self.file_paths[self.n]) data = torch.load(self.file_paths[self.n]) self.n += 1 return filename, data def _format_csv(self) -> Tuple[str, dict]: """Load CSV file from file :return: Tuple of file name and data :rtype: tuple """ filename = os.path.basename(self.file_paths[self.n]) with open(self.file_paths[self.n], 'r') as f: reader = csv.reader(f) # parse header try: header = next(reader, None) data = collections.defaultdict(int) for h in header: h.split(".")[1] # check header format data[h.split(".")[0]] += 1 names = sorted(list(data.keys())) sizes = [data[name] for name in names] indexes = [(low, high) for low, high in zip(np.cumsum(sizes) - np.array(sizes), np.cumsum(sizes))] except: self.n += 1 return filename, {} # parse data data = {name: [] for name in names} for row in reader: for name, index in zip(names, indexes): data[name].append([float(item) if item not in ["True", "False"] else bool(item) \ for item in row[index[0]:index[1]]]) self.n += 1 return filename, data class TensorboardFileIterator(): def __init__(self, pathname: str, tags: Union[str, List[str]]) -> None: """Python iterator for loading data from Tensorboard files The iterator will load the next Tensorboard file in the list of path names. The iterator's output is a tuple of the directory name and the Tensorboard variables selected by the tags. 
The Tensorboard data is returned as a dictionary with the tag as the key and a list of steps and values as the value :param pathname: String containing a path specification for the Tensorboard files. Python `glob <https://docs.python.org/3/library/glob.html#glob.glob>`_ method is used to find all files matching the path specification :type pathname: str :param tags: String or list of strings containing the tags of the variables to load :type tags: str or list of str """ self.n = 0 self.file_paths = sorted(glob.glob(pathname)) self.tags = [tags] if isinstance(tags, str) else tags def __iter__(self) -> 'TensorboardFileIterator': """Return self to make iterable""" return self def __next__(self) -> Tuple[str, dict]: """Return next batch :return: Tuple of directory name and data :rtype: tuple """ from tensorflow.python.summary.summary_iterator import summary_iterator if self.n >= len(self.file_paths): raise StopIteration file_path = self.file_paths[self.n] self.n += 1 data = {} for event in summary_iterator(file_path): try: # get Tensorboard data step = event.step tag = event.summary.value[0].tag value = event.summary.value[0].simple_value # record data if tag in self.tags: if not tag in data: data[tag] = [] data[tag].append([step, value]) except Exception as e: pass return os.path.dirname(file_path).split(os.sep)[-1], data
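Both iterators are meant to be consumed in a plain `for` loop. A short usage sketch (the glob patterns and the tag name are examples, not fixed conventions):

from skrl.utils import postprocessing

# exported memories: one (filename, data) pair per file matched by the glob
for filename, data in postprocessing.MemoryFileIterator("memories/*.pt"):
    print(filename, {name: tensor.shape for name, tensor in data.items()})

# TensorBoard runs: one (directory, data) pair per event file, filtered by tags
tags = ["Reward / Total reward (mean)"]
for dirname, data in postprocessing.TensorboardFileIterator("runs/*/events.out.tfevents.*", tags=tags):
    steps_and_values = data.get(tags[0], [])  # list of [step, value] pairs
    print(dirname, len(steps_and_values))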
Toni-SM/skrl/skrl/utils/__init__.py
from typing import Optional import os import random import sys import time import numpy as np from skrl import config, logger def set_seed(seed: Optional[int] = None, deterministic: bool = False) -> int: """ Set the seed for the random number generators Due to NumPy's legacy seeding constraint the seed must be between 0 and 2**32 - 1. Otherwise a NumPy exception (``ValueError: Seed must be between 0 and 2**32 - 1``) will be raised Modified packages: - random - numpy - torch (if available) - jax (skrl's PRNG key: ``config.jax.key``) Example:: # fixed seed >>> from skrl.utils import set_seed >>> set_seed(42) [skrl:INFO] Seed: 42 42 # random seed >>> from skrl.utils import set_seed >>> set_seed() [skrl:INFO] Seed: 1776118066 1776118066 # enable deterministic. The following environment variables should be established: # - CUDA 10.1: CUDA_LAUNCH_BLOCKING=1 # - CUDA 10.2 or later: CUBLAS_WORKSPACE_CONFIG=:16:8 or CUBLAS_WORKSPACE_CONFIG=:4096:8 >>> from skrl.utils import set_seed >>> set_seed(42, deterministic=True) [skrl:INFO] Seed: 42 [skrl:WARNING] PyTorch/cuDNN deterministic algorithms are enabled. This may affect performance 42 :param seed: The seed to set. Is None, a random seed will be generated (default: ``None``) :type seed: int, optional :param deterministic: Whether PyTorch is configured to use deterministic algorithms (default: ``False``). The following environment variables should be established for CUDA 10.1 (``CUDA_LAUNCH_BLOCKING=1``) and for CUDA 10.2 or later (``CUBLAS_WORKSPACE_CONFIG=:16:8`` or ``CUBLAS_WORKSPACE_CONFIG=:4096:8``). See PyTorch `Reproducibility <https://pytorch.org/docs/stable/notes/randomness.html>`_ for details :type deterministic: bool, optional :return: Seed :rtype: int """ # generate a random seed if seed is None: try: seed = int.from_bytes(os.urandom(4), byteorder=sys.byteorder) except NotImplementedError: seed = int(time.time() * 1000) seed %= 2 ** 31 # NumPy's legacy seeding seed must be between 0 and 2**32 - 1 seed = int(seed) logger.info(f"Seed: {seed}") # numpy random.seed(seed) np.random.seed(seed) # torch try: import torch torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) if deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True # On CUDA 10.1, set environment variable CUDA_LAUNCH_BLOCKING=1 # On CUDA 10.2 or later, set environment variable CUBLAS_WORKSPACE_CONFIG=:16:8 or CUBLAS_WORKSPACE_CONFIG=:4096:8 logger.warning("PyTorch/cuDNN deterministic algorithms are enabled. This may affect performance") except ImportError: pass except Exception as e: logger.warning(f"PyTorch seeding error: {e}") # jax config.jax.key = seed return seed