Dataset columns (ranges show the observed minimum and maximum, or the number of distinct values):
    file_path          string   (length 21 to 202)
    content            string   (length 12 to 1.02M)
    size               int64    (12 to 1.02M)
    lang               string   (9 distinct values)
    avg_line_length    float64  (3.33 to 100)
    max_line_length    int64    (10 to 993)
    alphanum_fraction  float64  (0.27 to 0.93)
Toni-SM/skrl/skrl/models/jax/deterministic.py
from typing import Any, Mapping, Optional, Tuple, Union

import gym
import gymnasium

import flax
import jax
import jax.numpy as jnp
import numpy as np


class DeterministicMixin:
    def __init__(self, clip_actions: bool = False, role: str = "") -> None:
        """Deterministic mixin model (deterministic model)

        :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``)
        :type clip_actions: bool, optional
        :param role: Role played by the model (default: ``""``)
        :type role: str, optional

        Example::

            # define the model
            >>> import flax.linen as nn
            >>> from skrl.models.jax import Model, DeterministicMixin
            >>>
            >>> class Value(DeterministicMixin, Model):
            ...     def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
            ...         Model.__init__(self, observation_space, action_space, device, **kwargs)
            ...         DeterministicMixin.__init__(self, clip_actions)
            ...
            ...     @nn.compact  # marks the given module method allowing inlined submodules
            ...     def __call__(self, inputs, role):
            ...         x = nn.elu(nn.Dense(32)(inputs["states"]))
            ...         x = nn.elu(nn.Dense(32)(x))
            ...         x = nn.Dense(1)(x)
            ...         return x, {}
            ...
            >>> # given an observation_space: gym.spaces.Box with shape (60,)
            >>> # and an action_space: gym.spaces.Box with shape (8,)
            >>> model = Value(observation_space, action_space)
            >>>
            >>> print(model)
            Value(
                # attributes
                observation_space = Box(-1.0, 1.0, (60,), float32)
                action_space = Box(-1.0, 1.0, (8,), float32)
                device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0)
            )
        """
        if not hasattr(self, "_d_clip_actions"):
            self._d_clip_actions = {}
        self._d_clip_actions[role] = clip_actions and (issubclass(type(self.action_space), gym.Space) or \
            issubclass(type(self.action_space), gymnasium.Space))

        if self._d_clip_actions[role]:
            self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32)
            self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32)

        # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError
        flax.linen.Module.__post_init__(self)

    def act(self,
            inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]],
            role: str = "",
            params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]:
        """Act deterministically in response to the state of the environment

        :param inputs: Model inputs. The most common keys are:

                       - ``"states"``: state of the environment used to make the decision
                       - ``"taken_actions"``: actions taken by the policy for the given states
        :type inputs: dict where the values are typically np.ndarray or jax.Array
        :param role: Role played by the model (default: ``""``)
        :type role: str, optional
        :param params: Parameters used to compute the output (default: ``None``).
                       If ``None``, internal parameters will be used
        :type params: jnp.array
        :return: Model output. The first component is the action to be taken by the agent.
                 The second component is ``None``. The third component is a dictionary containing extra output values
        :rtype: tuple of jax.Array, jax.Array or None, and dict

        Example::

            >>> # given a batch of sample states with shape (4096, 60)
            >>> actions, _, outputs = model.act({"states": states})
            >>> print(actions.shape, outputs)
            (4096, 1) {}
        """
        # map from observations/states to actions
        actions, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role)

        # clip actions
        if self._d_clip_actions[role] if role in self._d_clip_actions else self._d_clip_actions[""]:
            actions = jnp.clip(actions, a_min=self.clip_actions_min, a_max=self.clip_actions_max)

        return actions, None, outputs
size: 4,542 | lang: Python | avg_line_length: 44.888888 | max_line_length: 129 | alphanum_fraction: 0.57948
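When ``clip_actions`` is enabled, the mixin above simply clamps the network output against the action-space bounds with ``jnp.clip``. A minimal, self-contained sketch of that behavior, independent of skrl (the bounds and batch values are invented for illustration):

import jax.numpy as jnp

# hypothetical action-space bounds, mirroring clip_actions_min / clip_actions_max in the mixin
clip_actions_min = jnp.array([-1.0, -1.0, -1.0])
clip_actions_max = jnp.array([1.0, 1.0, 1.0])

# raw network outputs for a batch of 4 observations (arbitrary values)
raw_actions = jnp.array([[0.3, -1.7, 0.9],
                         [2.1, 0.0, -0.2],
                         [-0.5, 0.5, 1.4],
                         [0.0, -3.0, 0.1]])

# same element-wise operation the mixin applies when clip_actions=True
clipped = jnp.clip(raw_actions, clip_actions_min, clip_actions_max)
print(clipped.min(), clipped.max())  # all values now lie in [-1, 1]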
Toni-SM/skrl/skrl/models/jax/gaussian.py
from typing import Any, Mapping, Optional, Tuple, Union from functools import partial import gym import gymnasium import flax import jax import jax.numpy as jnp import numpy as np from skrl import config # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @partial(jax.jit, static_argnames=("reduction")) def _gaussian(loc, log_std, log_std_min, log_std_max, clip_actions_min, clip_actions_max, taken_actions, key, reduction): # clamp log standard deviations log_std = jnp.clip(log_std, a_min=log_std_min, a_max=log_std_max) # distribution scale = jnp.exp(log_std) # sample actions actions = jax.random.normal(key, loc.shape) * scale + loc # clip actions actions = jnp.clip(actions, a_min=clip_actions_min, a_max=clip_actions_max) # log of the probability density function taken_actions = actions if taken_actions is None else taken_actions log_prob = -jnp.square(taken_actions - loc) / (2 * jnp.square(scale)) - jnp.log(scale) - 0.5 * jnp.log(2 * jnp.pi) if reduction is not None: log_prob = reduction(log_prob, axis=-1) if log_prob.ndim != actions.ndim: log_prob = jnp.expand_dims(log_prob, -1) return actions, log_prob, log_std, scale @jax.jit def _entropy(scale): return 0.5 + 0.5 * jnp.log(2 * jnp.pi) + jnp.log(scale) class GaussianMixin: def __init__(self, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, reduction: str = "sum", role: str = "") -> None: """Gaussian mixin model (stochastic model) :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: ``False``) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: ``True``) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation if ``clip_log_std`` is True (default: ``-20``) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation if ``clip_log_std`` is True (default: ``2``) :type max_log_std: float, optional :param reduction: Reduction method for returning the log probability density function: (default: ``"sum"``). Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If "``none"``, the log probability density function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)`` :type reduction: str, optional :param role: Role play by the model (default: ``""``) :type role: str, optional :raises ValueError: If the reduction method is not valid Example:: # define the model >>> import flax.linen as nn >>> from skrl.models.jax import Model, GaussianMixin >>> >>> class Policy(GaussianMixin, Model): ... def __init__(self, observation_space, action_space, device=None, ... clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): ... Model.__init__(self, observation_space, action_space, device, **kwargs) ... GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) ... ... def setup(self): ... self.layer_1 = nn.Dense(32) ... self.layer_2 = nn.Dense(32) ... self.layer_3 = nn.Dense(self.num_actions) ... ... self.log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions)) ... ... def __call__(self, inputs, role): ... x = nn.elu(self.layer_1(inputs["states"])) ... x = nn.elu(self.layer_2(x)) ... return self.layer_3(x), self.log_std_parameter, {} ... 
>>> # given an observation_space: gym.spaces.Box with shape (60,) >>> # and an action_space: gym.spaces.Box with shape (8,) >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( # attributes observation_space = Box(-1.0, 1.0, (60,), float32) action_space = Box(-1.0, 1.0, (8,), float32) device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0) ) """ self._clip_actions = clip_actions and (issubclass(type(self.action_space), gym.Space) or \ issubclass(type(self.action_space), gymnasium.Space)) if self._clip_actions: self.clip_actions_min = jnp.array(self.action_space.low, dtype=jnp.float32) self.clip_actions_max = jnp.array(self.action_space.high, dtype=jnp.float32) else: self.clip_actions_min = -jnp.inf self.clip_actions_max = jnp.inf self._clip_log_std = clip_log_std if self._clip_log_std: self._log_std_min = min_log_std self._log_std_max = max_log_std else: self._log_std_min = -jnp.inf self._log_std_max = jnp.inf if reduction not in ["mean", "sum", "prod", "none"]: raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'") self._reduction = jnp.mean if reduction == "mean" else jnp.sum if reduction == "sum" \ else jnp.prod if reduction == "prod" else None self._i = 0 self._key = config.jax.key # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError flax.linen.Module.__post_init__(self) def act(self, inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]], role: str = "", params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically np.ndarray or jax.Array :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. 
The third component is a dictionary containing the mean actions ``"mean_actions"`` and extra output values :rtype: tuple of jax.Array, jax.Array or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 60) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["mean_actions"].shape) (4096, 8) (4096, 1) (4096, 8) """ self._i += 1 subkey = jax.random.fold_in(self._key, self._i) inputs["key"] = subkey # map from states/observations to mean actions and log standard deviations mean_actions, log_std, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role) actions, log_prob, log_std, stddev = _gaussian(mean_actions, log_std, self._log_std_min, self._log_std_max, self.clip_actions_min, self.clip_actions_max, inputs.get("taken_actions", None), subkey, self._reduction) outputs["mean_actions"] = mean_actions # avoid jax.errors.UnexpectedTracerError outputs["log_std"] = log_std outputs["stddev"] = stddev return actions, log_prob, outputs def get_entropy(self, stddev: jax.Array, role: str = "") -> jax.Array: """Compute and return the entropy of the model :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Entropy of the model :rtype: jax.Array Example:: # given a standard deviation array: stddev >>> entropy = model.get_entropy(stddev) >>> print(entropy.shape) (4096, 8) """ return _entropy(stddev)
size: 9,496 | lang: Python | avg_line_length: 42.764977 | max_line_length: 139 | alphanum_fraction: 0.548757
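The jitted ``_gaussian`` helper above evaluates the diagonal-Gaussian log-density in closed form and then reduces it over the action dimension. A standalone sketch of that same math (the function name, shapes and values here are illustrative, not skrl's API):

import jax
import jax.numpy as jnp

def gaussian_log_prob(actions, loc, log_std, reduction=jnp.sum):
    # per-dimension log N(actions; loc, exp(log_std)), same closed form as _gaussian
    scale = jnp.exp(log_std)
    log_prob = -jnp.square(actions - loc) / (2 * jnp.square(scale)) - jnp.log(scale) - 0.5 * jnp.log(2 * jnp.pi)
    # the "sum" reduction collapses the action dimension, giving one value per sample
    if reduction is not None:
        log_prob = reduction(log_prob, axis=-1, keepdims=True)
    return log_prob

key = jax.random.PRNGKey(0)
loc = jnp.zeros((4096, 8))           # mean actions for a batch
log_std = -0.5 * jnp.ones((8,))      # shared log standard deviation
actions = jax.random.normal(key, (4096, 8)) * jnp.exp(log_std) + loc

print(gaussian_log_prob(actions, loc, log_std).shape)  # (4096, 1)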
Toni-SM/skrl/skrl/models/jax/multicategorical.py
from typing import Any, Mapping, Optional, Tuple, Union from functools import partial import flax import jax import jax.numpy as jnp import numpy as np from skrl import config # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @partial(jax.jit, static_argnames=("unnormalized_log_prob")) def _categorical(net_output, unnormalized_log_prob, taken_actions, key): # normalize if unnormalized_log_prob: logits = net_output - jax.scipy.special.logsumexp(net_output, axis=-1, keepdims=True) # probs = jax.nn.softmax(logits) else: probs = net_output / net_output.sum(-1, keepdims=True) eps = jnp.finfo(probs.dtype).eps logits = jnp.log(probs.clip(min=eps, max=1 - eps)) # sample actions actions = jax.random.categorical(key, logits, axis=-1, shape=None) # log of the probability density function taken_actions = actions if taken_actions is None else taken_actions.astype(jnp.int32).reshape(-1) log_prob = jax.nn.log_softmax(logits)[jnp.arange(taken_actions.shape[0]), taken_actions] return actions.reshape(-1, 1), log_prob.reshape(-1, 1) @jax.jit def _entropy(logits): logits = logits - jax.scipy.special.logsumexp(logits, axis=-1, keepdims=True) logits = logits.clip(min=jnp.finfo(logits.dtype).min) p_log_p = logits * jax.nn.softmax(logits) return -p_log_p.sum(-1) class MultiCategoricalMixin: def __init__(self, unnormalized_log_prob: bool = True, reduction: str = "sum", role: str = "") -> None: """MultiCategorical mixin model (stochastic model) :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: ``True``). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param reduction: Reduction method for returning the log probability density function: (default: ``"sum"``). Supported values are ``"mean"``, ``"sum"``, ``"prod"`` and ``"none"``. If "``none"``, the log probability density function is returned as a tensor of shape ``(num_samples, num_actions)`` instead of ``(num_samples, 1)`` :type reduction: str, optional :param role: Role play by the model (default: ``""``) :type role: str, optional :raises ValueError: If the reduction method is not valid Example:: # define the model >>> import flax.linen as nn >>> from skrl.models.jax import Model, MultiCategoricalMixin >>> >>> class Policy(MultiCategoricalMixin, Model): ... def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, reduction="sum", **kwargs): ... Model.__init__(self, observation_space, action_space, device, **kwargs) ... MultiCategoricalMixin.__init__(self, unnormalized_log_prob, reduction) ... ... @nn.compact # marks the given module method allowing inlined submodules ... def __call__(self, inputs, role): ... x = nn.elu(nn.Dense(32)(inputs["states"])) ... x = nn.elu(nn.Dense(32)(x)) ... x = nn.Dense(self.num_actions)(x) ... return x, {} ... 
>>> # given an observation_space: gym.spaces.Box with shape (4,) >>> # and an action_space: gym.spaces.MultiDiscrete with nvec = [3, 2] >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( # attributes observation_space = Box(-1.0, 1.0, (4,), float32) action_space = MultiDiscrete([3 2]) device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0) ) """ self._unnormalized_log_prob = unnormalized_log_prob if reduction not in ["mean", "sum", "prod", "none"]: raise ValueError("reduction must be one of 'mean', 'sum', 'prod' or 'none'") self._reduction = jnp.mean if reduction == "mean" else jnp.sum if reduction == "sum" \ else jnp.prod if reduction == "prod" else None self._i = 0 self._key = config.jax.key self._action_space_nvec = np.cumsum(self.action_space.nvec).tolist() self._action_space_shape = self._get_space_size(self.action_space, number_of_elements=False) # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError flax.linen.Module.__post_init__(self) def act(self, inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]], role: str = "", params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically np.ndarray or jax.Array :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. 
The third component is a dictionary containing the network output ``"net_output"`` and extra output values :rtype: tuple of jax.Array, jax.Array or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 4) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["net_output"].shape) (4096, 2) (4096, 1) (4096, 5) """ self._i += 1 subkey = jax.random.fold_in(self._key, self._i) inputs["key"] = subkey # map from states/observations to normalized probabilities or unnormalized log probabilities net_output, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role) # split inputs net_outputs = jnp.split(net_output, self._action_space_nvec, axis=-1) if "taken_actions" in inputs: taken_actions = jnp.split(inputs["taken_actions"], self._action_space_shape, axis=-1) else: taken_actions = [None] * self._action_space_shape # compute actions and log_prob actions, log_prob = [], [] for _net_output, _taken_actions in zip(net_outputs, taken_actions): _actions, _log_prob = _categorical(_net_output, self._unnormalized_log_prob, _taken_actions, subkey) actions.append(_actions) log_prob.append(_log_prob) actions = jnp.concatenate(actions, axis=-1) log_prob = jnp.concatenate(log_prob, axis=-1) if self._reduction is not None: log_prob = self._reduction(log_prob, axis=-1) if log_prob.ndim != actions.ndim: log_prob = jnp.expand_dims(log_prob, -1) outputs["net_output"] = net_output # avoid jax.errors.UnexpectedTracerError outputs["stddev"] = jnp.full_like(log_prob, jnp.nan) return actions, log_prob, outputs def get_entropy(self, logits: jax.Array, role: str = "") -> jax.Array: """Compute and return the entropy of the model :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Entropy of the model :rtype: jax.Array Example:: # given a standard deviation array: stddev >>> entropy = model.get_entropy(stddev) >>> print(entropy.shape) (4096, 8) """ return _entropy(logits)
size: 8,769 | lang: Python | avg_line_length: 44.440414 | max_line_length: 140 | alphanum_fraction: 0.583647
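MultiCategoricalMixin treats a MultiDiscrete action space as independent categorical heads: the flat network output is split at the cumulative nvec offsets and each chunk is sampled on its own. A simplified sketch of that splitting step, assuming nvec = [3, 2] as in the docstring example (here the trailing cumulative offset is dropped so the split yields exactly one chunk per sub-action):

import numpy as np
import jax
import jax.numpy as jnp

nvec = np.array([3, 2])                        # MultiDiscrete([3, 2]) from the docstring example
split_indices = np.cumsum(nvec)[:-1].tolist()  # split a width-5 output into chunks of 3 and 2

key = jax.random.PRNGKey(0)
net_output = jax.random.normal(key, (4, 5))    # batch of 4, one logit block per sub-action

chunks = jnp.split(net_output, split_indices, axis=-1)

# one categorical sample per chunk, concatenated back into a (4, 2) action array
keys = jax.random.split(key, len(chunks))
actions = jnp.stack([jax.random.categorical(k, logits, axis=-1)
                     for k, logits in zip(keys, chunks)], axis=-1)
print([c.shape for c in chunks], actions.shape)  # [(4, 3), (4, 2)] (4, 2)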
Toni-SM/skrl/skrl/models/jax/categorical.py
from typing import Any, Mapping, Optional, Tuple, Union from functools import partial import flax import jax import jax.numpy as jnp import numpy as np from skrl import config # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @partial(jax.jit, static_argnames=("unnormalized_log_prob")) def _categorical(net_output, unnormalized_log_prob, taken_actions, key): # normalize if unnormalized_log_prob: logits = net_output - jax.scipy.special.logsumexp(net_output, axis=-1, keepdims=True) # probs = jax.nn.softmax(logits) else: probs = net_output / net_output.sum(-1, keepdims=True) eps = jnp.finfo(probs.dtype).eps logits = jnp.log(probs.clip(min=eps, max=1 - eps)) # sample actions actions = jax.random.categorical(key, logits, axis=-1, shape=None) # log of the probability density function taken_actions = actions if taken_actions is None else taken_actions.astype(jnp.int32).reshape(-1) log_prob = jax.nn.log_softmax(logits)[jnp.arange(taken_actions.shape[0]), taken_actions] return actions.reshape(-1, 1), log_prob.reshape(-1, 1) @jax.jit def _entropy(logits): logits = logits - jax.scipy.special.logsumexp(logits, axis=-1, keepdims=True) logits = logits.clip(min=jnp.finfo(logits.dtype).min) p_log_p = logits * jax.nn.softmax(logits) return -p_log_p.sum(-1) class CategoricalMixin: def __init__(self, unnormalized_log_prob: bool = True, role: str = "") -> None: """Categorical mixin model (stochastic model) :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: ``True``). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param role: Role play by the model (default: ``""``) :type role: str, optional Example:: # define the model >>> import flax.linen as nn >>> from skrl.models.jax import Model, CategoricalMixin >>> >>> class Policy(CategoricalMixin, Model): ... def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs): ... Model.__init__(self, observation_space, action_space, device, **kwargs) ... CategoricalMixin.__init__(self, unnormalized_log_prob) ... ... @nn.compact # marks the given module method allowing inlined submodules ... def __call__(self, inputs, role): ... x = nn.elu(nn.Dense(32)(inputs["states"])) ... x = nn.elu(nn.Dense(32)(x)) ... x = nn.Dense(self.num_actions)(x) ... return x, {} ... >>> # given an observation_space: gym.spaces.Box with shape (4,) >>> # and an action_space: gym.spaces.Discrete with n = 2 >>> model = Policy(observation_space, action_space) >>> >>> print(model) Policy( # attributes observation_space = Box(-1.0, 1.0, (4,), float32) action_space = Discrete(2) device = StreamExecutorGpuDevice(id=0, process_index=0, slice_index=0) ) """ self._unnormalized_log_prob = unnormalized_log_prob self._i = 0 self._key = config.jax.key # https://flax.readthedocs.io/en/latest/api_reference/flax.errors.html#flax.errors.IncorrectPostInitOverrideError flax.linen.Module.__post_init__(self) def act(self, inputs: Mapping[str, Union[Union[np.ndarray, jax.Array], Any]], role: str = "", params: Optional[jax.Array] = None) -> Tuple[jax.Array, Union[jax.Array, None], Mapping[str, Union[jax.Array, Any]]]: """Act stochastically in response to the state of the environment :param inputs: Model inputs. 
The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically np.ndarray or jax.Array :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. The third component is a dictionary containing the network output ``"net_output"`` and extra output values :rtype: tuple of jax.Array, jax.Array or None, and dict Example:: >>> # given a batch of sample states with shape (4096, 4) >>> actions, log_prob, outputs = model.act({"states": states}) >>> print(actions.shape, log_prob.shape, outputs["net_output"].shape) (4096, 1) (4096, 1) (4096, 2) """ self._i += 1 subkey = jax.random.fold_in(self._key, self._i) inputs["key"] = subkey # map from states/observations to normalized probabilities or unnormalized log probabilities net_output, outputs = self.apply(self.state_dict.params if params is None else params, inputs, role) actions, log_prob = _categorical(net_output, self._unnormalized_log_prob, inputs.get("taken_actions", None), subkey) outputs["net_output"] = net_output # avoid jax.errors.UnexpectedTracerError outputs["stddev"] = jnp.full_like(log_prob, jnp.nan) return actions, log_prob, outputs def get_entropy(self, logits: jax.Array, role: str = "") -> jax.Array: """Compute and return the entropy of the model :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Entropy of the model :rtype: jax.Array Example:: # given a standard deviation array: stddev >>> entropy = model.get_entropy(stddev) >>> print(entropy.shape) (4096, 8) """ return _entropy(logits)
size: 6,846 | lang: Python | avg_line_length: 42.062893 | max_line_length: 129 | alphanum_fraction: 0.580193
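The ``_entropy`` helper in categorical.py computes categorical entropy directly from (unnormalized) logits. A self-contained sketch of the same identity, checked against the explicit -sum(p log p) form (the logits below are arbitrary):

import jax
import jax.numpy as jnp

def categorical_entropy(logits):
    # normalize the logits so that logsumexp(logits) == 0, as _entropy does
    logits = logits - jax.scipy.special.logsumexp(logits, axis=-1, keepdims=True)
    p_log_p = logits * jax.nn.softmax(logits)
    return -p_log_p.sum(-1)

logits = jnp.array([[2.0, 0.5, -1.0],
                    [0.0, 0.0, 0.0]])  # arbitrary unnormalized log-probabilities

probs = jax.nn.softmax(logits)
reference = -(probs * jnp.log(probs)).sum(-1)  # textbook -sum(p log p)

print(categorical_entropy(logits))  # matches the reference up to floating-point error
print(reference)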
Toni-SM/skrl/skrl/multi_agents/torch/base.py
from typing import Any, Mapping, Optional, Sequence, Union import collections import copy import datetime import os import gym import gymnasium import numpy as np import torch from torch.utils.tensorboard import SummaryWriter from skrl import logger from skrl.memories.torch import Memory from skrl.models.torch import Model class MultiAgent: def __init__(self, possible_agents: Sequence[str], models: Mapping[str, Mapping[str, Model]], memories: Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None, action_spaces: Optional[Mapping[str, Union[int, Sequence[int], gym.Space, gymnasium.Space]]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Base class that represent a RL multi-agent :param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.torch.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.torch.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict """ self.possible_agents = possible_agents self.num_agents = len(self.possible_agents) self.models = models self.memories = memories self.observation_spaces = observation_spaces self.action_spaces = action_spaces self.cfg = cfg if cfg is not None else {} self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device) # convert the models to their respective device for _models in self.models.values(): for model in _models.values(): if model is not None: model.to(model.device) self.tracking_data = collections.defaultdict(list) self.write_interval = self.cfg.get("experiment", {}).get("write_interval", 1000) self._track_rewards = collections.deque(maxlen=100) self._track_timesteps = collections.deque(maxlen=100) self._cumulative_rewards = None self._cumulative_timesteps = None self.training = True # checkpoint self.checkpoint_modules = {uid: {} for uid in self.possible_agents} self.checkpoint_interval = self.cfg.get("experiment", {}).get("checkpoint_interval", 1000) self.checkpoint_store_separately = self.cfg.get("experiment", {}).get("store_separately", False) self.checkpoint_best_modules = {"timestep": 0, "reward": -2 ** 31, "saved": True, "modules": {}} # experiment directory directory = self.cfg.get("experiment", {}).get("directory", "") experiment_name = self.cfg.get("experiment", {}).get("experiment_name", "") if not directory: directory = os.path.join(os.getcwd(), "runs") if not experiment_name: experiment_name = f"{datetime.datetime.now().strftime('%y-%m-%d_%H-%M-%S-%f')}_{self.__class__.__name__}" self.experiment_dir = os.path.join(directory, experiment_name) def __str__(self) -> str: 
"""Generate a representation of the agent as string :return: Representation of the agent as string :rtype: str """ string = f"Multi-agent: {repr(self)}" for k, v in self.cfg.items(): if type(v) is dict: string += f"\n |-- {k}" for k1, v1 in v.items(): string += f"\n | |-- {k1}: {v1}" else: string += f"\n |-- {k}: {v}" return string def _as_dict(self, _input: Any) -> Mapping[str, Any]: """Convert a configuration value into a dictionary according to the number of agents :param _input: Configuration value :type _input: Any :raises ValueError: The configuration value is a dictionary different from the number of agents :return: Configuration value as a dictionary :rtype: list of any configuration value """ if _input and isinstance(_input, collections.abc.Mapping): if set(_input) < set(self.possible_agents): logger.error("The configuration value does not match possible agents") raise ValueError("The configuration value does not match possible agents") elif set(_input) >= set(self.possible_agents): return _input return {name: copy.deepcopy(_input) for name in self.possible_agents} def _empty_preprocessor(self, _input: Any, *args, **kwargs) -> Any: """Empty preprocess method This method is defined because PyTorch multiprocessing can't pickle lambdas :param _input: Input to preprocess :type _input: Any :return: Preprocessed input :rtype: Any """ return _input def _get_internal_value(self, _module: Any) -> Any: """Get internal module/variable state/value :param _module: Module or variable :type _module: Any :return: Module/variable state/value :rtype: Any """ return _module.state_dict() if hasattr(_module, "state_dict") else _module def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent This method should be called before the agent is used. It will initialize the TensoBoard writer (and optionally Weights & Biases) and create the checkpoints directory :param trainer_cfg: Trainer configuration :type trainer_cfg: dict, optional """ # setup Weights & Biases if self.cfg.get("experiment", {}).get("wandb", False): # save experiment config trainer_cfg = trainer_cfg if trainer_cfg is not None else {} try: models_cfg = {uid: {k: v.net._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents} except AttributeError: models_cfg = {uid: {k: v._modules for (k, v) in self.models[uid].items()} for uid in self.possible_agents} config={**self.cfg, **trainer_cfg, **models_cfg} # set default values wandb_kwargs = copy.deepcopy(self.cfg.get("experiment", {}).get("wandb_kwargs", {})) wandb_kwargs.setdefault("name", os.path.split(self.experiment_dir)[-1]) wandb_kwargs.setdefault("sync_tensorboard", True) wandb_kwargs.setdefault("config", {}) wandb_kwargs["config"].update(config) # init Weights & Biases import wandb wandb.init(**wandb_kwargs) # main entry to log data for consumption and visualization by TensorBoard if self.write_interval > 0: self.writer = SummaryWriter(log_dir=self.experiment_dir) if self.checkpoint_interval > 0: os.makedirs(os.path.join(self.experiment_dir, "checkpoints"), exist_ok=True) def track_data(self, tag: str, value: float) -> None: """Track data to TensorBoard Currently only scalar data are supported :param tag: Data identifier (e.g. 
'Loss / policy loss') :type tag: str :param value: Value to track :type value: float """ self.tracking_data[tag].append(value) def write_tracking_data(self, timestep: int, timesteps: int) -> None: """Write tracking data to TensorBoard :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ for k, v in self.tracking_data.items(): if k.endswith("(min)"): self.writer.add_scalar(k, np.min(v), timestep) elif k.endswith("(max)"): self.writer.add_scalar(k, np.max(v), timestep) else: self.writer.add_scalar(k, np.mean(v), timestep) # reset data containers for next iteration self._track_rewards.clear() self._track_timesteps.clear() self.tracking_data.clear() def write_checkpoint(self, timestep: int, timesteps: int) -> None: """Write checkpoint (modules) to disk The checkpoints are saved in the directory 'checkpoints' in the experiment directory. The name of the checkpoint is the current timestep if timestep is not None, otherwise it is the current time. :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ tag = str(timestep if timestep is not None else datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f")) # separated modules if self.checkpoint_store_separately: for uid in self.possible_agents: for name, module in self.checkpoint_modules[uid].items(): torch.save(self._get_internal_value(module), os.path.join(self.experiment_dir, "checkpoints", f"{uid}_{name}_{tag}.pt")) # whole agent else: modules = {uid: {name: self._get_internal_value(module) for name, module in self.checkpoint_modules[uid].items()} \ for uid in self.possible_agents} torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", f"agent_{tag}.pt")) # best modules if self.checkpoint_best_modules["modules"] and not self.checkpoint_best_modules["saved"]: # separated modules if self.checkpoint_store_separately: for uid in self.possible_agents: for name in self.checkpoint_modules[uid].keys(): torch.save(self.checkpoint_best_modules["modules"][uid][name], os.path.join(self.experiment_dir, "checkpoints", f"best_{uid}_{name}.pt")) # whole agent else: modules = {uid: {name: self.checkpoint_best_modules["modules"][uid][name] \ for name in self.checkpoint_modules[uid].keys()} for uid in self.possible_agents} torch.save(modules, os.path.join(self.experiment_dir, "checkpoints", "best_agent.pt")) self.checkpoint_best_modules["saved"] = True def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: dictionary of torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes :return: Actions :rtype: torch.Tensor """ raise NotImplementedError def record_transition(self, states: Mapping[str, torch.Tensor], actions: Mapping[str, torch.Tensor], rewards: Mapping[str, torch.Tensor], next_states: Mapping[str, torch.Tensor], terminated: Mapping[str, torch.Tensor], truncated: Mapping[str, torch.Tensor], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory (to be implemented by the inheriting classes) Inheriting classes must call this method to record episode information (rewards, timesteps, etc.). 
In addition to recording environment transition (such as states, rewards, etc.), agent information can be recorded. :param states: Observations/states of the environment used to make the decision :type states: dictionary of torch.Tensor :param actions: Actions taken by the agent :type actions: dictionary of torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of torch.Tensor :param next_states: Next observations/states of the environment :type next_states: dictionary of torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of torch.Tensor :param infos: Additional information about the environment :type infos: dictionary of any supported type :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ _rewards = next(iter(rewards.values())) # compute the cumulative sum of the rewards and timesteps if self._cumulative_rewards is None: self._cumulative_rewards = torch.zeros_like(_rewards, dtype=torch.float32) self._cumulative_timesteps = torch.zeros_like(_rewards, dtype=torch.int32) self._cumulative_rewards.add_(_rewards) self._cumulative_timesteps.add_(1) # check ended episodes finished_episodes = (next(iter(terminated.values())) + next(iter(truncated.values()))).nonzero(as_tuple=False) if finished_episodes.numel(): # storage cumulative rewards and timesteps self._track_rewards.extend(self._cumulative_rewards[finished_episodes][:, 0].reshape(-1).tolist()) self._track_timesteps.extend(self._cumulative_timesteps[finished_episodes][:, 0].reshape(-1).tolist()) # reset the cumulative rewards and timesteps self._cumulative_rewards[finished_episodes] = 0 self._cumulative_timesteps[finished_episodes] = 0 # record data if self.write_interval > 0: self.tracking_data["Reward / Instantaneous reward (max)"].append(torch.max(_rewards).item()) self.tracking_data["Reward / Instantaneous reward (min)"].append(torch.min(_rewards).item()) self.tracking_data["Reward / Instantaneous reward (mean)"].append(torch.mean(_rewards).item()) if len(self._track_rewards): track_rewards = np.array(self._track_rewards) track_timesteps = np.array(self._track_timesteps) self.tracking_data["Reward / Total reward (max)"].append(np.max(track_rewards)) self.tracking_data["Reward / Total reward (min)"].append(np.min(track_rewards)) self.tracking_data["Reward / Total reward (mean)"].append(np.mean(track_rewards)) self.tracking_data["Episode / Total timesteps (max)"].append(np.max(track_timesteps)) self.tracking_data["Episode / Total timesteps (min)"].append(np.min(track_timesteps)) self.tracking_data["Episode / Total timesteps (mean)"].append(np.mean(track_timesteps)) def set_mode(self, mode: str) -> None: """Set the model mode (training or evaluation) :param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ for _models in self.models.values(): for model in _models.values(): if model is not None: model.set_mode(mode) def set_running_mode(self, mode: str) -> None: """Set the current running mode (training or evaluation) This method sets the value of the ``training`` property (boolean). This property can be used to know if the agent is running in training or evaluation mode. 
:param mode: Mode: 'train' for training or 'eval' for evaluation :type mode: str """ self.training = mode == "train" def save(self, path: str) -> None: """Save the agent to the specified path :param path: Path to save the model to :type path: str """ modules = {uid: {name: self._get_internal_value(module) for name, module in self.checkpoint_modules[uid].items()} \ for uid in self.possible_agents} torch.save(modules, path) def load(self, path: str) -> None: """Load the model from the specified path The final storage device is determined by the constructor of the model :param path: Path to load the model from :type path: str """ modules = torch.load(path, map_location=self.device) if type(modules) is dict: for uid in self.possible_agents: if uid not in modules: logger.warning(f"Cannot load modules for {uid}. The agent doesn't have such an instance") continue for name, data in modules[uid].items(): module = self.checkpoint_modules[uid].get(name, None) if module is not None: if hasattr(module, "load_state_dict"): module.load_state_dict(data) if hasattr(module, "eval"): module.eval() else: raise NotImplementedError else: logger.warning(f"Cannot load the {uid}:{name} module. The agent doesn't have such an instance") def migrate(self, path: str, name_map: Mapping[str, Mapping[str, str]] = {}, auto_mapping: bool = True, verbose: bool = False) -> bool: """Migrate the specified extrernal checkpoint to the current agent The final storage device is determined by the constructor of the agent. For ambiguous models (where 2 or more parameters, for source or current model, have equal shape) it is necessary to define the ``name_map``, at least for those parameters, to perform the migration successfully :param path: Path to the external checkpoint to migrate from :type path: str :param name_map: Name map to use for the migration (default: ``{}``). Keys are the current parameter names and values are the external parameter names :type name_map: Mapping[str, Mapping[str, str]], optional :param auto_mapping: Automatically map the external state dict to the current state dict (default: ``True``) :type auto_mapping: bool, optional :param verbose: Show model names and migration (default: ``False``) :type verbose: bool, optional :raises ValueError: If the correct file type cannot be identified from the ``path`` parameter :return: True if the migration was successful, False otherwise. 
Migration is successful if all parameters of the current model are found in the external model :rtype: bool """ raise NotImplementedError def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ timestep += 1 # update best models and write checkpoints if timestep > 1 and self.checkpoint_interval > 0 and not timestep % self.checkpoint_interval: # update best models reward = np.mean(self.tracking_data.get("Reward / Total reward (mean)", -2 ** 31)) if reward > self.checkpoint_best_modules["reward"]: self.checkpoint_best_modules["timestep"] = timestep self.checkpoint_best_modules["reward"] = reward self.checkpoint_best_modules["saved"] = False self.checkpoint_best_modules["modules"] = {uid: {k: copy.deepcopy(self._get_internal_value(v)) \ for k, v in self.checkpoint_modules[uid].items()} for uid in self.possible_agents} # write checkpoints self.write_checkpoint(timestep, timesteps) # write to tensorboard if timestep > 1 and self.write_interval > 0 and not timestep % self.write_interval: self.write_tracking_data(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :raises NotImplementedError: The method is not implemented by the inheriting classes """ raise NotImplementedError
size: 22,137 | lang: Python | avg_line_length: 45.024948 | max_line_length: 128 | alphanum_fraction: 0.610787
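MultiAgent is an abstract base: ``act`` and ``_update`` raise NotImplementedError, and concrete algorithms override them while reusing ``record_transition``, ``track_data`` and the checkpointing machinery. A minimal subclass skeleton under the assumption that models, memories and per-agent spaces are passed to the constructor exactly as documented above; everything inside the methods is placeholder logic for illustration, not skrl's:

import torch
from skrl.multi_agents.torch import MultiAgent

class RandomMultiAgent(MultiAgent):
    """Toy multi-agent that samples random actions; it only illustrates the override points."""

    def act(self, states, timestep, timesteps):
        # one random action per agent, assuming action_spaces holds gym/gymnasium spaces
        return {uid: torch.as_tensor(self.action_spaces[uid].sample(), device=self.device)
                for uid in self.possible_agents}

    def _update(self, timestep, timesteps):
        # a real algorithm would sample its memories and optimize here;
        # this placeholder only logs a dummy scalar through the base-class tracker
        self.track_data("Loss / Dummy loss", 0.0)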
Toni-SM/skrl/skrl/multi_agents/torch/__init__.py
from skrl.multi_agents.torch.base import MultiAgent
size: 52 | lang: Python | avg_line_length: 25.499987 | max_line_length: 51 | alphanum_fraction: 0.846154
Toni-SM/skrl/skrl/multi_agents/torch/mappo/mappo.py
from typing import Any, Mapping, Optional, Sequence, Union import copy import itertools import gym import gymnasium import torch import torch.nn as nn import torch.nn.functional as F from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.multi_agents.torch import MultiAgent from skrl.resources.schedulers.torch import KLAdaptiveLR # [start-config-dict-torch] MAPPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "shared_state_preprocessor": None, # shared state preprocessor class (see skrl.resources.preprocessors) "shared_state_preprocessor_kwargs": {}, # shared state preprocessor's kwargs (e.g. {"size": env.shared_observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-torch] class MAPPO(MultiAgent): def __init__(self, possible_agents: Sequence[str], models: Mapping[str, Model], memories: Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None, shared_observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None) -> None: """Multi-Agent Proximal Policy Optimization (MAPPO) https://arxiv.org/abs/2103.01955 
:param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.torch.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.torch.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict :param shared_observation_spaces: Shared observation/state space or shape (default: ``None``) :type shared_observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional """ _cfg = copy.deepcopy(MAPPO_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(possible_agents=possible_agents, models=models, memories=memories, observation_spaces=observation_spaces, action_spaces=action_spaces, device=device, cfg=_cfg) self.shared_observation_spaces = shared_observation_spaces # models self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents} self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents} for uid in self.possible_agents: self.checkpoint_modules[uid]["policy"] = self.policies[uid] self.checkpoint_modules[uid]["value"] = self.values[uid] # configuration self._learning_epochs = self._as_dict(self.cfg["learning_epochs"]) self._mini_batches = self._as_dict(self.cfg["mini_batches"]) self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"]) self._ratio_clip = self._as_dict(self.cfg["ratio_clip"]) self._value_clip = self._as_dict(self.cfg["value_clip"]) self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"]) self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"]) self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"]) self._kl_threshold = self._as_dict(self.cfg["kl_threshold"]) self._learning_rate = self._as_dict(self.cfg["learning_rate"]) self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"]) self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"]) self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"]) self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"]) self._shared_state_preprocessor = self._as_dict(self.cfg["shared_state_preprocessor"]) self._shared_state_preprocessor_kwargs = self._as_dict(self.cfg["shared_state_preprocessor_kwargs"]) self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"]) self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"]) self._discount_factor = self._as_dict(self.cfg["discount_factor"]) self._lambda = self._as_dict(self.cfg["lambda"]) self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] 
self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"]) # set up optimizer and learning rate scheduler self.optimizers = {} self.schedulers = {} for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] if policy is not None and value is not None: if policy is value: optimizer = torch.optim.Adam(policy.parameters(), lr=self._learning_rate[uid]) else: optimizer = torch.optim.Adam(itertools.chain(policy.parameters(), value.parameters()), lr=self._learning_rate[uid]) self.optimizers[uid] = optimizer if self._learning_rate_scheduler[uid] is not None: self.schedulers[uid] = self._learning_rate_scheduler[uid](optimizer, **self._learning_rate_scheduler_kwargs[uid]) self.checkpoint_modules[uid]["optimizer"] = self.optimizers[uid] # set up preprocessors if self._state_preprocessor[uid] is not None: self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid] else: self._state_preprocessor[uid] = self._empty_preprocessor if self._shared_state_preprocessor[uid] is not None: self._shared_state_preprocessor[uid] = self._shared_state_preprocessor[uid](**self._shared_state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["shared_state_preprocessor"] = self._shared_state_preprocessor[uid] else: self._shared_state_preprocessor[uid] = self._empty_preprocessor if self._value_preprocessor[uid] is not None: self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid] else: self._value_preprocessor[uid] = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memories for uid in self.possible_agents: self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=torch.float32) self.memories[uid].create_tensor(name="shared_states", size=self.shared_observation_spaces[uid], dtype=torch.float32) self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=torch.float32) self.memories[uid].create_tensor(name="rewards", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="terminated", size=1, dtype=torch.bool) self.memories[uid].create_tensor(name="log_prob", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="values", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="returns", size=1, dtype=torch.float32) self.memories[uid].create_tensor(name="advantages", size=1, dtype=torch.float32) # tensors sampled during training self._tensors_names = ["states", "shared_states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = [] self._current_shared_next_states = [] def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policies :param states: Environment's states :type states: dictionary of torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # # sample random actions # # TODO: fix for stochasticity, rnn 
and log_prob # if timestep < self._random_timesteps: # return self.policy.random_act({"states": states}, role="policy") # sample stochastic actions data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents] actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)} log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)} outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)} self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: Mapping[str, torch.Tensor], actions: Mapping[str, torch.Tensor], rewards: Mapping[str, torch.Tensor], next_states: Mapping[str, torch.Tensor], terminated: Mapping[str, torch.Tensor], truncated: Mapping[str, torch.Tensor], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: dictionary of torch.Tensor :param actions: Actions taken by the agent :type actions: dictionary of torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of torch.Tensor :param next_states: Next observations/states of the environment :type next_states: dictionary of torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of torch.Tensor :param infos: Additional information about the environment :type infos: dictionary of any supported type :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memories: shared_states = infos["shared_states"] self._current_shared_next_states = infos["shared_next_states"] for uid in self.possible_agents: # reward shaping if self._rewards_shaper is not None: rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps) # compute values values, _, _ = self.values[uid].act({"states": self._shared_state_preprocessor[uid](shared_states[uid])}, role="value") values = self._value_preprocessor[uid](values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap[uid]: rewards[uid] += self._discount_factor[uid] * values * truncated[uid] # storage transition in memory self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid], terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values, shared_states=shared_states[uid]) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints 
super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ def compute_gae(rewards: torch.Tensor, dones: torch.Tensor, values: torch.Tensor, next_values: torch.Tensor, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> torch.Tensor: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: torch.Tensor :param dones: Signals to indicate that episodes have ended :type dones: torch.Tensor :param values: Values obtained by the agent :type values: torch.Tensor :param next_values: Next values obtained by the agent :type next_values: torch.Tensor :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: torch.Tensor """ advantage = 0 advantages = torch.zeros_like(rewards) not_dones = dones.logical_not() memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else last_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] memory = self.memories[uid] # compute returns and advantages with torch.no_grad(): value.train(False) last_values, _, _ = value.act({"states": self._shared_state_preprocessor[uid](self._current_shared_next_states[uid].float())}, role="value") value.train(True) last_values = self._value_preprocessor[uid](last_values, inverse=True) values = memory.get_tensor_by_name("values") returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"), dones=memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor[uid], lambda_coefficient=self._lambda[uid]) memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True)) memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True)) memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid]) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss = 0 # learning epochs for epoch in range(self._learning_epochs[uid]): kl_divergences = [] # mini-batches loop for sampled_states, sampled_shared_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages \ in sampled_batches: sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch) sampled_shared_states = self._shared_state_preprocessor[uid](sampled_shared_states, train=not epoch) _, next_log_prob, _ = policy.act({"states": sampled_states, "taken_actions": sampled_actions}, role="policy") # compute approximate KL divergence with torch.no_grad(): ratio = next_log_prob - sampled_log_prob kl_divergence = ((torch.exp(ratio) - 1) - ratio).mean() kl_divergences.append(kl_divergence) # early stopping with KL divergence if self._kl_threshold[uid] 
and kl_divergence > self._kl_threshold[uid]: break # compute entropy loss if self._entropy_loss_scale[uid]: entropy_loss = -self._entropy_loss_scale[uid] * policy.get_entropy(role="policy").mean() else: entropy_loss = 0 # compute policy loss ratio = torch.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * torch.clip(ratio, 1.0 - self._ratio_clip[uid], 1.0 + self._ratio_clip[uid]) policy_loss = -torch.min(surrogate, surrogate_clipped).mean() # compute value loss predicted_values, _, _ = value.act({"states": sampled_shared_states}, role="value") if self._clip_predicted_values: predicted_values = sampled_values + torch.clip(predicted_values - sampled_values, min=-self._value_clip[uid], max=self._value_clip[uid]) value_loss = self._value_loss_scale[uid] * F.mse_loss(sampled_returns, predicted_values) # optimization step self.optimizers[uid].zero_grad() (policy_loss + entropy_loss + value_loss).backward() if self._grad_norm_clip[uid] > 0: if policy is value: nn.utils.clip_grad_norm_(policy.parameters(), self._grad_norm_clip[uid]) else: nn.utils.clip_grad_norm_(itertools.chain(policy.parameters(), value.parameters()), self._grad_norm_clip[uid]) self.optimizers[uid].step() # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale[uid]: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler[uid]: if isinstance(self.schedulers[uid], KLAdaptiveLR): self.schedulers[uid].step(torch.tensor(kl_divergences).mean()) else: self.schedulers[uid].step() # record data self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) if self._entropy_loss_scale: self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Policy / Standard deviation ({uid})", policy.distribution(role="policy").stddev.mean().item()) if self._learning_rate_scheduler[uid]: self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid].get_last_lr()[0])
25,533
Python
51.110204
156
0.596013
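The per-agent policy update above combines a clipped surrogate objective with an approximate KL divergence used for optional early stopping. A minimal standalone sketch of both terms (the mini-batch below is synthetic; shapes and values are illustrative assumptions, not taken from the agent)::

    import torch

    def ppo_policy_loss(new_log_prob, old_log_prob, advantages, ratio_clip=0.2):
        # clipped surrogate objective minimized for each agent's policy
        ratio = torch.exp(new_log_prob - old_log_prob)
        surrogate = advantages * ratio
        surrogate_clipped = advantages * torch.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip)
        return -torch.min(surrogate, surrogate_clipped).mean()

    def approximate_kl(new_log_prob, old_log_prob):
        # estimator checked against the (optional) kl_threshold for early stopping
        log_ratio = new_log_prob - old_log_prob
        return ((torch.exp(log_ratio) - 1) - log_ratio).mean()

    # synthetic mini-batch standing in for a single agent's samples
    old_log_prob = torch.randn(64, 1)
    new_log_prob = old_log_prob + 0.1 * torch.randn(64, 1)
    advantages = torch.randn(64, 1)

    loss = ppo_policy_loss(new_log_prob, old_log_prob, advantages)
    kl = approximate_kl(new_log_prob, old_log_prob)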
Toni-SM/skrl/skrl/multi_agents/torch/mappo/__init__.py
from skrl.multi_agents.torch.mappo.mappo import MAPPO, MAPPO_DEFAULT_CONFIG
76
Python
37.499981
75
0.828947
Toni-SM/skrl/skrl/multi_agents/torch/ippo/__init__.py
from skrl.multi_agents.torch.ippo.ippo import IPPO, IPPO_DEFAULT_CONFIG
72
Python
35.499982
71
0.819444
Toni-SM/skrl/skrl/multi_agents/jax/__init__.py
from skrl.multi_agents.jax.base import MultiAgent
50
Python
24.499988
49
0.84
Toni-SM/skrl/skrl/multi_agents/jax/mappo/mappo.py
from typing import Any, Mapping, Optional, Sequence, Union import copy import functools import gym import gymnasium import jax import jax.numpy as jnp import numpy as np from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.multi_agents.jax import MultiAgent from skrl.resources.optimizers.jax import Adam from skrl.resources.schedulers.jax import KLAdaptiveLR # [start-config-dict-jax] MAPPO_DEFAULT_CONFIG = { "rollouts": 16, # number of rollouts before updating "learning_epochs": 8, # number of learning epochs during each update "mini_batches": 2, # number of mini batches during each learning epoch "discount_factor": 0.99, # discount factor (gamma) "lambda": 0.95, # TD(lambda) coefficient (lam) for computing returns and advantages "learning_rate": 1e-3, # learning rate "learning_rate_scheduler": None, # learning rate scheduler class (see torch.optim.lr_scheduler) "learning_rate_scheduler_kwargs": {}, # learning rate scheduler's kwargs (e.g. {"step_size": 1e-3}) "state_preprocessor": None, # state preprocessor class (see skrl.resources.preprocessors) "state_preprocessor_kwargs": {}, # state preprocessor's kwargs (e.g. {"size": env.observation_space}) "shared_state_preprocessor": None, # shared state preprocessor class (see skrl.resources.preprocessors) "shared_state_preprocessor_kwargs": {}, # shared state preprocessor's kwargs (e.g. {"size": env.shared_observation_space}) "value_preprocessor": None, # value preprocessor class (see skrl.resources.preprocessors) "value_preprocessor_kwargs": {}, # value preprocessor's kwargs (e.g. {"size": 1}) "random_timesteps": 0, # random exploration steps "learning_starts": 0, # learning starts after this many steps "grad_norm_clip": 0.5, # clipping coefficient for the norm of the gradients "ratio_clip": 0.2, # clipping coefficient for computing the clipped surrogate objective "value_clip": 0.2, # clipping coefficient for computing the value loss (if clip_predicted_values is True) "clip_predicted_values": False, # clip predicted values during value loss computation "entropy_loss_scale": 0.0, # entropy loss scaling factor "value_loss_scale": 1.0, # value loss scaling factor "kl_threshold": 0, # KL divergence threshold for early stopping "rewards_shaper": None, # rewards shaping function: Callable(reward, timestep, timesteps) -> reward "time_limit_bootstrap": False, # bootstrap at timeout termination (episode truncation) "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } # [end-config-dict-jax] def compute_gae(rewards: np.ndarray, dones: np.ndarray, values: np.ndarray, next_values: np.ndarray, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> np.ndarray: """Compute the Generalized Advantage Estimator (GAE) :param rewards: Rewards obtained by the agent :type rewards: np.ndarray :param dones: Signals to indicate that episodes have ended :type dones: np.ndarray :param values: Values obtained by the agent :type values: np.ndarray :param next_values: Next values obtained by the agent :type next_values: np.ndarray :param discount_factor: Discount factor :type discount_factor: float :param lambda_coefficient: Lambda coefficient :type 
lambda_coefficient: float :return: Generalized Advantage Estimator :rtype: np.ndarray """ advantage = 0 advantages = np.zeros_like(rewards) not_dones = np.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages[i] = advantage # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages # https://jax.readthedocs.io/en/latest/faq.html#strategy-1-jit-compiled-helper-function @jax.jit def _compute_gae(rewards: jax.Array, dones: jax.Array, values: jax.Array, next_values: jax.Array, discount_factor: float = 0.99, lambda_coefficient: float = 0.95) -> jax.Array: advantage = 0 advantages = jnp.zeros_like(rewards) not_dones = jnp.logical_not(dones) memory_size = rewards.shape[0] # advantages computation for i in reversed(range(memory_size)): next_values = values[i + 1] if i < memory_size - 1 else next_values advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage) advantages = advantages.at[i].set(advantage) # returns computation returns = advantages + values # normalize advantages advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8) return returns, advantages @functools.partial(jax.jit, static_argnames=("policy_act", "get_entropy", "entropy_loss_scale")) def _update_policy(policy_act, policy_state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, ratio_clip, get_entropy, entropy_loss_scale): # compute policy loss def _policy_loss(params): _, next_log_prob, outputs = policy_act({"states": sampled_states, "taken_actions": sampled_actions}, "policy", params) # compute approximate KL divergence ratio = next_log_prob - sampled_log_prob kl_divergence = ((jnp.exp(ratio) - 1) - ratio).mean() # compute policy loss ratio = jnp.exp(next_log_prob - sampled_log_prob) surrogate = sampled_advantages * ratio surrogate_clipped = sampled_advantages * jnp.clip(ratio, 1.0 - ratio_clip, 1.0 + ratio_clip) # compute entropy loss entropy_loss = 0 if entropy_loss_scale: entropy_loss = -entropy_loss_scale * get_entropy(outputs["stddev"], role="policy").mean() return -jnp.minimum(surrogate, surrogate_clipped).mean(), (entropy_loss, kl_divergence, outputs["stddev"]) (policy_loss, (entropy_loss, kl_divergence, stddev)), grad = jax.value_and_grad(_policy_loss, has_aux=True)(policy_state_dict.params) return grad, policy_loss, entropy_loss, kl_divergence, stddev @functools.partial(jax.jit, static_argnames=("value_act", "clip_predicted_values")) def _update_value(value_act, value_state_dict, sampled_states, sampled_values, sampled_returns, value_loss_scale, clip_predicted_values, value_clip): # compute value loss def _value_loss(params): predicted_values, _, _ = value_act({"states": sampled_states}, "value", params) if clip_predicted_values: predicted_values = sampled_values + jnp.clip(predicted_values - sampled_values, -value_clip, value_clip) return value_loss_scale * ((sampled_returns - predicted_values) ** 2).mean() value_loss, grad = jax.value_and_grad(_value_loss, has_aux=False)(value_state_dict.params) return grad, value_loss class MAPPO(MultiAgent): def __init__(self, possible_agents: Sequence[str], models: Mapping[str, Model], memories: 
Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, device: Optional[Union[str, jax.Device]] = None, cfg: Optional[dict] = None, shared_observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None) -> None: """Multi-Agent Proximal Policy Optimization (MAPPO) https://arxiv.org/abs/2103.01955 :param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.jax.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.jax.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param cfg: Configuration dictionary :type cfg: dict :param shared_observation_spaces: Shared observation/state space or shape (default: ``None``) :type shared_observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional """ # _cfg = copy.deepcopy(IPPO_DEFAULT_CONFIG) # TODO: TypeError: cannot pickle 'jax.Device' object _cfg = MAPPO_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(possible_agents=possible_agents, models=models, memories=memories, observation_spaces=observation_spaces, action_spaces=action_spaces, device=device, cfg=_cfg) self.shared_observation_spaces = shared_observation_spaces # models self.policies = {uid: self.models[uid].get("policy", None) for uid in self.possible_agents} self.values = {uid: self.models[uid].get("value", None) for uid in self.possible_agents} for uid in self.possible_agents: self.checkpoint_modules[uid]["policy"] = self.policies[uid] self.checkpoint_modules[uid]["value"] = self.values[uid] # configuration self._learning_epochs = self._as_dict(self.cfg["learning_epochs"]) self._mini_batches = self._as_dict(self.cfg["mini_batches"]) self._rollouts = self.cfg["rollouts"] self._rollout = 0 self._grad_norm_clip = self._as_dict(self.cfg["grad_norm_clip"]) self._ratio_clip = self._as_dict(self.cfg["ratio_clip"]) self._value_clip = self._as_dict(self.cfg["value_clip"]) self._clip_predicted_values = self._as_dict(self.cfg["clip_predicted_values"]) self._value_loss_scale = self._as_dict(self.cfg["value_loss_scale"]) self._entropy_loss_scale = self._as_dict(self.cfg["entropy_loss_scale"]) self._kl_threshold = self._as_dict(self.cfg["kl_threshold"]) self._learning_rate = self._as_dict(self.cfg["learning_rate"]) self._learning_rate_scheduler = self._as_dict(self.cfg["learning_rate_scheduler"]) self._learning_rate_scheduler_kwargs = self._as_dict(self.cfg["learning_rate_scheduler_kwargs"]) self._state_preprocessor = self._as_dict(self.cfg["state_preprocessor"]) 
self._state_preprocessor_kwargs = self._as_dict(self.cfg["state_preprocessor_kwargs"]) self._shared_state_preprocessor = self._as_dict(self.cfg["shared_state_preprocessor"]) self._shared_state_preprocessor_kwargs = self._as_dict(self.cfg["shared_state_preprocessor_kwargs"]) self._value_preprocessor = self._as_dict(self.cfg["value_preprocessor"]) self._value_preprocessor_kwargs = self._as_dict(self.cfg["value_preprocessor_kwargs"]) self._discount_factor = self._as_dict(self.cfg["discount_factor"]) self._lambda = self._as_dict(self.cfg["lambda"]) self._random_timesteps = self.cfg["random_timesteps"] self._learning_starts = self.cfg["learning_starts"] self._rewards_shaper = self.cfg["rewards_shaper"] self._time_limit_bootstrap = self._as_dict(self.cfg["time_limit_bootstrap"]) # set up optimizer and learning rate scheduler self.policy_optimizer = {} self.value_optimizer = {} self.schedulers = {} for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] if policy is not None and value is not None: # scheduler scale = True self.schedulers[uid] = None if self._learning_rate_scheduler[uid] is not None: if self._learning_rate_scheduler[uid] == KLAdaptiveLR: scale = False self.schedulers[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid]) else: self._learning_rate[uid] = self._learning_rate_scheduler[uid](self._learning_rate[uid], **self._learning_rate_scheduler_kwargs[uid]) # optimizer self.policy_optimizer[uid] = Adam(model=policy, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale) self.value_optimizer[uid] = Adam(model=value, lr=self._learning_rate[uid], grad_norm_clip=self._grad_norm_clip[uid], scale=scale) self.checkpoint_modules[uid]["policy_optimizer"] = self.policy_optimizer[uid] self.checkpoint_modules[uid]["value_optimizer"] = self.value_optimizer[uid] # set up preprocessors if self._state_preprocessor[uid] is not None: self._state_preprocessor[uid] = self._state_preprocessor[uid](**self._state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["state_preprocessor"] = self._state_preprocessor[uid] else: self._state_preprocessor[uid] = self._empty_preprocessor if self._shared_state_preprocessor[uid] is not None: self._shared_state_preprocessor[uid] = self._shared_state_preprocessor[uid](**self._shared_state_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["shared_state_preprocessor"] = self._shared_state_preprocessor[uid] else: self._shared_state_preprocessor[uid] = self._empty_preprocessor if self._value_preprocessor[uid] is not None: self._value_preprocessor[uid] = self._value_preprocessor[uid](**self._value_preprocessor_kwargs[uid]) self.checkpoint_modules[uid]["value_preprocessor"] = self._value_preprocessor[uid] else: self._value_preprocessor[uid] = self._empty_preprocessor def init(self, trainer_cfg: Optional[Mapping[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # create tensors in memories for uid in self.possible_agents: self.memories[uid].create_tensor(name="states", size=self.observation_spaces[uid], dtype=jnp.float32) self.memories[uid].create_tensor(name="shared_states", size=self.shared_observation_spaces[uid], dtype=jnp.float32) self.memories[uid].create_tensor(name="actions", size=self.action_spaces[uid], dtype=jnp.float32) self.memories[uid].create_tensor(name="rewards", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="terminated", size=1, dtype=jnp.int8) 
self.memories[uid].create_tensor(name="log_prob", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="values", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="returns", size=1, dtype=jnp.float32) self.memories[uid].create_tensor(name="advantages", size=1, dtype=jnp.float32) # tensors sampled during training self._tensors_names = ["states", "shared_states", "actions", "log_prob", "values", "returns", "advantages"] # create temporary variables needed for storage and computation self._current_log_prob = [] self._current_shared_next_states = [] # set up models for just-in-time compilation with XLA for uid in self.possible_agents: self.policies[uid].apply = jax.jit(self.policies[uid].apply, static_argnums=2) if self.values[uid] is not None: self.values[uid].apply = jax.jit(self.values[uid].apply, static_argnums=2) def act(self, states: Mapping[str, Union[np.ndarray, jax.Array]], timestep: int, timesteps: int) -> Union[np.ndarray, jax.Array]: """Process the environment's states to make a decision (actions) using the main policies :param states: Environment's states :type states: dictionary of np.ndarray or jax.Array :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: np.ndarray or jax.Array """ # # sample random actions # # TODO: fix for stochasticity, rnn and log_prob # if timestep < self._random_timesteps: # return self.policy.random_act({"states": states}, role="policy") # sample stochastic actions data = [self.policies[uid].act({"states": self._state_preprocessor[uid](states[uid])}, role="policy") for uid in self.possible_agents] actions = {uid: d[0] for uid, d in zip(self.possible_agents, data)} log_prob = {uid: d[1] for uid, d in zip(self.possible_agents, data)} outputs = {uid: d[2] for uid, d in zip(self.possible_agents, data)} if not self._jax: # numpy backend actions = {jax.device_get(_actions) for _actions in actions} log_prob = {jax.device_get(_log_prob) for _log_prob in log_prob} self._current_log_prob = log_prob return actions, log_prob, outputs def record_transition(self, states: Mapping[str, Union[np.ndarray, jax.Array]], actions: Mapping[str, Union[np.ndarray, jax.Array]], rewards: Mapping[str, Union[np.ndarray, jax.Array]], next_states: Mapping[str, Union[np.ndarray, jax.Array]], terminated: Mapping[str, Union[np.ndarray, jax.Array]], truncated: Mapping[str, Union[np.ndarray, jax.Array]], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: dictionary of np.ndarray or jax.Array :param actions: Actions taken by the agent :type actions: dictionary of np.ndarray or jax.Array :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of np.ndarray or jax.Array :param next_states: Next observations/states of the environment :type next_states: dictionary of np.ndarray or jax.Array :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of np.ndarray or jax.Array :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of np.ndarray or jax.Array :param infos: Additional information about the environment :type infos: dictionary of any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ 
super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) if self.memories: shared_states = infos["shared_states"] self._current_shared_next_states = infos["shared_next_states"] for uid in self.possible_agents: # reward shaping if self._rewards_shaper is not None: rewards[uid] = self._rewards_shaper(rewards[uid], timestep, timesteps) # compute values values, _, _ = self.values[uid].act({"states": self._shared_state_preprocessor[uid](shared_states[uid])}, role="value") if not self._jax: # numpy backend values = jax.device_get(values) values = self._value_preprocessor[uid](values, inverse=True) # time-limit (truncation) boostrapping if self._time_limit_bootstrap[uid]: rewards[uid] += self._discount_factor[uid] * values * truncated[uid] # storage transition in memory self.memories[uid].add_samples(states=states[uid], actions=actions[uid], rewards=rewards[uid], next_states=next_states[uid], terminated=terminated[uid], truncated=truncated[uid], log_prob=self._current_log_prob[uid], values=values, shared_states=shared_states[uid]) def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ pass def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ self._rollout += 1 if not self._rollout % self._rollouts and timestep >= self._learning_starts: self.set_mode("train") self._update(timestep, timesteps) self.set_mode("eval") # write tracking data and checkpoints super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ for uid in self.possible_agents: policy = self.policies[uid] value = self.values[uid] memory = self.memories[uid] # compute returns and advantages value.training = False last_values, _, _ = value.act({"states": self._shared_state_preprocessor[uid](self._current_shared_next_states[uid])}, role="value") # TODO: .float() value.training = True if not self._jax: # numpy backend last_values = jax.device_get(last_values) last_values = self._value_preprocessor[uid](last_values, inverse=True) values = memory.get_tensor_by_name("values") if self._jax: returns, advantages = _compute_gae(rewards=memory.get_tensor_by_name("rewards"), dones=memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor[uid], lambda_coefficient=self._lambda[uid]) else: returns, advantages = compute_gae(rewards=memory.get_tensor_by_name("rewards"), dones=memory.get_tensor_by_name("terminated"), values=values, next_values=last_values, discount_factor=self._discount_factor[uid], lambda_coefficient=self._lambda[uid]) memory.set_tensor_by_name("values", self._value_preprocessor[uid](values, train=True)) memory.set_tensor_by_name("returns", self._value_preprocessor[uid](returns, train=True)) memory.set_tensor_by_name("advantages", advantages) # sample mini-batches from memory sampled_batches = memory.sample_all(names=self._tensors_names, mini_batches=self._mini_batches[uid]) cumulative_policy_loss = 0 cumulative_entropy_loss = 0 cumulative_value_loss 
= 0 # learning epochs for epoch in range(self._learning_epochs[uid]): kl_divergences = [] # mini-batches loop for sampled_states, sampled_shared_states, sampled_actions, sampled_log_prob, sampled_values, sampled_returns, sampled_advantages \ in sampled_batches: sampled_states = self._state_preprocessor[uid](sampled_states, train=not epoch) sampled_shared_states = self._shared_state_preprocessor[uid](sampled_shared_states, train=not epoch) # compute policy loss grad, policy_loss, entropy_loss, kl_divergence, stddev = _update_policy(policy.act, policy.state_dict, sampled_states, sampled_actions, sampled_log_prob, sampled_advantages, self._ratio_clip[uid], policy.get_entropy, self._entropy_loss_scale[uid]) kl_divergences.append(kl_divergence.item()) # early stopping with KL divergence if self._kl_threshold[uid] and kl_divergence > self._kl_threshold[uid]: break # optimization step (policy) self.policy_optimizer[uid] = self.policy_optimizer[uid].step(grad, policy, self.schedulers[uid]._lr if self.schedulers[uid] else None) # compute value loss grad, value_loss = _update_value(value.act, value.state_dict, sampled_shared_states, sampled_values, sampled_returns, self._value_loss_scale[uid], self._clip_predicted_values[uid], self._value_clip[uid]) # optimization step (value) self.value_optimizer[uid] = self.value_optimizer[uid].step(grad, value, self.schedulers[uid]._lr if self.schedulers[uid] else None) # update cumulative losses cumulative_policy_loss += policy_loss.item() cumulative_value_loss += value_loss.item() if self._entropy_loss_scale[uid]: cumulative_entropy_loss += entropy_loss.item() # update learning rate if self._learning_rate_scheduler[uid]: if isinstance(self.schedulers[uid], KLAdaptiveLR): self.schedulers[uid].step(np.mean(kl_divergences)) # record data self.track_data(f"Loss / Policy loss ({uid})", cumulative_policy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Loss / Value loss ({uid})", cumulative_value_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) if self._entropy_loss_scale: self.track_data(f"Loss / Entropy loss ({uid})", cumulative_entropy_loss / (self._learning_epochs[uid] * self._mini_batches[uid])) self.track_data(f"Policy / Standard deviation ({uid})", stddev.mean().item()) if self._learning_rate_scheduler[uid]: self.track_data(f"Learning / Learning rate ({uid})", self.schedulers[uid]._lr)
30,433
Python
50.846678
162
0.588309
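The JAX branch above relies on a jit-compiled GAE helper built with functional array updates (``advantages.at[i].set(...)``). A standalone sketch with synthetic arrays (shapes are assumptions; the bootstrap value is passed explicitly as ``last_values`` here rather than reusing the ``next_values`` name)::

    import jax
    import jax.numpy as jnp

    @jax.jit
    def compute_gae(rewards, dones, values, last_values,
                    discount_factor=0.99, lambda_coefficient=0.95):
        # rewards/dones/values: (memory_size, num_envs, 1); last_values: (num_envs, 1)
        advantage = 0
        advantages = jnp.zeros_like(rewards)
        not_dones = jnp.logical_not(dones)
        memory_size = rewards.shape[0]
        # the Python loop is unrolled at trace time (memory_size comes from the static shape)
        for i in reversed(range(memory_size)):
            next_values = values[i + 1] if i < memory_size - 1 else last_values
            advantage = rewards[i] - values[i] + discount_factor * not_dones[i] * (next_values + lambda_coefficient * advantage)
            advantages = advantages.at[i].set(advantage)
        returns = advantages + values
        # normalize advantages for a better-conditioned policy loss
        advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
        return returns, advantages

    key = jax.random.PRNGKey(0)
    rewards = jax.random.normal(key, (16, 4, 1))
    values = jax.random.normal(key, (16, 4, 1))
    dones = jnp.zeros((16, 4, 1), dtype=bool)
    last_values = jax.random.normal(key, (4, 1))
    returns, advantages = compute_gae(rewards, dones, values, last_values)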
Toni-SM/skrl/skrl/multi_agents/jax/mappo/__init__.py
from skrl.multi_agents.jax.mappo.mappo import MAPPO, MAPPO_DEFAULT_CONFIG
74
Python
36.499982
73
0.824324
Toni-SM/skrl/skrl/multi_agents/jax/ippo/__init__.py
from skrl.multi_agents.jax.ippo.ippo import IPPO, IPPO_DEFAULT_CONFIG
70
Python
34.499983
69
0.814286
Toni-SM/skrl/skrl/utils/control.py
import isaacgym.torch_utils as torch_utils import torch def ik(jacobian_end_effector, current_position, current_orientation, goal_position, goal_orientation, damping_factor=0.05): """ Damped Least Squares method: https://www.math.ucsd.edu/~sbuss/ResearchWeb/ikmethods/iksurvey.pdf """ # compute position and orientation error position_error = goal_position - current_position q_r = torch_utils.quat_mul(goal_orientation, torch_utils.quat_conjugate(current_orientation)) orientation_error = q_r[:, 0:3] * torch.sign(q_r[:, 3]).unsqueeze(-1) dpose = torch.cat([position_error, orientation_error], -1).unsqueeze(-1) # solve damped least squares (dO = J.T * V) transpose = torch.transpose(jacobian_end_effector, 1, 2) lmbda = torch.eye(6).to(jacobian_end_effector.device) * (damping_factor ** 2) return (transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ dpose) def osc(jacobian_end_effector, mass_matrix, current_position, current_orientation, goal_position, goal_orientation, current_dof_velocities, kp=5, kv=2): """ https://studywolf.wordpress.com/2013/09/17/robot-control-4-operation-space-control/ """ mass_matrix_end_effector = torch.inverse(jacobian_end_effector @ torch.inverse(mass_matrix) @ torch.transpose(jacobian_end_effector, 1, 2)) # compute position and orientation error position_error = kp * (goal_position - current_position) q_r = torch_utils.quat_mul(goal_orientation, torch_utils.quat_conjugate(current_orientation)) orientation_error = q_r[:, 0:3] * torch.sign(q_r[:, 3]).unsqueeze(-1) dpose = torch.cat([position_error, orientation_error], -1) return torch.transpose(jacobian_end_effector, 1, 2) @ mass_matrix_end_effector @ (kp * dpose).unsqueeze(-1) - kv * mass_matrix @ current_dof_velocities
1,888
Python
40.977777
155
0.692267
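The core of ``ik`` above is a damped least-squares solve, dq = J^T (J J^T + lambda^2 I)^-1 dpose. A minimal standalone check with random batched tensors (batch size, DOF count and error values are arbitrary assumptions)::

    import torch

    batch_size, num_dofs, damping_factor = 8, 7, 0.05

    jacobian = torch.randn(batch_size, 6, num_dofs)   # end-effector jacobian
    dpose = torch.randn(batch_size, 6, 1)             # stacked position/orientation error

    # damped least squares: dq = J^T (J J^T + lambda^2 I)^-1 dpose
    transpose = torch.transpose(jacobian, 1, 2)
    lmbda = torch.eye(6) * (damping_factor ** 2)
    dq = transpose @ torch.inverse(jacobian @ transpose + lmbda) @ dpose
    print(dq.shape)  # torch.Size([8, 7, 1])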
Toni-SM/skrl/skrl/utils/huggingface.py
from skrl import __version__, logger def download_model_from_huggingface(repo_id: str, filename: str = "agent.pt") -> str: """Download a model from Hugging Face Hub :param repo_id: Hugging Face user or organization name and a repo name separated by a ``/`` :type repo_id: str :param filename: The name of the model file in the repo (default: ``"agent.pt"``) :type filename: str, optional :raises ImportError: The Hugging Face Hub package (huggingface-hub) is not installed :raises huggingface_hub.utils._errors.HfHubHTTPError: Any HTTP error raised in Hugging Face Hub :return: Local path of file or if networking is off, last version of file cached on disk :rtype: str Example:: # download trained agent from the skrl organization (https://huggingface.co/skrl) >>> from skrl.utils.huggingface import download_model_from_huggingface >>> download_model_from_huggingface("skrl/OmniIsaacGymEnvs-Cartpole-PPO") '/home/user/.cache/huggingface/hub/models--skrl--OmniIsaacGymEnvs-Cartpole-PPO/snapshots/892e629903de6bf3ef102ae760406a5dd0f6f873/agent.pt' # download model (e.g. "policy.pth") from another user/organization (e.g. "org/ddpg-Pendulum-v1") >>> from skrl.utils.huggingface import download_model_from_huggingface >>> download_model_from_huggingface("org/ddpg-Pendulum-v1", "policy.pth") '/home/user/.cache/huggingface/hub/models--org--ddpg-Pendulum-v1/snapshots/b44ee96f93ff2e296156b002a2ca4646e197ba32/policy.pth' """ logger.info(f"Downloading model from Hugging Face Hub: {repo_id}/{filename}") try: import huggingface_hub except ImportError: logger.error("Hugging Face Hub package is not installed. Use 'pip install huggingface-hub' to install it") huggingface_hub = None if huggingface_hub is None: raise ImportError("Hugging Face Hub package is not installed. Use 'pip install huggingface-hub' to install it") # download and cache the model from Hugging Face Hub downloaded_model_file = huggingface_hub.hf_hub_download(repo_id=repo_id, filename=filename, library_name="skrl", library_version=__version__) return downloaded_model_file
2,401
Python
50.106382
147
0.66389
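A common follow-up to the download is restoring an agent from the retrieved checkpoint. A minimal sketch, assuming ``agent`` is an already-instantiated skrl agent whose models match the downloaded file::

    from skrl.utils.huggingface import download_model_from_huggingface

    # download (or reuse the cached copy of) the checkpoint
    path = download_model_from_huggingface("skrl/OmniIsaacGymEnvs-Cartpole-PPO", "agent.pt")

    # `agent` is assumed to be a configured skrl agent (e.g. PPO) built elsewhere
    agent.load(path)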
Toni-SM/skrl/skrl/utils/postprocessing.py
from typing import List, Tuple, Union import collections import csv import glob import os import numpy as np import torch class MemoryFileIterator(): def __init__(self, pathname: str) -> None: """Python iterator for loading data from exported memories The iterator will load the next memory file in the list of path names. The output of the iterator is a tuple of the filename and the memory data where the memory data is a dictionary of torch.Tensor (PyTorch), numpy.ndarray (NumPy) or lists (CSV) depending on the format and the keys of the dictionary are the names of the variables Supported formats: - PyTorch (pt) - NumPy (npz) - Comma-separated values (csv) Expected output shapes: - PyTorch: (memory_size, num_envs, data_size) - NumPy: (memory_size, num_envs, data_size) - Comma-separated values: (memory_size * num_envs, data_size) :param pathname: String containing a path specification for the exported memories. Python `glob <https://docs.python.org/3/library/glob.html#glob.glob>`_ method is used to find all files matching the path specification :type pathname: str """ self.n = 0 self.file_paths = sorted(glob.glob(pathname)) def __iter__(self) -> 'MemoryFileIterator': """Return self to make iterable""" return self def __next__(self) -> Tuple[str, dict]: """Return next batch :return: Tuple of file name and data :rtype: tuple """ if self.n >= len(self.file_paths): raise StopIteration if self.file_paths[self.n].endswith(".pt"): return self._format_torch() elif self.file_paths[self.n].endswith(".npz"): return self._format_numpy() elif self.file_paths[self.n].endswith(".csv"): return self._format_csv() else: raise ValueError(f"Unsupported format for {self.file_paths[self.n]}. Available formats: .pt, .csv, .npz") def _format_numpy(self) -> Tuple[str, dict]: """Load numpy array from file :return: Tuple of file name and data :rtype: tuple """ filename = os.path.basename(self.file_paths[self.n]) data = np.load(self.file_paths[self.n]) self.n += 1 return filename, data def _format_torch(self) -> Tuple[str, dict]: """Load PyTorch tensor from file :return: Tuple of file name and data :rtype: tuple """ filename = os.path.basename(self.file_paths[self.n]) data = torch.load(self.file_paths[self.n]) self.n += 1 return filename, data def _format_csv(self) -> Tuple[str, dict]: """Load CSV file from file :return: Tuple of file name and data :rtype: tuple """ filename = os.path.basename(self.file_paths[self.n]) with open(self.file_paths[self.n], 'r') as f: reader = csv.reader(f) # parse header try: header = next(reader, None) data = collections.defaultdict(int) for h in header: h.split(".")[1] # check header format data[h.split(".")[0]] += 1 names = sorted(list(data.keys())) sizes = [data[name] for name in names] indexes = [(low, high) for low, high in zip(np.cumsum(sizes) - np.array(sizes), np.cumsum(sizes))] except: self.n += 1 return filename, {} # parse data data = {name: [] for name in names} for row in reader: for name, index in zip(names, indexes): data[name].append([float(item) if item not in ["True", "False"] else bool(item) \ for item in row[index[0]:index[1]]]) self.n += 1 return filename, data class TensorboardFileIterator(): def __init__(self, pathname: str, tags: Union[str, List[str]]) -> None: """Python iterator for loading data from Tensorboard files The iterator will load the next Tensorboard file in the list of path names. The iterator's output is a tuple of the directory name and the Tensorboard variables selected by the tags. 
The Tensorboard data is returned as a dictionary with the tag as the key and a list of steps and values as the value :param pathname: String containing a path specification for the Tensorboard files. Python `glob <https://docs.python.org/3/library/glob.html#glob.glob>`_ method is used to find all files matching the path specification :type pathname: str :param tags: String or list of strings containing the tags of the variables to load :type tags: str or list of str """ self.n = 0 self.file_paths = sorted(glob.glob(pathname)) self.tags = [tags] if isinstance(tags, str) else tags def __iter__(self) -> 'TensorboardFileIterator': """Return self to make iterable""" return self def __next__(self) -> Tuple[str, dict]: """Return next batch :return: Tuple of directory name and data :rtype: tuple """ from tensorflow.python.summary.summary_iterator import summary_iterator if self.n >= len(self.file_paths): raise StopIteration file_path = self.file_paths[self.n] self.n += 1 data = {} for event in summary_iterator(file_path): try: # get Tensorboard data step = event.step tag = event.summary.value[0].tag value = event.summary.value[0].simple_value # record data if tag in self.tags: if not tag in data: data[tag] = [] data[tag].append([step, value]) except Exception as e: pass return os.path.dirname(file_path).split(os.sep)[-1], data
6,210
Python
34.289773
124
0.56876
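A short usage sketch for the memory iterator (the glob pattern below is an assumption; point it at the directory where memories were exported in PyTorch format)::

    from skrl.utils import postprocessing

    # iterate over every exported memory file matching the path specification
    for filename, data in postprocessing.MemoryFileIterator("memories/*.pt"):
        print(filename)
        for name, tensor in data.items():
            # expected shape for the PyTorch format: (memory_size, num_envs, data_size)
            print(" ", name, tuple(tensor.shape))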
Toni-SM/skrl/skrl/utils/__init__.py
from typing import Optional import os import random import sys import time import numpy as np from skrl import config, logger def set_seed(seed: Optional[int] = None, deterministic: bool = False) -> int: """ Set the seed for the random number generators Due to NumPy's legacy seeding constraint the seed must be between 0 and 2**32 - 1. Otherwise a NumPy exception (``ValueError: Seed must be between 0 and 2**32 - 1``) will be raised Modified packages: - random - numpy - torch (if available) - jax (skrl's PRNG key: ``config.jax.key``) Example:: # fixed seed >>> from skrl.utils import set_seed >>> set_seed(42) [skrl:INFO] Seed: 42 42 # random seed >>> from skrl.utils import set_seed >>> set_seed() [skrl:INFO] Seed: 1776118066 1776118066 # enable deterministic. The following environment variables should be established: # - CUDA 10.1: CUDA_LAUNCH_BLOCKING=1 # - CUDA 10.2 or later: CUBLAS_WORKSPACE_CONFIG=:16:8 or CUBLAS_WORKSPACE_CONFIG=:4096:8 >>> from skrl.utils import set_seed >>> set_seed(42, deterministic=True) [skrl:INFO] Seed: 42 [skrl:WARNING] PyTorch/cuDNN deterministic algorithms are enabled. This may affect performance 42 :param seed: The seed to set. Is None, a random seed will be generated (default: ``None``) :type seed: int, optional :param deterministic: Whether PyTorch is configured to use deterministic algorithms (default: ``False``). The following environment variables should be established for CUDA 10.1 (``CUDA_LAUNCH_BLOCKING=1``) and for CUDA 10.2 or later (``CUBLAS_WORKSPACE_CONFIG=:16:8`` or ``CUBLAS_WORKSPACE_CONFIG=:4096:8``). See PyTorch `Reproducibility <https://pytorch.org/docs/stable/notes/randomness.html>`_ for details :type deterministic: bool, optional :return: Seed :rtype: int """ # generate a random seed if seed is None: try: seed = int.from_bytes(os.urandom(4), byteorder=sys.byteorder) except NotImplementedError: seed = int(time.time() * 1000) seed %= 2 ** 31 # NumPy's legacy seeding seed must be between 0 and 2**32 - 1 seed = int(seed) logger.info(f"Seed: {seed}") # numpy random.seed(seed) np.random.seed(seed) # torch try: import torch torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) if deterministic: torch.backends.cudnn.benchmark = False torch.backends.cudnn.deterministic = True # On CUDA 10.1, set environment variable CUDA_LAUNCH_BLOCKING=1 # On CUDA 10.2 or later, set environment variable CUBLAS_WORKSPACE_CONFIG=:16:8 or CUBLAS_WORKSPACE_CONFIG=:4096:8 logger.warning("PyTorch/cuDNN deterministic algorithms are enabled. This may affect performance") except ImportError: pass except Exception as e: logger.warning(f"PyTorch seeding error: {e}") # jax config.jax.key = seed return seed
3,232
Python
31.009901
128
0.628094
Toni-SM/skrl/skrl/utils/isaacgym_utils.py
from typing import List, Optional import logging import math import threading import numpy as np import torch try: import flask except ImportError: flask = None try: import imageio import isaacgym import isaacgym.torch_utils as torch_utils from isaacgym import gymapi except ImportError: imageio = None isaacgym = None torch_utils = None gymapi = None class WebViewer: def __init__(self, host: str = "127.0.0.1", port: int = 5000) -> None: """ Web viewer for Isaac Gym :param host: Host address (default: "127.0.0.1") :type host: str :param port: Port number (default: 5000) :type port: int """ self._app = flask.Flask(__name__) self._app.add_url_rule("/", view_func=self._route_index) self._app.add_url_rule("/_route_stream", view_func=self._route_stream) self._app.add_url_rule("/_route_input_event", view_func=self._route_input_event, methods=["POST"]) self._log = logging.getLogger('werkzeug') self._log.disabled = True self._app.logger.disabled = True self._image = None self._camera_id = 0 self._camera_type = gymapi.IMAGE_COLOR self._notified = False self._wait_for_page = True self._pause_stream = False self._event_load = threading.Event() self._event_stream = threading.Event() # start server self._thread = threading.Thread(target=lambda: \ self._app.run(host=host, port=port, debug=False, use_reloader=False), daemon=True) self._thread.start() print(f"\nStarting web viewer on http://{host}:{port}/\n") def _route_index(self) -> 'flask.Response': """Render the web page :return: Flask response :rtype: flask.Response """ template = """<!doctype html> <html lang="en"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no"> <style> html, body { width: 100%; height: 100%; margin: 0; overflow: hidden; display: block; background-color: #000; } </style> </head> <body> <div> <canvas id="canvas" tabindex='1'></canvas> </div> <script> var canvas, context, image; function sendInputRequest(data){ let xmlRequest = new XMLHttpRequest(); xmlRequest.open("POST", "{{ url_for('_route_input_event') }}", true); xmlRequest.setRequestHeader("Content-Type", "application/json"); xmlRequest.send(JSON.stringify(data)); } window.onload = function(){ canvas = document.getElementById("canvas"); context = canvas.getContext('2d'); image = new Image(); image.src = "{{ url_for('_route_stream') }}"; canvas.width = window.innerWidth; canvas.height = window.innerHeight; window.addEventListener('resize', function(){ canvas.width = window.innerWidth; canvas.height = window.innerHeight; }, false); window.setInterval(function(){ let ratio = image.naturalWidth / image.naturalHeight; context.drawImage(image, 0, 0, canvas.width, canvas.width / ratio); }, 50); canvas.addEventListener('keydown', function(event){ if(event.keyCode != 18) sendInputRequest({key: event.keyCode}); }, false); canvas.addEventListener('mousemove', function(event){ if(event.buttons){ let data = {dx: event.movementX, dy: event.movementY}; if(event.altKey && event.buttons == 1){ data.key = 18; data.mouse = "left"; } else if(event.buttons == 2) data.mouse = "right"; else if(event.buttons == 4) data.mouse = "middle"; else return; sendInputRequest(data); } }, false); canvas.addEventListener('wheel', function(event){ sendInputRequest({mouse: "wheel", dz: Math.sign(event.deltaY)}); }, false); } </script> </body> </html> """ self._event_load.set() return flask.render_template_string(template) def _route_stream(self) -> 'flask.Response': """Stream the image to the web page :return: Flask response :rtype: flask.Response 
""" return flask.Response(self._stream(), mimetype='multipart/x-mixed-replace; boundary=frame') def _route_input_event(self) -> 'flask.Response': """Handle keyboard and mouse input :return: Flask response :rtype: flask.Response """ def q_mult(q1, q2): return [q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2] - q1[3] * q2[3], q1[0] * q2[1] + q1[1] * q2[0] + q1[2] * q2[3] - q1[3] * q2[2], q1[0] * q2[2] + q1[2] * q2[0] + q1[3] * q2[1] - q1[1] * q2[3], q1[0] * q2[3] + q1[3] * q2[0] + q1[1] * q2[2] - q1[2] * q2[1]] def q_conj(q): return [q[0], -q[1], -q[2], -q[3]] def qv_mult(q, v): q2 = [0] + v return q_mult(q_mult(q, q2), q_conj(q))[1:] def q_from_angle_axis(angle, axis): s = math.sin(angle / 2.0) return [math.cos(angle / 2.0), axis[0] * s, axis[1] * s, axis[2] * s] def p_target(p, q, a=0, b=0, c=1, d=0): v = qv_mult(q, [1, 0, 0]) p1 = [c0 + c1 for c0, c1 in zip(p, v)] denominator = a * (p1[0] - p[0]) + b * (p1[1] - p[1]) + c * (p1[2] - p[2]) if denominator: t = -(a * p[0] + b * p[1] + c * p[2] + d) / denominator return [p[0] + t * (p1[0] - p[0]), p[1] + t * (p1[1] - p[1]), p[2] + t * (p1[2] - p[2])] return v # get keyboard and mouse inputs data = flask.request.get_json() key, mouse = data.get("key", None), data.get("mouse", None) dx, dy, dz = data.get("dx", None), data.get("dy", None), data.get("dz", None) transform = self._gym.get_camera_transform(self._sim, self._envs[self._camera_id], self._cameras[self._camera_id]) # zoom in/out if mouse == "wheel": # compute zoom vector vector = qv_mult([transform.r.w, transform.r.x, transform.r.y, transform.r.z], [-0.025 * dz, 0, 0]) # update transform transform.p.x += vector[0] transform.p.y += vector[1] transform.p.z += vector[2] # orbit camera elif mouse == "left": # convert mouse movement to angle dx *= 0.1 * math.pi / 180 dy *= 0.1 * math.pi / 180 # compute rotation (Z-up) q = q_from_angle_axis(dx, [0, 0, -1]) q = q_mult(q, q_from_angle_axis(dy, [1, 0, 0])) # apply rotation t = p_target([transform.p.x, transform.p.y, transform.p.z], [transform.r.w, transform.r.x, transform.r.y, transform.r.z]) p = qv_mult(q, [transform.p.x - t[0], transform.p.y - t[1], transform.p.z - t[2]]) q = q_mult(q, [transform.r.w, transform.r.x, transform.r.y, transform.r.z]) # update transform transform.p.x = p[0] + t[0] transform.p.y = p[1] + t[1] transform.p.z = p[2] + t[2] transform.r.w, transform.r.x, transform.r.y, transform.r.z = q # pan camera elif mouse == "right": # convert mouse movement to angle dx *= 0.1 * math.pi / 180 dy *= 0.1 * math.pi / 180 # compute rotation (Z-up) q = q_from_angle_axis(dx, [0, 0, -1]) q = q_mult(q, q_from_angle_axis(dy, [1, 0, 0])) # apply rotation q = q_mult(q, [transform.r.w, transform.r.x, transform.r.y, transform.r.z]) # update transform transform.r.w, transform.r.x, transform.r.y, transform.r.z = q # walk camera elif mouse == "middle": # compute displacement vector = qv_mult([transform.r.w, transform.r.x, transform.r.y, transform.r.z], [0, 0.001 * dx, 0.001 * dy]) # update transform transform.p.x += vector[0] transform.p.y += vector[1] transform.p.z += vector[2] # pause stream (V: 86) elif key == 86: self._pause_stream = not self._pause_stream return flask.Response(status=200) # change image type (T: 84) elif key == 84: if self._camera_type == gymapi.IMAGE_COLOR: self._camera_type = gymapi.IMAGE_DEPTH elif self._camera_type == gymapi.IMAGE_DEPTH: self._camera_type = gymapi.IMAGE_COLOR return flask.Response(status=200) else: return flask.Response(status=200) self._gym.set_camera_transform(self._cameras[self._camera_id], 
self._envs[self._camera_id], transform) return flask.Response(status=200) def _stream(self) -> bytes: """Format the image to be streamed :return: Image encoded as Content-Type :rtype: bytes """ while True: self._event_stream.wait() # prepare image image = imageio.imwrite("<bytes>", self._image, format="JPEG") # stream image yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + image + b'\r\n') self._event_stream.clear() self._notified = False def setup(self, gym: 'isaacgym.gymapi.Gym', sim: 'isaacgym.gymapi.Sim', envs: List[int], cameras: List[int]) -> None: """Setup the web viewer :param gym: The gym :type gym: isaacgym.gymapi.Gym :param sim: Simulation handle :type sim: isaacgym.gymapi.Sim :param envs: Environment handles :type envs: list of ints :param cameras: Camera handles :type cameras: list of ints """ self._gym = gym self._sim = sim self._envs = envs self._cameras = cameras def render(self, fetch_results: bool = True, step_graphics: bool = True, render_all_camera_sensors: bool = True, wait_for_page_load: bool = True) -> None: """Render and get the image from the current camera This function must be called after the simulation is stepped (post_physics_step). The following Isaac Gym functions are called before get the image. Their calling can be skipped by setting the corresponding argument to False - fetch_results - step_graphics - render_all_camera_sensors :param fetch_results: Call Gym.fetch_results method (default: True) :type fetch_results: bool :param step_graphics: Call Gym.step_graphics method (default: True) :type step_graphics: bool :param render_all_camera_sensors: Call Gym.render_all_camera_sensors method (default: True) :type render_all_camera_sensors: bool :param wait_for_page_load: Wait for the page to load (default: True) :type wait_for_page_load: bool """ # wait for page to load if self._wait_for_page: if wait_for_page_load: if not self._event_load.is_set(): print("Waiting for web page to begin loading...") self._event_load.wait() self._event_load.clear() self._wait_for_page = False # pause stream if self._pause_stream: return if self._notified: return # isaac gym API if fetch_results: self._gym.fetch_results(self._sim, True) if step_graphics: self._gym.step_graphics(self._sim) if render_all_camera_sensors: self._gym.render_all_camera_sensors(self._sim) # get image image = self._gym.get_camera_image(self._sim, self._envs[self._camera_id], self._cameras[self._camera_id], self._camera_type) if self._camera_type == gymapi.IMAGE_COLOR: self._image = image.reshape(image.shape[0], -1, 4)[..., :3] elif self._camera_type == gymapi.IMAGE_DEPTH: self._image = -image.reshape(image.shape[0], -1) minimum = 0 if np.isinf(np.min(self._image)) else np.min(self._image) maximum = 5 if np.isinf(np.max(self._image)) else np.max(self._image) self._image = np.clip(1 - (self._image - minimum) / (maximum - minimum), 0, 1) self._image = np.uint8(255 * self._image) else: raise ValueError("Unsupported camera type") # notify stream thread self._event_stream.set() self._notified = True def ik(jacobian_end_effector: torch.Tensor, current_position: torch.Tensor, current_orientation: torch.Tensor, goal_position: torch.Tensor, goal_orientation: Optional[torch.Tensor] = None, damping_factor: float = 0.05, squeeze_output: bool = True) -> torch.Tensor: """ Inverse kinematics using damped least squares method :param jacobian_end_effector: End effector's jacobian :type jacobian_end_effector: torch.Tensor :param current_position: End effector's current position :type current_position: torch.Tensor 
:param current_orientation: End effector's current orientation :type current_orientation: torch.Tensor :param goal_position: End effector's goal position :type goal_position: torch.Tensor :param goal_orientation: End effector's goal orientation (default: None) :type goal_orientation: torch.Tensor or None :param damping_factor: Damping factor (default: 0.05) :type damping_factor: float :param squeeze_output: Squeeze output (default: True) :type squeeze_output: bool :return: Change in joint angles :rtype: torch.Tensor """ if goal_orientation is None: goal_orientation = current_orientation # compute error q = torch_utils.quat_mul(goal_orientation, torch_utils.quat_conjugate(current_orientation)) error = torch.cat([goal_position - current_position, # position error q[:, 0:3] * torch.sign(q[:, 3]).unsqueeze(-1)], # orientation error dim=-1).unsqueeze(-1) # solve damped least squares (dO = J.T * V) transpose = torch.transpose(jacobian_end_effector, 1, 2) lmbda = torch.eye(6, device=jacobian_end_effector.device) * (damping_factor ** 2) if squeeze_output: return (transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error).squeeze(dim=2) else: return transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error def print_arguments(args): print("") print("Arguments") for a in args.__dict__: print(f" |-- {a}: {args.__getattribute__(a)}") def print_asset_options(asset_options: 'isaacgym.gymapi.AssetOptions', asset_name: str = ""): attrs = ["angular_damping", "armature", "collapse_fixed_joints", "convex_decomposition_from_submeshes", "default_dof_drive_mode", "density", "disable_gravity", "fix_base_link", "flip_visual_attachments", "linear_damping", "max_angular_velocity", "max_linear_velocity", "mesh_normal_mode", "min_particle_mass", "override_com", "override_inertia", "replace_cylinder_with_capsule", "tendon_limit_stiffness", "thickness", "use_mesh_materials", "use_physx_armature", "vhacd_enabled"] # vhacd_params print("\nAsset options{}".format(f" ({asset_name})" if asset_name else "")) for attr in attrs: print(" |-- {}: {}".format(attr, getattr(asset_options, attr) if hasattr(asset_options, attr) else "--")) # vhacd attributes if attr == "vhacd_enabled" and hasattr(asset_options, attr) and getattr(asset_options, attr): vhacd_attrs = ["alpha", "beta", "concavity", "convex_hull_approximation", "convex_hull_downsampling", "max_convex_hulls", "max_num_vertices_per_ch", "min_volume_per_ch", "mode", "ocl_acceleration", "pca", "plane_downsampling", "project_hull_vertices", "resolution"] print(" |-- vhacd_params:") for vhacd_attr in vhacd_attrs: print(" | |-- {}: {}".format(vhacd_attr, getattr(asset_options.vhacd_params, vhacd_attr) \ if hasattr(asset_options.vhacd_params, vhacd_attr) else "--")) def print_sim_components(gym, sim): print("") print("Sim components") print(" |-- env count:", gym.get_env_count(sim)) print(" |-- actor count:", gym.get_sim_actor_count(sim)) print(" |-- rigid body count:", gym.get_sim_rigid_body_count(sim)) print(" |-- joint count:", gym.get_sim_joint_count(sim)) print(" |-- dof count:", gym.get_sim_dof_count(sim)) print(" |-- force sensor count:", gym.get_sim_force_sensor_count(sim)) def print_env_components(gym, env): print("") print("Env components") print(" |-- actor count:", gym.get_actor_count(env)) print(" |-- rigid body count:", gym.get_env_rigid_body_count(env)) print(" |-- joint count:", gym.get_env_joint_count(env)) print(" |-- dof count:", gym.get_env_dof_count(env)) def print_actor_components(gym, env, actor): print("") print("Actor 
components") print(" |-- rigid body count:", gym.get_actor_rigid_body_count(env, actor)) print(" |-- joint count:", gym.get_actor_joint_count(env, actor)) print(" |-- dof count:", gym.get_actor_dof_count(env, actor)) print(" |-- actuator count:", gym.get_actor_actuator_count(env, actor)) print(" |-- rigid shape count:", gym.get_actor_rigid_shape_count(env, actor)) print(" |-- soft body count:", gym.get_actor_soft_body_count(env, actor)) print(" |-- tendon count:", gym.get_actor_tendon_count(env, actor)) def print_dof_properties(gymapi, props): print("") print("DOF properties") print(" |-- hasLimits:", props["hasLimits"]) print(" |-- lower:", props["lower"]) print(" |-- upper:", props["upper"]) print(" |-- driveMode:", props["driveMode"]) print(" | |-- {}: gymapi.DOF_MODE_NONE".format(int(gymapi.DOF_MODE_NONE))) print(" | |-- {}: gymapi.DOF_MODE_POS".format(int(gymapi.DOF_MODE_POS))) print(" | |-- {}: gymapi.DOF_MODE_VEL".format(int(gymapi.DOF_MODE_VEL))) print(" | |-- {}: gymapi.DOF_MODE_EFFORT".format(int(gymapi.DOF_MODE_EFFORT))) print(" |-- stiffness:", props["stiffness"]) print(" |-- damping:", props["damping"]) print(" |-- velocity (max):", props["velocity"]) print(" |-- effort (max):", props["effort"]) print(" |-- friction:", props["friction"]) print(" |-- armature:", props["armature"]) def print_links_and_dofs(gym, asset): link_dict = gym.get_asset_rigid_body_dict(asset) dof_dict = gym.get_asset_dof_dict(asset) print("") print("Links") for k in link_dict: print(f" |-- {k}: {link_dict[k]}") print("DOFs") for k in dof_dict: print(f" |-- {k}: {dof_dict[k]}")
20,525
Python
39.089844
122
0.533203
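A wiring sketch for the web viewer, under the assumption that an Isaac Gym simulation has already been created and that ``gym``, ``sim``, ``envs`` and ``cameras`` are the handles produced by that setup::

    from skrl.utils.isaacgym_utils import WebViewer

    web_viewer = WebViewer(host="127.0.0.1", port=5000)
    web_viewer.setup(gym, sim, envs, cameras)   # handles from the existing simulation

    # inside the simulation loop, after stepping the physics (post_physics_step)
    web_viewer.render(fetch_results=True,
                      step_graphics=True,
                      render_all_camera_sensors=True)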
Toni-SM/skrl/skrl/utils/omniverse_isaacgym_utils.py
from typing import Mapping, Optional import queue import numpy as np import torch from skrl import logger def _np_quat_mul(a, b): assert a.shape == b.shape shape = a.shape a = a.reshape(-1, 4) b = b.reshape(-1, 4) x1, y1, z1, w1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3] x2, y2, z2, w2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3] ww = (z1 + x1) * (x2 + y2) yy = (w1 - y1) * (w2 + z2) zz = (w1 + y1) * (w2 - z2) xx = ww + yy + zz qq = 0.5 * (xx + (z1 - x1) * (x2 - y2)) w = qq - ww + (z1 - y1) * (y2 - z2) x = qq - xx + (x1 + w1) * (x2 + w2) y = qq - yy + (w1 - x1) * (y2 + z2) z = qq - zz + (z1 + y1) * (w2 - x2) return np.stack([x, y, z, w], axis=-1).reshape(shape) def _np_quat_conjugate(a): shape = a.shape a = a.reshape(-1, 4) return np.concatenate((-a[:, :3], a[:, -1:]), axis=-1).reshape(shape) def _torch_quat_mul(a, b): assert a.shape == b.shape shape = a.shape a = a.reshape(-1, 4) b = b.reshape(-1, 4) w1, x1, y1, z1 = a[:, 0], a[:, 1], a[:, 2], a[:, 3] w2, x2, y2, z2 = b[:, 0], b[:, 1], b[:, 2], b[:, 3] ww = (z1 + x1) * (x2 + y2) yy = (w1 - y1) * (w2 + z2) zz = (w1 + y1) * (w2 - z2) xx = ww + yy + zz qq = 0.5 * (xx + (z1 - x1) * (x2 - y2)) w = qq - ww + (z1 - y1) * (y2 - z2) x = qq - xx + (x1 + w1) * (x2 + w2) y = qq - yy + (w1 - x1) * (y2 + z2) z = qq - zz + (z1 + y1) * (w2 - x2) return torch.stack([w, x, y, z], dim=-1).view(shape) def _torch_quat_conjugate(a): # wxyz shape = a.shape a = a.reshape(-1, 4) return torch.cat((a[:, :1], -a[:, 1:]), dim=-1).view(shape) def ik(jacobian_end_effector: torch.Tensor, current_position: torch.Tensor, current_orientation: torch.Tensor, goal_position: torch.Tensor, goal_orientation: Optional[torch.Tensor] = None, method: str = "damped least-squares", method_cfg: Mapping[str, float] = {"scale": 1, "damping": 0.05, "min_singular_value": 1e-5}, squeeze_output: bool = True,) -> torch.Tensor: """Differential inverse kinematics :param jacobian_end_effector: End effector's jacobian :type jacobian_end_effector: torch.Tensor :param current_position: End effector's current position :type current_position: torch.Tensor :param current_orientation: End effector's current orientation :type current_orientation: torch.Tensor :param goal_position: End effector's goal position :type goal_position: torch.Tensor :param goal_orientation: End effector's goal orientation (default: ``None``). If not provided, the current orientation will be used instead. :type goal_orientation: torch.Tensor, optional :param method: Differential inverse kinematics formulation (default: ``"damped least-squares"``). 
                   The supported methods are described in the following table:

                   +-----------------------------------+-----------------------------------+
                   |IK Method                          |Method tag                         |
                   +===================================+===================================+
                   |Damped least-squares               |``"damped least-squares"``         |
                   +-----------------------------------+-----------------------------------+
                   |Transpose                          |``"transpose"``                    |
                   +-----------------------------------+-----------------------------------+
                   |Pseudoinverse                      |``"pseudoinverse"``                |
                   +-----------------------------------+-----------------------------------+
                   |Singular-value decomposition (SVD) |``"singular-value decomposition"`` |
                   +-----------------------------------+-----------------------------------+

    :type method: str, optional
    :param method_cfg: Method configurations (default: ``{"scale": 1, "damping": 0.05, "min_singular_value": 1e-5}``)
    :type method_cfg: dict, optional
    :param squeeze_output: Squeeze output (default: ``True``)
    :type squeeze_output: bool, optional

    :return: Change in joint angles
    :rtype: torch.Tensor
    """
    if goal_orientation is None:
        goal_orientation = current_orientation

    # torch
    if isinstance(jacobian_end_effector, torch.Tensor):
        # compute error
        q = _torch_quat_mul(goal_orientation, _torch_quat_conjugate(current_orientation))
        error = torch.cat([goal_position - current_position,               # position error
                           q[:, 1:] * torch.sign(q[:, 0]).unsqueeze(-1)],  # orientation error
                          dim=-1).unsqueeze(-1)

        scale = method_cfg.get("scale", 1.0)

        # adaptive Singular Value Decomposition (SVD)
        if method == "singular-value decomposition":
            min_singular_value = method_cfg.get("min_singular_value", 1e-5)
            U, S, Vh = torch.linalg.svd(jacobian_end_effector)  # U: 6xd, S: dxd, V: d x num_dof
            inv_s = torch.where(S > min_singular_value, 1.0 / S, torch.zeros_like(S))
            pseudoinverse = torch.transpose(Vh, 1, 2)[:, :, :6] @ torch.diag_embed(inv_s) @ torch.transpose(U, 1, 2)
            if squeeze_output:
                return (scale * pseudoinverse @ error).squeeze(dim=2)
            else:
                return scale * pseudoinverse @ error
        # jacobian pseudoinverse
        elif method == "pseudoinverse":
            pseudoinverse = torch.linalg.pinv(jacobian_end_effector)
            if squeeze_output:
                return (scale * pseudoinverse @ error).squeeze(dim=2)
            else:
                return scale * pseudoinverse @ error
        # jacobian transpose
        elif method == "transpose":
            transpose = torch.transpose(jacobian_end_effector, 1, 2)
            if squeeze_output:
                return (scale * transpose @ error).squeeze(dim=2)
            else:
                return scale * transpose @ error
        # damped least-squares
        elif method == "damped least-squares":
            damping = method_cfg.get("damping", 0.05)
            transpose = torch.transpose(jacobian_end_effector, 1, 2)
            lmbda = torch.eye(jacobian_end_effector.shape[1], device=jacobian_end_effector.device) * (damping ** 2)
            if squeeze_output:
                return (scale * transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error).squeeze(dim=2)
            else:
                return scale * transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error
        else:
            raise ValueError("Invalid IK method")

    # numpy
    # TODO: test and fix this
    else:
        # compute error
        q = _np_quat_mul(goal_orientation, _np_quat_conjugate(current_orientation))
        error = np.concatenate([goal_position - current_position,  # position error
                                q[:, 0:3] * np.sign(q[:, 3])])     # orientation error

        # solve damped least squares (dO = J.T * V)
        transpose = np.transpose(jacobian_end_effector, 1, 2)
        lmbda = np.eye(6) * (method_cfg.get("damping", 0.05) ** 2)
        if squeeze_output:
            return (transpose @ np.linalg.inv(jacobian_end_effector @ transpose + lmbda) @ error)
        else:
            return transpose @ np.linalg.inv(jacobian_end_effector @
transpose + lmbda) @ error def get_env_instance(headless: bool = True, enable_livestream: bool = False, enable_viewport: bool = False, multi_threaded: bool = False) -> "omni.isaac.gym.vec_env.VecEnvBase": """ Instantiate a VecEnvBase-based object compatible with OmniIsaacGymEnvs :param headless: Disable UI when running (default: ``True``) :type headless: bool, optional :param enable_livestream: Whether to enable live streaming (default: ``False``) :type enable_livestream: bool, optional :param enable_viewport: Whether to enable viewport (default: ``False``) :type enable_viewport: bool, optional :param multi_threaded: Whether to return a multi-threaded environment instance (default: ``False``) :type multi_threaded: bool, optional :return: Environment instance :rtype: omni.isaac.gym.vec_env.VecEnvBase Example:: from skrl.envs.wrappers.torch import wrap_env from skrl.utils.omniverse_isaacgym_utils import get_env_instance # get environment instance env = get_env_instance(headless=True) # parse sim configuration from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig sim_config = SimConfig({"test": False, "device_id": 0, "headless": True, "multi_gpu": False, "sim_device": "gpu", "enable_livestream": False, "task": {"name": "CustomTask", "physics_engine": "physx", "env": {"numEnvs": 512, "envSpacing": 1.5, "enableDebugVis": False, "clipObservations": 1000.0, "clipActions": 1.0, "controlFrequencyInv": 4}, "sim": {"dt": 0.0083, # 1 / 120 "use_gpu_pipeline": True, "gravity": [0.0, 0.0, -9.81], "add_ground_plane": True, "use_flatcache": True, "enable_scene_query_support": False, "enable_cameras": False, "default_physics_material": {"static_friction": 1.0, "dynamic_friction": 1.0, "restitution": 0.0}, "physx": {"worker_thread_count": 4, "solver_type": 1, "use_gpu": True, "solver_position_iteration_count": 4, "solver_velocity_iteration_count": 1, "contact_offset": 0.005, "rest_offset": 0.0, "bounce_threshold_velocity": 0.2, "friction_offset_threshold": 0.04, "friction_correlation_distance": 0.025, "enable_sleeping": True, "enable_stabilization": True, "max_depenetration_velocity": 1000.0, "gpu_max_rigid_contact_count": 524288, "gpu_max_rigid_patch_count": 33554432, "gpu_found_lost_pairs_capacity": 524288, "gpu_found_lost_aggregate_pairs_capacity": 262144, "gpu_total_aggregate_pairs_capacity": 1048576, "gpu_max_soft_body_contacts": 1048576, "gpu_max_particle_contacts": 1048576, "gpu_heap_capacity": 33554432, "gpu_temp_buffer_capacity": 16777216, "gpu_max_num_partitions": 8}}}}) # import and setup custom task from custom_task import CustomTask task = CustomTask(name="CustomTask", sim_config=sim_config, env=env) env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True) # wrap the environment env = wrap_env(env, "omniverse-isaacgym") """ from omni.isaac.gym.vec_env import TaskStopException, VecEnvBase, VecEnvMT from omni.isaac.gym.vec_env.vec_env_mt import TrainerMT class _OmniIsaacGymVecEnv(VecEnvBase): def step(self, actions): actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).to(self._task.device).clone() self._task.pre_physics_step(actions) for _ in range(self._task.control_frequency_inv): self._world.step(render=self._render) self.sim_frame_count += 1 observations, rewards, dones, info = self._task.post_physics_step() return {"obs": torch.clamp(observations, -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone()}, \ rewards.to(self._task.rl_device).clone(), 
dones.to(self._task.rl_device).clone(), info.copy() def reset(self): self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device) return self.step(actions)[0] class _OmniIsaacGymTrainerMT(TrainerMT): def run(self): pass def stop(self): pass class _OmniIsaacGymVecEnvMT(VecEnvMT): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.action_queue = queue.Queue(1) self.data_queue = queue.Queue(1) def run(self, trainer=None): super().run(_OmniIsaacGymTrainerMT() if trainer is None else trainer) def _parse_data(self, data): self._observations = torch.clamp(data["obs"], -self._task.clip_obs, self._task.clip_obs).to(self._task.rl_device).clone() self._rewards = data["rew"].to(self._task.rl_device).clone() self._dones = data["reset"].to(self._task.rl_device).clone() self._info = data["extras"].copy() def step(self, actions): if self._stop: raise TaskStopException() actions = torch.clamp(actions, -self._task.clip_actions, self._task.clip_actions).clone() self.send_actions(actions) data = self.get_data() return {"obs": self._observations}, self._rewards, self._dones, self._info def reset(self): self._task.reset() actions = torch.zeros((self.num_envs, self._task.num_actions), device=self._task.device) return self.step(actions)[0] def close(self): # end stop signal to main thread self.send_actions(None) self.stop = True if multi_threaded: try: return _OmniIsaacGymVecEnvMT(headless=headless, enable_livestream=enable_livestream, enable_viewport=enable_viewport) except TypeError: logger.warning("Using an older version of Isaac Sim (2022.2.0 or earlier)") return _OmniIsaacGymVecEnvMT(headless=headless) # Isaac Sim 2022.2.0 and earlier else: try: return _OmniIsaacGymVecEnv(headless=headless, enable_livestream=enable_livestream, enable_viewport=enable_viewport) except TypeError: logger.warning("Using an older version of Isaac Sim (2022.2.0 or earlier)") return _OmniIsaacGymVecEnv(headless=headless) # Isaac Sim 2022.2.0 and earlier
16,269
Python
47.858859
133
0.487737
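A minimal sketch of calling the ik helper above with batched torch tensors. The shapes, sizes and random values are illustrative assumptions (in practice the Jacobian, poses and goals come from the simulator); quaternions follow the (w, x, y, z) layout used by the torch code path.

    import torch
    from skrl.utils.omniverse_isaacgym_utils import ik

    num_envs, num_dofs = 64, 7
    jacobian = torch.randn(num_envs, 6, num_dofs)
    current_position = torch.randn(num_envs, 3)
    current_orientation = torch.nn.functional.normalize(torch.randn(num_envs, 4), dim=-1)
    goal_position = torch.randn(num_envs, 3)

    # joint-space displacement with shape (num_envs, num_dofs), damped least-squares by default
    delta_dof_pos = ik(jacobian, current_position, current_orientation, goal_position)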
Toni-SM/skrl/skrl/utils/model_instantiators/torch/__init__.py
from typing import Any, Mapping, Optional, Sequence, Tuple, Union from enum import Enum import gym import gymnasium import torch import torch.nn as nn from skrl.models.torch import Model # noqa from skrl.models.torch import CategoricalMixin, DeterministicMixin, GaussianMixin, MultivariateGaussianMixin # noqa __all__ = ["categorical_model", "deterministic_model", "gaussian_model", "multivariate_gaussian_model", "Shape"] class Shape(Enum): """ Enum to select the shape of the model's inputs and outputs """ ONE = 1 STATES = 0 OBSERVATIONS = 0 ACTIONS = -1 STATES_ACTIONS = -2 def _get_activation_function(activation: str) -> nn.Module: """Get the activation function Supported activation functions: - "elu" - "leaky_relu" - "relu" - "selu" - "sigmoid" - "softmax" - "softplus" - "softsign" - "tanh" :param activation: activation function name. If activation is an empty string, a placeholder will be returned (``torch.nn.Identity()``) :type activation: str :raises: ValueError if activation is not a valid activation function :return: activation function :rtype: nn.Module """ if not activation: return torch.nn.Identity() elif activation == "relu": return torch.nn.ReLU() elif activation == "tanh": return torch.nn.Tanh() elif activation == "sigmoid": return torch.nn.Sigmoid() elif activation == "leaky_relu": return torch.nn.LeakyReLU() elif activation == "elu": return torch.nn.ELU() elif activation == "softplus": return torch.nn.Softplus() elif activation == "softsign": return torch.nn.Softsign() elif activation == "selu": return torch.nn.SELU() elif activation == "softmax": return torch.nn.Softmax() else: raise ValueError(f"Unknown activation function: {activation}") def _get_num_units_by_shape(model: Model, shape: Shape) -> int: """Get the number of units in a layer by shape :param model: Model to get the number of units for :type model: Model :param shape: Shape of the layer :type shape: Shape or int :return: Number of units in the layer :rtype: int """ num_units = {Shape.ONE: 1, Shape.STATES: model.num_observations, Shape.ACTIONS: model.num_actions, Shape.STATES_ACTIONS: model.num_observations + model.num_actions} try: return num_units[shape] except: return shape def _generate_sequential(model: Model, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Union[str, None] = "tanh", output_scale: Optional[int] = None) -> nn.Sequential: """Generate a sequential model :param model: model to generate sequential model for :type model: Model :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: None). 
If None, the output layer will not be scaled :type output_scale: int, optional :return: sequential model :rtype: nn.Sequential """ # input layer input_layer = [nn.Linear(_get_num_units_by_shape(model, input_shape), hiddens[0])] # hidden layers hidden_layers = [] for i in range(len(hiddens) - 1): hidden_layers.append(_get_activation_function(hidden_activation[i])) hidden_layers.append(nn.Linear(hiddens[i], hiddens[i + 1])) hidden_layers.append(_get_activation_function(hidden_activation[-1])) # output layer output_layer = [nn.Linear(hiddens[-1], _get_num_units_by_shape(model, output_shape))] if output_activation is not None: output_layer.append(_get_activation_function(output_activation)) return nn.Sequential(*input_layer, *hidden_layers, *output_layer) def gaussian_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, initial_log_std: float = 0, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a Gaussian model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param clip_actions: Flag to indicate whether the actions should be clipped (default: False) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: True) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation (default: -20) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation (default: 2) :type max_log_std: float, optional :param initial_log_std: Initial value for the log standard deviation (default: 0) :type initial_log_std: float, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). 
If None, the output layer will not be scaled :type output_scale: float, optional :return: Gaussian model instance :rtype: Model """ class GaussianModel(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions, clip_log_std, min_log_std, max_log_std, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) self.log_std_parameter = nn.Parameter(metadata["initial_log_std"] \ * torch.ones(_get_num_units_by_shape(self, metadata["output_shape"]))) def compute(self, inputs, role=""): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) return output * self.instantiator_output_scale, self.log_std_parameter, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale, "initial_log_std": initial_log_std} return GaussianModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions, clip_log_std=clip_log_std, min_log_std=min_log_std, max_log_std=max_log_std) def multivariate_gaussian_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, initial_log_std: float = 0, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a multivariate Gaussian model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param clip_actions: Flag to indicate whether the actions should be clipped (default: False) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: True) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation (default: -20) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation (default: 2) :type max_log_std: float, optional :param initial_log_std: Initial value for the log standard deviation (default: 0) :type initial_log_std: float, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). If None, the output layer will not be scaled :type output_scale: float, optional :return: Multivariate Gaussian model instance :rtype: Model """ class MultivariateGaussianModel(MultivariateGaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions, clip_log_std, min_log_std, max_log_std): Model.__init__(self, observation_space, action_space, device) MultivariateGaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) self.log_std_parameter = nn.Parameter(metadata["initial_log_std"] \ * torch.ones(_get_num_units_by_shape(self, metadata["output_shape"]))) def compute(self, inputs, role=""): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) return output * self.instantiator_output_scale, self.log_std_parameter, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale, "initial_log_std": initial_log_std} return MultivariateGaussianModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions, clip_log_std=clip_log_std, min_log_std=min_log_std, max_log_std=max_log_std) def deterministic_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, clip_actions: bool = False, input_shape: Shape = Shape.STATES, hiddens: list = [256, 
256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a deterministic model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: False) :type clip_actions: bool, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). If None, the output layer will not be scaled :type output_scale: float, optional :return: Deterministic model instance :rtype: Model """ class DeterministicModel(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) def compute(self, inputs, role=""): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) return output * self.instantiator_output_scale, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale} return DeterministicModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions) def categorical_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, unnormalized_log_prob: bool = True, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], 
output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = None) -> Model: """Instantiate a categorical model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: True). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: None) :type output_activation: str or None, optional :return: Categorical model instance :rtype: Model """ class CategoricalModel(CategoricalMixin, Model): def __init__(self, observation_space, action_space, device, unnormalized_log_prob): Model.__init__(self, observation_space, action_space, device) CategoricalMixin.__init__(self, unnormalized_log_prob) self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"]) def compute(self, inputs, role=""): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) return output, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation} return CategoricalModel(observation_space=observation_space, action_space=action_space, device=device, unnormalized_log_prob=unnormalized_log_prob) def shared_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, structure: str = "", roles: Sequence[str] = [], parameters: Sequence[Mapping[str, Any]] = []) -> Model: """Instantiate a shared model :param observation_space: Observation/state space or shape (default: None). 
If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param structure: Shared model structure (default: ``""``). Note: this parameter is ignored for the moment :type structure: str, optional :param roles: Organized list of model roles (default: ``[]``) :type roles: sequence of strings, optional :param parameters: Organized list of model instantiator parameters (default: ``[]``) :type parameters: sequence of dict, optional :return: Shared model instance :rtype: Model """ class GaussianDeterministicModel(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, roles, metadata): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions=metadata[0]["clip_actions"], clip_log_std=metadata[0]["clip_log_std"], min_log_std=metadata[0]["min_log_std"], max_log_std=metadata[0]["max_log_std"], role=roles[0]) DeterministicMixin.__init__(self, clip_actions=metadata[1]["clip_actions"], role=roles[1]) self._roles = roles self.instantiator_input_type = metadata[0]["input_shape"].value self.instantiator_output_scales = [m["output_scale"] for m in metadata] # shared layers/network self.net = _generate_sequential(model=self, input_shape=metadata[0]["input_shape"], hiddens=metadata[0]["hiddens"][:-1], hidden_activation=metadata[0]["hidden_activation"][:-1], output_shape=metadata[0]["hiddens"][-1], output_activation=metadata[0]["hidden_activation"][-1]) # separated layers ("policy") mean_layers = [nn.Linear(metadata[0]["hiddens"][-1], _get_num_units_by_shape(self, metadata[0]["output_shape"]))] if metadata[0]["output_activation"] is not None: mean_layers.append(_get_activation_function(metadata[0]["output_activation"])) self.mean_net = nn.Sequential(*mean_layers) self.log_std_parameter = nn.Parameter(metadata[0]["initial_log_std"] \ * torch.ones(_get_num_units_by_shape(self, metadata[0]["output_shape"]))) # separated layer ("value") value_layers = [nn.Linear(metadata[1]["hiddens"][-1], _get_num_units_by_shape(self, metadata[1]["output_shape"]))] if metadata[1]["output_activation"] is not None: value_layers.append(_get_activation_function(metadata[1]["output_activation"])) self.value_net = nn.Sequential(*value_layers) def act(self, inputs, role): if role == self._roles[0]: return GaussianMixin.act(self, inputs, role) elif role == self._roles[1]: return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(torch.cat((inputs["states"], inputs["taken_actions"]), dim=1)) if role == self._roles[0]: return self.instantiator_output_scales[0] * self.mean_net(output), self.log_std_parameter, {} elif role == self._roles[1]: return self.instantiator_output_scales[1] * self.value_net(output), {} # TODO: define the model using the 
specified structure return GaussianDeterministicModel(observation_space=observation_space, action_space=action_space, device=device, roles=roles, metadata=parameters)
30,707
Python
52.3125
126
0.586446
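A minimal sketch of how these instantiators are typically used, for example to build a policy/value pair for a continuous-control agent. The observation/action spaces, device and layer sizes below are illustrative assumptions, not values taken from the file above.

    import gymnasium
    from skrl.utils.model_instantiators.torch import Shape, deterministic_model, gaussian_model

    observation_space = gymnasium.spaces.Box(low=-1, high=1, shape=(24,))
    action_space = gymnasium.spaces.Box(low=-1, high=1, shape=(4,))

    # stochastic policy: tanh-squashed mean over the action dimensions
    policy = gaussian_model(observation_space=observation_space,
                            action_space=action_space,
                            device="cpu",
                            hiddens=[64, 64],
                            hidden_activation=["elu", "elu"],
                            output_shape=Shape.ACTIONS,
                            output_activation="tanh")

    # value function: single unscaled output
    value = deterministic_model(observation_space=observation_space,
                                action_space=action_space,
                                device="cpu",
                                hiddens=[64, 64],
                                hidden_activation=["elu", "elu"],
                                output_shape=Shape.ONE,
                                output_activation=None)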
Toni-SM/skrl/skrl/utils/model_instantiators/jax/__init__.py
from typing import Any, Mapping, Optional, Sequence, Tuple, Union import sys from enum import Enum import gym import gymnasium import flax.linen as nn import jax import jax.numpy as jnp from skrl.models.jax import Model # noqa from skrl.models.jax import CategoricalMixin, DeterministicMixin, GaussianMixin # noqa __all__ = ["categorical_model", "deterministic_model", "gaussian_model", "Shape"] class Shape(Enum): """ Enum to select the shape of the model's inputs and outputs """ ONE = 1 STATES = 0 OBSERVATIONS = 0 ACTIONS = -1 STATES_ACTIONS = -2 def _get_activation_function(activation: str) -> nn.Module: """Get the activation function Supported activation functions: - "elu" - "leaky_relu" - "relu" - "selu" - "sigmoid" - "softmax" - "softplus" - "softsign" - "tanh" :param activation: activation function name. If activation is an empty string, a placeholder will be returned (``lambda x: x``) :type activation: str :raises: ValueError if activation is not a valid activation function :return: activation function :rtype: nn.Module """ if not activation: return lambda x: x elif activation == "relu": return nn.relu elif activation == "tanh": return nn.tanh elif activation == "sigmoid": return nn.sigmoid elif activation == "leaky_relu": return nn.leaky_relu elif activation == "elu": return nn.elu elif activation == "softplus": return nn.softplus elif activation == "softsign": return nn.soft_sign elif activation == "selu": return nn.selu elif activation == "softmax": return nn.softmax else: raise ValueError(f"Unknown activation function: {activation}") def _get_num_units_by_shape(model: Model, shape: Shape) -> int: """Get the number of units in a layer by shape :param model: Model to get the number of units for :type model: Model :param shape: Shape of the layer :type shape: Shape or int :return: Number of units in the layer :rtype: int """ num_units = {Shape.ONE: 1, Shape.STATES: model.num_observations, Shape.ACTIONS: model.num_actions, Shape.STATES_ACTIONS: model.num_observations + model.num_actions} try: return num_units[shape] except: return shape def _generate_sequential(model: Model, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Union[str, None] = "tanh", output_scale: Optional[int] = None) -> nn.Sequential: """Generate a sequential model :param model: model to generate sequential model for :type model: Model :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: None). 
If None, the output layer will not be scaled :type output_scale: int, optional :return: sequential model :rtype: nn.Sequential """ # input layer input_layer = [nn.Dense(hiddens[0])] # hidden layers hidden_layers = [] for i in range(len(hiddens) - 1): hidden_layers.append(_get_activation_function(hidden_activation[i])) hidden_layers.append(nn.Dense(hiddens[i + 1])) hidden_layers.append(_get_activation_function(hidden_activation[-1])) # output layer output_layer = [nn.Dense(_get_num_units_by_shape(model, output_shape))] if output_activation is not None: output_layer.append(_get_activation_function(output_activation)) return nn.Sequential(input_layer + hidden_layers + output_layer) def gaussian_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, clip_actions: bool = False, clip_log_std: bool = True, min_log_std: float = -20, max_log_std: float = 2, initial_log_std: float = 0, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a Gaussian model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param clip_actions: Flag to indicate whether the actions should be clipped (default: False) :type clip_actions: bool, optional :param clip_log_std: Flag to indicate whether the log standard deviations should be clipped (default: True) :type clip_log_std: bool, optional :param min_log_std: Minimum value of the log standard deviation (default: -20) :type min_log_std: float, optional :param max_log_std: Maximum value of the log standard deviation (default: 2) :type max_log_std: float, optional :param initial_log_std: Initial value for the log standard deviation (default: 0) :type initial_log_std: float, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). 
If None, the output layer will not be scaled :type output_scale: float, optional :return: Gaussian model instance :rtype: Model """ class GaussianModel(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) # override the hash method for Python versions prior to 3.8 to avoid the following error: # TypeError: Failed to hash Flax Module. The module probably contains unhashable attributes. if sys.version_info < (3, 8): def __hash__(self): return id(self) def setup(self): self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) self.log_std_parameter = self.param("log_std_parameter", lambda _: metadata["initial_log_std"] \ * jnp.ones(_get_num_units_by_shape(self, metadata["output_shape"]))) def __call__(self, inputs, role): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)) return output * self.instantiator_output_scale, self.log_std_parameter, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale, "initial_log_std": initial_log_std} return GaussianModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions, clip_log_std=clip_log_std, min_log_std=min_log_std, max_log_std=max_log_std) def deterministic_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, clip_actions: bool = False, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = "tanh", output_scale: float = 1.0) -> Model: """Instantiate a deterministic model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param clip_actions: Flag to indicate whether the actions should be clipped to the action space (default: False) :type clip_actions: bool, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: "tanh") :type output_activation: str or None, optional :param output_scale: Scale of the output layer (default: 1.0). If None, the output layer will not be scaled :type output_scale: float, optional :return: Deterministic model instance :rtype: Model """ class DeterministicModel(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) # override the hash method for Python versions prior to 3.8 to avoid the following error: # TypeError: Failed to hash Flax Module. The module probably contains unhashable attributes. if sys.version_info < (3, 8): def __hash__(self): return id(self) def setup(self): self.instantiator_output_scale = metadata["output_scale"] self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"], output_scale=metadata["output_scale"]) def __call__(self, inputs, role): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)) return output * self.instantiator_output_scale, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation, "output_scale": output_scale} return DeterministicModel(observation_space=observation_space, action_space=action_space, device=device, clip_actions=clip_actions) def categorical_model(observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jax.Device]] = None, unnormalized_log_prob: bool = True, input_shape: Shape = Shape.STATES, hiddens: list = [256, 256], hidden_activation: list = ["relu", "relu"], output_shape: Shape = Shape.ACTIONS, output_activation: Optional[str] = None) -> Model: """Instantiate a categorical model :param observation_space: Observation/state space or shape (default: None). If it is not None, the num_observations property will contain the size of that space :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None). 
If it is not None, the num_actions property will contain the size of that space :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or jax.Device, optional :param unnormalized_log_prob: Flag to indicate how to be interpreted the model's output (default: True). If True, the model's output is interpreted as unnormalized log probabilities (it can be any real number), otherwise as normalized probabilities (the output must be non-negative, finite and have a non-zero sum) :type unnormalized_log_prob: bool, optional :param input_shape: Shape of the input (default: Shape.STATES) :type input_shape: Shape, optional :param hiddens: Number of hidden units in each hidden layer :type hiddens: int or list of ints :param hidden_activation: Activation function for each hidden layer (default: "relu"). :type hidden_activation: list of strings :param output_shape: Shape of the output (default: Shape.ACTIONS) :type output_shape: Shape, optional :param output_activation: Activation function for the output layer (default: None) :type output_activation: str or None, optional :return: Categorical model instance :rtype: Model """ class CategoricalModel(CategoricalMixin, Model): def __init__(self, observation_space, action_space, device, unnormalized_log_prob=True, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) CategoricalMixin.__init__(self, unnormalized_log_prob) # override the hash method for Python versions prior to 3.8 to avoid the following error: # TypeError: Failed to hash Flax Module. The module probably contains unhashable attributes. if sys.version_info < (3, 8): def __hash__(self): return id(self) def setup(self): self.instantiator_input_type = metadata["input_shape"].value self.net = _generate_sequential(model=self, input_shape=metadata["input_shape"], hiddens=metadata["hiddens"], hidden_activation=metadata["hidden_activation"], output_shape=metadata["output_shape"], output_activation=metadata["output_activation"]) def __call__(self, inputs, role): if self.instantiator_input_type == 0: output = self.net(inputs["states"]) elif self.instantiator_input_type == -1: output = self.net(inputs["taken_actions"]) elif self.instantiator_input_type == -2: output = self.net(jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1)) return output, {} metadata = {"input_shape": input_shape, "hiddens": hiddens, "hidden_activation": hidden_activation, "output_shape": output_shape, "output_activation": output_activation} return CategoricalModel(observation_space=observation_space, action_space=action_space, device=device, unnormalized_log_prob=unnormalized_log_prob)
20,151
Python
48.271394
116
0.599325
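The JAX instantiators mirror the torch API above. A minimal sketch follows; the spaces and sizes are illustrative assumptions, and depending on the skrl version the returned Flax-based model may still need its parameters/state initialized before it can be queried.

    import gymnasium
    from skrl.utils.model_instantiators.jax import Shape, deterministic_model

    observation_space = gymnasium.spaces.Box(low=-1, high=1, shape=(24,))
    action_space = gymnasium.spaces.Box(low=-1, high=1, shape=(4,))

    # value function: single output, no output activation
    value = deterministic_model(observation_space=observation_space,
                                action_space=action_space,
                                hiddens=[64, 64],
                                hidden_activation=["relu", "relu"],
                                output_shape=Shape.ONE,
                                output_activation=None)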
Toni-SM/skrl/skrl/memories/torch/base.py
from typing import List, Optional, Tuple, Union import csv import datetime import functools import operator import os import gym import gymnasium import numpy as np import torch from torch.utils.data.sampler import BatchSampler class Memory: def __init__(self, memory_size: int, num_envs: int = 1, device: Optional[Union[str, torch.device]] = None, export: bool = False, export_format: str = "pt", export_directory: str = "") -> None: """Base class representing a memory with circular buffers Buffers are torch tensors with shape (memory size, number of environments, data size). Circular buffers are implemented with two integers: a memory index and an environment index :param memory_size: Maximum number of elements in the first dimension of each internal storage :type memory_size: int :param num_envs: Number of parallel environments (default: ``1``) :type num_envs: int, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param export: Export the memory to a file (default: ``False``). If True, the memory will be exported when the memory is filled :type export: bool, optional :param export_format: Export format (default: ``"pt"``). Supported formats: torch (pt), numpy (np), comma separated values (csv) :type export_format: str, optional :param export_directory: Directory where the memory will be exported (default: ``""``). If empty, the agent's experiment directory will be used :type export_directory: str, optional :raises ValueError: The export format is not supported """ self.memory_size = memory_size self.num_envs = num_envs self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") if device is None else torch.device(device) # internal variables self.filled = False self.env_index = 0 self.memory_index = 0 self.tensors = {} self.tensors_view = {} self.tensors_keep_dimensions = {} self.sampling_indexes = None self.all_sequence_indexes = np.concatenate([np.arange(i, memory_size * num_envs + i, num_envs) for i in range(num_envs)]) # exporting data self.export = export self.export_format = export_format self.export_directory = export_directory if not self.export_format in ["pt", "np", "csv"]: raise ValueError(f"Export format not supported ({self.export_format})") def __len__(self) -> int: """Compute and return the current (valid) size of the memory The valid size is calculated as the ``memory_size * num_envs`` if the memory is full (filled). Otherwise, the ``memory_index * num_envs + env_index`` is returned :return: Valid size :rtype: int """ return self.memory_size * self.num_envs if self.filled else self.memory_index * self.num_envs + self.env_index def _get_space_size(self, space: Union[int, Tuple[int], gym.Space, gymnasium.Space], keep_dimensions: bool = False) -> Union[Tuple, int]: """Get the size (number of elements) of a space :param space: Space or shape from which to obtain the number of elements :type space: int, tuple or list of integers, gym.Space, or gymnasium.Space :param keep_dimensions: Whether or not to keep the space dimensions (default: ``False``) :type keep_dimensions: bool, optional :raises ValueError: If the space is not supported :return: Size of the space. 
If ``keep_dimensions`` is True, the space size will be a tuple :rtype: int or tuple of int """ if type(space) in [int, float]: return (int(space),) if keep_dimensions else int(space) elif type(space) in [tuple, list]: return tuple(space) if keep_dimensions else np.prod(space) elif issubclass(type(space), gym.Space): if issubclass(type(space), gym.spaces.Discrete): return (1,) if keep_dimensions else 1 elif issubclass(type(space), gym.spaces.MultiDiscrete): return space.nvec.shape[0] elif issubclass(type(space), gym.spaces.Box): return tuple(space.shape) if keep_dimensions else np.prod(space.shape) elif issubclass(type(space), gym.spaces.Dict): if keep_dimensions: raise ValueError("keep_dimensions=True cannot be used with Dict spaces") return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) elif issubclass(type(space), gymnasium.Space): if issubclass(type(space), gymnasium.spaces.Discrete): return (1,) if keep_dimensions else 1 elif issubclass(type(space), gymnasium.spaces.MultiDiscrete): return space.nvec.shape[0] elif issubclass(type(space), gymnasium.spaces.Box): return tuple(space.shape) if keep_dimensions else np.prod(space.shape) elif issubclass(type(space), gymnasium.spaces.Dict): if keep_dimensions: raise ValueError("keep_dimensions=True cannot be used with Dict spaces") return sum([self._get_space_size(space.spaces[key]) for key in space.spaces]) raise ValueError(f"Space type {type(space)} not supported") def share_memory(self) -> None: """Share the tensors between processes """ for tensor in self.tensors.values(): if not tensor.is_cuda: tensor.share_memory_() def get_tensor_names(self) -> Tuple[str]: """Get the name of the internal tensors in alphabetical order :return: Tensor names without internal prefix (_tensor_) :rtype: tuple of strings """ return sorted(self.tensors.keys()) def get_tensor_by_name(self, name: str, keepdim: bool = True) -> torch.Tensor: """Get a tensor by its name :param name: Name of the tensor to retrieve :type name: str :param keepdim: Keep the tensor's shape (memory size, number of environments, size) (default: ``True``) If False, the returned tensor will have a shape of (memory size * number of environments, size) :type keepdim: bool, optional :raises KeyError: The tensor does not exist :return: Tensor :rtype: torch.Tensor """ return self.tensors[name] if keepdim else self.tensors_view[name] def set_tensor_by_name(self, name: str, tensor: torch.Tensor) -> None: """Set a tensor by its name :param name: Name of the tensor to set :type name: str :param tensor: Tensor to set :type tensor: torch.Tensor :raises KeyError: The tensor does not exist """ with torch.no_grad(): self.tensors[name].copy_(tensor) def create_tensor(self, name: str, size: Union[int, Tuple[int], gym.Space, gymnasium.Space], dtype: Optional[torch.dtype] = None, keep_dimensions: bool = False) -> bool: """Create a new internal tensor in memory The tensor will have a 3-components shape (memory size, number of environments, size). The internal representation will use _tensor_<name> as the name of the class property :param name: Tensor name (the name has to follow the python PEP 8 style) :type name: str :param size: Number of elements in the last dimension (effective data size). The product of the elements will be computed for sequences or gym/gymnasium spaces :type size: int, tuple or list of integers, gym.Space, or gymnasium.Space :param dtype: Data type (torch.dtype) (default: ``None``). 
If None, the global default torch data type will be used :type dtype: torch.dtype or None, optional :param keep_dimensions: Whether or not to keep the dimensions defined through the size parameter (default: ``False``) :type keep_dimensions: bool, optional :raises ValueError: The tensor name exists already but the size or dtype are different :return: True if the tensor was created, otherwise False :rtype: bool """ # compute data size size = self._get_space_size(size, keep_dimensions) # check dtype and size if the tensor exists if name in self.tensors: tensor = self.tensors[name] if tensor.size(-1) != size: raise ValueError(f"Size of tensor {name} ({size}) doesn't match the existing one ({tensor.size(-1)})") if dtype is not None and tensor.dtype != dtype: raise ValueError(f"Dtype of tensor {name} ({dtype}) doesn't match the existing one ({tensor.dtype})") return False # define tensor shape tensor_shape = (self.memory_size, self.num_envs, *size) if keep_dimensions else (self.memory_size, self.num_envs, size) view_shape = (-1, *size) if keep_dimensions else (-1, size) # create tensor (_tensor_<name>) and add it to the internal storage setattr(self, f"_tensor_{name}", torch.zeros(tensor_shape, device=self.device, dtype=dtype)) # update internal variables self.tensors[name] = getattr(self, f"_tensor_{name}") self.tensors_view[name] = self.tensors[name].view(*view_shape) self.tensors_keep_dimensions[name] = keep_dimensions # fill the tensors (float tensors) with NaN for tensor in self.tensors.values(): if torch.is_floating_point(tensor): tensor.fill_(float("nan")) return True def reset(self) -> None: """Reset the memory by cleaning internal indexes and flags Old data will be retained until overwritten, but access through the available methods will not be guaranteed Default values of the internal indexes and flags - filled: False - env_index: 0 - memory_index: 0 """ self.filled = False self.env_index = 0 self.memory_index = 0 def add_samples(self, **tensors: torch.Tensor) -> None: """Record samples in memory Samples should be a tensor with 2-components shape (number of environments, data size). All tensors must be of the same shape According to the number of environments, the following classification is made: - one environment: Store a single sample (tensors with one dimension) and increment the environment index (second index) by one - number of environments less than num_envs: Store the samples and increment the environment index (second index) by the number of the environments - number of environments equals num_envs: Store the samples and increment the memory index (first index) by one :param tensors: Sampled data as key-value arguments where the keys are the names of the tensors to be modified. Non-existing tensors will be skipped :type tensors: dict :raises ValueError: No tensors were provided or the tensors have incompatible shapes """ if not tensors: raise ValueError("No samples to be recorded in memory. 
Pass samples as key-value arguments (where key is the tensor name)") # dimensions and shapes of the tensors (assume all tensors have the dimensions of the first tensor) tmp = tensors.get("states", tensors[next(iter(tensors))]) # ask for states first dim, shape = tmp.ndim, tmp.shape # multi environment (number of environments equals num_envs) if dim == 2 and shape[0] == self.num_envs: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name][self.memory_index].copy_(tensor) self.memory_index += 1 # multi environment (number of environments less than num_envs) elif dim == 2 and shape[0] < self.num_envs: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name][self.memory_index, self.env_index:self.env_index + tensor.shape[0]].copy_(tensor) self.env_index += tensor.shape[0] # single environment - multi sample (number of environments greater than num_envs (num_envs = 1)) elif dim == 2 and self.num_envs == 1: for name, tensor in tensors.items(): if name in self.tensors: num_samples = min(shape[0], self.memory_size - self.memory_index) remaining_samples = shape[0] - num_samples # copy the first n samples self.tensors[name][self.memory_index:self.memory_index + num_samples].copy_(tensor[:num_samples].unsqueeze(dim=1)) self.memory_index += num_samples # storage remaining samples if remaining_samples > 0: self.tensors[name][:remaining_samples].copy_(tensor[num_samples:].unsqueeze(dim=1)) self.memory_index = remaining_samples # single environment elif dim == 1: for name, tensor in tensors.items(): if name in self.tensors: self.tensors[name][self.memory_index, self.env_index].copy_(tensor) self.env_index += 1 else: raise ValueError(f"Expected shape (number of environments = {self.num_envs}, data size), got {shape}") # update indexes and flags if self.env_index >= self.num_envs: self.env_index = 0 self.memory_index += 1 if self.memory_index >= self.memory_size: self.memory_index = 0 self.filled = True # export tensors to file if self.export: self.save(directory=self.export_directory, format=self.export_format) def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1, sequence_length: int = 1) -> List[List[torch.Tensor]]: """Data sampling method to be implemented by the inheriting classes :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param batch_size: Number of element to sample :type batch_size: int :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :param sequence_length: Length of each sequence (default: ``1``) :type sequence_length: int, optional :raises NotImplementedError: The method has not been implemented :return: Sampled data from tensors sorted according to their position in the list of names. 
The sampled tensors will have the following shape: (batch size, data size) :rtype: list of torch.Tensor list """ raise NotImplementedError("The sampling method (.sample()) is not implemented") def sample_by_index(self, names: Tuple[str], indexes: Union[tuple, np.ndarray, torch.Tensor], mini_batches: int = 1) -> List[List[torch.Tensor]]: """Sample data from memory according to their indexes :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param indexes: Indexes used for sampling :type indexes: tuple or list, numpy.ndarray or torch.Tensor :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (number of indexes, data size) :rtype: list of torch.Tensor list """ if mini_batches > 1: batches = BatchSampler(indexes, batch_size=len(indexes) // mini_batches, drop_last=True) return [[self.tensors_view[name][batch] for name in names] for batch in batches] return [[self.tensors_view[name][indexes] for name in names]] def sample_all(self, names: Tuple[str], mini_batches: int = 1, sequence_length: int = 1) -> List[List[torch.Tensor]]: """Sample all data from memory :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :param sequence_length: Length of each sequence (default: ``1``) :type sequence_length: int, optional :return: Sampled data from memory. The sampled tensors will have the following shape: (memory size * number of environments, data size) :rtype: list of torch.Tensor list """ # sequential order if sequence_length > 1: if mini_batches > 1: batches = BatchSampler(self.all_sequence_indexes, batch_size=len(self.all_sequence_indexes) // mini_batches, drop_last=True) return [[self.tensors_view[name][batch] for name in names] for batch in batches] return [[self.tensors_view[name][self.all_sequence_indexes] for name in names]] # default order if mini_batches > 1: indexes = np.arange(self.memory_size * self.num_envs) batches = BatchSampler(indexes, batch_size=len(indexes) // mini_batches, drop_last=True) return [[self.tensors_view[name][batch] for name in names] for batch in batches] return [[self.tensors_view[name] for name in names]] def get_sampling_indexes(self) -> Union[tuple, np.ndarray, torch.Tensor]: """Get the last indexes used for sampling :return: Last sampling indexes :rtype: tuple or list, numpy.ndarray or torch.Tensor """ return self.sampling_indexes def save(self, directory: str = "", format: str = "pt") -> None: """Save the memory to a file Supported formats: - PyTorch (pt) - NumPy (npz) - Comma-separated values (csv) :param directory: Path to the folder where the memory will be saved. 
If not provided, the directory defined in the constructor will be used :type directory: str :param format: Format of the file where the memory will be saved (default: ``"pt"``) :type format: str, optional :raises ValueError: If the format is not supported """ if not directory: directory = self.export_directory os.makedirs(os.path.join(directory, "memories"), exist_ok=True) memory_path = os.path.join(directory, "memories", \ "{}_memory_{}.{}".format(datetime.datetime.now().strftime("%y-%m-%d_%H-%M-%S-%f"), hex(id(self)), format)) # torch if format == "pt": torch.save({name: self.tensors[name] for name in self.get_tensor_names()}, memory_path) # numpy elif format == "npz": np.savez(memory_path, **{name: self.tensors[name].cpu().numpy() for name in self.get_tensor_names()}) # comma-separated values elif format == "csv": # open csv writer # TODO: support keeping the dimensions with open(memory_path, "a") as file: writer = csv.writer(file) names = self.get_tensor_names() # write headers headers = [[f"{name}.{i}" for i in range(self.tensors_view[name].shape[-1])] for name in names] writer.writerow([item for sublist in headers for item in sublist]) # write rows for i in range(len(self)): writer.writerow(functools.reduce(operator.iconcat, [self.tensors_view[name][i].tolist() for name in names], [])) # unsupported format else: raise ValueError(f"Unsupported format: {format}. Available formats: pt, csv, npz") def load(self, path: str) -> None: """Load the memory from a file Supported formats: - PyTorch (pt) - NumPy (npz) - Comma-separated values (csv) :param path: Path to the file where the memory will be loaded :type path: str :raises ValueError: If the format is not supported """ # torch if path.endswith(".pt"): data = torch.load(path) for name in self.get_tensor_names(): setattr(self, f"_tensor_{name}", data[name]) # numpy elif path.endswith(".npz"): data = np.load(path) for name in data: setattr(self, f"_tensor_{name}", torch.tensor(data[name])) # comma-separated values elif path.endswith(".csv"): # TODO: load the memory from a csv pass # unsupported format else: raise ValueError(f"Unsupported format: {path}")
21,679
Python
45.226013
149
0.6087
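A minimal usage sketch of the torch Memory API documented above (create_tensor, add_samples, sample_all, save), using the RandomMemory subclass that appears later in this listing. The sizes and values below (observation size 4, two parallel environments, CPU device, output directory) are illustrative assumptions, not values taken from the source.

```python
# Sketch (not from the repository) of the Memory workflow: create named tensors,
# record per-environment samples, then read flattened views back.
import torch

from skrl.memories.torch import RandomMemory

# assumed sizes: 100 slots, 2 parallel environments, CPU storage
memory = RandomMemory(memory_size=100, num_envs=2, device="cpu")

# each tensor is allocated with shape (memory size, number of environments, size)
memory.create_tensor(name="states", size=4, dtype=torch.float32)
memory.create_tensor(name="actions", size=1, dtype=torch.float32)
memory.create_tensor(name="rewards", size=1, dtype=torch.float32)

# one transition per environment: tensors of shape (num_envs, data size)
memory.add_samples(states=torch.rand(2, 4),
                   actions=torch.rand(2, 1),
                   rewards=torch.rand(2, 1))

# sample_all returns views of shape (memory size * number of environments, data size)
states, actions, rewards = memory.sample_all(names=["states", "actions", "rewards"])[0]
print(states.shape)  # torch.Size([200, 4])

# export the whole memory to disk (formats documented above: "pt", "npz", "csv")
memory.save(directory="./memory_dump", format="npz")  # assumed output directory
```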
Toni-SM/skrl/skrl/memories/torch/__init__.py
from skrl.memories.torch.base import Memory # isort:skip from skrl.memories.torch.random import RandomMemory
111
Python
26.999993
57
0.81982
Toni-SM/skrl/skrl/memories/torch/random.py
from typing import List, Optional, Tuple, Union import torch from skrl.memories.torch import Memory class RandomMemory(Memory): def __init__(self, memory_size: int, num_envs: int = 1, device: Optional[Union[str, torch.device]] = None, export: bool = False, export_format: str = "pt", export_directory: str = "", replacement=True) -> None: """Random sampling memory Sample a batch from memory randomly :param memory_size: Maximum number of elements in the first dimension of each internal storage :type memory_size: int :param num_envs: Number of parallel environments (default: ``1``) :type num_envs: int, optional :param device: Device on which a tensor/array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param export: Export the memory to a file (default: ``False``). If True, the memory will be exported when the memory is filled :type export: bool, optional :param export_format: Export format (default: ``"pt"``). Supported formats: torch (pt), numpy (np), comma separated values (csv) :type export_format: str, optional :param export_directory: Directory where the memory will be exported (default: ``""``). If empty, the agent's experiment directory will be used :type export_directory: str, optional :param replacement: Flag to indicate whether the sample is with or without replacement (default: ``True``). Replacement implies that a value can be selected multiple times (the batch size is always guaranteed). Sampling without replacement will return a batch of maximum memory size if the memory size is less than the requested batch size :type replacement: bool, optional :raises ValueError: The export format is not supported """ super().__init__(memory_size, num_envs, device, export, export_format, export_directory) self._replacement = replacement def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1, sequence_length: int = 1) -> List[List[torch.Tensor]]: """Sample a batch from memory randomly :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param batch_size: Number of element to sample :type batch_size: int :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :param sequence_length: Length of each sequence (default: ``1``) :type sequence_length: int, optional :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (batch size, data size) :rtype: list of torch.Tensor list """ # compute valid memory sizes size = len(self) if sequence_length > 1: sequence_indexes = torch.arange(0, self.num_envs * sequence_length, self.num_envs) size -= sequence_indexes[-1].item() # generate random indexes if self._replacement: indexes = torch.randint(0, size, (batch_size,)) else: # details about the random sampling performance can be found here: # https://discuss.pytorch.org/t/torch-equivalent-of-numpy-random-choice/16146/19 indexes = torch.randperm(size, dtype=torch.long)[:batch_size] # generate sequence indexes if sequence_length > 1: indexes = (sequence_indexes.repeat(indexes.shape[0], 1) + indexes.view(-1, 1)).view(-1) self.sampling_indexes = indexes return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)
4,190
Python
46.624999
156
0.609785
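The following sketch (assumed sizes; not part of the repository) illustrates RandomMemory.sample as implemented above: with replacement enabled, batch_size indexes are drawn uniformly, and when mini_batches > 1 they are split into equally sized chunks by sample_by_index.

```python
import torch

from skrl.memories.torch import RandomMemory

# assumed configuration: 64 slots, a single environment, sampling with replacement
memory = RandomMemory(memory_size=64, num_envs=1, device="cpu", replacement=True)
memory.create_tensor(name="states", size=3, dtype=torch.float32)

# fill the memory completely (64 additions of a (num_envs, size) tensor)
for _ in range(64):
    memory.add_samples(states=torch.rand(1, 3))

# 32 random indexes split into 2 mini-batches of 16 (batch_size // mini_batches, drop_last=True)
batches = memory.sample(names=["states"], batch_size=32, mini_batches=2)
assert len(batches) == 2
assert batches[0][0].shape == (16, 3)
```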
Toni-SM/skrl/skrl/memories/jax/__init__.py
from skrl.memories.jax.base import Memory # isort:skip from skrl.memories.jax.random import RandomMemory
107
Python
25.999994
55
0.813084
Toni-SM/skrl/skrl/memories/jax/random.py
from typing import List, Optional, Tuple import jax import numpy as np from skrl.memories.jax import Memory class RandomMemory(Memory): def __init__(self, memory_size: int, num_envs: int = 1, device: Optional[jax.Device] = None, export: bool = False, export_format: str = "pt", export_directory: str = "", replacement=True) -> None: """Random sampling memory Sample a batch from memory randomly :param memory_size: Maximum number of elements in the first dimension of each internal storage :type memory_size: int :param num_envs: Number of parallel environments (default: ``1``) :type num_envs: int, optional :param device: Device on which an array is or will be allocated (default: ``None``) :type device: jax.Device, optional :param export: Export the memory to a file (default: ``False``). If True, the memory will be exported when the memory is filled :type export: bool, optional :param export_format: Export format (default: ``"pt"``). Supported formats: torch (pt), numpy (np), comma separated values (csv) :type export_format: str, optional :param export_directory: Directory where the memory will be exported (default: ``""``). If empty, the agent's experiment directory will be used :type export_directory: str, optional :param replacement: Flag to indicate whether the sample is with or without replacement (default: ``True``). Replacement implies that a value can be selected multiple times (the batch size is always guaranteed). Sampling without replacement will return a batch of maximum memory size if the memory size is less than the requested batch size :type replacement: bool, optional :raises ValueError: The export format is not supported """ super().__init__(memory_size, num_envs, device, export, export_format, export_directory) self._replacement = replacement def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1) -> List[List[jax.Array]]: """Sample a batch from memory randomly :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param batch_size: Number of element to sample :type batch_size: int :param mini_batches: Number of mini-batches to sample (default: ``1``) :type mini_batches: int, optional :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (batch size, data size) :rtype: list of jax.Array list """ # generate random indexes if self._replacement: indexes = np.random.randint(0, len(self), (batch_size,)) else: indexes = np.random.permutation(len(self))[:batch_size] return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)
3,247
Python
46.072463
156
0.619341
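A comparable sketch for the JAX backend, modeled on the unit test further below (tests/test_jax_memories_memory.py); the memory size, number of environments and tensor size are assumptions chosen for illustration.

```python
import jax.numpy as jnp

from skrl.memories.jax import RandomMemory

# assumed configuration: 10 slots and 2 parallel environments
memory = RandomMemory(memory_size=10, num_envs=2)
memory.create_tensor(name="states", size=5, dtype=jnp.float32)

# overwrite the whole storage, shaped (memory size, number of environments, size)
memory.set_tensor_by_name("states", jnp.arange(10 * 2 * 5, dtype=jnp.float32).reshape(10, 2, 5))

# a single mini-batch containing all (memory size * number of environments, size) rows
[[states]] = memory.sample_all(["states"], mini_batches=1)
assert states.shape == (20, 5)
```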
Toni-SM/skrl/tests/test_agents.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.agents.torch import Agent from skrl.agents.torch.a2c import A2C from skrl.agents.torch.amp import AMP from skrl.agents.torch.cem import CEM from skrl.agents.torch.ddpg import DDPG from skrl.agents.torch.dqn import DDQN, DQN from skrl.agents.torch.ppo import PPO from skrl.agents.torch.q_learning import Q_LEARNING from skrl.agents.torch.sac import SAC from skrl.agents.torch.sarsa import SARSA from skrl.agents.torch.td3 import TD3 from skrl.agents.torch.trpo import TRPO from .utils import DummyModel @pytest.fixture def classes_and_kwargs(): return [(A2C, {"models": {"policy": DummyModel()}}), (AMP, {"models": {"policy": DummyModel()}}), (CEM, {"models": {"policy": DummyModel()}}), (DDPG, {"models": {"policy": DummyModel()}}), (DQN, {"models": {"policy": DummyModel()}}), (DDQN, {"models": {"policy": DummyModel()}}), (PPO, {"models": {"policy": DummyModel()}}), (Q_LEARNING, {"models": {"policy": DummyModel()}}), (SAC, {"models": {"policy": DummyModel()}}), (SARSA, {"models": {"policy": DummyModel()}}), (TD3, {"models": {"policy": DummyModel()}}), (TRPO, {"models": {"policy": DummyModel()}})] def test_agent(capsys, classes_and_kwargs): for klass, kwargs in classes_and_kwargs: cfg = {"learning_starts": 1, "experiment": {"write_interval": 0}} agent: Agent = klass(cfg=cfg, **kwargs) agent.init() agent.pre_interaction(timestep=0, timesteps=1) # agent.act(None, timestep=0, timestesps=1) agent.record_transition(states=torch.tensor([]), actions=torch.tensor([]), rewards=torch.tensor([]), next_states=torch.tensor([]), terminated=torch.tensor([]), truncated=torch.tensor([]), infos={}, timestep=0, timesteps=1) agent.post_interaction(timestep=0, timesteps=1)
2,251
Python
37.169491
63
0.561972
Toni-SM/skrl/tests/test_examples_gymnasium.py
import os import subprocess import warnings import hypothesis import hypothesis.strategies as st import pytest EXAMPLE_DIR = "gymnasium" SCRIPTS = ["ddpg_gymnasium_pendulum.py", "cem_gymnasium_cartpole.py", "dqn_gymnasium_cartpole.py", "q_learning_gymnasium_frozen_lake.py"] EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples")) COMMANDS = [f"python {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)}" for script in SCRIPTS] @pytest.mark.parametrize("command", COMMANDS) def test_scripts(capsys, command): try: import gymnasium except ImportError as e: warnings.warn(f"\n\nUnable to import gymnasium ({e}).\nThis test will be skipped\n") return subprocess.run(command, shell=True, check=True)
837
Python
30.037036
124
0.690562
Toni-SM/skrl/tests/test_examples_omniisaacgym.py
import os import subprocess import warnings import hypothesis import hypothesis.strategies as st import pytest # See the following link for Omniverse Isaac Sim Python environment # https://docs.omniverse.nvidia.com/app_isaacsim/app_isaacsim/install_python.html PYTHON_ENVIRONMENT = "./python.sh" EXAMPLE_DIR = "omniisaacgym" SCRIPTS = ["ppo_cartpole.py"] EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples")) COMMANDS = [f"{PYTHON_ENVIRONMENT} {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)} headless=True num_envs=64" for script in SCRIPTS] @pytest.mark.parametrize("command", COMMANDS) def test_scripts(capsys, command): try: import omniisaacgymenvs except ImportError as e: warnings.warn(f"\n\nUnable to import omniisaacgymenvs ({e}).\nThis test will be skipped\n") return subprocess.run(command, shell=True, check=True)
939
Python
32.571427
134
0.734824
Toni-SM/skrl/tests/test_envs.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.envs.torch import Wrapper, wrap_env from .utils import DummyEnv @pytest.fixture def classes_and_kwargs(): return [] @pytest.mark.parametrize("wrapper", ["gym", "gymnasium", "dm", "robosuite", \ "isaacgym-preview2", "isaacgym-preview3", "isaacgym-preview4", "omniverse-isaacgym"]) def test_wrap_env(capsys, classes_and_kwargs, wrapper): env = DummyEnv(num_envs=1) try: env: Wrapper = wrap_env(env=env, wrapper=wrapper) except ValueError as e: warnings.warn(f"{e}. This test will be skipped for '{wrapper}'") except ModuleNotFoundError as e: warnings.warn(f"{e}. The '{wrapper}' wrapper module is not found. This test will be skipped") env.observation_space env.action_space env.state_space env.num_envs env.device
903
Python
24.828571
101
0.69103
Toni-SM/skrl/tests/test_resources_schedulers.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.resources.schedulers.torch import KLAdaptiveRL @pytest.fixture def classes_and_kwargs(): return [(KLAdaptiveRL, {})] @pytest.mark.parametrize("optimizer", [torch.optim.Adam([torch.ones((1,))], lr=0.1), torch.optim.SGD([torch.ones((1,))], lr=0.1)]) def test_step(capsys, classes_and_kwargs, optimizer): for klass, kwargs in classes_and_kwargs: scheduler = klass(optimizer, **kwargs) scheduler.step(0.0)
578
Python
24.173912
84
0.66782
Toni-SM/skrl/tests/test_model_instantiators.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.models.torch import Model from skrl.utils.model_instantiators import ( Shape, categorical_model, deterministic_model, gaussian_model, multivariate_gaussian_model ) @pytest.fixture def classes_and_kwargs(): return [(categorical_model, {}), (deterministic_model, {}), (gaussian_model, {}), (multivariate_gaussian_model, {})] def test_models(capsys, classes_and_kwargs): for klass, kwargs in classes_and_kwargs: model: Model = klass(observation_space=1, action_space=1, device="cpu", **kwargs)
675
Python
22.310344
89
0.685926
Toni-SM/skrl/tests/utils.py
import random import gymnasium as gym import torch class DummyEnv(gym.Env): def __init__(self, num_envs, device = "cpu"): self.num_agents = 1 self.num_envs = num_envs self.device = torch.device(device) self.action_space = gym.spaces.Discrete(2) self.observation_space = gym.spaces.Box(low=-1, high=1, shape=(2,)) def __getattr__(self, key): if key in ["_spec_to_space", "observation_spec"]: return lambda *args, **kwargs: None return None def step(self, action): observation = self.observation_space.sample() reward = random.random() terminated = random.random() > 0.95 truncated = random.random() > 0.95 observation = torch.tensor(observation, dtype=torch.float32).view(self.num_envs, -1) reward = torch.tensor(reward, device=self.device, dtype=torch.float32).view(self.num_envs, -1) terminated = torch.tensor(terminated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) truncated = torch.tensor(truncated, device=self.device, dtype=torch.bool).view(self.num_envs, -1) return observation, reward, terminated, truncated, {} def reset(self): observation = self.observation_space.sample() observation = torch.tensor(observation, dtype=torch.float32).view(self.num_envs, -1) return observation, {} def render(self, *args, **kwargs): pass def close(self, *args, **kwargs): pass class _DummyBaseAgent: def __init__(self): pass def record_transition(self, states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps): pass def pre_interaction(self, timestep, timesteps): pass def post_interaction(self, timestep, timesteps): pass def set_running_mode(self, mode): pass class DummyAgent(_DummyBaseAgent): def __init__(self): super().__init__() def init(self, trainer_cfg=None): pass def act(self, states, timestep, timesteps): return torch.tensor([]), None, {} def record_transition(self, states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps): pass def pre_interaction(self, timestep, timesteps): pass def post_interaction(self, timestep, timesteps): pass class DummyModel(torch.nn.Module): def __init__(self): super().__init__() self.device = torch.device("cpu") self.layer = torch.nn.Linear(1, 1) def set_mode(self, *args, **kwargs): pass def get_specification(self, *args, **kwargs): return {} def act(self, *args, **kwargs): return torch.tensor([]), None, {}
2,763
Python
27.494845
122
0.621426
Toni-SM/skrl/tests/test_memories.py
import string import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.memories.torch import Memory, RandomMemory @pytest.fixture def classes_and_kwargs(): return [(RandomMemory, {})] @pytest.mark.parametrize("device", [None, "cpu", "cuda:0"]) def test_device(capsys, classes_and_kwargs, device): _device = torch.device(device) if device is not None else torch.device("cuda:0" if torch.cuda.is_available() else "cpu") for klass, kwargs in classes_and_kwargs: try: memory: Memory = klass(memory_size=1, device=device, **kwargs) except (RuntimeError, AssertionError) as e: with capsys.disabled(): print(e) warnings.warn(f"Invalid device: {device}. This test will be skipped") continue assert memory.device == _device # defined device @hypothesis.given(names=st.sets(st.text(alphabet=string.ascii_letters + string.digits + "_", min_size=1, max_size=10), min_size=1, max_size=10)) @hypothesis.settings(suppress_health_check=[hypothesis.HealthCheck.function_scoped_fixture], deadline=None) def test_create_tensors(capsys, classes_and_kwargs, names): for klass, kwargs in classes_and_kwargs: memory: Memory = klass(memory_size=1, **kwargs) for name in names: memory.create_tensor(name=name, size=1, dtype=torch.float32) assert memory.get_tensor_names() == sorted(names) @hypothesis.given(memory_size=st.integers(min_value=1, max_value=100), num_envs=st.integers(min_value=1, max_value=10), num_samples=st.integers(min_value=1, max_value=500)) @hypothesis.settings(suppress_health_check=[hypothesis.HealthCheck.function_scoped_fixture], deadline=None) def test_add_samples(capsys, classes_and_kwargs, memory_size, num_envs, num_samples): for klass, kwargs in classes_and_kwargs: memory: Memory = klass(memory_size=memory_size, num_envs=num_envs, **kwargs) memory.create_tensor(name="tensor_1", size=1, dtype=torch.float32) memory.create_tensor(name="tensor_2", size=2, dtype=torch.float32) # memory_index for _ in range(num_samples): memory.add_samples(tensor_1=torch.zeros((num_envs, 1))) assert memory.memory_index == num_samples % memory_size assert memory.filled == (num_samples >= memory_size) memory.reset() # memory_index, env_index for _ in range(num_samples): memory.add_samples(tensor_2=torch.zeros((2,))) assert memory.memory_index == (num_samples // num_envs) % memory_size assert memory.env_index == num_samples % num_envs assert memory.filled == (num_samples >= memory_size * num_envs)
2,769
Python
38.571428
144
0.668834
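For reference, the wrap-around arithmetic asserted in test_add_samples above, worked through with concrete numbers (the values are arbitrarily chosen, not taken from the test):

```python
# illustrative values only
memory_size, num_envs, num_samples = 10, 4, 23

# adding (num_envs, size) tensors: memory_index advances once per call and wraps around
memory_index = num_samples % memory_size            # 23 % 10 -> 3
filled = num_samples >= memory_size                 # True after the first full pass

# adding (size,) tensors one environment at a time: env_index advances first
env_index = num_samples % num_envs                               # 23 % 4 -> 3
memory_index_single = (num_samples // num_envs) % memory_size    # 5 % 10 -> 5
filled_single = num_samples >= memory_size * num_envs            # 23 >= 40 -> False

print(memory_index, filled, env_index, memory_index_single, filled_single)  # 3 True 3 5 False
```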
Toni-SM/skrl/tests/test_examples_isaac_orbit.py
import os import subprocess import warnings import hypothesis import hypothesis.strategies as st import pytest # See the following link for Isaac Orbit environment # https://isaac-orbit.github.io/orbit/source/setup/installation.html PYTHON_ENVIRONMENT = "orbit -p" EXAMPLE_DIR = "isaacorbit" SCRIPTS = ["ppo_cartpole.py"] EXAMPLES_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "docs", "source", "examples")) COMMANDS = [f"{PYTHON_ENVIRONMENT} {os.path.join(EXAMPLES_DIR, EXAMPLE_DIR, script)} --headless --num_envs 64" for script in SCRIPTS] @pytest.mark.parametrize("command", COMMANDS) def test_scripts(capsys, command): try: from omni.isaac.kit import SimulationApp except ImportError as e: warnings.warn(f"\n\nUnable to import omni.isaac.kit ({e}).\nThis test will be skipped\n") return subprocess.run(command, shell=True, check=True)
920
Python
31.892856
133
0.725
Toni-SM/skrl/tests/test_trainers.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.trainers.torch import ManualTrainer, ParallelTrainer, SequentialTrainer, Trainer from .utils import DummyAgent, DummyEnv @pytest.fixture def classes_and_kwargs(): return [(ManualTrainer, {"cfg": {"timesteps": 100}}), (ParallelTrainer, {"cfg": {"timesteps": 100}}), (SequentialTrainer, {"cfg": {"timesteps": 100}})] def test_train(capsys, classes_and_kwargs): env = DummyEnv(num_envs=1) agent = DummyAgent() for klass, kwargs in classes_and_kwargs: trainer: Trainer = klass(env, agents=agent, **kwargs) trainer.train() def test_eval(capsys, classes_and_kwargs): env = DummyEnv(num_envs=1) agent = DummyAgent() for klass, kwargs in classes_and_kwargs: trainer: Trainer = klass(env, agents=agent, **kwargs) trainer.eval()
921
Python
23.918918
90
0.676439
Toni-SM/skrl/tests/test_jax_memories_memory.py
import math import unittest import gym import jax import jax.numpy as jnp import numpy as np from skrl.memories.jax import Memory class TestCase(unittest.TestCase): def setUp(self): self.devices = [jax.devices("cpu")[0], jax.devices("gpu")[0]] self.memory_sizes = [10, 100, 1000] self.num_envs = [1, 10, 100] self.names = ["states", "actions", "rewards", "dones"] self.raw_sizes = [gym.spaces.Box(-1, 1, shape=(5,)), gym.spaces.Discrete(5), 1, 1] self.sizes = [5, 1, 1, 1] self.raw_dtypes = [jnp.float32, int, float, bool] self.dtypes = [np.float32, np.int32, np.float32, bool] self.mini_batches = [1, 2, 3, 5, 7] def tearDown(self): pass def test_devices(self): for device in self.devices: # TODO: test pass def test_tensor_names(self): for memory_size, num_envs in zip(self.memory_sizes, self.num_envs): # create memory memory = Memory(memory_size=memory_size, num_envs=num_envs) # create tensors for name, size, dtype in zip(self.names, self.raw_sizes, self.raw_dtypes): memory.create_tensor(name, size, dtype) # test memory.get_tensor_names self.assertCountEqual(self.names, memory.get_tensor_names(), "get_tensor_names") # test memory.get_tensor_by_name for name, size, dtype in zip(self.names, self.sizes, self.dtypes): tensor = memory.get_tensor_by_name(name, keepdim=True) self.assertSequenceEqual(memory.get_tensor_by_name(name, keepdim=True).shape, (memory_size, num_envs, size), "get_tensor_by_name(..., keepdim=True)") self.assertSequenceEqual(memory.get_tensor_by_name(name, keepdim=False).shape, (memory_size * num_envs, size), "get_tensor_by_name(..., keepdim=False)") self.assertEqual(memory.get_tensor_by_name(name, keepdim=True).dtype, dtype, "get_tensor_by_name(...).dtype") # test memory.set_tensor_by_name for name, size, dtype in zip(self.names, self.sizes, self.raw_dtypes): new_tensor = jnp.arange(memory_size * num_envs * size).reshape(memory_size, num_envs, size).astype(dtype) memory.set_tensor_by_name(name, new_tensor) tensor = memory.get_tensor_by_name(name, keepdim=True) self.assertTrue((tensor == new_tensor).all().item(), "set_tensor_by_name(...)") def test_sample(self): for memory_size, num_envs in zip(self.memory_sizes, self.num_envs): # create memory memory = Memory(memory_size=memory_size, num_envs=num_envs) # create tensors for name, size, dtype in zip(self.names, self.raw_sizes, self.raw_dtypes): memory.create_tensor(name, size, dtype) # fill memory for name, size, dtype in zip(self.names, self.sizes, self.raw_dtypes): new_tensor = jnp.arange(memory_size * num_envs * size).reshape(memory_size, num_envs, size).astype(dtype) memory.set_tensor_by_name(name, new_tensor) # test memory.sample_all for i, mini_batches in enumerate(self.mini_batches): samples = memory.sample_all(self.names, mini_batches=mini_batches) for sample, name, size in zip(samples[i], self.names, self.sizes): self.assertSequenceEqual(sample.shape, (memory_size * num_envs, size), f"sample_all(...).shape with mini_batches={mini_batches}") tensor = memory.get_tensor_by_name(name, keepdim=True) self.assertTrue((sample.reshape(memory_size, num_envs, size) == tensor).all().item(), f"sample_all(...) with mini_batches={mini_batches}") if __name__ == '__main__': import sys if not sys.argv[-1] == '--debug': raise RuntimeError('Test can only be runned manually with --debug flag') test = TestCase() test.setUp() for method in dir(test): if method.startswith('test_'): print('Running test: {}'.format(method)) getattr(test, method)() test.tearDown() print('All tests passed.')
4,231
Python
42.183673
168
0.599858
Toni-SM/skrl/tests/test_resources_noises.py
import warnings import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.resources.noises.torch import GaussianNoise, Noise, OrnsteinUhlenbeckNoise @pytest.fixture def classes_and_kwargs(): return [(GaussianNoise, {"mean": 0, "std": 1}), (OrnsteinUhlenbeckNoise, {"theta": 0.1, "sigma": 0.2, "base_scale": 0.3})] @pytest.mark.parametrize("device", [None, "cpu", "cuda:0"]) def test_device(capsys, classes_and_kwargs, device): _device = torch.device(device) if device is not None else torch.device("cuda:0" if torch.cuda.is_available() else "cpu") for klass, kwargs in classes_and_kwargs: try: noise: Noise = klass(device=device, **kwargs) except (RuntimeError, AssertionError) as e: with capsys.disabled(): print(e) warnings.warn(f"Invalid device: {device}. This test will be skipped") continue output = noise.sample((1,)) assert noise.device == _device # defined device assert output.device == _device # runtime device @hypothesis.given(size=st.lists(st.integers(min_value=1, max_value=10), max_size=5)) @hypothesis.settings(suppress_health_check=[hypothesis.HealthCheck.function_scoped_fixture], deadline=None) def test_sample(capsys, classes_and_kwargs, size): for klass, kwargs in classes_and_kwargs: noise: Noise = klass(**kwargs) # sample output = noise.sample(size) assert output.size() == torch.Size(size) # sample like tensor = torch.rand(size, device="cpu") output = noise.sample_like(tensor) assert output.size() == torch.Size(size)
1,686
Python
34.145833
124
0.657177
Toni-SM/skrl/tests/test_resources_preprocessors.py
import warnings import gym import gymnasium import hypothesis import hypothesis.strategies as st import pytest import torch from skrl.resources.preprocessors.torch import RunningStandardScaler @pytest.fixture def classes_and_kwargs(): return [(RunningStandardScaler, {"size": 1})] @pytest.mark.parametrize("device", [None, "cpu", "cuda:0"]) def test_device(capsys, classes_and_kwargs, device): _device = torch.device(device) if device is not None else torch.device("cuda:0" if torch.cuda.is_available() else "cpu") for klass, kwargs in classes_and_kwargs: try: preprocessor = klass(device=device, **kwargs) except (RuntimeError, AssertionError) as e: with capsys.disabled(): print(e) warnings.warn(f"Invalid device: {device}. This test will be skipped") continue assert preprocessor.device == _device # defined device assert preprocessor(torch.ones(kwargs["size"], device=_device)).device == _device # runtime device @pytest.mark.parametrize("space_and_size", [(gym.spaces.Box(low=-1, high=1, shape=(2, 3)), 6), (gymnasium.spaces.Box(low=-1, high=1, shape=(2, 3)), 6), (gym.spaces.Discrete(n=3), 1), (gymnasium.spaces.Discrete(n=3), 1)]) def test_forward(capsys, classes_and_kwargs, space_and_size): for klass, kwargs in classes_and_kwargs: space, size = space_and_size preprocessor = klass(size=space, device="cpu") output = preprocessor(torch.rand((10, size), device="cpu")) assert output.shape == torch.Size((10, size))
1,711
Python
37.044444
124
0.62069
Toni-SM/skrl/docs/README.md
# Documentation ## Install Sphinx and Read the Docs Sphinx Theme ```bash cd docs pip install -r requirements.txt ``` ## Building the documentation ```bash cd docs make html ``` To rebuild the documentation automatically each time a file changes: ```bash cd docs sphinx-autobuild ./source/ _build/html ``` ## Useful links - [Sphinx directives](https://www.sphinx-doc.org/en/master/usage/restructuredtext/directives.html) - [Math support in Sphinx](https://www.sphinx-doc.org/en/1.0/ext/math.html)

473
Markdown
15.928571
98
0.725159
Toni-SM/skrl/docs/source/404.rst
:orphan: Page not found ============== .. image:: _static/data/404-light.svg :width: 50% :align: center :class: only-light :alt: 404 .. image:: _static/data/404-dark.svg :width: 50% :align: center :class: only-dark :alt: 404 .. raw:: html <br> <div style="text-align: center; font-size: 1.75rem;"> <p style="margin: 0;"><strong>404: Puzzle piece not found.</strong></p> <p style="margin: 0;">Did you look under the sofa cushions?</p> </div> <br> <br> Since version 1.0.0, the documentation structure has changed to improve content organization and to provide a better browsing experience. Navigate using the left sidebar or type in the search box to find what you are looking for.
755
reStructuredText
24.199999
137
0.631788
Toni-SM/skrl/docs/source/index.rst
SKRL - Reinforcement Learning library (|version|) ================================================= .. raw:: html <a href="https://pypi.org/project/skrl"> <img alt="pypi" src="https://img.shields.io/pypi/v/skrl"> </a> <a href="https://huggingface.co/skrl"> <img alt="huggingface" src="https://img.shields.io/badge/%F0%9F%A4%97%20models-hugging%20face-F8D521"> </a> <a href="https://github.com/Toni-SM/skrl/discussions"> <img alt="discussions" src="https://img.shields.io/github/discussions/Toni-SM/skrl"> </a> <br> <a href="https://github.com/Toni-SM/skrl/blob/main/LICENSE"> <img alt="license" src="https://img.shields.io/github/license/Toni-SM/skrl"> </a> &nbsp;&nbsp;&nbsp;&nbsp; <a href="https://skrl.readthedocs.io"> <img alt="docs" src="https://readthedocs.org/projects/skrl/badge/?version=latest"> </a> <a href="https://github.com/Toni-SM/skrl/actions/workflows/python-test.yml"> <img alt="pytest" src="https://github.com/Toni-SM/skrl/actions/workflows/python-test.yml/badge.svg"> </a> <a href="https://github.com/Toni-SM/skrl/actions/workflows/pre-commit.yml"> <img alt="pre-commit" src="https://github.com/Toni-SM/skrl/actions/workflows/pre-commit.yml/badge.svg"> </a> <br><br> **skrl** is an open-source library for Reinforcement Learning written in Python (on top of `PyTorch <https://pytorch.org/>`_ and `JAX <https://jax.readthedocs.io>`_) and designed with a focus on modularity, readability, simplicity and transparency of algorithm implementation. In addition to supporting the OpenAI `Gym <https://www.gymlibrary.dev>`_ / Farama `Gymnasium <https://gymnasium.farama.org/>`_, `DeepMind <https://github.com/deepmind/dm_env>`_ and other environment interfaces, it allows loading and configuring `NVIDIA Isaac Gym <https://developer.nvidia.com/isaac-gym>`_, `NVIDIA Isaac Orbit <https://isaac-orbit.github.io/orbit/index.html>`_ and `NVIDIA Omniverse Isaac Gym <https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_gym_isaac_gym.html>`_ environments, enabling agents' simultaneous training by scopes (subsets of environments among all available environments), which may or may not share resources, in the same run. **Main features:** * PyTorch (|_1| |pytorch| |_1|) and JAX (|_1| |jax| |_1|) * Clean code * Modularity and reusability * Documented library, code and implementations * Support for Gym/Gymnasium (single and vectorized), DeepMind, NVIDIA Isaac Gym (preview 2, 3 and 4), NVIDIA Isaac Orbit, NVIDIA Omniverse Isaac Gym environments, among others * Simultaneous learning by scopes in Gym/Gymnasium (vectorized), NVIDIA Isaac Gym, NVIDIA Isaac Orbit and NVIDIA Omniverse Isaac Gym .. raw:: html <br> .. warning:: **skrl** is under **active continuous development**. Make sure you always have the latest version. Visit the `develop <https://github.com/Toni-SM/skrl/tree/develop>`_ branch or its `documentation <https://skrl.readthedocs.io/en/develop>`_ to access the latest updates to be released. | **GitHub repository:** https://github.com/Toni-SM/skrl | **Questions or discussions:** https://github.com/Toni-SM/skrl/discussions | **Citing skrl:** To cite this library (created at Mondragon Unibertsitatea) use the following reference to its article: `skrl: Modular and Flexible Library for Reinforcement Learning <http://jmlr.org/papers/v24/23-0112.html>`_. .. 
code-block:: bibtex @article{serrano2023skrl, author = {Antonio Serrano-Muñoz and Dimitrios Chrysostomou and Simon Bøgh and Nestor Arana-Arexolaleiba}, title = {skrl: Modular and Flexible Library for Reinforcement Learning}, journal = {Journal of Machine Learning Research}, year = {2023}, volume = {24}, number = {254}, pages = {1--9}, url = {http://jmlr.org/papers/v24/23-0112.html} } .. raw:: html <br><hr> User guide ---------- To start using the library, visit the following links: .. toctree:: :maxdepth: 1 intro/installation intro/getting_started intro/examples intro/data .. raw:: html <br><hr> Library components (overview) ----------------------------- .. toctree:: :caption: API :hidden: api/agents api/multi_agents api/envs api/memories api/models api/resources api/trainers api/utils Agents ^^^^^^ Definition of reinforcement learning algorithms that compute an optimal policy. All agents inherit from one and only one :doc:`base class <api/agents>` (that defines a uniform interface and provides for common functionalities) but which is not tied to the implementation details of the algorithms * :doc:`Advantage Actor Critic <api/agents/a2c>` (**A2C**) * :doc:`Adversarial Motion Priors <api/agents/amp>` (**AMP**) * :doc:`Cross-Entropy Method <api/agents/cem>` (**CEM**) * :doc:`Deep Deterministic Policy Gradient <api/agents/ddpg>` (**DDPG**) * :doc:`Double Deep Q-Network <api/agents/ddqn>` (**DDQN**) * :doc:`Deep Q-Network <api/agents/dqn>` (**DQN**) * :doc:`Proximal Policy Optimization <api/agents/ppo>` (**PPO**) * :doc:`Q-learning <api/agents/q_learning>` (**Q-learning**) * :doc:`Robust Policy Optimization <api/agents/rpo>` (**RPO**) * :doc:`Soft Actor-Critic <api/agents/sac>` (**SAC**) * :doc:`State Action Reward State Action <api/agents/sarsa>` (**SARSA**) * :doc:`Twin-Delayed DDPG <api/agents/td3>` (**TD3**) * :doc:`Trust Region Policy Optimization <api/agents/trpo>` (**TRPO**) Multi-agents ^^^^^^^^^^^^ Definition of reinforcement learning algorithms that compute an optimal policies. All agents (multi-agents) inherit from one and only one :doc:`base class <api/multi_agents>` (that defines a uniform interface and provides for common functionalities) but which is not tied to the implementation details of the algorithms * :doc:`Independent Proximal Policy Optimization <api/multi_agents/ippo>` (**IPPO**) * :doc:`Multi-Agent Proximal Policy Optimization <api/multi_agents/mappo>` (**MAPPO**) Environments ^^^^^^^^^^^^ Definition of the Isaac Gym (preview 2, 3 and 4), Isaac Orbit and Omniverse Isaac Gym environment loaders, and wrappers for the Gym/Gymnasium, DeepMind, Isaac Gym, Isaac Orbit, Omniverse Isaac Gym environments, among others * :doc:`Single-agent environment wrapping <api/envs/wrapping>` for **Gym/Gymnasium**, **DeepMind**, **Isaac Gym**, **Isaac Orbit**, **Omniverse Isaac Gym** environments, among others * :doc:`Multi-agent environment wrapping <api/envs/multi_agents_wrapping>` for **PettingZoo** and **Bi-DexHands** environments * Loading :doc:`Isaac Gym environments <api/envs/isaac_gym>` * Loading :doc:`Isaac Orbit environments <api/envs/isaac_orbit>` * Loading :doc:`Omniverse Isaac Gym environments <api/envs/omniverse_isaac_gym>` Memories ^^^^^^^^ Generic memory definitions. Such memories are not bound to any agent and can be used for any role such as rollout buffer or experience replay memory, for example. 
All memories inherit from a :doc:`base class <api/memories>` that defines a uniform interface and keeps track (in allocated tensors) of transitions with the environment or other defined data * :doc:`Random memory <api/memories/random>` Models ^^^^^^ Definition of helper mixins for the construction of tabular functions or function approximators using artificial neural networks. This library does not provide predefined policies but helper mixins to create discrete and continuous (stochastic or deterministic) policies in which the user only has to define the tables (tensors) or artificial neural networks. All models inherit from one :doc:`base class <api/models>` that defines a uniform interface and provides for common functionalities. In addition, it is possible to create :doc:`shared model <api/models/shared_model>` by combining the implemented definitions * :doc:`Tabular model <api/models/tabular>` (discrete domain) * :doc:`Categorical model <api/models/categorical>` (discrete domain) * :doc:`Multi-Categorical model <api/models/multicategorical>` (discrete domain) * :doc:`Gaussian model <api/models/gaussian>` (continuous domain) * :doc:`Multivariate Gaussian model <api/models/multivariate_gaussian>` (continuous domain) * :doc:`Deterministic model <api/models/deterministic>` (continuous domain) Trainers ^^^^^^^^ Definition of the procedures responsible for managing the agent's training and interaction with the environment. All trainers inherit from a :doc:`base class <api/trainers>` that defines a uniform interface and provides for common functionalities * :doc:`Sequential trainer <api/trainers/sequential>` * :doc:`Parallel trainer <api/trainers/parallel>` * :doc:`Step trainer <api/trainers/step>` Resources ^^^^^^^^^ Definition of resources used by the agents during training and/or evaluation, such as exploration noises or learning rate schedulers **Noises:** Definition of the noises used by the agents during the exploration stage. All noises inherit from a :doc:`base class <api/resources/noises>` that defines a uniform interface * :doc:`Gaussian <api/resources/noises/gaussian>` noise * :doc:`Ornstein-Uhlenbeck <api/resources/noises/ornstein_uhlenbeck>` noise **Learning rate schedulers:** Definition of learning rate schedulers. All schedulers inherit from the PyTorch :literal:`_LRScheduler` class (see `how to adjust learning rate <https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate>`_ in the PyTorch documentation for more details) * :doc:`KL Adaptive <api/resources/schedulers/kl_adaptive>` **Preprocessors:** Definition of preprocessors * :doc:`Running standard scaler <api/resources/preprocessors/running_standard_scaler>` **Optimizers:** Definition of optimizers * :doc:`Adam <api/resources/optimizers/adam>` Utils and configurations ^^^^^^^^^^^^^^^^^^^^^^^^ Definition of utilities and configurations * :doc:`ML frameworks <api/config/frameworks>` configuration * :doc:`Random seed <api/utils/seed>` * Memory and Tensorboard :doc:`file post-processing <api/utils/postprocessing>` * :doc:`Model instantiators <api/utils/model_instantiators>` * :doc:`Hugging Face integration <api/utils/huggingface>` * :doc:`Isaac Gym utils <api/utils/isaacgym_utils>` * :doc:`Omniverse Isaac Gym utils <api/utils/omniverse_isaacgym_utils>`
10,541
reStructuredText
50.42439
946
0.703823
Toni-SM/skrl/docs/source/conf.py
import os import sys # skrl library sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) print("[DOCS] skrl library path: {}".format(sys.path[0])) import skrl # project information project = "skrl" copyright = "2021, Toni-SM" author = "Toni-SM" if skrl.__version__ != "unknown": release = version = skrl.__version__ else: release = version = "1.1.0" master_doc = "index" # general configuration extensions = [ "sphinx.ext.duration", "sphinx.ext.doctest", "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.intersphinx", "sphinx_tabs.tabs", "sphinx_copybutton", "notfound.extension", ] # generate links to the documentation of objects in external projects intersphinx_mapping = { "python": ("https://docs.python.org/3/", None), "gym": ("https://www.gymlibrary.dev/", None), "gymnasium": ("https://gymnasium.farama.org/", None), "numpy": ("https://numpy.org/doc/stable/", None), "torch": ("https://pytorch.org/docs/stable/", None), "jax": ("https://jax.readthedocs.io/en/latest/", None), "flax": ("https://flax.readthedocs.io/en/latest/", None), "optax": ("https://optax.readthedocs.io/en/latest/", None), } pygments_style = "tango" pygments_dark_style = "zenburn" intersphinx_disabled_domains = ["std"] templates_path = ["_templates"] rst_prolog = """ .. include:: <s5defs.txt> .. |_1| unicode:: 0xA0 :trim: .. |_2| unicode:: 0xA0 0xA0 :trim: .. |_3| unicode:: 0xA0 0xA0 0xA0 :trim: .. |_4| unicode:: 0xA0 0xA0 0xA0 0xA0 :trim: .. |_5| unicode:: 0xA0 0xA0 0xA0 0xA0 0xA0 :trim: .. |jax| image:: /_static/data/logo-jax.svg :width: 28 .. |pytorch| image:: /_static/data/logo-torch.svg :width: 16 .. |br| raw:: html <br> """ # HTML output html_theme = "furo" html_title = f"<div style='text-align: center;'><strong>{project}</strong> ({version})</div>" html_scaled_image_link = False html_static_path = ["_static"] html_favicon = "_static/data/favicon.ico" html_css_files = ["css/skrl.css", "css/s5defs-roles.css"] html_theme_options = { # logo "light_logo": "data/logo-light-mode.png", "dark_logo": "data/logo-dark-mode.png", # edit button "source_repository": "https://github.com/Toni-SM/skrl", "source_branch": "../tree/main", "source_directory": "docs/source", # css "light_css_variables": { "color-brand-primary": "#FF4800", "color-brand-content": "#FF4800", }, "dark_css_variables": { "color-brand-primary": "#EAA000", "color-brand-content": "#EAA000", }, } # EPUB output epub_show_urls = "footnote" # autodoc ext autodoc_mock_imports = [ "gym", "gymnasium", "torch", "jax", "jaxlib", "flax", "optax", "tensorboard", "tqdm", "packaging", "isaacgym", ] # copybutton ext copybutton_prompt_text = r">>> |\.\.\. " copybutton_prompt_is_regexp = True # notfound ext notfound_template = "404.rst" notfound_context = { "title": "Page Not Found", "body": """ <h1>Page Not Found</h1> <p>Sorry, we couldn't find that page in skrl.</p> <p>Try using the search box or go to the homepage.</p> """, } # suppress warning messages suppress_warnings = [ "ref.python", # more than one target found for cross-reference ]
3,325
Python
21.62585
93
0.613233
Toni-SM/skrl/docs/source/examples/gym/jax_gym_pendulum_ddpg.py
import gym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, Model from skrl.resources.noises.jax import OrnsteinUhlenbeckNoise from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixins class Actor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact def __call__(self, inputs, role): x = nn.relu(nn.Dense(400)(inputs["states"])) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(self.num_actions)(x) # Pendulum-v1 action_space is -2 to 2 return 2 * nn.tanh(x), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1) x = nn.relu(nn.Dense(400)(x)) x = nn.relu(nn.Dense(300)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.make("Pendulum-v1") except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Pendulum-v")][0] print("Pendulum-v1 not found. Trying {}".format(env_id)) env = gym.make(env_id) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=15000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models = {} models["policy"] = Actor(env.observation_space, env.action_space, device) models["target_policy"] = Actor(env.observation_space, env.action_space, device) models["critic"] = Critic(env.observation_space, env.action_space, device) models["target_critic"] = Critic(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal", stddev=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg = DDPG_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device) cfg["batch_size"] = 100 cfg["random_timesteps"] = 100 cfg["learning_starts"] = 100 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 75 cfg["experiment"]["checkpoint_interval"] = 750 cfg["experiment"]["directory"] = "runs/jax/Pendulum" agent = DDPG(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 15000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
4,148
Python
35.394737
106
0.706847
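A possible follow-up to the DDPG training script above, assuming it is appended to that script (it reuses its env and agent objects). The checkpoint path is a placeholder, since the exact file name depends on the experiment directory written during training.

```python
# evaluate the trained (or reloaded) agent without further learning
from skrl.trainers.jax import SequentialTrainer

# agent.load("runs/jax/Pendulum/<experiment>/checkpoints/best_agent.pickle")  # placeholder path

cfg_eval = {"timesteps": 1000, "headless": True}
eval_trainer = SequentialTrainer(cfg=cfg_eval, env=env, agents=[agent])
eval_trainer.eval()
```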
Toni-SM/skrl/docs/source/examples/gym/jax_gym_cartpole_cem.py
import gym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.cem import CEM, CEM_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import CategoricalMixin, Model from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define model (categorical model) using mixin class Policy(CategoricalMixin, Model): def __init__(self, observation_space, action_space, device=None, unnormalized_log_prob=True, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) CategoricalMixin.__init__(self, unnormalized_log_prob) @nn.compact def __call__(self, inputs, role): x = nn.relu(nn.Dense(64)(inputs["states"])) x = nn.relu(nn.Dense(64)(x)) x = nn.Dense(self.num_actions)(x) return x, {} # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.make("CartPole-v0") except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("CartPole-v")][0] print("CartPole-v0 not found. Trying {}".format(env_id)) env = gym.make(env_id) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=1000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's model (function approximator). # CEM requires 1 model, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/cem.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal", stddev=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/cem.html#configuration-and-hyperparameters cfg = CEM_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1000 cfg["learning_starts"] = 100 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1000 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/jax/CartPole" agent = CEM(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
3,045
Python
31.404255
107
0.716585
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_trpo_rnn.py
import gym import numpy as np import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.trpo import TRPO_DEFAULT_CONFIG from skrl.agents.torch.trpo import TRPO_RNN as TRPO from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(rnn_output)), self.log_std_parameter, {"rnn": [hidden_states]} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, 
num_envs=1, num_layers=1, hidden_size=64, sequence_length=128): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 1)) def get_specification(self): # batch size (N) is the number of envs return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), {"rnn": [hidden_states]} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# TRPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True, num_envs=env.num_envs) models["value"] = Value(env.observation_space, env.action_space, device, num_envs=env.num_envs) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#configuration-and-hyperparameters cfg = TRPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.9 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["grad_norm_clip"] = 0.5 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = TRPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
9,716
Python
44.406542
146
0.620729
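Note on the entry above: the RNN models advertise their hidden-state layout through get_specification(). The short sketch below is not part of the dataset row; it only illustrates, under the same (num_layers, num_envs, hidden_size) convention, how a zero initial state is allocated and carried through a rollout step with a plain torch.nn.RNN.

import torch
import torch.nn as nn

num_envs, num_layers, hidden_size, num_observations = 4, 1, 64, 3

# same layout as the specification above: (D * num_layers, N, Hout), with D=1 (unidirectional)
hidden_states = torch.zeros(num_layers, num_envs, hidden_size)

rnn = nn.RNN(input_size=num_observations, hidden_size=hidden_size,
             num_layers=num_layers, batch_first=True)

# rollout step: one observation per environment -> sequence length L=1
states = torch.randn(num_envs, num_observations)
rnn_input = states.view(-1, 1, states.shape[-1])           # (N, L=1, Hin)
rnn_output, hidden_states = rnn(rnn_input, hidden_states)  # (N, 1, Hout), (num_layers, N, Hout)
print(rnn_output.shape, hidden_states.shape)               # torch.Size([4, 1, 64]) torch.Size([1, 4, 64])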
Toni-SM/skrl/docs/source/examples/gym/jax_gym_cartpole_dqn.py
import gym # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.dqn import DQN, DQN_DEFAULT_CONFIG from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed from skrl.utils.model_instantiators.jax import Shape, deterministic_model config.jax.backend = "numpy" # or "jax" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # load and wrap the gym environment. # note: the environment version may change depending on the gym version try: env = gym.make("CartPole-v0") except gym.error.DeprecatedEnv as e: env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("CartPole-v")][0] print("CartPole-v0 not found. Trying {}".format(env_id)) env = gym.make(env_id) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=50000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators) using the model instantiator utility. # DQN requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#models models = {} models["q_network"] = deterministic_model(observation_space=env.observation_space, action_space=env.action_space, device=device, clip_actions=False, input_shape=Shape.OBSERVATIONS, hiddens=[64, 64], hidden_activation=["relu", "relu"], output_shape=Shape.ACTIONS, output_activation=None, output_scale=1.0) models["target_q_network"] = deterministic_model(observation_space=env.observation_space, action_space=env.action_space, device=device, clip_actions=False, input_shape=Shape.OBSERVATIONS, hiddens=[64, 64], hidden_activation=["relu", "relu"], output_shape=Shape.ACTIONS, output_activation=None, output_scale=1.0) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal", stddev=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#configuration-and-hyperparameters cfg = DQN_DEFAULT_CONFIG.copy() cfg["learning_starts"] = 100 cfg["exploration"]["final_epsilon"] = 0.04 cfg["exploration"]["timesteps"] = 1500 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1000 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/jax/CartPole" agent = DQN(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 50000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
3,921
Python
39.854166
97
0.58888
Toni-SM/skrl/docs/source/examples/gym/torch_gym_pendulumnovel_trpo.py
import gym import numpy as np import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): # Pendulum-v1 action_space is -2 to 2 return 2 * torch.tanh(self.net(inputs["states"])), self.log_std_parameter, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # environment observation wrapper used to mask velocity. Adapted from rl_zoo3 (rl_zoo3/wrappers.py) class NoVelocityWrapper(gym.ObservationWrapper): def observation(self, observation): # observation: x, y, angular velocity return observation * np.array([1, 1, 0]) gym.envs.registration.register(id="PendulumNoVel-v1", entry_point=lambda: NoVelocityWrapper(gym.make("Pendulum-v1"))) # load and wrap the gym environment env = gym.vector.make("PendulumNoVel-v1", num_envs=4, asynchronous=False) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# TRPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True) models["value"] = Value(env.observation_space, env.action_space, device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/trpo.html#configuration-and-hyperparameters cfg = TRPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1024 # memory_size cfg["learning_epochs"] = 10 cfg["mini_batches"] = 32 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["grad_norm_clip"] = 0.5 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 500 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/PendulumNoVel" agent = TRPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
4,514
Python
38.605263
117
0.677448
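For reference, the NoVelocityWrapper in the entry above simply zeroes the third component of Pendulum's observation (the angular velocity), which is what makes the "PendulumNoVel" variants partially observable and motivates the RNN-based examples in this same folder. A minimal stand-alone illustration of the masking:

import numpy as np

# Pendulum-v1 observation: [cos(theta), sin(theta), angular velocity]
observation = np.array([0.5, -0.86, 7.3])
masked = observation * np.array([1, 1, 0])  # velocity component zeroed -> partially observable task
print(masked)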
Toni-SM/skrl/docs/source/examples/gym/torch_gym_taxi_sarsa.py
import gym

import torch

# import the skrl components to build the RL system
from skrl.agents.torch.sarsa import SARSA, SARSA_DEFAULT_CONFIG
from skrl.envs.wrappers.torch import wrap_env
from skrl.models.torch import Model, TabularMixin
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define model (tabular model) using mixin
class EpsilonGreedyPolicy(TabularMixin, Model):
    def __init__(self, observation_space, action_space, device, num_envs=1, epsilon=0.1):
        Model.__init__(self, observation_space, action_space, device)
        TabularMixin.__init__(self, num_envs)

        self.epsilon = epsilon
        self.q_table = torch.ones((num_envs, self.num_observations, self.num_actions),
                                  dtype=torch.float32, device=self.device)

    def compute(self, inputs, role):
        actions = torch.argmax(self.q_table[torch.arange(self.num_envs).view(-1, 1), inputs["states"]],
                               dim=-1, keepdim=True).view(-1,1)

        # choose random actions for exploration according to epsilon
        indexes = (torch.rand(inputs["states"].shape[0], device=self.device) < self.epsilon).nonzero().view(-1)
        if indexes.numel():
            actions[indexes] = torch.randint(self.num_actions, (indexes.numel(), 1), device=self.device)
        return actions, {}


# load and wrap the gym environment.
# note: the environment version may change depending on the gym version
try:
    env = gym.make("Taxi-v3")
except gym.error.DeprecatedEnv as e:
    env_id = [spec.id for spec in gym.envs.registry.all() if spec.id.startswith("Taxi-v")][0]
    print("Taxi-v3 not found. Trying {}".format(env_id))
    env = gym.make(env_id)
env = wrap_env(env)

device = env.device


# instantiate the agent's model (table)
# SARSA requires 1 model, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/sarsa.html#models
models = {}
models["policy"] = EpsilonGreedyPolicy(env.observation_space, env.action_space, device, num_envs=env.num_envs, epsilon=0.1)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/sarsa.html#configuration-and-hyperparameters
cfg = SARSA_DEFAULT_CONFIG.copy()
cfg["discount_factor"] = 0.999
cfg["alpha"] = 0.4
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 1600
cfg["experiment"]["checkpoint_interval"] = 8000
cfg["experiment"]["directory"] = "runs/torch/Taxi"

agent = SARSA(models=models,
              memory=None,
              cfg=cfg,
              observation_space=env.observation_space,
              action_space=env.action_space,
              device=device)


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 80000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent])

# start training
trainer.train()
3,037
Python
36.04878
122
0.690155
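As a side note to the tabular example above, the epsilon-greedy selection can be exercised in isolation. The sketch below uses a made-up single-environment Q-table (sizes chosen to match Taxi-v3, purely for illustration) and repeats the same argmax-plus-random-override logic:

import torch

num_envs, num_states, num_actions, epsilon = 1, 500, 6, 0.1
q_table = torch.ones(num_envs, num_states, num_actions)
states = torch.tensor([[123]])  # one discrete state per environment

# greedy actions from the table
actions = torch.argmax(q_table[torch.arange(num_envs).view(-1, 1), states], dim=-1, keepdim=True).view(-1, 1)

# with probability epsilon, replace the greedy action with a random one
indexes = (torch.rand(states.shape[0]) < epsilon).nonzero().view(-1)
if indexes.numel():
    actions[indexes] = torch.randint(num_actions, (indexes.numel(), 1))
print(actions)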
Toni-SM/skrl/docs/source/examples/bidexhands/torch_bidexhands_shadow_hand_over_mappo.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.envs.loaders.torch import load_bidexhands_env from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.multi_agents.torch.mappo import MAPPO, MAPPO_DEFAULT_CONFIG from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ELU(), nn.Linear(512, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # load and wrap the environment env = load_bidexhands_env(task_name="ShadowHandOver") env = wrap_env(env, wrapper="bidexhands") device = env.device # instantiate memories as rollout buffer (any memory can be used for this) memories = {} for agent_name in env.possible_agents: memories[agent_name] = RandomMemory(memory_size=24, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# MAPPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/multi_agents/mappo.html#models models = {} for agent_name in env.possible_agents: models[agent_name] = {} models[agent_name]["policy"] = Policy(env.observation_space(agent_name), env.action_space(agent_name), device) models[agent_name]["value"] = Value(env.shared_observation_space(agent_name), env.action_space(agent_name), device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/multi_agents/mappo.html#configuration-and-hyperparameters cfg = MAPPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 24 # memory_size cfg["learning_epochs"] = 5 cfg["mini_batches"] = 6 # 24 * 4096 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 3e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.001 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": next(iter(env.observation_spaces.values())), "device": device} cfg["shared_state_preprocessor"] = RunningStandardScaler cfg["shared_state_preprocessor_kwargs"] = { "size": next(iter(env.shared_observation_spaces.values())), "device": device } cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 180 cfg["experiment"]["checkpoint_interval"] = 1800 cfg["experiment"]["directory"] = "runs/torch/ShadowHandOver" agent = MAPPO(possible_agents=env.possible_agents, models=models, memories=memories, cfg=cfg, observation_spaces=env.observation_spaces, action_spaces=env.action_spaces, device=device, shared_observation_spaces=env.shared_observation_spaces) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 36000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
5,266
Python
39.515384
119
0.661983
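The `mini_batches = 6  # 24 * 4096 / 16384` comment in the configuration above encodes a common sizing rule: rollout length times number of environments divided by the desired mini-batch size. The helper below is hypothetical (not part of skrl) and only spells out that arithmetic, assuming 4096 parallel environments as the comment implies.

def mini_batches_for(rollouts: int, num_envs: int, batch_size: int) -> int:
    """Number of mini-batches so that each one holds roughly `batch_size` samples."""
    total_samples = rollouts * num_envs
    return max(1, total_samples // batch_size)

print(mini_batches_for(rollouts=24, num_envs=4096, batch_size=16384))  # 6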
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ingenuity_ppo.py
import isaacgym import isaacgymenvs import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility seed = set_seed() # e.g. `set_seed(42)` for fixed seed # define shared model (stochastic and deterministic models) using mixins class Shared(GaussianMixin, DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 256), nn.ELU(), nn.Linear(256, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU()) self.mean_layer = nn.Linear(128, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) self.value_layer = nn.Linear(128, 1) def act(self, inputs, role): if role == "policy": return GaussianMixin.act(self, inputs, role) elif role == "value": return DeterministicMixin.act(self, inputs, role) def compute(self, inputs, role): if role == "policy": return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {} elif role == "value": return self.value_layer(self.net(inputs["states"])), {} # load and wrap the Isaac Gym environment using the easy-to-use API from NVIDIA env = isaacgymenvs.make(seed=seed, task="Ingenuity", num_envs=4096, sim_device="cuda:0", rl_device="cuda:0", graphics_device_id=0, headless=True) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Shared(env.observation_space, env.action_space, device) models["value"] = models["policy"] # same instance: shared model # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 16 # memory_size cfg["learning_epochs"] = 8 cfg["mini_batches"] = 4 # 16 * 4096 / 16384 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.016} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 40 cfg["experiment"]["checkpoint_interval"] = 400 cfg["experiment"]["directory"] = "runs/torch/Ingenuity" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 8000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train() # # --------------------------------------------------------- # # comment the code above: `trainer.train()`, and... # # uncomment the following lines to evaluate a trained agent # # --------------------------------------------------------- # from skrl.utils.huggingface import download_model_from_huggingface # # download the trained agent's checkpoint from Hugging Face Hub and load it # path = download_model_from_huggingface("skrl/IsaacGymEnvs-Ingenuity-PPO", filename="agent.pt") # agent.load(path) # # start evaluation # trainer.eval()
5,281
Python
37.275362
101
0.65196
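The Shared class in the entry above serves both the policy and the value function by dispatching on the role argument. The usage sketch below is not part of the dataset row: the spaces and shapes are hypothetical, and it assumes the Shared class definition above is in scope; it only shows how the two code paths are reached.

import gym
import torch

# hypothetical spaces, only to exercise the role dispatch
observation_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(7,))
action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(3,))
model = Shared(observation_space, action_space, device="cpu")

states = torch.randn(2, 7)
actions, log_prob, _ = model.act({"states": states}, role="policy")  # GaussianMixin path
values, _, _ = model.act({"states": states}, role="value")           # DeterministicMixin path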
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ant_ddpg_td3_sac_parallel_unshared_memory.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.noises.torch import GaussianNoise, OrnsteinUhlenbeckNoise from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import ParallelTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class StochasticActor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-5, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class DeterministicActor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) def compute(self, inputs, role): return self.net(inputs["states"]), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} if __name__ == '__main__': # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=192) env = wrap_env(env) device = env.device # instantiate memories as experience replay (unique for each agents). # scopes (192 envs): DDPG 64, TD3 64 and SAC 64 memory_ddpg = RandomMemory(memory_size=15625, num_envs=64, device=device) memory_td3 = RandomMemory(memory_size=15625, num_envs=64, device=device) memory_sac = RandomMemory(memory_size=15625, num_envs=64, device=device) # instantiate the agents' models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models_ddpg = {} models_ddpg["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_ddpg["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_ddpg["critic"] = Critic(env.observation_space, env.action_space, device) models_ddpg["target_critic"] = Critic(env.observation_space, env.action_space, device) # TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models_td3 = {} models_td3["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_td3["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_td3["critic_1"] = Critic(env.observation_space, env.action_space, device) models_td3["critic_2"] = Critic(env.observation_space, env.action_space, device) models_td3["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models_td3["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models_sac = {} models_sac["policy"] = StochasticActor(env.observation_space, env.action_space, device, clip_actions=True) models_sac["critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["critic_2"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # configure and instantiate the agents (visit their documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg_ddpg = DDPG_DEFAULT_CONFIG.copy() cfg_ddpg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=0.5, device=device) cfg_ddpg["gradient_steps"] = 1 cfg_ddpg["batch_size"] = 4096 cfg_ddpg["discount_factor"] = 0.99 cfg_ddpg["polyak"] = 0.005 cfg_ddpg["actor_learning_rate"] = 5e-4 cfg_ddpg["critic_learning_rate"] = 5e-4 cfg_ddpg["random_timesteps"] = 80 cfg_ddpg["learning_starts"] = 80 cfg_ddpg["state_preprocessor"] = RunningStandardScaler cfg_ddpg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg_ddpg["experiment"]["write_interval"] = 800 cfg_ddpg["experiment"]["checkpoint_interval"] = 8000 cfg_ddpg["experiment"]["directory"] = "runs/torch/Ant" # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg_td3 = TD3_DEFAULT_CONFIG.copy() cfg_td3["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg_td3["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device) cfg_td3["smooth_regularization_clip"] = 0.5 cfg_td3["gradient_steps"] = 1 cfg_td3["batch_size"] = 4096 cfg_td3["discount_factor"] = 0.99 cfg_td3["polyak"] = 0.005 cfg_td3["actor_learning_rate"] = 5e-4 cfg_td3["critic_learning_rate"] = 5e-4 cfg_td3["random_timesteps"] = 80 cfg_td3["learning_starts"] = 80 cfg_td3["state_preprocessor"] = RunningStandardScaler cfg_td3["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) 
cfg_td3["experiment"]["write_interval"] = 800 cfg_td3["experiment"]["checkpoint_interval"] = 8000 cfg_td3["experiment"]["directory"] = "runs/torch/Ant" # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg_sac = SAC_DEFAULT_CONFIG.copy() cfg_sac["gradient_steps"] = 1 cfg_sac["batch_size"] = 4096 cfg_sac["discount_factor"] = 0.99 cfg_sac["polyak"] = 0.005 cfg_sac["actor_learning_rate"] = 5e-4 cfg_sac["critic_learning_rate"] = 5e-4 cfg_sac["random_timesteps"] = 80 cfg_sac["learning_starts"] = 80 cfg_sac["grad_norm_clip"] = 0 cfg_sac["learn_entropy"] = True cfg_sac["entropy_learning_rate"] = 5e-3 cfg_sac["initial_entropy_value"] = 1.0 cfg_sac["state_preprocessor"] = RunningStandardScaler cfg_sac["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg_sac["experiment"]["write_interval"] = 800 cfg_sac["experiment"]["checkpoint_interval"] = 8000 cfg_sac["experiment"]["directory"] = "runs/torch/Ant" agent_ddpg = DDPG(models=models_ddpg, memory=memory_ddpg, cfg=cfg_ddpg, observation_space=env.observation_space, action_space=env.action_space, device=device) agent_td3 = TD3(models=models_td3, memory=memory_td3, cfg=cfg_td3, observation_space=env.observation_space, action_space=env.action_space, device=device) agent_sac = SAC(models=models_sac, memory=memory_sac, cfg=cfg_sac, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer and define the agent scopes cfg_trainer = {"timesteps": 160000, "headless": True} trainer = ParallelTrainer(cfg=cfg_trainer, env=env, agents=[agent_ddpg, agent_td3, agent_sac], agents_scope=[64, 64, 64]) # scopes (192 envs): DDPG 64, TD3 64 and SAC 64 # start training trainer.train()
9,851
Python
46.365384
115
0.643589
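The ParallelTrainer above assigns the 192 environments to the three agents with agents_scope=[64, 64, 64]; the trainer performs the equivalent bookkeeping internally. The snippet below is only an illustration of that partition, turning the scope list into per-agent index ranges.

agents_scope = [64, 64, 64]  # DDPG, TD3, SAC (192 environments in total)

# cumulative index ranges: which environment slice each agent sees
ranges, start = [], 0
for scope in agents_scope:
    ranges.append((start, start + scope))
    start += scope
print(ranges)  # [(0, 64), (64, 128), (128, 192)]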
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ant_td3.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.resources.noises.torch import GaussianNoise from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (deterministic models) using mixins class DeterministicActor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) def compute(self, inputs, role): return self.net(inputs["states"]), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=64) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models = {} models["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg = TD3_DEFAULT_CONFIG.copy() cfg["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg["smooth_regularization_noise"] = GaussianNoise(0, 0.1, device=device) cfg["smooth_regularization_clip"] = 0.5 cfg["gradient_steps"] = 1 cfg["batch_size"] = 4096 cfg["discount_factor"] = 0.99 cfg["polyak"] = 0.005 cfg["actor_learning_rate"] = 5e-4 cfg["critic_learning_rate"] = 5e-4 cfg["random_timesteps"] = 80 cfg["learning_starts"] = 80 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 800 cfg["experiment"]["checkpoint_interval"] = 8000 cfg["experiment"]["directory"] = "runs/torch/Ant" agent = TD3(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 160000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
4,396
Python
38.612612
93
0.681984
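The smooth_regularization_* options above correspond to TD3's target-policy smoothing: clipped noise is added to the target actions before the target critics are evaluated. The sketch below is a generic, stand-alone illustration of that idea in plain PyTorch (placeholder tensors, not skrl's internal code).

import torch

smooth_regularization_clip = 0.5
target_actions = torch.rand(4096, 8) * 2 - 1               # placeholder batch of target-policy actions in [-1, 1]

noise = torch.randn_like(target_actions) * 0.1              # Gaussian noise with std 0.1, as configured above
noise = noise.clamp(-smooth_regularization_clip, smooth_regularization_clip)
smoothed_actions = (target_actions + noise).clamp(-1, 1)    # keep actions inside the action-space bounds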
Toni-SM/skrl/docs/source/examples/isaacgym/jax_ant_sac.py
import isaacgym import flax.linen as nn import jax import jax.numpy as jnp # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.sac import SAC, SAC_DEFAULT_CONFIG from skrl.envs.loaders.jax import load_isaacgym_env_preview4 from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class StochasticActor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-5, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.relu(nn.Dense(512)(inputs["states"])) x = nn.relu(nn.Dense(256)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return nn.tanh(x), log_std, {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = jnp.concatenate([inputs["states"], inputs["taken_actions"]], axis=-1) x = nn.relu(nn.Dense(512)(x)) x = nn.relu(nn.Dense(256)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=64) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models = {} models["policy"] = StochasticActor(env.observation_space, env.action_space, device, clip_actions=True) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg = SAC_DEFAULT_CONFIG.copy() cfg["gradient_steps"] = 1 cfg["batch_size"] = 4096 cfg["discount_factor"] = 0.99 cfg["polyak"] = 0.005 cfg["actor_learning_rate"] = 5e-4 cfg["critic_learning_rate"] = 5e-4 cfg["random_timesteps"] = 80 cfg["learning_starts"] = 80 cfg["grad_norm_clip"] = 0 cfg["learn_entropy"] = True cfg["entropy_learning_rate"] = 5e-3 cfg["initial_entropy_value"] = 1.0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 800 cfg["experiment"]["checkpoint_interval"] = 8000 cfg["experiment"]["directory"] = "runs/jax/Ant" agent = SAC(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 160000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
4,450
Python
37.042735
102
0.709213
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ant_sac.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class StochasticActor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-5, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=64) env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models = {} models["policy"] = StochasticActor(env.observation_space, env.action_space, device, clip_actions=True) models["critic_1"] = Critic(env.observation_space, env.action_space, device) models["critic_2"] = Critic(env.observation_space, env.action_space, device) models["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg = SAC_DEFAULT_CONFIG.copy() cfg["gradient_steps"] = 1 cfg["batch_size"] = 4096 cfg["discount_factor"] = 0.99 cfg["polyak"] = 0.005 cfg["actor_learning_rate"] = 5e-4 cfg["critic_learning_rate"] = 5e-4 cfg["random_timesteps"] = 80 cfg["learning_starts"] = 80 cfg["grad_norm_clip"] = 0 cfg["learn_entropy"] = True cfg["entropy_learning_rate"] = 5e-3 cfg["initial_entropy_value"] = 1.0 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 800 cfg["experiment"]["checkpoint_interval"] = 8000 cfg["experiment"]["directory"] = "runs/torch/Ant" agent = SAC(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 160000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
4,433
Python
38.589285
102
0.674036
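With learn_entropy enabled as above, SAC adapts its entropy coefficient toward a target entropy (commonly the negative action dimension). The sketch below shows the usual automatic entropy-tuning update in isolation; it is a generic illustration with placeholder tensors, not skrl's exact implementation.

import torch

num_actions = 8
target_entropy = -num_actions                           # common heuristic: -|A|
log_alpha = torch.zeros(1, requires_grad=True)          # log(initial_entropy_value = 1.0)
optimizer = torch.optim.Adam([log_alpha], lr=5e-3)      # entropy_learning_rate above

log_prob = torch.full((4096, 1), -3.0)                  # placeholder: log-probabilities of sampled actions

# push alpha so that the policy's average entropy approaches target_entropy
alpha_loss = -(log_alpha * (log_prob + target_entropy).detach()).mean()
optimizer.zero_grad()
alpha_loss.backward()
optimizer.step()
alpha = log_alpha.exp().item()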
Toni-SM/skrl/docs/source/examples/isaacgym/torch_ant_ddpg_td3_sac_sequential_shared_memory.py
import isaacgym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG from skrl.envs.loaders.torch import load_isaacgym_env_preview4 from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.resources.noises.torch import GaussianNoise, OrnsteinUhlenbeckNoise from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class StochasticActor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-5, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class DeterministicActor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, self.num_actions), nn.Tanh()) def compute(self, inputs, role): return self.net(inputs["states"]), {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 512), nn.ReLU(), nn.Linear(512, 256), nn.ReLU(), nn.Linear(256, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} # load and wrap the Isaac Gym environment env = load_isaacgym_env_preview4(task_name="Ant", num_envs=64) env = wrap_env(env) device = env.device # instantiate a memory as experience replay (unique to all agents) memory = RandomMemory(memory_size=15625, num_envs=env.num_envs, device=device) # instantiate the agents' models (function approximators). 
# DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#models models_ddpg = {} models_ddpg["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_ddpg["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_ddpg["critic"] = Critic(env.observation_space, env.action_space, device) models_ddpg["target_critic"] = Critic(env.observation_space, env.action_space, device) # TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#models models_td3 = {} models_td3["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_td3["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models_td3["critic_1"] = Critic(env.observation_space, env.action_space, device) models_td3["critic_2"] = Critic(env.observation_space, env.action_space, device) models_td3["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models_td3["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#models models_sac = {} models_sac["policy"] = StochasticActor(env.observation_space, env.action_space, device, clip_actions=True) models_sac["critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["critic_2"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # configure and instantiate the agents (visit their documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ddpg.html#configuration-and-hyperparameters cfg_ddpg = DDPG_DEFAULT_CONFIG.copy() cfg_ddpg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=0.5, device=device) cfg_ddpg["gradient_steps"] = 1 cfg_ddpg["batch_size"] = 4096 cfg_ddpg["discount_factor"] = 0.99 cfg_ddpg["polyak"] = 0.005 cfg_ddpg["actor_learning_rate"] = 5e-4 cfg_ddpg["critic_learning_rate"] = 5e-4 cfg_ddpg["random_timesteps"] = 80 cfg_ddpg["learning_starts"] = 80 cfg_ddpg["state_preprocessor"] = RunningStandardScaler cfg_ddpg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg_ddpg["experiment"]["write_interval"] = 800 cfg_ddpg["experiment"]["checkpoint_interval"] = 8000 cfg_ddpg["experiment"]["directory"] = "runs/torch/Ant" # https://skrl.readthedocs.io/en/latest/api/agents/td3.html#configuration-and-hyperparameters cfg_td3 = TD3_DEFAULT_CONFIG.copy() cfg_td3["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg_td3["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device) cfg_td3["smooth_regularization_clip"] = 0.5 cfg_td3["gradient_steps"] = 1 cfg_td3["batch_size"] = 4096 cfg_td3["discount_factor"] = 0.99 cfg_td3["polyak"] = 0.005 cfg_td3["actor_learning_rate"] = 5e-4 cfg_td3["critic_learning_rate"] = 5e-4 cfg_td3["random_timesteps"] = 80 cfg_td3["learning_starts"] = 80 cfg_td3["state_preprocessor"] = RunningStandardScaler cfg_td3["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) 
cfg_td3["experiment"]["write_interval"] = 800 cfg_td3["experiment"]["checkpoint_interval"] = 8000 cfg_td3["experiment"]["directory"] = "runs/torch/Ant" # https://skrl.readthedocs.io/en/latest/api/agents/sac.html#configuration-and-hyperparameters cfg_sac = SAC_DEFAULT_CONFIG.copy() cfg_sac["gradient_steps"] = 1 cfg_sac["batch_size"] = 4096 cfg_sac["discount_factor"] = 0.99 cfg_sac["polyak"] = 0.005 cfg_sac["actor_learning_rate"] = 5e-4 cfg_sac["critic_learning_rate"] = 5e-4 cfg_sac["random_timesteps"] = 80 cfg_sac["learning_starts"] = 80 cfg_sac["grad_norm_clip"] = 0 cfg_sac["learn_entropy"] = True cfg_sac["entropy_learning_rate"] = 5e-3 cfg_sac["initial_entropy_value"] = 1.0 cfg_sac["state_preprocessor"] = RunningStandardScaler cfg_sac["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg_sac["experiment"]["write_interval"] = 800 cfg_sac["experiment"]["checkpoint_interval"] = 8000 cfg_sac["experiment"]["directory"] = "runs/torch/Ant" agent_ddpg = DDPG(models=models_ddpg, memory=memory, # shared memory cfg=cfg_ddpg, observation_space=env.observation_space, action_space=env.action_space, device=device) agent_td3 = TD3(models=models_td3, memory=memory, # shared memory cfg=cfg_td3, observation_space=env.observation_space, action_space=env.action_space, device=device) agent_sac = SAC(models=models_sac, memory=memory, # shared memory cfg=cfg_sac, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 160000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent_ddpg, agent_td3, agent_sac], agents_scope=[]) # start training trainer.train()
9,123
Python
43.945813
111
0.674449
Toni-SM/skrl/docs/source/examples/isaacgym/torch_humanoid_amp.py
import isaacgym

import torch
import torch.nn as nn

# import the skrl components to build the RL system
from skrl.agents.torch.amp import AMP, AMP_DEFAULT_CONFIG
from skrl.envs.loaders.torch import load_isaacgym_env_preview4
from skrl.envs.wrappers.torch import wrap_env
from skrl.memories.torch import RandomMemory
from skrl.models.torch import DeterministicMixin, GaussianMixin, Model
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.utils import set_seed


# seed for reproducibility
set_seed()  # e.g. `set_seed(42)` for fixed seed


# define models (stochastic and deterministic models) using mixins
# - Policy: takes as input the environment's observation/state and returns an action
# - Value: takes the state as input and provides a value to guide the policy
# - Discriminator: differentiate between policy-generated behaviors and behaviors from the motion dataset
class Policy(GaussianMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 1024),
                                 nn.ReLU(),
                                 nn.Linear(1024, 512),
                                 nn.ReLU(),
                                 nn.Linear(512, self.num_actions))

        # set a fixed log standard deviation for the policy
        self.log_std_parameter = nn.Parameter(torch.full((self.num_actions,), fill_value=-2.9), requires_grad=False)

    def compute(self, inputs, role):
        return torch.tanh(self.net(inputs["states"])), self.log_std_parameter, {}

class Value(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 1024),
                                 nn.ReLU(),
                                 nn.Linear(1024, 512),
                                 nn.ReLU(),
                                 nn.Linear(512, 1))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}

class Discriminator(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 1024),
                                 nn.ReLU(),
                                 nn.Linear(1024, 512),
                                 nn.ReLU(),
                                 nn.Linear(512, 1))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}


# load and wrap the Isaac Gym environment
env = load_isaacgym_env_preview4(task_name="HumanoidAMP")
env = wrap_env(env)

device = env.device


# instantiate a memory as rollout buffer (any memory can be used for this)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device)


# instantiate the agent's models (function approximators).
# AMP requires 3 models, visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/api/agents/amp.html#models
models = {}
models["policy"] = Policy(env.observation_space, env.action_space, device)
models["value"] = Value(env.observation_space, env.action_space, device)
models["discriminator"] = Discriminator(env.amp_observation_space, env.action_space, device)


# configure and instantiate the agent (visit its documentation to see all the options)
# https://skrl.readthedocs.io/en/latest/api/agents/amp.html#configuration-and-hyperparameters
cfg = AMP_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 16  # memory_size
cfg["learning_epochs"] = 6
cfg["mini_batches"] = 2  # 16 * 4096 / 32768
cfg["discount_factor"] = 0.99
cfg["lambda"] = 0.95
cfg["learning_rate"] = 5e-5
cfg["random_timesteps"] = 0
cfg["learning_starts"] = 0
cfg["grad_norm_clip"] = 0
cfg["ratio_clip"] = 0.2
cfg["value_clip"] = 0.2
cfg["clip_predicted_values"] = False
cfg["entropy_loss_scale"] = 0.0
cfg["value_loss_scale"] = 2.5
cfg["discriminator_loss_scale"] = 5.0
cfg["amp_batch_size"] = 512
cfg["task_reward_weight"] = 0.0
cfg["style_reward_weight"] = 1.0
cfg["discriminator_batch_size"] = 4096
cfg["discriminator_reward_scale"] = 2
cfg["discriminator_logit_regularization_scale"] = 0.05
cfg["discriminator_gradient_penalty_scale"] = 5
cfg["discriminator_weight_decay_scale"] = 0.0001
cfg["state_preprocessor"] = RunningStandardScaler
cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
cfg["value_preprocessor"] = RunningStandardScaler
cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device}
cfg["amp_state_preprocessor"] = RunningStandardScaler
cfg["amp_state_preprocessor_kwargs"] = {"size": env.amp_observation_space, "device": device}
# logging to TensorBoard and write checkpoints (in timesteps)
cfg["experiment"]["write_interval"] = 160
cfg["experiment"]["checkpoint_interval"] = 4000
cfg["experiment"]["directory"] = "runs/torch/HumanoidAMP"

agent = AMP(models=models,
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device,
            amp_observation_space=env.amp_observation_space,
            motion_dataset=RandomMemory(memory_size=200000, device=device),
            reply_buffer=RandomMemory(memory_size=1000000, device=device),
            collect_reference_motions=lambda num_samples: env.fetch_amp_obs_demo(num_samples),
            collect_observation=lambda: env.reset_done()[0]["obs"])


# configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 80000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)

# start training
trainer.train()
6,218
Python
41.02027
116
0.670794
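The AMP example above sets cfg["rollouts"], cfg["mini_batches"] and the comment `# 16 * 4096 / 32768` without showing where those numbers come from. A quick sanity check of that arithmetic, assuming the HumanoidAMP task runs 4096 parallel environments (a value implied by the comment, not set in the script itself):

# relation between rollouts, parallel environments and mini-batches (illustrative only)
rollouts = 16          # cfg["rollouts"] above
num_envs = 4096        # assumed from the "16 * 4096 / 32768" comment
target_batch = 32768   # desired samples per mini-batch (assumption)

samples_per_update = rollouts * num_envs           # 65536 transitions collected per update
mini_batches = samples_per_update // target_batch  # 2, the value used in cfg["mini_batches"]
print(samples_per_update, mini_batches)            # 65536 2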
Toni-SM/skrl/docs/source/examples/deepmind/dm_suite_cartpole_swingup_ddpg.py
from dm_control import suite import torch import torch.nn as nn import torch.nn.functional as F # Import the skrl components to build the RL system from skrl.models.torch import Model, DeterministicMixin from skrl.memories.torch import RandomMemory from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise from skrl.trainers.torch import SequentialTrainer from skrl.envs.torch import wrap_env # Define the models (deterministic models) for the DDPG agent using mixins # and programming with two approaches (torch functional and torch.nn.Sequential class). # - Actor (policy): takes as input the environment's observation/state and returns an action # - Critic: takes the state and action as input and provides a value to guide the policy class DeterministicActor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.linear_layer_1 = nn.Linear(self.num_observations, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(inputs["states"])) x = F.relu(self.linear_layer_2(x)) return torch.tanh(self.action_layer(x)), {} class DeterministicCritic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 400), nn.ReLU(), nn.Linear(400, 300), nn.ReLU(), nn.Linear(300, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} # Load and wrap the DeepMind environment env = suite.load(domain_name="cartpole", task_name="swingup") env = wrap_env(env) device = env.device # Instantiate a RandomMemory (without replacement) as experience replay memory memory = RandomMemory(memory_size=25000, num_envs=env.num_envs, device=device, replacement=False) # Instantiate the agent's models (function approximators). # DDPG requires 4 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#spaces-and-models models_ddpg = {} models_ddpg["policy"] = DeterministicActor(env.observation_space, env.action_space, device, clip_actions=True) models_ddpg["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device, clip_actions=True) models_ddpg["critic"] = DeterministicCritic(env.observation_space, env.action_space, device) models_ddpg["target_critic"] = DeterministicCritic(env.observation_space, env.action_space, device) # Initialize the models' parameters (weights and biases) using a Gaussian distribution for model in models_ddpg.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # Configure and instantiate the agent. 
# Only modify some of the default configuration, visit its documentation to see all the options # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ddpg.html#configuration-and-hyperparameters cfg_ddpg = DDPG_DEFAULT_CONFIG.copy() cfg_ddpg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.1, base_scale=1.0, device=device) cfg_ddpg["batch_size"] = 100 cfg_ddpg["random_timesteps"] = 100 cfg_ddpg["learning_starts"] = 100 # logging to TensorBoard and write checkpoints each 1000 and 5000 timesteps respectively cfg_ddpg["experiment"]["write_interval"] = 1000 cfg_ddpg["experiment"]["checkpoint_interval"] = 5000 agent_ddpg = DDPG(models=models_ddpg, memory=memory, cfg=cfg_ddpg, observation_space=env.observation_space, action_space=env.action_space, device=device) # Configure and instantiate the RL trainer cfg_trainer = {"timesteps": 50000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent_ddpg) # start training trainer.train()
4,425
Python
43.26
117
0.712542
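The exploration noise configured above is an Ornstein-Uhlenbeck process with theta=0.15 and sigma=0.1. As a rough illustration of the dynamics behind such temporally correlated noise (a minimal sketch with a unit time step, not skrl's OrnsteinUhlenbeckNoise class):

import torch

# Ornstein-Uhlenbeck sketch: x_{t+1} = x_t + theta * (mu - x_t) + sigma * N(0, 1)
def ou_step(x, theta=0.15, sigma=0.1, mu=0.0):
    return x + theta * (mu - x) + sigma * torch.randn_like(x)

state = torch.zeros(3)        # one noise value per action dimension (illustrative size)
for _ in range(5):
    state = ou_step(state)    # correlated noise that would be added to the actor's actions
    print(state)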
Toni-SM/skrl/docs/source/examples/deepmind/dm_manipulation_stack_sac.py
from dm_control import manipulation import torch import torch.nn as nn # Import the skrl components to build the RL system from skrl.models.torch import Model, GaussianMixin, DeterministicMixin from skrl.memories.torch import RandomMemory from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG from skrl.trainers.torch import SequentialTrainer from skrl.envs.torch import wrap_env # Define the models (stochastic and deterministic models) for the SAC agent using the mixins. # - StochasticActor (policy): takes as input the environment's observation/state and returns an action # - Critic: takes the state and action as input and provides a value to guide the policy class StochasticActor(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.features_extractor = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=3), nn.ReLU(), nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(64, 64, kernel_size=2, stride=1), nn.ReLU(), nn.Flatten(), nn.Linear(7744, 512), nn.ReLU(), nn.Linear(512, 8), nn.Tanh()) self.net = nn.Sequential(nn.Linear(26, 32), nn.ReLU(), nn.Linear(32, 32), nn.ReLU(), nn.Linear(32, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): states = inputs["states"] # The dm_control.manipulation tasks have as observation/state spec a `collections.OrderedDict` object as follows: # OrderedDict([('front_close', BoundedArray(shape=(1, 84, 84, 3), dtype=dtype('uint8'), name='front_close', minimum=0, maximum=255)), # ('jaco_arm/joints_pos', Array(shape=(1, 6, 2), dtype=dtype('float64'), name='jaco_arm/joints_pos')), # ('jaco_arm/joints_torque', Array(shape=(1, 6), dtype=dtype('float64'), name='jaco_arm/joints_torque')), # ('jaco_arm/joints_vel', Array(shape=(1, 6), dtype=dtype('float64'), name='jaco_arm/joints_vel')), # ('jaco_arm/jaco_hand/joints_pos', Array(shape=(1, 3), dtype=dtype('float64'), name='jaco_arm/jaco_hand/joints_pos')), # ('jaco_arm/jaco_hand/joints_vel', Array(shape=(1, 3), dtype=dtype('float64'), name='jaco_arm/jaco_hand/joints_vel')), # ('jaco_arm/jaco_hand/pinch_site_pos', Array(shape=(1, 3), dtype=dtype('float64'), name='jaco_arm/jaco_hand/pinch_site_pos')), # ('jaco_arm/jaco_hand/pinch_site_rmat', Array(shape=(1, 9), dtype=dtype('float64'), name='jaco_arm/jaco_hand/pinch_site_rmat'))]) # This spec is converted to a `gym.spaces.Dict` space by the `wrap_env` function as follows: # Dict(front_close: Box(0, 255, (1, 84, 84, 3), uint8), # jaco_arm/jaco_hand/joints_pos: Box(-inf, inf, (1, 3), float64), # jaco_arm/jaco_hand/joints_vel: Box(-inf, inf, (1, 3), float64), # jaco_arm/jaco_hand/pinch_site_pos: Box(-inf, inf, (1, 3), float64), # jaco_arm/jaco_hand/pinch_site_rmat: Box(-inf, inf, (1, 9), float64), # jaco_arm/joints_pos: Box(-inf, inf, (1, 6, 2), float64), # jaco_arm/joints_torque: Box(-inf, inf, (1, 6), float64), # jaco_arm/joints_vel: Box(-inf, inf, (1, 6), float64)) # The `spaces` parameter is a flat tensor of the flattened observation/state space with shape (batch_size, size_of_flat_space). # Using the model's method `tensor_to_space` we can convert the flattened tensor to the original space. 
# https://skrl.readthedocs.io/en/latest/modules/skrl.models.base_class.html#skrl.models.torch.base.Model.tensor_to_space space = self.tensor_to_space(states, self.observation_space) # For this case, the `space` variable is a Python dictionary with the following structure and shapes: # {'front_close': torch.Tensor(shape=[batch_size, 1, 84, 84, 3], dtype=torch.float32), # 'jaco_arm/jaco_hand/joints_pos': torch.Tensor(shape=[batch_size, 1, 3], dtype=torch.float32) # 'jaco_arm/jaco_hand/joints_vel': torch.Tensor(shape=[batch_size, 1, 3], dtype=torch.float32) # 'jaco_arm/jaco_hand/pinch_site_pos': torch.Tensor(shape=[batch_size, 1, 3], dtype=torch.float32) # 'jaco_arm/jaco_hand/pinch_site_rmat': torch.Tensor(shape=[batch_size, 1, 9], dtype=torch.float32) # 'jaco_arm/joints_pos': torch.Tensor(shape=[batch_size, 1, 6, 2], dtype=torch.float32) # 'jaco_arm/joints_torque': torch.Tensor(shape=[batch_size, 1, 6], dtype=torch.float32) # 'jaco_arm/joints_vel': torch.Tensor(shape=[batch_size, 1, 6], dtype=torch.float32)} # permute and normalize the images (samples, width, height, channels) -> (samples, channels, width, height) features = self.features_extractor(space['front_close'][:,0].permute(0, 3, 1, 2) / 255.0) mean_actions = torch.tanh(self.net(torch.cat([features, space["jaco_arm/joints_pos"].view(states.shape[0], -1), space["jaco_arm/joints_vel"].view(states.shape[0], -1)], dim=-1))) return mean_actions, self.log_std_parameter, {} class Critic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.features_extractor = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=3), nn.ReLU(), nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(64, 64, kernel_size=2, stride=1), nn.ReLU(), nn.Flatten(), nn.Linear(7744, 512), nn.ReLU(), nn.Linear(512, 8), nn.Tanh()) self.net = nn.Sequential(nn.Linear(26 + self.num_actions, 32), nn.ReLU(), nn.Linear(32, 32), nn.ReLU(), nn.Linear(32, 1)) def compute(self, inputs, role): states = inputs["states"] # map the observations/states to the original space. # See the explanation above (StochasticActor.compute) space = self.tensor_to_space(states, self.observation_space) # permute and normalize the images (samples, width, height, channels) -> (samples, channels, width, height) features = self.features_extractor(space['front_close'][:,0].permute(0, 3, 1, 2) / 255.0) return self.net(torch.cat([features, space["jaco_arm/joints_pos"].view(states.shape[0], -1), space["jaco_arm/joints_vel"].view(states.shape[0], -1), inputs["taken_actions"]], dim=-1)), {} # Load and wrap the DeepMind environment env = manipulation.load("reach_site_vision") env = wrap_env(env) device = env.device # Instantiate a RandomMemory (without replacement) as experience replay memory memory = RandomMemory(memory_size=50000, num_envs=env.num_envs, device=device, replacement=False) # Instantiate the agent's models (function approximators). 
# SAC requires 5 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#spaces-and-models models_sac = {} models_sac["policy"] = StochasticActor(env.observation_space, env.action_space, device, clip_actions=True) models_sac["critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["critic_2"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_1"] = Critic(env.observation_space, env.action_space, device) models_sac["target_critic_2"] = Critic(env.observation_space, env.action_space, device) # Initialize the models' parameters (weights and biases) using a Gaussian distribution for model in models_sac.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # Configure and instantiate the agent. # Only modify some of the default configuration, visit its documentation to see all the options # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.sac.html#configuration-and-hyperparameters cfg_sac = SAC_DEFAULT_CONFIG.copy() cfg_sac["gradient_steps"] = 1 cfg_sac["batch_size"] = 256 cfg_sac["random_timesteps"] = 0 cfg_sac["learning_starts"] = 10000 cfg_sac["learn_entropy"] = True # logging to TensorBoard and write checkpoints each 1000 and 5000 timesteps respectively cfg_sac["experiment"]["write_interval"] = 1000 cfg_sac["experiment"]["checkpoint_interval"] = 5000 agent_sac = SAC(models=models_sac, memory=memory, cfg=cfg_sac, observation_space=env.observation_space, action_space=env.action_space, device=device) # Configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent_sac) # start training trainer.train()
10,370
Python
55.672131
151
0.579749
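The long comment in StochasticActor.compute above explains that the wrapped dm_control observation arrives as a flat tensor and is mapped back to a gym.spaces.Dict via the model's tensor_to_space method. The same idea can be illustrated with gym's own flatten/unflatten utilities on a toy Dict space (a generic sketch, not the skrl method itself):

import numpy as np
import gym

# toy Dict observation space (much smaller than the real manipulation one)
space = gym.spaces.Dict({"image": gym.spaces.Box(0, 255, (4, 4, 3), np.uint8),
                         "joints": gym.spaces.Box(-np.inf, np.inf, (6,), np.float64)})

sample = space.sample()                       # dict of arrays
flat = gym.spaces.flatten(space, sample)      # 1D array of size 4*4*3 + 6 = 54
restored = gym.spaces.unflatten(space, flat)  # back to {"image": ..., "joints": ...}
print(flat.shape, restored["joints"].shape)   # (54,) (6,)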
Toni-SM/skrl/docs/source/examples/robosuite/td3_robosuite_two_arm_lift.py
import robosuite from robosuite.controllers import load_controller_config import torch import torch.nn as nn import torch.nn.functional as F # Import the skrl components to build the RL system from skrl.models.torch import Model, DeterministicMixin from skrl.memories.torch import RandomMemory from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG from skrl.resources.noises.torch import GaussianNoise from skrl.trainers.torch import SequentialTrainer from skrl.envs.torch import wrap_env # Define the models (deterministic models) for the TD3 agent using mixins # and programming with two approaches (torch functional and torch.nn.Sequential class). # - Actor (policy): takes as input the environment's observation/state and returns an action # - Critic: takes the state and action as input and provides a value to guide the policy class DeterministicActor(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.linear_layer_1 = nn.Linear(self.num_observations, 400) self.linear_layer_2 = nn.Linear(400, 300) self.action_layer = nn.Linear(300, self.num_actions) def compute(self, inputs, role): x = F.relu(self.linear_layer_1(inputs["states"])) x = F.relu(self.linear_layer_2(x)) return torch.tanh(self.action_layer(x)), {} class DeterministicCritic(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations + self.num_actions, 400), nn.ReLU(), nn.Linear(400, 300), nn.ReLU(), nn.Linear(300, 1)) def compute(self, inputs, role): return self.net(torch.cat([inputs["states"], inputs["taken_actions"]], dim=1)), {} # Load and wrap the DeepMind robosuite environment controller_config = load_controller_config(default_controller="OSC_POSE") env = robosuite.make("TwoArmLift", robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot gripper_types="default", # use default grippers per robot arm controller_configs=controller_config, # each arm is controlled using OSC env_configuration="single-arm-opposed", # (two-arm envs only) arms face each other has_renderer=True, # on-screen rendering render_camera="frontview", # visualize the "frontview" camera has_offscreen_renderer=False, # no off-screen rendering control_freq=20, # 20 hz control for applied actions horizon=200, # each episode terminates after 200 steps use_object_obs=True, # provide object observations to agent use_camera_obs=False, # don't provide image observations to agent reward_shaping=True) # use a dense reward signal for learning env = wrap_env(env) device = env.device # Instantiate a RandomMemory (without replacement) as experience replay memory memory = RandomMemory(memory_size=25000, num_envs=env.num_envs, device=device, replacement=False) # Instantiate the agent's models (function approximators). 
# TD3 requires 6 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#spaces-and-models models = {} models["policy"] = DeterministicActor(env.observation_space, env.action_space, device) models["target_policy"] = DeterministicActor(env.observation_space, env.action_space, device) models["critic_1"] = DeterministicCritic(env.observation_space, env.action_space, device) models["critic_2"] = DeterministicCritic(env.observation_space, env.action_space, device) models["target_critic_1"] = DeterministicCritic(env.observation_space, env.action_space, device) models["target_critic_2"] = DeterministicCritic(env.observation_space, env.action_space, device) # Initialize the models' parameters (weights and biases) using a Gaussian distribution for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # Configure and instantiate the agent. # Only modify some of the default configuration, visit its documentation to see all the options # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.td3.html#configuration-and-hyperparameters cfg_agent = TD3_DEFAULT_CONFIG.copy() cfg_agent["exploration"]["noise"] = GaussianNoise(0, 0.1, device=device) cfg_agent["smooth_regularization_noise"] = GaussianNoise(0, 0.2, device=device) cfg_agent["smooth_regularization_clip"] = 0.5 cfg_agent["batch_size"] = 100 cfg_agent["random_timesteps"] = 100 cfg_agent["learning_starts"] = 100 # logging to TensorBoard and write checkpoints each 1000 and 5000 timesteps respectively cfg_agent["experiment"]["write_interval"] = 1000 cfg_agent["experiment"]["checkpoint_interval"] = 5000 agent = TD3(models=models, memory=memory, cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=device) # Configure and instantiate the RL trainer cfg_trainer = {"timesteps": 50000, "headless": False} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
5,854
Python
48.618644
104
0.67629
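The smooth_regularization_noise and smooth_regularization_clip entries above configure TD3's target-policy smoothing. Conceptually (a sketch of the standard TD3 formulation with illustrative tensor shapes, not skrl's internal code), clipped noise is added to the target policy's actions before they are evaluated by the target critics:

import torch

# standard TD3 target-policy smoothing sketch
next_actions = torch.rand(100, 8) * 2 - 1                # target policy output in [-1, 1]
noise = torch.randn_like(next_actions) * 0.2             # smooth_regularization_noise (std 0.2)
noise = noise.clamp(-0.5, 0.5)                           # smooth_regularization_clip
smoothed_actions = (next_actions + noise).clamp(-1, 1)   # keep actions inside the action space
# smoothed_actions would then be fed to the target critics when computing the TD target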
Toni-SM/skrl/docs/source/examples/isaacsim/torch_isaacsim_cartpole_ppo.py
# Omniverse Isaac Sim tutorial: Creating New RL Environment # https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_gym_new_rl_example.html # instantiate the VecEnvBase and create the task from omni.isaac.gym.vec_env import VecEnvBase # isort: skip env = VecEnvBase(headless=True) from cartpole_task import CartpoleTask # isort: skip task = CartpoleTask(name="Cartpole") env.set_task(task, backend="torch") import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, GaussianMixin, Model from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.Tanh(), nn.Linear(64, 64), nn.Tanh(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return torch.tanh(self.net(inputs["states"])), self.log_std_parameter, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.Tanh(), nn.Linear(64, 64), nn.Tanh(), nn.Linear(64, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # load and wrap the environment env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=1000, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device, clip_actions=True) models["value"] = Value(env.observation_space, env.action_space, device) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 1000 # memory_size cfg["learning_epochs"] = 20 cfg["mini_batches"] = 1 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 1e-3 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = False cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 0.5 cfg["kl_threshold"] = 0 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1000 cfg["experiment"]["checkpoint_interval"] = 10000 cfg["experiment"]["directory"] = "runs/torch/Cartpole" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 100000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
4,354
Python
35.906779
101
0.673404
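The discount_factor and lambda entries above feed PPO's generalized advantage estimation. A compact sketch of the GAE recursion these two numbers control (illustrative, single environment, ignoring the bootstrapping details handled inside the agent):

import torch

def gae(rewards, values, next_values, dones, discount=0.99, lam=0.95):
    # advantages[t] = delta[t] + discount * lam * (1 - dones[t]) * advantages[t + 1]
    advantages = torch.zeros_like(rewards)
    last_advantage = 0.0
    for t in reversed(range(rewards.shape[0])):
        delta = rewards[t] + discount * next_values[t] * (1 - dones[t]) - values[t]
        last_advantage = delta + discount * lam * (1 - dones[t]) * last_advantage
        advantages[t] = last_advantage
    return advantages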
Toni-SM/skrl/docs/source/examples/omniisaacgym/jax_ant_mt_ppo.py
""" Notes for Isaac Sim 2022.2.1 or earlier (Python 3.7 environment): * Python 3.7 is only supported up to jax<=0.3.25. See: https://github.com/google/jax/blob/main/CHANGELOG.md#jaxlib-041-dec-13-2022. * Builds for jaxlib<=0.3.25 are only available up to NVIDIA CUDA 11 and cuDNN 8.2 versions. See: https://storage.googleapis.com/jax-releases/jax_cuda_releases.html and search for `cuda11/jaxlib-0.3.25+cuda11.cudnn82-cp37-cp37m-manylinux2014_x86_64.whl`. * The `jax.Device = jax.xla.Device` statement is required by skrl to support jax<0.4.3. * Models require overloading the `__hash__` method to avoid "TypeError: Failed to hash Flax Module". """ import threading import flax.linen as nn import jax import jax.numpy as jnp jax.Device = jax.xla.Device # for Isaac Sim 2022.2.1 or earlier # import the skrl components to build the RL system from skrl import config from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.envs.loaders.jax import load_omniverse_isaacgym_env from skrl.envs.wrappers.jax import wrap_env from skrl.memories.jax import RandomMemory from skrl.models.jax import DeterministicMixin, GaussianMixin, Model from skrl.resources.preprocessors.jax import RunningStandardScaler from skrl.resources.schedulers.jax import KLAdaptiveRL from skrl.trainers.jax import SequentialTrainer from skrl.utils import set_seed config.jax.backend = "jax" # or "numpy" # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define models (stochastic and deterministic models) using mixins class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) def __hash__(self): # for Isaac Sim 2022.2.1 or earlier return id(self) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(256)(inputs["states"])) x = nn.elu(nn.Dense(128)(x)) x = nn.elu(nn.Dense(64)(x)) x = nn.Dense(self.num_actions)(x) log_std = self.param("log_std", lambda _: jnp.zeros(self.num_actions)) return x, log_std, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) DeterministicMixin.__init__(self, clip_actions) def __hash__(self): # for Isaac Sim 2022.2.1 or earlier return id(self) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.elu(nn.Dense(256)(inputs["states"])) x = nn.elu(nn.Dense(128)(x)) x = nn.elu(nn.Dense(64)(x)) x = nn.Dense(1)(x) return x, {} # load and wrap the multi-threaded Omniverse Isaac Gym environment env = load_omniverse_isaacgym_env(task_name="Ant", multi_threaded=True, timeout=30) env = wrap_env(env) device = env.device # instantiate a memory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device) # instantiate the agent's models (function approximators). 
# PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#models models = {} models["policy"] = Policy(env.observation_space, env.action_space, device) models["value"] = Value(env.observation_space, env.action_space, device) # instantiate models' state dict for role, model in models.items(): model.init_state_dict(role) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/ppo.html#configuration-and-hyperparameters cfg = PPO_DEFAULT_CONFIG.copy() cfg["rollouts"] = 16 # memory_size cfg["learning_epochs"] = 4 cfg["mini_batches"] = 2 # 16 * 4096 / 32768 cfg["discount_factor"] = 0.99 cfg["lambda"] = 0.95 cfg["learning_rate"] = 3e-4 cfg["learning_rate_scheduler"] = KLAdaptiveRL cfg["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg["random_timesteps"] = 0 cfg["learning_starts"] = 0 cfg["grad_norm_clip"] = 1.0 cfg["ratio_clip"] = 0.2 cfg["value_clip"] = 0.2 cfg["clip_predicted_values"] = True cfg["entropy_loss_scale"] = 0.0 cfg["value_loss_scale"] = 1.0 cfg["kl_threshold"] = 0 cfg["rewards_shaper"] = lambda rewards, timestep, timesteps: rewards * 0.01 cfg["state_preprocessor"] = RunningStandardScaler cfg["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg["value_preprocessor"] = RunningStandardScaler cfg["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 40 cfg["experiment"]["checkpoint_interval"] = 400 cfg["experiment"]["directory"] = "runs/jax/Ant" agent = PPO(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 8000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training in a separate thread threading.Thread(target=trainer.train).start() # run the simulation in the main thread env.run()
5,683
Python
37.405405
102
0.70315
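The KLAdaptiveRL scheduler above adjusts the learning rate based on the measured policy KL divergence against kl_threshold. The usual rule behind this kind of scheduler (a generic sketch with commonly used factors, not necessarily skrl's exact constants) is:

def kl_adaptive_lr(lr, kl, kl_threshold=0.008, factor=2.0, rate=1.5,
                   lr_min=1e-6, lr_max=1e-2):
    # shrink the learning rate when the update moved the policy too far,
    # grow it when the update was overly conservative
    if kl > kl_threshold * factor:
        lr = max(lr / rate, lr_min)
    elif kl < kl_threshold / factor:
        lr = min(lr * rate, lr_max)
    return lr

# example: a KL of 0.02 against a threshold of 0.008 reduces the learning rate
print(kl_adaptive_lr(3e-4, 0.02))  # 0.0002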
Toni-SM/skrl/docs/source/examples/shimmy/torch_shimmy_atari_pong_dqn.py
import gymnasium as gym import torch import torch.nn as nn # import the skrl components to build the RL system from skrl.agents.torch.dqn import DQN, DQN_DEFAULT_CONFIG from skrl.envs.wrappers.torch import wrap_env from skrl.memories.torch import RandomMemory from skrl.models.torch import DeterministicMixin, Model from skrl.trainers.torch import SequentialTrainer from skrl.utils import set_seed # seed for reproducibility set_seed() # e.g. `set_seed(42)` for fixed seed # define model (deterministic model) using mixin class QNetwork(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # load and wrap the environment env = gym.make("ALE/Pong-v5") env = wrap_env(env) device = env.device # instantiate a memory as experience replay memory = RandomMemory(memory_size=15000, num_envs=env.num_envs, device=device, replacement=False) # instantiate the agent's models (function approximators). # DQN requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#models models = {} models["q_network"] = QNetwork(env.observation_space, env.action_space, device) models["target_q_network"] = QNetwork(env.observation_space, env.action_space, device) # initialize models' parameters (weights and biases) for model in models.values(): model.init_parameters(method_name="normal_", mean=0.0, std=0.1) # configure and instantiate the agent (visit its documentation to see all the options) # https://skrl.readthedocs.io/en/latest/api/agents/dqn.html#configuration-and-hyperparameters cfg = DQN_DEFAULT_CONFIG.copy() cfg["learning_starts"] = 100 cfg["exploration"]["initial_epsilon"] = 1.0 cfg["exploration"]["final_epsilon"] = 0.04 cfg["exploration"]["timesteps"] = 1500 # logging to TensorBoard and write checkpoints (in timesteps) cfg["experiment"]["write_interval"] = 1000 cfg["experiment"]["checkpoint_interval"] = 5000 cfg["experiment"]["directory"] = "runs/torch/ALE_Pong" agent = DQN(models=models, memory=memory, cfg=cfg, observation_space=env.observation_space, action_space=env.action_space, device=device) # configure and instantiate the RL trainer cfg_trainer = {"timesteps": 50000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=[agent]) # start training trainer.train()
2,898
Python
33.511904
97
0.695997
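The exploration entries above (initial_epsilon, final_epsilon, timesteps) define an epsilon-greedy schedule for DQN. A linear reading of those three values (a sketch of how such a schedule is typically evaluated, not the agent's internals):

def epsilon(timestep, initial=1.0, final=0.04, timesteps=1500):
    # linear decay from `initial` to `final` over `timesteps`, then constant
    if timestep >= timesteps:
        return final
    return initial + (final - initial) * timestep / timesteps

print(epsilon(0), epsilon(750), epsilon(1500))  # -> 1.0, 0.52, 0.04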
Toni-SM/skrl/docs/source/examples/utils/tensorboard_file_iterator.py
import numpy as np
import matplotlib.pyplot as plt

from skrl.utils import postprocessing


labels = []
rewards = []

# load the Tensorboard files and iterate over them (tag: "Reward / Total reward (mean)")
tensorboard_iterator = postprocessing.TensorboardFileIterator("runs/*/events.out.tfevents.*",
                                                              tags=["Reward / Total reward (mean)"])
for dirname, data in tensorboard_iterator:
    rewards.append(data["Reward / Total reward (mean)"])
    labels.append(dirname)

# convert to numpy arrays and compute mean and std
rewards = np.array(rewards)
mean = np.mean(rewards[:,:,1], axis=0)
std = np.std(rewards[:,:,1], axis=0)

# create two subplots (one for each experiment's reward and one for the mean across experiments)
fig, ax = plt.subplots(1, 2, figsize=(15, 5))

# plot the rewards for each experiment
for reward, label in zip(rewards, labels):
    ax[0].plot(reward[:,0], reward[:,1], label=label)
ax[0].set_title("Total reward (for each experiment)")
ax[0].set_xlabel("Timesteps")
ax[0].set_ylabel("Reward")
ax[0].grid(True)
ax[0].legend()

# plot the mean and std (across experiments)
ax[1].fill_between(rewards[0,:,0], mean - std, mean + std, alpha=0.5, label="std")
ax[1].plot(rewards[0,:,0], mean, label="mean")
ax[1].set_title("Total reward (mean and std of all experiments)")
ax[1].set_xlabel("Timesteps")
ax[1].set_ylabel("Reward")
ax[1].grid(True)
ax[1].legend()

# save the figure before showing it (saving after the window is closed may produce an empty image)
plt.savefig("total_reward.png")
plt.show()
1,480
Python
29.854166
100
0.670946
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_omniverse_isaacgym_env.py
import torch import numpy as np from omniisaacgymenvs.tasks.base.rl_task import RLTask from omniisaacgymenvs.robots.articulations.franka import Franka as Robot from omni.isaac.core.prims import RigidPrimView from omni.isaac.core.articulations import ArticulationView from omni.isaac.core.objects import DynamicSphere from omni.isaac.core.utils.prims import get_prim_at_path from skrl.utils import omniverse_isaacgym_utils # post_physics_step calls # - get_observations() # - get_states() # - calculate_metrics() # - is_done() # - get_extras() TASK_CFG = {"test": False, "device_id": 0, "headless": True, "sim_device": "gpu", "enable_livestream": False, "warp": False, "seed": 42, "task": {"name": "ReachingFranka", "physics_engine": "physx", "env": {"numEnvs": 1024, "envSpacing": 1.5, "episodeLength": 100, "enableDebugVis": False, "clipObservations": 1000.0, "clipActions": 1.0, "controlFrequencyInv": 4, "actionScale": 2.5, "dofVelocityScale": 0.1, "controlSpace": "cartesian"}, "sim": {"dt": 0.0083, # 1 / 120 "use_gpu_pipeline": True, "gravity": [0.0, 0.0, -9.81], "add_ground_plane": True, "use_flatcache": True, "enable_scene_query_support": False, "enable_cameras": False, "default_physics_material": {"static_friction": 1.0, "dynamic_friction": 1.0, "restitution": 0.0}, "physx": {"worker_thread_count": 4, "solver_type": 1, "use_gpu": True, "solver_position_iteration_count": 4, "solver_velocity_iteration_count": 1, "contact_offset": 0.005, "rest_offset": 0.0, "bounce_threshold_velocity": 0.2, "friction_offset_threshold": 0.04, "friction_correlation_distance": 0.025, "enable_sleeping": True, "enable_stabilization": True, "max_depenetration_velocity": 1000.0, "gpu_max_rigid_contact_count": 524288, "gpu_max_rigid_patch_count": 33554432, "gpu_found_lost_pairs_capacity": 524288, "gpu_found_lost_aggregate_pairs_capacity": 262144, "gpu_total_aggregate_pairs_capacity": 1048576, "gpu_max_soft_body_contacts": 1048576, "gpu_max_particle_contacts": 1048576, "gpu_heap_capacity": 33554432, "gpu_temp_buffer_capacity": 16777216, "gpu_max_num_partitions": 8}, "robot": {"override_usd_defaults": False, "fixed_base": False, "enable_self_collisions": False, "enable_gyroscopic_forces": True, "solver_position_iteration_count": 4, "solver_velocity_iteration_count": 1, "sleep_threshold": 0.005, "stabilization_threshold": 0.001, "density": -1, "max_depenetration_velocity": 1000.0, "contact_offset": 0.005, "rest_offset": 0.0}, "target": {"override_usd_defaults": False, "fixed_base": True, "make_kinematic": True, "enable_self_collisions": False, "enable_gyroscopic_forces": True, "solver_position_iteration_count": 4, "solver_velocity_iteration_count": 1, "sleep_threshold": 0.005, "stabilization_threshold": 0.001, "density": -1, "max_depenetration_velocity": 1000.0, "contact_offset": 0.005, "rest_offset": 0.0}}}} class RobotView(ArticulationView): def __init__(self, prim_paths_expr: str, name: str = "robot_view") -> None: super().__init__(prim_paths_expr=prim_paths_expr, name=name, reset_xform_properties=False) class ReachingFrankaTask(RLTask): def __init__(self, name, sim_config, env, offset=None) -> None: self._sim_config = sim_config self._cfg = sim_config.config self._task_cfg = sim_config.task_config self.dt = 1 / 120.0 self._num_envs = self._task_cfg["env"]["numEnvs"] self._env_spacing = self._task_cfg["env"]["envSpacing"] self._action_scale = self._task_cfg["env"]["actionScale"] self._dof_vel_scale = self._task_cfg["env"]["dofVelocityScale"] self._max_episode_length = self._task_cfg["env"]["episodeLength"] 
self._control_space = self._task_cfg["env"]["controlSpace"] # observation and action space self._num_observations = 18 if self._control_space == "joint": self._num_actions = 7 elif self._control_space == "cartesian": self._num_actions = 3 else: raise ValueError("Invalid control space: {}".format(self._control_space)) self._end_effector_link = "panda_leftfinger" RLTask.__init__(self, name, env) def set_up_scene(self, scene) -> None: self.get_robot() self.get_target() super().set_up_scene(scene) # robot view self._robots = RobotView(prim_paths_expr="/World/envs/.*/robot", name="robot_view") scene.add(self._robots) # end-effectors view self._end_effectors = RigidPrimView(prim_paths_expr="/World/envs/.*/robot/{}".format(self._end_effector_link), name="end_effector_view") scene.add(self._end_effectors) # hands view (cartesian) if self._control_space == "cartesian": self._hands = RigidPrimView(prim_paths_expr="/World/envs/.*/robot/panda_hand", name="hand_view", reset_xform_properties=False) scene.add(self._hands) # target view self._targets = RigidPrimView(prim_paths_expr="/World/envs/.*/target", name="target_view", reset_xform_properties=False) scene.add(self._targets) self.init_data() def get_robot(self): robot = Robot(prim_path=self.default_zero_env_path + "/robot", translation=torch.tensor([0.0, 0.0, 0.0]), orientation=torch.tensor([1.0, 0.0, 0.0, 0.0]), name="robot") self._sim_config.apply_articulation_settings("robot", get_prim_at_path(robot.prim_path), self._sim_config.parse_actor_config("robot")) def get_target(self): target = DynamicSphere(prim_path=self.default_zero_env_path + "/target", name="target", radius=0.025, color=torch.tensor([1, 0, 0])) self._sim_config.apply_articulation_settings("target", get_prim_at_path(target.prim_path), self._sim_config.parse_actor_config("target")) target.set_collision_enabled(False) def init_data(self) -> None: self.robot_default_dof_pos = torch.tensor(np.radians([0, -45, 0, -135, 0, 90, 45, 0, 0]), device=self._device, dtype=torch.float32) self.actions = torch.zeros((self._num_envs, self.num_actions), device=self._device) if self._control_space == "cartesian": self.jacobians = torch.zeros((self._num_envs, 10, 6, 9), device=self._device) self.hand_pos, self.hand_rot = torch.zeros((self._num_envs, 3), device=self._device), torch.zeros((self._num_envs, 4), device=self._device) def get_observations(self) -> dict: robot_dof_pos = self._robots.get_joint_positions(clone=False) robot_dof_vel = self._robots.get_joint_velocities(clone=False) end_effector_pos, end_effector_rot = self._end_effectors.get_world_poses(clone=False) target_pos, target_rot = self._targets.get_world_poses(clone=False) dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) \ / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0 dof_vel_scaled = robot_dof_vel * self._dof_vel_scale generalization_noise = torch.rand((dof_vel_scaled.shape[0], 7), device=self._device) + 0.5 self.obs_buf[:, 0] = self.progress_buf / self._max_episode_length self.obs_buf[:, 1:8] = dof_pos_scaled[:, :7] self.obs_buf[:, 8:15] = dof_vel_scaled[:, :7] * generalization_noise self.obs_buf[:, 15:18] = target_pos - self._env_pos # compute distance for calculate_metrics() and is_done() self._computed_distance = torch.norm(end_effector_pos - target_pos, dim=-1) if self._control_space == "cartesian": self.jacobians = self._robots.get_jacobians(clone=False) self.hand_pos, self.hand_rot = self._hands.get_world_poses(clone=False) self.hand_pos -= self._env_pos return {self._robots.name: 
{"obs_buf": self.obs_buf}} def pre_physics_step(self, actions) -> None: reset_env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(reset_env_ids) > 0: self.reset_idx(reset_env_ids) self.actions = actions.clone().to(self._device) env_ids_int32 = torch.arange(self._robots.count, dtype=torch.int32, device=self._device) if self._control_space == "joint": targets = self.robot_dof_targets[:, :7] + self.robot_dof_speed_scales[:7] * self.dt * self.actions * self._action_scale elif self._control_space == "cartesian": goal_position = self.hand_pos + actions / 100.0 delta_dof_pos = omniverse_isaacgym_utils.ik(jacobian_end_effector=self.jacobians[:, 8 - 1, :, :7], # franka hand index: 8 current_position=self.hand_pos, current_orientation=self.hand_rot, goal_position=goal_position, goal_orientation=None) targets = self.robot_dof_targets[:, :7] + delta_dof_pos self.robot_dof_targets[:, :7] = torch.clamp(targets, self.robot_dof_lower_limits[:7], self.robot_dof_upper_limits[:7]) self.robot_dof_targets[:, 7:] = 0 self._robots.set_joint_position_targets(self.robot_dof_targets, indices=env_ids_int32) def reset_idx(self, env_ids) -> None: indices = env_ids.to(dtype=torch.int32) # reset robot pos = torch.clamp(self.robot_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_robot_dofs), device=self._device) - 0.5), self.robot_dof_lower_limits, self.robot_dof_upper_limits) dof_pos = torch.zeros((len(indices), self._robots.num_dof), device=self._device) dof_pos[:, :] = pos dof_pos[:, 7:] = 0 dof_vel = torch.zeros((len(indices), self._robots.num_dof), device=self._device) self.robot_dof_targets[env_ids, :] = pos self.robot_dof_pos[env_ids, :] = pos self._robots.set_joint_position_targets(self.robot_dof_targets[env_ids], indices=indices) self._robots.set_joint_positions(dof_pos, indices=indices) self._robots.set_joint_velocities(dof_vel, indices=indices) # reset target pos = (torch.rand((len(env_ids), 3), device=self._device) - 0.5) * 2 \ * torch.tensor([0.25, 0.25, 0.10], device=self._device) \ + torch.tensor([0.50, 0.00, 0.20], device=self._device) self._targets.set_world_poses(pos + self._env_pos[env_ids], indices=indices) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def post_reset(self): self.num_robot_dofs = self._robots.num_dof self.robot_dof_pos = torch.zeros((self.num_envs, self.num_robot_dofs), device=self._device) dof_limits = self._robots.get_dof_limits() self.robot_dof_lower_limits = dof_limits[0, :, 0].to(device=self._device) self.robot_dof_upper_limits = dof_limits[0, :, 1].to(device=self._device) self.robot_dof_speed_scales = torch.ones_like(self.robot_dof_lower_limits) self.robot_dof_targets = torch.zeros((self._num_envs, self.num_robot_dofs), dtype=torch.float, device=self._device) # randomize all envs indices = torch.arange(self._num_envs, dtype=torch.int64, device=self._device) self.reset_idx(indices) def calculate_metrics(self) -> None: self.rew_buf[:] = -self._computed_distance def is_done(self) -> None: self.reset_buf.fill_(0) # target reached self.reset_buf = torch.where(self._computed_distance <= 0.035, torch.ones_like(self.reset_buf), self.reset_buf) # max episode length self.reset_buf = torch.where(self.progress_buf >= self._max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf)
14,470
Python
50.682143
152
0.517554
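pre_physics_step above relies on omniverse_isaacgym_utils.ik to turn a Cartesian position error into joint deltas. A generic damped least-squares formulation of that step (a sketch of the common approach, not necessarily the exact skrl utility), assuming jacobian_end_effector holds only the position rows with shape (num_envs, 3, 7):

import torch

def dls_ik(jacobian_end_effector, current_position, goal_position, damping=0.05):
    # damped least-squares: dq = J^T (J J^T + lambda^2 I)^-1 * position_error
    error = (goal_position - current_position).unsqueeze(-1)    # (N, 3, 1)
    transpose = torch.transpose(jacobian_end_effector, 1, 2)    # (N, 7, 3)
    lmbda = torch.eye(3, device=jacobian_end_effector.device) * (damping ** 2)
    delta_dof_pos = transpose @ torch.inverse(jacobian_end_effector @ transpose + lmbda) @ error
    return delta_dof_pos.squeeze(-1)                            # (N, 7) joint position deltas

# usage (hypothetical tensors): targets = robot_dof_targets[:, :7] + dls_ik(J_pos, hand_pos, goal_pos)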
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_real_skrl_eval.py
import torch
import torch.nn as nn

# Import the skrl components to build the RL system
from skrl.models.torch import Model, GaussianMixin
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.resources.preprocessors.torch import RunningStandardScaler
from skrl.trainers.torch import SequentialTrainer
from skrl.envs.torch import wrap_env


# Define only the policy for evaluation
class Policy(GaussianMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 256),
                                 nn.ELU(),
                                 nn.Linear(256, 128),
                                 nn.ELU(),
                                 nn.Linear(128, 64),
                                 nn.ELU(),
                                 nn.Linear(64, self.num_actions))
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), self.log_std_parameter, {}


# Load the environment
from reaching_franka_real_env import ReachingFranka

control_space = "joint"   # joint or cartesian
motion_type = "waypoint"  # waypoint or impedance
camera_tracking = False   # True for USB-camera tracking

env = ReachingFranka(robot_ip="172.16.0.2",
                     device="cpu",
                     control_space=control_space,
                     motion_type=motion_type,
                     camera_tracking=camera_tracking)

# wrap the environment
env = wrap_env(env)

device = env.device


# Instantiate the agent's policy.
# PPO requires 2 models during training, but only the policy is needed for evaluation.
# Visit its documentation for more details
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models
models_ppo = {}
models_ppo["policy"] = Policy(env.observation_space, env.action_space, device)


# Configure and instantiate the agent.
# Only modify some of the default configuration, visit its documentation to see all the options
# https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters
cfg_ppo = PPO_DEFAULT_CONFIG.copy()
cfg_ppo["random_timesteps"] = 0
cfg_ppo["learning_starts"] = 0
cfg_ppo["state_preprocessor"] = RunningStandardScaler
cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device}
# log to TensorBoard every 32 timesteps and ignore checkpoints
cfg_ppo["experiment"]["write_interval"] = 32
cfg_ppo["experiment"]["checkpoint_interval"] = 0

agent = PPO(models=models_ppo,
            memory=None,
            cfg=cfg_ppo,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=device)

# load checkpoints
if control_space == "joint":
    agent.load("./agent_joint.pt")
elif control_space == "cartesian":
    agent.load("./agent_cartesian.pt")


# Configure and instantiate the RL trainer
cfg_trainer = {"timesteps": 1000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)

# start evaluation
trainer.eval()
3,319
Python
36.30337
102
0.664357
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_real_env.py
import gym import time import threading import numpy as np from packaging import version import frankx class ReachingFranka(gym.Env): def __init__(self, robot_ip="172.16.0.2", device="cuda:0", control_space="joint", motion_type="waypoint", camera_tracking=False): # gym API self._drepecated_api = version.parse(gym.__version__) < version.parse(" 0.25.0") self.device = device self.control_space = control_space # joint or cartesian self.motion_type = motion_type # waypoint or impedance if self.control_space == "cartesian" and self.motion_type == "impedance": # The operation of this mode (Cartesian-impedance) was adjusted later without being able to test it on the real robot. # Dangerous movements may occur for the operator and the robot. # Comment the following line of code if you want to proceed with this mode. raise ValueError("See comment in the code to proceed with this mode") pass # camera tracking (disabled by default) self.camera_tracking = camera_tracking if self.camera_tracking: threading.Thread(target=self._update_target_from_camera).start() # spaces self.observation_space = gym.spaces.Box(low=-1000, high=1000, shape=(18,), dtype=np.float32) if self.control_space == "joint": self.action_space = gym.spaces.Box(low=-1, high=1, shape=(7,), dtype=np.float32) elif self.control_space == "cartesian": self.action_space = gym.spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32) else: raise ValueError("Invalid control space:", self.control_space) # init real franka print("Connecting to robot at {}...".format(robot_ip)) self.robot = frankx.Robot(robot_ip) self.robot.set_default_behavior() self.robot.recover_from_errors() # the robot's response can be better managed by independently setting the following properties, for example: # - self.robot.velocity_rel = 0.2 # - self.robot.acceleration_rel = 0.1 # - self.robot.jerk_rel = 0.01 self.robot.set_dynamic_rel(0.25) self.gripper = self.robot.get_gripper() print("Robot connected") self.motion = None self.motion_thread = None self.dt = 1 / 120.0 self.action_scale = 2.5 self.dof_vel_scale = 0.1 self.max_episode_length = 100 self.robot_dof_speed_scales = 1 self.target_pos = np.array([0.65, 0.2, 0.2]) self.robot_default_dof_pos = np.radians([0, -45, 0, -135, 0, 90, 45]) self.robot_dof_lower_limits = np.array([-2.8973, -1.7628, -2.8973, -3.0718, -2.8973, -0.0175, -2.8973]) self.robot_dof_upper_limits = np.array([ 2.8973, 1.7628, 2.8973, -0.0698, 2.8973, 3.7525, 2.8973]) self.progress_buf = 1 self.obs_buf = np.zeros((18,), dtype=np.float32) def _update_target_from_camera(self): pixel_to_meter = 1.11 / 375 # m/px: adjust for custom cases import cv2 cap = cv2.VideoCapture(0) while cap.isOpened(): ret, frame = cap.read() if not ret: break # convert to HSV and remove noise hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) hsv = cv2.medianBlur(hsv, 15) # color matching in HSV mask = cv2.inRange(hsv, np.array([80, 100, 100]), np.array([100, 255, 255])) M = cv2.moments(mask) if M["m00"]: x = M["m10"] / M["m00"] y = M["m01"] / M["m00"] # real-world position (fixed z to 0.2 meters) pos = np.array([pixel_to_meter * (y - 185), pixel_to_meter * (x - 320), 0.2]) if self is not None: self.target_pos = pos # draw target frame = cv2.circle(frame, (int(x), int(y)), 30, (0,0,255), 2) frame = cv2.putText(frame, str(np.round(pos, 4).tolist()), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 1, cv2.LINE_AA) # show images cv2.imshow("frame", frame) cv2.imshow("mask", mask) k = cv2.waitKey(1) & 0xFF if k == ord('q'): cap.release() def _get_observation_reward_done(self): # get 
robot state try: robot_state = self.robot.get_state(read_once=True) except frankx.InvalidOperationException: robot_state = self.robot.get_state(read_once=False) # observation robot_dof_pos = np.array(robot_state.q) robot_dof_vel = np.array(robot_state.dq) end_effector_pos = np.array(robot_state.O_T_EE[-4:-1]) dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0 dof_vel_scaled = robot_dof_vel * self.dof_vel_scale self.obs_buf[0] = self.progress_buf / float(self.max_episode_length) self.obs_buf[1:8] = dof_pos_scaled self.obs_buf[8:15] = dof_vel_scaled self.obs_buf[15:18] = self.target_pos # reward distance = np.linalg.norm(end_effector_pos - self.target_pos) reward = -distance # done done = self.progress_buf >= self.max_episode_length - 1 done = done or distance <= 0.075 print("Distance:", distance) if done: print("Target or Maximum episode length reached") time.sleep(1) return self.obs_buf, reward, done def reset(self): print("Reseting...") # end current motion if self.motion is not None: self.motion.finish() self.motion_thread.join() self.motion = None self.motion_thread = None # open/close gripper # self.gripper.open() # self.gripper.clamp() # go to 1) safe position, 2) random position self.robot.move(frankx.JointMotion(self.robot_default_dof_pos.tolist())) dof_pos = self.robot_default_dof_pos + 0.25 * (np.random.rand(7) - 0.5) self.robot.move(frankx.JointMotion(dof_pos.tolist())) # get target position from prompt if not self.camera_tracking: while True: try: print("Enter target position (X, Y, Z) in meters") raw = input("or press [Enter] key for a random target position: ") if raw: self.target_pos = np.array([float(p) for p in raw.replace(' ', '').split(',')]) else: noise = (2 * np.random.rand(3) - 1) * np.array([0.25, 0.25, 0.10]) self.target_pos = np.array([0.5, 0.0, 0.2]) + noise print("Target position:", self.target_pos) break except ValueError: print("Invalid input. 
Try something like: 0.65, 0.0, 0.2") # initial pose affine = frankx.Affine(frankx.Kinematics.forward(dof_pos.tolist())) affine = affine * frankx.Affine(x=0, y=0, z=-0.10335, a=np.pi/2) # motion type if self.motion_type == "waypoint": self.motion = frankx.WaypointMotion([frankx.Waypoint(affine)], return_when_finished=False) elif self.motion_type == "impedance": self.motion = frankx.ImpedanceMotion(500, 50) else: raise ValueError("Invalid motion type:", self.motion_type) self.motion_thread = self.robot.move_async(self.motion) if self.motion_type == "impedance": self.motion.target = affine input("Press [Enter] to continue") self.progress_buf = 0 observation, reward, done = self._get_observation_reward_done() if self._drepecated_api: return observation else: return observation, {} def step(self, action): self.progress_buf += 1 # control space # joint if self.control_space == "joint": # get robot state try: robot_state = self.robot.get_state(read_once=True) except frankx.InvalidOperationException: robot_state = self.robot.get_state(read_once=False) # forward kinematics dof_pos = np.array(robot_state.q) + (self.robot_dof_speed_scales * self.dt * action * self.action_scale) affine = frankx.Affine(self.robot.forward_kinematics(dof_pos.flatten().tolist())) affine = affine * frankx.Affine(x=0, y=0, z=-0.10335, a=np.pi/2) # cartesian elif self.control_space == "cartesian": action /= 100.0 if self.motion_type == "waypoint": affine = frankx.Affine(x=action[0], y=action[1], z=action[2]) elif self.motion_type == "impedance": # get robot pose try: robot_pose = self.robot.current_pose(read_once=True) except frankx.InvalidOperationException: robot_pose = self.robot.current_pose(read_once=False) affine = robot_pose * frankx.Affine(x=action[0], y=action[1], z=action[2]) # motion type # waypoint motion if self.motion_type == "waypoint": if self.control_space == "joint": self.motion.set_next_waypoint(frankx.Waypoint(affine)) elif self.control_space == "cartesian": self.motion.set_next_waypoint(frankx.Waypoint(affine, frankx.Waypoint.Relative)) # impedance motion elif self.motion_type == "impedance": self.motion.target = affine else: raise ValueError("Invalid motion type:", self.motion_type) # the use of time.sleep is for simplicity. This does not guarantee control at a specific frequency time.sleep(0.1) # lower frequency, at 30Hz there are discontinuities observation, reward, done = self._get_observation_reward_done() if self._drepecated_api: return observation, reward, done, {} else: return observation, reward, done, done, {} def render(self, *args, **kwargs): pass def close(self): pass
10,370
Python
38.888461
144
0.568274
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_isaacgym_env.py
import os import numpy as np import torch from isaacgym import gymtorch, gymapi # isaacgymenvs (VecTask class) import sys import isaacgymenvs sys.path.append(list(isaacgymenvs.__path__)[0]) from tasks.base.vec_task import VecTask from skrl.utils import isaacgym_utils TASK_CFG = {"name": "ReachingFranka", "physics_engine": "physx", "rl_device": "cuda:0", "sim_device": "cuda:0", "graphics_device_id": 0, "headless": False, "virtual_screen_capture": False, "force_render": True, "env": {"numEnvs": 1024, "envSpacing": 1.5, "episodeLength": 100, "enableDebugVis": False, "clipObservations": 1000.0, "clipActions": 1.0, "controlFrequencyInv": 4, "actionScale": 2.5, "dofVelocityScale": 0.1, "controlSpace": "cartesian", "enableCameraSensors": False}, "sim": {"dt": 0.0083, # 1 / 120 "substeps": 1, "up_axis": "z", "use_gpu_pipeline": True, "gravity": [0.0, 0.0, -9.81], "physx": {"num_threads": 4, "solver_type": 1, "use_gpu": True, "num_position_iterations": 4, "num_velocity_iterations": 1, "contact_offset": 0.005, "rest_offset": 0.0, "bounce_threshold_velocity": 0.2, "max_depenetration_velocity": 1000.0, "default_buffer_size_multiplier": 5.0, "max_gpu_contact_pairs": 1048576, "num_subscenes": 4, "contact_collection": 0}}, "task": {"randomize": False}} class ReachingFrankaTask(VecTask): def __init__(self, cfg): self.cfg = cfg rl_device = cfg["rl_device"] sim_device = cfg["sim_device"] graphics_device_id = cfg["graphics_device_id"] headless = cfg["headless"] virtual_screen_capture = cfg["virtual_screen_capture"] force_render = cfg["force_render"] self.dt = 1 / 120.0 self._action_scale = self.cfg["env"]["actionScale"] self._dof_vel_scale = self.cfg["env"]["dofVelocityScale"] self._control_space = self.cfg["env"]["controlSpace"] self.max_episode_length = self.cfg["env"]["episodeLength"] # name required for VecTask self.debug_viz = self.cfg["env"]["enableDebugVis"] # observation and action space self.cfg["env"]["numObservations"] = 18 if self._control_space == "joint": self.cfg["env"]["numActions"] = 7 elif self._control_space == "cartesian": self.cfg["env"]["numActions"] = 3 else: raise ValueError("Invalid control space: {}".format(self._control_space)) self._end_effector_link = "panda_leftfinger" # setup VecTask super().__init__(config=self.cfg, rl_device=rl_device, sim_device=sim_device, graphics_device_id=graphics_device_id, headless=headless, virtual_screen_capture=virtual_screen_capture, force_render=force_render) # tensors and views: DOFs, roots, rigid bodies dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) rigid_body_state_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.root_state = gymtorch.wrap_tensor(root_state_tensor) self.rigid_body_state = gymtorch.wrap_tensor(rigid_body_state_tensor) self.dof_pos = self.dof_state.view(self.num_envs, -1, 2)[..., 0] self.dof_vel = self.dof_state.view(self.num_envs, -1, 2)[..., 1] self.root_pos = self.root_state[:, 0:3].view(self.num_envs, -1, 3) self.root_rot = self.root_state[:, 3:7].view(self.num_envs, -1, 4) self.root_vel_lin = self.root_state[:, 7:10].view(self.num_envs, -1, 3) self.root_vel_ang = self.root_state[:, 10:13].view(self.num_envs, -1, 3) self.rigid_body_pos = self.rigid_body_state[:, 0:3].view(self.num_envs, -1, 3) self.rigid_body_rot = 
self.rigid_body_state[:, 3:7].view(self.num_envs, -1, 4) self.rigid_body_vel_lin = self.rigid_body_state[:, 7:10].view(self.num_envs, -1, 3) self.rigid_body_vel_ang = self.rigid_body_state[:, 10:13].view(self.num_envs, -1, 3) # tensors and views: jacobian if self._control_space == "cartesian": jacobian_tensor = self.gym.acquire_jacobian_tensor(self.sim, "robot") self.jacobian = gymtorch.wrap_tensor(jacobian_tensor) self.jacobian_end_effector = self.jacobian[:, self.rigid_body_dict_robot[self._end_effector_link] - 1, :, :7] self.reset_idx(torch.arange(self.num_envs, device=self.device)) def create_sim(self): self.sim_params.up_axis = gymapi.UP_AXIS_Z self.sim_params.gravity.x = 0 self.sim_params.gravity.y = 0 self.sim_params.gravity.z = -9.81 self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]["envSpacing"], int(np.sqrt(self.num_envs))) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = os.path.join(os.path.dirname(os.path.abspath(isaacgymenvs.__file__)), "../assets") robot_asset_file = "urdf/franka_description/robots/franka_panda.urdf" # robot asset asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = True asset_options.fix_base_link = True asset_options.collapse_fixed_joints = True asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS asset_options.use_mesh_materials = True robot_asset = self.gym.load_asset(self.sim, asset_root, robot_asset_file, asset_options) # target asset asset_options = gymapi.AssetOptions() asset_options.fix_base_link = True asset_options.collapse_fixed_joints = False asset_options.disable_gravity = True asset_options.thickness = 0.001 asset_options.use_mesh_materials = True target_asset = self.gym.create_sphere(self.sim, 0.025, asset_options) robot_dof_stiffness = torch.tensor([400, 400, 400, 400, 400, 400, 400, 1.0e6, 1.0e6], dtype=torch.float32, device=self.device) robot_dof_damping = torch.tensor([80, 80, 80, 80, 80, 80, 80, 1.0e2, 1.0e2], dtype=torch.float, device=self.device) # set robot dof properties robot_dof_props = self.gym.get_asset_dof_properties(robot_asset) self.robot_dof_lower_limits = [] self.robot_dof_upper_limits = [] for i in range(9): robot_dof_props["driveMode"][i] = gymapi.DOF_MODE_POS if self.physics_engine == gymapi.SIM_PHYSX: robot_dof_props["stiffness"][i] = robot_dof_stiffness[i] robot_dof_props["damping"][i] = robot_dof_damping[i] else: robot_dof_props["stiffness"][i] = 7000.0 robot_dof_props["damping"][i] = 50.0 self.robot_dof_lower_limits.append(robot_dof_props["lower"][i]) self.robot_dof_upper_limits.append(robot_dof_props["upper"][i]) self.robot_dof_lower_limits = torch.tensor(self.robot_dof_lower_limits, device=self.device) self.robot_dof_upper_limits = torch.tensor(self.robot_dof_upper_limits, device=self.device) self.robot_dof_speed_scales = torch.ones_like(self.robot_dof_lower_limits) robot_dof_props["effort"][7] = 200 robot_dof_props["effort"][8] = 200 self.handle_targets = [] self.handle_robots = [] self.handle_envs = [] indexes_sim_robot = [] indexes_sim_target = [] for i in range(self.num_envs): # create env instance env_ptr = 
self.gym.create_env(self.sim, lower, upper, num_per_row) # create robot instance pose = gymapi.Transform() pose.p = gymapi.Vec3(0.0, 0.0, 0.0) pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1) robot_actor = self.gym.create_actor(env=env_ptr, asset=robot_asset, pose=pose, name="robot", group=i, # collision group filter=1, # mask off collision segmentationId=0) self.gym.set_actor_dof_properties(env_ptr, robot_actor, robot_dof_props) indexes_sim_robot.append(self.gym.get_actor_index(env_ptr, robot_actor, gymapi.DOMAIN_SIM)) # create target instance pose = gymapi.Transform() pose.p = gymapi.Vec3(0.5, 0.0, 0.2) pose.r = gymapi.Quat(0.0, 0.0, 0.0, 1) target_actor = self.gym.create_actor(env=env_ptr, asset=target_asset, pose=pose, name="target", group=i + 1, # collision group filter=1, # mask off collision segmentationId=1) indexes_sim_target.append(self.gym.get_actor_index(env_ptr, target_actor, gymapi.DOMAIN_SIM)) self.gym.set_rigid_body_color(env_ptr, target_actor, 0, gymapi.MESH_VISUAL, gymapi.Vec3(1., 0., 0.)) self.handle_envs.append(env_ptr) self.handle_robots.append(robot_actor) self.handle_targets.append(target_actor) self.indexes_sim_robot = torch.tensor(indexes_sim_robot, dtype=torch.int32, device=self.device) self.indexes_sim_target = torch.tensor(indexes_sim_target, dtype=torch.int32, device=self.device) self.num_robot_dofs = self.gym.get_asset_dof_count(robot_asset) self.rigid_body_dict_robot = self.gym.get_asset_rigid_body_dict(robot_asset) self.init_data() def init_data(self): self.robot_default_dof_pos = torch.tensor(np.radians([0, -45, 0, -135, 0, 90, 45, 0, 0]), device=self.device, dtype=torch.float32) self.robot_dof_targets = torch.zeros((self.num_envs, self.num_robot_dofs), device=self.device, dtype=torch.float32) if self._control_space == "cartesian": self.end_effector_pos = torch.zeros((self.num_envs, 3), device=self.device) self.end_effector_rot = torch.zeros((self.num_envs, 4), device=self.device) def compute_reward(self): self.rew_buf[:] = -self._computed_distance self.reset_buf.fill_(0) # target reached self.reset_buf = torch.where(self._computed_distance <= 0.035, torch.ones_like(self.reset_buf), self.reset_buf) # max episode length self.reset_buf = torch.where(self.progress_buf >= self.max_episode_length - 1, torch.ones_like(self.reset_buf), self.reset_buf) # double restart correction (why?, is it necessary?) 
self.rew_buf = torch.where(self.progress_buf == 0, -0.75 * torch.ones_like(self.reset_buf), self.rew_buf) self.reset_buf = torch.where(self.progress_buf == 0, torch.zeros_like(self.reset_buf), self.reset_buf) def compute_observations(self): self.gym.refresh_dof_state_tensor(self.sim) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) if self._control_space == "cartesian": self.gym.refresh_jacobian_tensors(self.sim) robot_dof_pos = self.dof_pos robot_dof_vel = self.dof_vel self.end_effector_pos = self.rigid_body_pos[:, self.rigid_body_dict_robot[self._end_effector_link]] self.end_effector_rot = self.rigid_body_rot[:, self.rigid_body_dict_robot[self._end_effector_link]] target_pos = self.root_pos[:, 1] target_rot = self.root_rot[:, 1] dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) \ / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0 dof_vel_scaled = robot_dof_vel * self._dof_vel_scale generalization_noise = torch.rand((dof_vel_scaled.shape[0], 7), device=self.device) + 0.5 self.obs_buf[:, 0] = self.progress_buf / self.max_episode_length self.obs_buf[:, 1:8] = dof_pos_scaled[:, :7] self.obs_buf[:, 8:15] = dof_vel_scaled[:, :7] * generalization_noise self.obs_buf[:, 15:18] = target_pos # compute distance for compute_reward() self._computed_distance = torch.norm(self.end_effector_pos - target_pos, dim=-1) def reset_idx(self, env_ids): # reset robot pos = torch.clamp(self.robot_default_dof_pos.unsqueeze(0) + 0.25 * (torch.rand((len(env_ids), self.num_robot_dofs), device=self.device) - 0.5), self.robot_dof_lower_limits, self.robot_dof_upper_limits) pos[:, 7:] = 0 self.robot_dof_targets[env_ids, :] = pos[:] self.dof_pos[env_ids, :] = pos[:] self.dof_vel[env_ids, :] = 0 indexes = self.indexes_sim_robot[env_ids] self.gym.set_dof_position_target_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.robot_dof_targets), gymtorch.unwrap_tensor(indexes), len(env_ids)) self.gym.set_dof_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.dof_state), gymtorch.unwrap_tensor(indexes), len(env_ids)) # reset targets pos = (torch.rand((len(env_ids), 3), device=self.device) - 0.5) * 2 pos[:, 0] = 0.50 + pos[:, 0] * 0.25 pos[:, 1] = 0.00 + pos[:, 1] * 0.25 pos[:, 2] = 0.20 + pos[:, 2] * 0.10 self.root_pos[env_ids, 1, :] = pos[:] indexes = self.indexes_sim_target[env_ids] self.gym.set_actor_root_state_tensor_indexed(self.sim, gymtorch.unwrap_tensor(self.root_state), gymtorch.unwrap_tensor(indexes), len(env_ids)) # bookkeeping self.reset_buf[env_ids] = 0 self.progress_buf[env_ids] = 0 def pre_physics_step(self, actions): actions = actions.clone().to(self.device) if self._control_space == "joint": targets = self.robot_dof_targets[:, :7] + self.robot_dof_speed_scales[:7] * self.dt * actions * self._action_scale elif self._control_space == "cartesian": goal_position = self.end_effector_pos + actions / 100.0 delta_dof_pos = isaacgym_utils.ik(jacobian_end_effector=self.jacobian_end_effector, current_position=self.end_effector_pos, current_orientation=self.end_effector_rot, goal_position=goal_position, goal_orientation=None) targets = self.robot_dof_targets[:, :7] + delta_dof_pos self.robot_dof_targets[:, :7] = torch.clamp(targets, self.robot_dof_lower_limits[:7], self.robot_dof_upper_limits[:7]) self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.robot_dof_targets)) def post_physics_step(self): self.progress_buf += 1 env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1) if len(env_ids) > 0: 
self.reset_idx(env_ids) self.compute_observations() self.compute_reward()
17,190
Python
46.09863
151
0.553054
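A hedged usage sketch (not part of the file above): how the ReachingFrankaTask defined in this file might be instantiated and wrapped with skrl for training. The wrapper name and the reduced number of environments are assumptions for a quick check, not taken from the repository.

from reaching_franka_isaacgym_env import ReachingFrankaTask, TASK_CFG

from skrl.envs.torch import wrap_env

TASK_CFG["env"]["numEnvs"] = 64   # hypothetical smaller batch for a quick smoke test
TASK_CFG["headless"] = True       # run without the viewer

env = ReachingFrankaTask(cfg=TASK_CFG)            # VecTask subclass defined above
env = wrap_env(env, wrapper="isaacgym-preview4")  # skrl wrapper for Isaac Gym preview tasks (assumed)
print(env.num_envs, env.observation_space, env.action_space)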
Toni-SM/skrl/docs/source/examples/real_world/franka_emika_panda/reaching_franka_omniverse_isaacgym_skrl_train.py
import torch import torch.nn as nn # Import the skrl components to build the RL system from skrl.models.torch import Model, GaussianMixin, DeterministicMixin from skrl.memories.torch import RandomMemory from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG from skrl.resources.schedulers.torch import KLAdaptiveRL from skrl.resources.preprocessors.torch import RunningStandardScaler from skrl.trainers.torch import SequentialTrainer from skrl.utils.omniverse_isaacgym_utils import get_env_instance from skrl.envs.torch import wrap_env from skrl.utils import set_seed # Seed for reproducibility seed = set_seed() # e.g. `set_seed(42)` for fixed seed # Define the models (stochastic and deterministic models) for the agent using helper mixin. # - Policy: takes as input the environment's observation/state and returns an action # - Value: takes the state as input and provides a value to guide the policy class Policy(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std) self.net = nn.Sequential(nn.Linear(self.num_observations, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, 64), nn.ELU(), nn.Linear(64, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} class Value(DeterministicMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False): Model.__init__(self, observation_space, action_space, device) DeterministicMixin.__init__(self, clip_actions) self.net = nn.Sequential(nn.Linear(self.num_observations, 256), nn.ELU(), nn.Linear(256, 128), nn.ELU(), nn.Linear(128, 64), nn.ELU(), nn.Linear(64, 1)) def compute(self, inputs, role): return self.net(inputs["states"]), {} # instance VecEnvBase and setup task headless = True # set headless to False for rendering env = get_env_instance(headless=headless) from omniisaacgymenvs.utils.config_utils.sim_config import SimConfig from reaching_franka_omniverse_isaacgym_env import ReachingFrankaTask, TASK_CFG TASK_CFG["seed"] = seed TASK_CFG["headless"] = headless TASK_CFG["task"]["env"]["numEnvs"] = 1024 TASK_CFG["task"]["env"]["controlSpace"] = "joint" # "joint" or "cartesian" sim_config = SimConfig(TASK_CFG) task = ReachingFrankaTask(name="ReachingFranka", sim_config=sim_config, env=env) env.set_task(task=task, sim_params=sim_config.get_physics_params(), backend="torch", init_sim=True) # wrap the environment env = wrap_env(env, "omniverse-isaacgym") device = env.device # Instantiate a RandomMemory as rollout buffer (any memory can be used for this) memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=device) # Instantiate the agent's models (function approximators). # PPO requires 2 models, visit its documentation for more details # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#spaces-and-models models_ppo = {} models_ppo["policy"] = Policy(env.observation_space, env.action_space, device) models_ppo["value"] = Value(env.observation_space, env.action_space, device) # Configure and instantiate the agent. 
# Only modify some of the default configuration, visit its documentation to see all the options # https://skrl.readthedocs.io/en/latest/modules/skrl.agents.ppo.html#configuration-and-hyperparameters cfg_ppo = PPO_DEFAULT_CONFIG.copy() cfg_ppo["rollouts"] = 16 cfg_ppo["learning_epochs"] = 8 cfg_ppo["mini_batches"] = 8 cfg_ppo["discount_factor"] = 0.99 cfg_ppo["lambda"] = 0.95 cfg_ppo["learning_rate"] = 5e-4 cfg_ppo["learning_rate_scheduler"] = KLAdaptiveRL cfg_ppo["learning_rate_scheduler_kwargs"] = {"kl_threshold": 0.008} cfg_ppo["random_timesteps"] = 0 cfg_ppo["learning_starts"] = 0 cfg_ppo["grad_norm_clip"] = 1.0 cfg_ppo["ratio_clip"] = 0.2 cfg_ppo["value_clip"] = 0.2 cfg_ppo["clip_predicted_values"] = True cfg_ppo["entropy_loss_scale"] = 0.0 cfg_ppo["value_loss_scale"] = 2.0 cfg_ppo["kl_threshold"] = 0 cfg_ppo["state_preprocessor"] = RunningStandardScaler cfg_ppo["state_preprocessor_kwargs"] = {"size": env.observation_space, "device": device} cfg_ppo["value_preprocessor"] = RunningStandardScaler cfg_ppo["value_preprocessor_kwargs"] = {"size": 1, "device": device} # logging to TensorBoard and write checkpoints each 32 and 250 timesteps respectively cfg_ppo["experiment"]["write_interval"] = 32 cfg_ppo["experiment"]["checkpoint_interval"] = 250 agent = PPO(models=models_ppo, memory=memory, cfg=cfg_ppo, observation_space=env.observation_space, action_space=env.action_space, device=device) # Configure and instantiate the RL trainer cfg_trainer = {"timesteps": 5000, "headless": True} trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent) # start training trainer.train()
5,539
Python
40.037037
102
0.67449
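A hedged follow-up sketch (not in the script above) of how a checkpoint written by this training run could be evaluated, reusing the `env`, `agent` and `SequentialTrainer` names from the script; the checkpoint path is illustrative only.

# evaluation sketch: reuse `env` and `agent` from the training script above
agent.load("./runs/<experiment>/checkpoints/best_agent.pt")  # hypothetical checkpoint path

cfg_trainer = {"timesteps": 1000, "headless": True}
trainer = SequentialTrainer(cfg=cfg_trainer, env=env, agents=agent)
trainer.eval()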
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_real_env.py
import time import numpy as np import gymnasium as gym import libiiwa class ReachingIiwa(gym.Env): def __init__(self, control_space="joint"): self.control_space = control_space # joint or cartesian # spaces self.observation_space = gym.spaces.Box(low=-1000, high=1000, shape=(18,), dtype=np.float32) if self.control_space == "joint": self.action_space = gym.spaces.Box(low=-1, high=1, shape=(7,), dtype=np.float32) elif self.control_space == "cartesian": self.action_space = gym.spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32) else: raise ValueError("Invalid control space:", self.control_space) # init iiwa print("Connecting to robot...") self.robot = libiiwa.LibIiwa() self.robot.set_control_interface(libiiwa.ControlInterface.CONTROL_INTERFACE_SERVO) self.robot.set_desired_joint_velocity_rel(0.5) self.robot.set_desired_joint_acceleration_rel(0.5) self.robot.set_desired_joint_jerk_rel(0.5) self.robot.set_desired_cartesian_velocity(10) self.robot.set_desired_cartesian_acceleration(10) self.robot.set_desired_cartesian_jerk(10) print("Robot connected") self.motion = None self.motion_thread = None self.dt = 1 / 120.0 self.action_scale = 2.5 self.dof_vel_scale = 0.1 self.max_episode_length = 100 self.robot_dof_speed_scales = 1 self.target_pos = np.array([0.65, 0.2, 0.2]) self.robot_default_dof_pos = np.radians([0, 0, 0, -90, 0, 90, 0]) self.robot_dof_lower_limits = np.array([-2.9671, -2.0944, -2.9671, -2.0944, -2.9671, -2.0944, -3.0543]) self.robot_dof_upper_limits = np.array([ 2.9671, 2.0944, 2.9671, 2.0944, 2.9671, 2.0944, 3.0543]) self.progress_buf = 1 self.obs_buf = np.zeros((18,), dtype=np.float32) def _get_observation_reward_done(self): # get robot state robot_state = self.robot.get_state(refresh=True) # observation robot_dof_pos = robot_state["joint_position"] robot_dof_vel = robot_state["joint_velocity"] end_effector_pos = robot_state["cartesian_position"] dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0 dof_vel_scaled = robot_dof_vel * self.dof_vel_scale self.obs_buf[0] = self.progress_buf / float(self.max_episode_length) self.obs_buf[1:8] = dof_pos_scaled self.obs_buf[8:15] = dof_vel_scaled self.obs_buf[15:18] = self.target_pos # reward distance = np.linalg.norm(end_effector_pos - self.target_pos) reward = -distance # done done = self.progress_buf >= self.max_episode_length - 1 done = done or distance <= 0.075 print("Distance:", distance) if done: print("Target or Maximum episode length reached") time.sleep(1) return self.obs_buf, reward, done def reset(self): print("Reseting...") # go to 1) safe position, 2) random position self.robot.command_joint_position(self.robot_default_dof_pos) time.sleep(3) dof_pos = self.robot_default_dof_pos + 0.25 * (np.random.rand(7) - 0.5) self.robot.command_joint_position(dof_pos) time.sleep(1) # get target position from prompt while True: try: print("Enter target position (X, Y, Z) in meters") raw = input("or press [Enter] key for a random target position: ") if raw: self.target_pos = np.array([float(p) for p in raw.replace(' ', '').split(',')]) else: noise = (2 * np.random.rand(3) - 1) * np.array([0.1, 0.2, 0.2]) self.target_pos = np.array([0.6, 0.0, 0.4]) + noise print("Target position:", self.target_pos) break except ValueError: print("Invalid input. 
Try something like: 0.65, 0.0, 0.4") input("Press [Enter] to continue") self.progress_buf = 0 observation, reward, done = self._get_observation_reward_done() return observation, {} def step(self, action): self.progress_buf += 1 # get robot state robot_state = self.robot.get_state(refresh=True) # control space # joint if self.control_space == "joint": dof_pos = robot_state["joint_position"] + (self.robot_dof_speed_scales * self.dt * action * self.action_scale) self.robot.command_joint_position(dof_pos) # cartesian elif self.control_space == "cartesian": end_effector_pos = robot_state["cartesian_position"] + action / 100.0 self.robot.command_cartesian_pose(end_effector_pos) # the use of time.sleep is for simplicity. It does not guarantee control at a specific frequency time.sleep(1 / 30.0) observation, reward, terminated = self._get_observation_reward_done() return observation, reward, terminated, False, {} def render(self, *args, **kwargs): pass def close(self): pass
5,314
Python
35.404109
144
0.589198
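A minimal interaction sketch, assuming the libiiwa bridge and the robot are reachable; it sends zero actions (holding the current joint position) purely to exercise the gymnasium-style API implemented by the environment above. It is not part of the repository.

import numpy as np

from reaching_iiwa_real_env import ReachingIiwa

env = ReachingIiwa(control_space="joint")
observation, info = env.reset()                                  # prompts for a target position
for _ in range(10):
    action = np.zeros(env.action_space.shape, dtype=np.float32)  # zero action: hold position
    observation, reward, terminated, truncated, _ = env.step(action)
    if terminated or truncated:
        break
env.close()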
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_real_ros2_env.py
import time import numpy as np import gymnasium as gym import rclpy from rclpy.node import Node from rclpy.qos import QoSPresetProfiles import sensor_msgs.msg import geometry_msgs.msg import libiiwa_msgs.srv class ReachingIiwa(gym.Env): def __init__(self, control_space="joint"): self.control_space = control_space # joint or cartesian # spaces self.observation_space = gym.spaces.Box(low=-1000, high=1000, shape=(18,), dtype=np.float32) if self.control_space == "joint": self.action_space = gym.spaces.Box(low=-1, high=1, shape=(7,), dtype=np.float32) elif self.control_space == "cartesian": self.action_space = gym.spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32) else: raise ValueError("Invalid control space:", self.control_space) # initialize the ROS node rclpy.init() self.node = Node(self.__class__.__name__) import threading threading.Thread(target=self._spin).start() # create publishers self.pub_command_joint = self.node.create_publisher(sensor_msgs.msg.JointState, '/iiwa/command/joint', QoSPresetProfiles.SYSTEM_DEFAULT.value) self.pub_command_cartesian = self.node.create_publisher(geometry_msgs.msg.Pose, '/iiwa/command/cartesian', QoSPresetProfiles.SYSTEM_DEFAULT.value) # keep compatibility with libiiwa Python API self.robot_state = {"joint_position": np.zeros((7,)), "joint_velocity": np.zeros((7,)), "cartesian_position": np.zeros((3,))} # create subscribers self.node.create_subscription(msg_type=sensor_msgs.msg.JointState, topic='/iiwa/state/joint_states', callback=self._callback_joint_states, qos_profile=QoSPresetProfiles.SYSTEM_DEFAULT.value) self.node.create_subscription(msg_type=geometry_msgs.msg.Pose, topic='/iiwa/state/end_effector_pose', callback=self._callback_end_effector_pose, qos_profile=QoSPresetProfiles.SYSTEM_DEFAULT.value) # service clients client_control_interface = self.node.create_client(libiiwa_msgs.srv.SetString, '/iiwa/set_control_interface') client_control_interface.wait_for_service() request = libiiwa_msgs.srv.SetString.Request() request.data = "SERVO" # or "servo" client_control_interface.call(request) client_joint_velocity_rel = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_joint_velocity_rel') client_joint_acceleration_rel = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_joint_acceleration_rel') client_joint_jerk_rel = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_joint_jerk_rel') client_cartesian_velocity = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_cartesian_velocity') client_cartesian_acceleration = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_cartesian_acceleration') client_cartesian_jerk = self.node.create_client(libiiwa_msgs.srv.SetNumber, '/iiwa/set_desired_cartesian_jerk') client_joint_velocity_rel.wait_for_service() client_joint_acceleration_rel.wait_for_service() client_joint_jerk_rel.wait_for_service() client_cartesian_velocity.wait_for_service() client_cartesian_acceleration.wait_for_service() client_cartesian_jerk.wait_for_service() request = libiiwa_msgs.srv.SetNumber.Request() request.data = 0.5 client_joint_velocity_rel.call(request) client_joint_acceleration_rel.call(request) client_joint_jerk_rel.call(request) request.data = 10.0 client_cartesian_velocity.call(request) client_cartesian_acceleration.call(request) client_cartesian_jerk.call(request) print("Robot connected") self.motion = None self.motion_thread = None self.dt = 1 / 120.0 self.action_scale = 2.5 self.dof_vel_scale = 0.1 self.max_episode_length = 100 
self.robot_dof_speed_scales = 1 self.target_pos = np.array([0.65, 0.2, 0.2]) self.robot_default_dof_pos = np.radians([0, 0, 0, -90, 0, 90, 0]) self.robot_dof_lower_limits = np.array([-2.9671, -2.0944, -2.9671, -2.0944, -2.9671, -2.0944, -3.0543]) self.robot_dof_upper_limits = np.array([ 2.9671, 2.0944, 2.9671, 2.0944, 2.9671, 2.0944, 3.0543]) self.progress_buf = 1 self.obs_buf = np.zeros((18,), dtype=np.float32) def _spin(self): rclpy.spin(self.node) def _callback_joint_states(self, msg): self.robot_state["joint_position"] = np.array(msg.position) self.robot_state["joint_velocity"] = np.array(msg.velocity) def _callback_end_effector_pose(self, msg): positon = msg.position self.robot_state["cartesian_position"] = np.array([positon.x, positon.y, positon.z]) def _get_observation_reward_done(self): # observation robot_dof_pos = self.robot_state["joint_position"] robot_dof_vel = self.robot_state["joint_velocity"] end_effector_pos = self.robot_state["cartesian_position"] dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0 dof_vel_scaled = robot_dof_vel * self.dof_vel_scale self.obs_buf[0] = self.progress_buf / float(self.max_episode_length) self.obs_buf[1:8] = dof_pos_scaled self.obs_buf[8:15] = dof_vel_scaled self.obs_buf[15:18] = self.target_pos # reward distance = np.linalg.norm(end_effector_pos - self.target_pos) reward = -distance # done done = self.progress_buf >= self.max_episode_length - 1 done = done or distance <= 0.075 print("Distance:", distance) if done: print("Target or Maximum episode length reached") time.sleep(1) return self.obs_buf, reward, done def reset(self): print("Reseting...") # go to 1) safe position, 2) random position msg = sensor_msgs.msg.JointState() msg.position = self.robot_default_dof_pos.tolist() self.pub_command_joint.publish(msg) time.sleep(3) msg.position = (self.robot_default_dof_pos + 0.25 * (np.random.rand(7) - 0.5)).tolist() self.pub_command_joint.publish(msg) time.sleep(1) # get target position from prompt while True: try: print("Enter target position (X, Y, Z) in meters") raw = input("or press [Enter] key for a random target position: ") if raw: self.target_pos = np.array([float(p) for p in raw.replace(' ', '').split(',')]) else: noise = (2 * np.random.rand(3) - 1) * np.array([0.1, 0.2, 0.2]) self.target_pos = np.array([0.6, 0.0, 0.4]) + noise print("Target position:", self.target_pos) break except ValueError: print("Invalid input. Try something like: 0.65, 0.0, 0.4") input("Press [Enter] to continue") self.progress_buf = 0 observation, reward, done = self._get_observation_reward_done() return observation, {} def step(self, action): self.progress_buf += 1 # control space # joint if self.control_space == "joint": joint_positions = self.robot_state["joint_position"] + (self.robot_dof_speed_scales * self.dt * action * self.action_scale) msg = sensor_msgs.msg.JointState() msg.position = joint_positions.tolist() self.pub_command_joint.publish(msg) # cartesian elif self.control_space == "cartesian": end_effector_pos = self.robot_state["cartesian_position"] + action / 100.0 msg = geometry_msgs.msg.Pose() msg.position.x = end_effector_pos[0] msg.position.y = end_effector_pos[1] msg.position.z = end_effector_pos[2] msg.orientation.x = np.nan msg.orientation.y = np.nan msg.orientation.z = np.nan msg.orientation.w = np.nan self.pub_command_cartesian.publish(msg) # the use of time.sleep is for simplicity. 
It does not guarantee control at a specific frequency time.sleep(1 / 30.0) observation, reward, terminated = self._get_observation_reward_done() return observation, reward, terminated, False, {} def render(self, *args, **kwargs): pass def close(self): # shutdown the node self.node.destroy_node() rclpy.shutdown()
9,047
Python
40.315068
154
0.607605
Toni-SM/skrl/docs/source/examples/real_world/kuka_lbr_iiwa/reaching_iiwa_real_ros_env.py
import time import numpy as np import gymnasium as gym import rospy import sensor_msgs.msg import geometry_msgs.msg import libiiwa_msgs.srv class ReachingIiwa(gym.Env): def __init__(self, control_space="joint"): self.control_space = control_space # joint or cartesian # spaces self.observation_space = gym.spaces.Box(low=-1000, high=1000, shape=(18,), dtype=np.float32) if self.control_space == "joint": self.action_space = gym.spaces.Box(low=-1, high=1, shape=(7,), dtype=np.float32) elif self.control_space == "cartesian": self.action_space = gym.spaces.Box(low=-1, high=1, shape=(3,), dtype=np.float32) else: raise ValueError("Invalid control space:", self.control_space) # create publishers self.pub_command_joint = rospy.Publisher('/iiwa/command/joint', sensor_msgs.msg.JointState, queue_size=1) self.pub_command_cartesian = rospy.Publisher('/iiwa/command/cartesian', geometry_msgs.msg.Pose, queue_size=1) # keep compatibility with libiiwa Python API self.robot_state = {"joint_position": np.zeros((7,)), "joint_velocity": np.zeros((7,)), "cartesian_position": np.zeros((3,))} # create subscribers rospy.Subscriber('/iiwa/state/joint_states', sensor_msgs.msg.JointState, self._callback_joint_states) rospy.Subscriber('/iiwa/state/end_effector_pose', geometry_msgs.msg.Pose, self._callback_end_effector_pose) # create service clients rospy.wait_for_service('/iiwa/set_control_interface') proxy = rospy.ServiceProxy('/iiwa/set_control_interface', libiiwa_msgs.srv.SetString) proxy("SERVO") # or "servo" rospy.wait_for_service('/iiwa/set_desired_joint_velocity_rel') rospy.wait_for_service('/iiwa/set_desired_joint_acceleration_rel') rospy.wait_for_service('/iiwa/set_desired_joint_jerk_rel') proxy = rospy.ServiceProxy('/iiwa/set_desired_joint_velocity_rel', libiiwa_msgs.srv.SetNumber) proxy(0.5) proxy = rospy.ServiceProxy('/iiwa/set_desired_joint_acceleration_rel', libiiwa_msgs.srv.SetNumber) proxy(0.5) proxy = rospy.ServiceProxy('/iiwa/set_desired_joint_jerk_rel', libiiwa_msgs.srv.SetNumber) proxy(0.5) rospy.wait_for_service('/iiwa/set_desired_cartesian_velocity') rospy.wait_for_service('/iiwa/set_desired_cartesian_acceleration') rospy.wait_for_service('/iiwa/set_desired_cartesian_jerk') proxy = rospy.ServiceProxy('/iiwa/set_desired_cartesian_velocity', libiiwa_msgs.srv.SetNumber) proxy(10.0) proxy = rospy.ServiceProxy('/iiwa/set_desired_cartesian_acceleration', libiiwa_msgs.srv.SetNumber) proxy(10.0) proxy = rospy.ServiceProxy('/iiwa/set_desired_cartesian_jerk', libiiwa_msgs.srv.SetNumber) proxy(10.0) # initialize the ROS node rospy.init_node(self.__class__.__name__) print("Robot connected") self.motion = None self.motion_thread = None self.dt = 1 / 120.0 self.action_scale = 2.5 self.dof_vel_scale = 0.1 self.max_episode_length = 100 self.robot_dof_speed_scales = 1 self.target_pos = np.array([0.65, 0.2, 0.2]) self.robot_default_dof_pos = np.radians([0, 0, 0, -90, 0, 90, 0]) self.robot_dof_lower_limits = np.array([-2.9671, -2.0944, -2.9671, -2.0944, -2.9671, -2.0944, -3.0543]) self.robot_dof_upper_limits = np.array([ 2.9671, 2.0944, 2.9671, 2.0944, 2.9671, 2.0944, 3.0543]) self.progress_buf = 1 self.obs_buf = np.zeros((18,), dtype=np.float32) def _callback_joint_states(self, msg): self.robot_state["joint_position"] = np.array(msg.position) self.robot_state["joint_velocity"] = np.array(msg.velocity) def _callback_end_effector_pose(self, msg): positon = msg.position self.robot_state["cartesian_position"] = np.array([positon.x, positon.y, positon.z]) def _get_observation_reward_done(self): # observation robot_dof_pos 
= self.robot_state["joint_position"] robot_dof_vel = self.robot_state["joint_velocity"] end_effector_pos = self.robot_state["cartesian_position"] dof_pos_scaled = 2.0 * (robot_dof_pos - self.robot_dof_lower_limits) / (self.robot_dof_upper_limits - self.robot_dof_lower_limits) - 1.0 dof_vel_scaled = robot_dof_vel * self.dof_vel_scale self.obs_buf[0] = self.progress_buf / float(self.max_episode_length) self.obs_buf[1:8] = dof_pos_scaled self.obs_buf[8:15] = dof_vel_scaled self.obs_buf[15:18] = self.target_pos # reward distance = np.linalg.norm(end_effector_pos - self.target_pos) reward = -distance # done done = self.progress_buf >= self.max_episode_length - 1 done = done or distance <= 0.075 print("Distance:", distance) if done: print("Target or Maximum episode length reached") time.sleep(1) return self.obs_buf, reward, done def reset(self): print("Reseting...") # go to 1) safe position, 2) random position msg = sensor_msgs.msg.JointState() msg.position = self.robot_default_dof_pos.tolist() self.pub_command_joint.publish(msg) time.sleep(3) msg.position = (self.robot_default_dof_pos + 0.25 * (np.random.rand(7) - 0.5)).tolist() self.pub_command_joint.publish(msg) time.sleep(1) # get target position from prompt while True: try: print("Enter target position (X, Y, Z) in meters") raw = input("or press [Enter] key for a random target position: ") if raw: self.target_pos = np.array([float(p) for p in raw.replace(' ', '').split(',')]) else: noise = (2 * np.random.rand(3) - 1) * np.array([0.1, 0.2, 0.2]) self.target_pos = np.array([0.6, 0.0, 0.4]) + noise print("Target position:", self.target_pos) break except ValueError: print("Invalid input. Try something like: 0.65, 0.0, 0.4") input("Press [Enter] to continue") self.progress_buf = 0 observation, reward, done = self._get_observation_reward_done() return observation, {} def step(self, action): self.progress_buf += 1 # control space # joint if self.control_space == "joint": joint_positions = self.robot_state["joint_position"] + (self.robot_dof_speed_scales * self.dt * action * self.action_scale) msg = sensor_msgs.msg.JointState() msg.position = joint_positions.tolist() self.pub_command_joint.publish(msg) # cartesian elif self.control_space == "cartesian": end_effector_pos = self.robot_state["cartesian_position"] + action / 100.0 msg = geometry_msgs.msg.Pose() msg.position.x = end_effector_pos[0] msg.position.y = end_effector_pos[1] msg.position.z = end_effector_pos[2] msg.orientation.x = np.nan msg.orientation.y = np.nan msg.orientation.z = np.nan msg.orientation.w = np.nan self.pub_command_cartesian.publish(msg) # the use of time.sleep is for simplicity. It does not guarantee control at a specific frequency time.sleep(1 / 30.0) observation, reward, terminated = self._get_observation_reward_done() return observation, reward, terminated, False, {} def render(self, *args, **kwargs): pass def close(self): pass
7,831
Python
39.371134
144
0.605542
Toni-SM/skrl/docs/source/snippets/utils_postprocessing.py
# [start-memory_file_iterator-torch]
from skrl.utils import postprocessing


# assuming there is a directory called "memories" with Torch files in it
memory_iterator = postprocessing.MemoryFileIterator("memories/*.pt")
for filename, data in memory_iterator:
    filename    # str: basename of the current file
    data    # dict: keys are the names of the memory tensors in the file.
            # Tensor shapes are (memory size, number of envs, specific content size)

    # example of simple usage:
    # print the filenames of all memories and their tensor shapes
    print("\nfilename:", filename)
    print("  |-- states:", data['states'].shape)
    print("  |-- actions:", data['actions'].shape)
    print("  |-- rewards:", data['rewards'].shape)
    print("  |-- next_states:", data['next_states'].shape)
    print("  |-- dones:", data['dones'].shape)
# [end-memory_file_iterator-torch]


# [start-memory_file_iterator-numpy]
from skrl.utils import postprocessing


# assuming there is a directory called "memories" with NumPy files in it
memory_iterator = postprocessing.MemoryFileIterator("memories/*.npz")
for filename, data in memory_iterator:
    filename    # str: basename of the current file
    data    # dict: keys are the names of the memory arrays in the file.
            # Array shapes are (memory size, number of envs, specific content size)

    # example of simple usage:
    # print the filenames of all memories and their array shapes
    print("\nfilename:", filename)
    print("  |-- states:", data['states'].shape)
    print("  |-- actions:", data['actions'].shape)
    print("  |-- rewards:", data['rewards'].shape)
    print("  |-- next_states:", data['next_states'].shape)
    print("  |-- dones:", data['dones'].shape)
# [end-memory_file_iterator-numpy]


# [start-memory_file_iterator-csv]
from skrl.utils import postprocessing


# assuming there is a directory called "memories" with CSV files in it
memory_iterator = postprocessing.MemoryFileIterator("memories/*.csv")
for filename, data in memory_iterator:
    filename    # str: basename of the current file
    data    # dict: keys are the names of the memory list of lists extracted from the file.
            # List lengths are (memory size * number of envs) and
            # sublist lengths are (specific content size)

    # example of simple usage:
    # print the filenames of all memories and their list lengths
    print("\nfilename:", filename)
    print("  |-- states:", len(data['states']))
    print("  |-- actions:", len(data['actions']))
    print("  |-- rewards:", len(data['rewards']))
    print("  |-- next_states:", len(data['next_states']))
    print("  |-- dones:", len(data['dones']))
# [end-memory_file_iterator-csv]


# [start-tensorboard_file_iterator-list]
from skrl.utils import postprocessing


# assuming there is a directory called "runs" with experiments and Tensorboard files in it
tensorboard_iterator = postprocessing.TensorboardFileIterator("runs/*/events.out.tfevents.*", \
    tags=["Reward / Total reward (mean)"])
for dirname, data in tensorboard_iterator:
    dirname    # str: path of the directory (experiment name) containing the Tensorboard file
    data    # dict: keys are the tags, values are lists of [step, value] pairs

    # example of simple usage:
    # print the directory name and the value length for the "Reward / Total reward (mean)" tag
    print("\ndirname:", dirname)
    for tag, values in data.items():
        print("  |-- tag:", tag)
        print("  |   |-- value length:", len(values))
# [end-tensorboard_file_iterator-list]
3,582
Python
40.66279
95
0.676159
Toni-SM/skrl/docs/source/snippets/shared_model.py
# [start-mlp-torch]
import torch
import torch.nn as nn

from skrl.models.torch import Model, GaussianMixin, DeterministicMixin


# define the shared model
class SharedModel(GaussianMixin, DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False,
                 clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction, role="policy")
        DeterministicMixin.__init__(self, clip_actions, role="value")

        # shared layers/network
        self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
                                 nn.ELU(),
                                 nn.Linear(32, 32),
                                 nn.ELU())

        # separated layers ("policy")
        self.mean_layer = nn.Linear(32, self.num_actions)
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

        # separated layer ("value")
        self.value_layer = nn.Linear(32, 1)

    # override the .act(...) method to disambiguate its call
    def act(self, inputs, role):
        if role == "policy":
            return GaussianMixin.act(self, inputs, role)
        elif role == "value":
            return DeterministicMixin.act(self, inputs, role)

    # forward the input to compute model output according to the specified role
    def compute(self, inputs, role):
        if role == "policy":
            return self.mean_layer(self.net(inputs["states"])), self.log_std_parameter, {}
        elif role == "value":
            return self.value_layer(self.net(inputs["states"])), {}


# instantiate the shared model and pass the same instance to the other key
models = {}
models["policy"] = SharedModel(env.observation_space, env.action_space, env.device)
models["value"] = models["policy"]
# [end-mlp-torch]
1,974
Python
39.306122
116
0.624113
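A hedged sketch of wiring the shared model above into a PPO agent; it assumes a wrapped environment `env` as in the other skrl examples and keeps the default hyperparameters. It is an illustration, not code from the repository.

from skrl.memories.torch import RandomMemory
from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG

# rollout buffer (size is an assumption)
memory = RandomMemory(memory_size=16, num_envs=env.num_envs, device=env.device)

cfg = PPO_DEFAULT_CONFIG.copy()
agent = PPO(models=models,            # "policy" and "value" point to the same shared instance
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device)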
Toni-SM/skrl/docs/source/snippets/noises.py
# [start-base-class-torch]
from typing import Union, Tuple

import torch

from skrl.resources.noises.torch import Noise


class CustomNoise(Noise):
    def __init__(self, device: Union[str, torch.device] = "cuda:0") -> None:
        """
        :param device: Device on which a torch tensor is or will be allocated (default: "cuda:0")
        :type device: str or torch.device, optional
        """
        super().__init__(device)

    def sample(self, size: Union[Tuple[int], torch.Size]) -> torch.Tensor:
        """Sample noise

        :param size: Shape of the sampled tensor
        :type size: tuple or list of integers, or torch.Size

        :return: Sampled noise
        :rtype: torch.Tensor
        """
        # ================================
        # - sample noise
        # ================================
# [end-base-class-torch]


# [start-base-class-jax]
from typing import Optional, Union, Tuple

import numpy as np
import jaxlib
import jax.numpy as jnp

from skrl.resources.noises.jax import Noise


class CustomNoise(Noise):
    def __init__(self, device: Optional[Union[str, jaxlib.xla_extension.Device]] = None) -> None:
        """Custom noise

        :param device: Device on which a jax array is or will be allocated (default: ``None``).
                       If None, the device will be either ``"cuda:0"`` if available or ``"cpu"``
        :type device: str or jaxlib.xla_extension.Device, optional
        """
        super().__init__(device)

    def sample(self, size: Tuple[int]) -> Union[np.ndarray, jnp.ndarray]:
        """Sample noise

        :param size: Shape of the sampled tensor
        :type size: tuple or list of integers

        :return: Sampled noise
        :rtype: np.ndarray or jnp.ndarray
        """
        # ================================
        # - sample noise
        # ================================
# [end-base-class-jax]

# =============================================================================

# [torch-start-gaussian]
from skrl.resources.noises.torch import GaussianNoise

cfg = DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = GaussianNoise(mean=0, std=0.2, device="cuda:0")
# [torch-end-gaussian]


# [jax-start-gaussian]
from skrl.resources.noises.jax import GaussianNoise

cfg = DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = GaussianNoise(mean=0, std=0.2)
# [jax-end-gaussian]

# =============================================================================

# [torch-start-ornstein-uhlenbeck]
from skrl.resources.noises.torch import OrnsteinUhlenbeckNoise

cfg = DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.2, base_scale=1.0, device="cuda:0")
# [torch-end-ornstein-uhlenbeck]


# [jax-start-ornstein-uhlenbeck]
from skrl.resources.noises.jax import OrnsteinUhlenbeckNoise

cfg = DEFAULT_CONFIG.copy()
cfg["exploration"]["noise"] = OrnsteinUhlenbeckNoise(theta=0.15, sigma=0.2, base_scale=1.0)
# [jax-end-ornstein-uhlenbeck]
2,976
Python
28.77
108
0.589718
Toni-SM/skrl/docs/source/snippets/gaussian_model.py
# [start-definition-torch] class GaussianModel(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) # [end-definition-torch] # [start-definition-jax] class GaussianModel(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) # [end-definition-jax] # ============================================================================= # [start-mlp-sequential-torch] import torch import torch.nn as nn from skrl.models.torch import Model, GaussianMixin # define the model class MLP(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.net = nn.Sequential(nn.Linear(self.num_observations, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): return self.net(inputs["states"]), self.log_std_parameter, {} # instantiate the model (assumes there is a wrapped environment: env) policy = MLP(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum") # [end-mlp-sequential-torch] # [start-mlp-functional-torch] import torch import torch.nn as nn import torch.nn.functional as F from skrl.models.torch import Model, GaussianMixin # define the model class MLP(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.fc1 = nn.Linear(self.num_observations, 64) self.fc2 = nn.Linear(64, 32) self.fc3 = nn.Linear(32, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): x = self.fc1(inputs["states"]) x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = self.fc3(x) return torch.tanh(x), self.log_std_parameter, {} # instantiate the model (assumes there is a wrapped environment: env) policy = MLP(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum") # [end-mlp-functional-torch] # [start-mlp-setup-jax] import jax.numpy as jnp import flax.linen as nn from skrl.models.jax import Model, GaussianMixin # define the model class MLP(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) 
GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) def setup(self): self.fc1 = nn.Dense(64) self.fc2 = nn.Dense(32) self.fc3 = nn.Dense(self.num_actions) self.log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions)) def __call__(self, inputs, role): x = self.fc1(inputs["states"]) x = nn.relu(x) x = self.fc2(x) x = nn.relu(x) x = self.fc3(x) return nn.tanh(x), self.log_std_parameter, {} # instantiate the model (assumes there is a wrapped environment: env) policy = MLP(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum") # initialize model's state dict policy.init_state_dict("policy") # [end-mlp-setup-jax] # [start-mlp-compact-jax] import jax.numpy as jnp import flax.linen as nn from skrl.models.jax import Model, GaussianMixin # define the model class MLP(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = nn.Dense(64)(inputs["states"]) x = nn.relu(x) x = nn.Dense(32)(x) x = nn.relu(x) x = nn.Dense(self.num_actions)(x) log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions)) return nn.tanh(x), log_std_parameter, {} # instantiate the model (assumes there is a wrapped environment: env) policy = MLP(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum") # initialize model's state dict policy.init_state_dict("policy") # [end-mlp-compact-jax] # ============================================================================= # [start-cnn-sequential-torch] import torch import torch.nn as nn from skrl.models.torch import Model, GaussianMixin # define the model class CNN(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.net = nn.Sequential(nn.Conv2d(3, 32, kernel_size=8, stride=4), nn.ReLU(), nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(), nn.Conv2d(64, 64, kernel_size=3, stride=1), nn.ReLU(), nn.Flatten(), nn.Linear(1024, 512), nn.ReLU(), nn.Linear(512, 16), nn.Tanh(), nn.Linear(16, 64), nn.Tanh(), nn.Linear(64, 32), nn.Tanh(), nn.Linear(32, self.num_actions)) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): # permute (samples, width * height * channels) -> (samples, channels, width, height) return self.net(inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2)), self.log_std_parameter, {} # instantiate the model (assumes there is a wrapped environment: env) policy = CNN(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum") # [end-cnn-sequential-torch] # 
[start-cnn-functional-torch] import torch import torch.nn as nn import torch.nn.functional as F from skrl.models.torch import Model, GaussianMixin # define the model class CNN(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum"): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.conv1 = nn.Conv2d(3, 32, kernel_size=8, stride=4) self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2) self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1) self.fc1 = nn.Linear(1024, 512) self.fc2 = nn.Linear(512, 16) self.fc3 = nn.Linear(16, 64) self.fc4 = nn.Linear(64, 32) self.fc5 = nn.Linear(32, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def compute(self, inputs, role): # permute (samples, width * height * channels) -> (samples, channels, width, height) x = inputs["states"].view(-1, *self.observation_space.shape).permute(0, 3, 1, 2) x = self.conv1(x) x = F.relu(x) x = self.conv2(x) x = F.relu(x) x = self.conv3(x) x = F.relu(x) x = torch.flatten(x, start_dim=1) x = self.fc1(x) x = F.relu(x) x = self.fc2(x) x = torch.tanh(x) x = self.fc3(x) x = torch.tanh(x) x = self.fc4(x) x = torch.tanh(x) x = self.fc5(x) return x, self.log_std_parameter, {} # instantiate the model (assumes there is a wrapped environment: env) policy = CNN(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum") # [end-cnn-functional-torch] # [start-cnn-setup-jax] import jax.numpy as jnp import flax.linen as nn from skrl.models.jax import Model, GaussianMixin # define the model class CNN(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) def setup(self): self.conv1 = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID") self.conv2 = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID") self.conv3 = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID") self.fc1 = nn.Dense(512) self.fc2 = nn.Dense(16) self.fc3 = nn.Dense(64) self.fc4 = nn.Dense(32) self.fc5 = nn.Dense(self.num_actions) self.log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions)) def __call__(self, inputs, role): x = inputs["states"].reshape((-1, *self.observation_space.shape)) x = self.conv1(x) x = nn.relu(x) x = self.conv2(x) x = nn.relu(x) x = self.conv3(x) x = nn.relu(x) x = x.reshape((x.shape[0], -1)) x = self.fc1(x) x = nn.relu(x) x = self.fc2(x) x = nn.tanh(x) x = self.fc3(x) x = nn.tanh(x) x = self.fc4(x) x = nn.tanh(x) x = self.fc5(x) return nn.tanh(x), self.log_std_parameter, {} # instantiate the model (assumes there is a wrapped environment: env) policy = CNN(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum") # initialize model's state dict policy.init_state_dict("policy") # [end-cnn-setup-jax] # [start-cnn-compact-jax] import jax.numpy as jnp import flax.linen as nn from skrl.models.jax 
import Model, GaussianMixin # define the model class CNN(GaussianMixin, Model): def __init__(self, observation_space, action_space, device=None, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", **kwargs): Model.__init__(self, observation_space, action_space, device, **kwargs) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) @nn.compact # marks the given module method allowing inlined submodules def __call__(self, inputs, role): x = inputs["states"].reshape((-1, *self.observation_space.shape)) x = nn.Conv(32, kernel_size=(8, 8), strides=(4, 4), padding="VALID")(x) x = nn.relu(x) x = nn.Conv(64, kernel_size=(4, 4), strides=(2, 2), padding="VALID")(x) x = nn.relu(x) x = nn.Conv(64, kernel_size=(3, 3), strides=(1, 1), padding="VALID")(x) x = nn.relu(x) x = x.reshape((x.shape[0], -1)) x = nn.Dense(512)(x) x = nn.relu(x) x = nn.Dense(16)(x) x = nn.tanh(x) x = nn.Dense(64)(x) x = nn.tanh(x) x = nn.Dense(32)(x) x = nn.tanh(x) x = nn.Dense(self.num_actions)(x) log_std_parameter = self.param("log_std_parameter", lambda _: jnp.zeros(self.num_actions)) return nn.tanh(x), log_std_parameter, {} # instantiate the model (assumes there is a wrapped environment: env) policy = CNN(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum") # initialize model's state dict policy.init_state_dict("policy") # [end-cnn-compact-jax] # ============================================================================= # [start-rnn-sequential-torch] import torch import torch.nn as nn from skrl.models.torch import Model, GaussianMixin # define the model class RNN(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=10): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs during rollout return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and 
torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), self.log_std_parameter, {"rnn": [hidden_states]} # instantiate the model (assumes there is a wrapped environment: env) policy = RNN(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=env.num_envs, num_layers=1, hidden_size=64, sequence_length=10) # [end-rnn-sequential-torch] # [start-rnn-functional-torch] import torch import torch.nn as nn import torch.nn.functional as F from skrl.models.torch import Model, GaussianMixin # define the model class RNN(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=10): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.rnn = nn.RNN(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.fc1 = nn.Linear(self.hidden_size, 64) self.fc2 = nn.Linear(64, 32) self.fc3 = nn.Linear(32, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs during rollout return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, 
hidden_states = self.rnn(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.rnn(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = self.fc1(rnn_output) x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = self.fc3(x) return torch.tanh(x), self.log_std_parameter, {"rnn": [hidden_states]} # instantiate the model (assumes there is a wrapped environment: env) policy = RNN(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=env.num_envs, num_layers=1, hidden_size=64, sequence_length=10) # [end-rnn-functional-torch] # ============================================================================= # [start-gru-sequential-torch] import torch import torch.nn as nn from skrl.models.torch import Model, GaussianMixin # define the model class GRU(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=10): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs during rollout return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) 
rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), self.log_std_parameter, {"rnn": [hidden_states]} # instantiate the model (assumes there is a wrapped environment: env) policy = GRU(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=env.num_envs, num_layers=1, hidden_size=64, sequence_length=10) # [end-gru-sequential-torch] # [start-gru-functional-torch] import torch import torch.nn as nn import torch.nn.functional as F from skrl.models.torch import Model, GaussianMixin # define the model class GRU(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=10): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hout self.sequence_length = sequence_length self.gru = nn.GRU(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.fc1 = nn.Linear(self.hidden_size, 64) self.fc2 = nn.Linear(64, 32) self.fc3 = nn.Linear(32, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs during rollout return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size)]}} # hidden states (D ∗ num_layers, N, Hout) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states = inputs["rnn"][0] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) # get the hidden states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, hidden_states = self.gru(rnn_input[:,i0:i1,:], hidden_states) hidden_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, hidden_states = self.gru(rnn_input, hidden_states) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, hidden_states = 
self.gru(rnn_input, hidden_states) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = self.fc1(rnn_output) x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = self.fc3(x) return torch.tanh(x), self.log_std_parameter, {"rnn": [hidden_states]} # instantiate the model (assumes there is a wrapped environment: env) policy = GRU(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=env.num_envs, num_layers=1, hidden_size=64, sequence_length=10) # [end-gru-functional-torch] # ============================================================================= # [start-lstm-sequential-torch] import torch import torch.nn as nn from skrl.models.torch import Model, GaussianMixin # define the model class LSTM(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=10): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.net = nn.Sequential(nn.Linear(self.hidden_size, 64), nn.ReLU(), nn.Linear(64, 32), nn.ReLU(), nn.Linear(32, self.num_actions), nn.Tanh()) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs during rollout return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) - 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 
rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) return self.net(rnn_output), self.log_std_parameter, {"rnn": [rnn_states[0], rnn_states[1]]} # instantiate the model (assumes there is a wrapped environment: env) policy = LSTM(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=env.num_envs, num_layers=1, hidden_size=64, sequence_length=10) # [end-lstm-sequential-torch] # [start-lstm-functional-torch] import torch import torch.nn as nn import torch.nn.functional as F from skrl.models.torch import Model, GaussianMixin # define the model class LSTM(GaussianMixin, Model): def __init__(self, observation_space, action_space, device, clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=1, num_layers=1, hidden_size=64, sequence_length=10): Model.__init__(self, observation_space, action_space, device) GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std, reduction) self.num_envs = num_envs self.num_layers = num_layers self.hidden_size = hidden_size # Hcell (Hout is Hcell because proj_size = 0) self.sequence_length = sequence_length self.lstm = nn.LSTM(input_size=self.num_observations, hidden_size=self.hidden_size, num_layers=self.num_layers, batch_first=True) # batch_first -> (batch, sequence, features) self.fc1 = nn.Linear(self.hidden_size, 64) self.fc2 = nn.Linear(64, 32) self.fc3 = nn.Linear(32, self.num_actions) self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions)) def get_specification(self): # batch size (N) is the number of envs during rollout return {"rnn": {"sequence_length": self.sequence_length, "sizes": [(self.num_layers, self.num_envs, self.hidden_size), # hidden states (D ∗ num_layers, N, Hout) (self.num_layers, self.num_envs, self.hidden_size)]}} # cell states (D ∗ num_layers, N, Hcell) def compute(self, inputs, role): states = inputs["states"] terminated = inputs.get("terminated", None) hidden_states, cell_states = inputs["rnn"][0], inputs["rnn"][1] # training if self.training: rnn_input = states.view(-1, self.sequence_length, states.shape[-1]) # (N, L, Hin): N=batch_size, L=sequence_length hidden_states = hidden_states.view(self.num_layers, -1, self.sequence_length, hidden_states.shape[-1]) # (D * num_layers, N, L, Hout) cell_states = cell_states.view(self.num_layers, -1, self.sequence_length, cell_states.shape[-1]) # (D * num_layers, N, L, Hcell) # get the hidden/cell states corresponding to the initial sequence hidden_states = hidden_states[:,:,0,:].contiguous() # (D * num_layers, N, Hout) cell_states = cell_states[:,:,0,:].contiguous() # (D * num_layers, N, Hcell) # reset the RNN state in the middle of a sequence if terminated is not None and torch.any(terminated): rnn_outputs = [] terminated = terminated.view(-1, self.sequence_length) indexes = [0] + (terminated[:,:-1].any(dim=0).nonzero(as_tuple=True)[0] + 1).tolist() + [self.sequence_length] for i in range(len(indexes) 
- 1): i0, i1 = indexes[i], indexes[i + 1] rnn_output, (hidden_states, cell_states) = self.lstm(rnn_input[:,i0:i1,:], (hidden_states, cell_states)) hidden_states[:, (terminated[:,i1-1]), :] = 0 cell_states[:, (terminated[:,i1-1]), :] = 0 rnn_outputs.append(rnn_output) rnn_states = (hidden_states, cell_states) rnn_output = torch.cat(rnn_outputs, dim=1) # no need to reset the RNN state in the sequence else: rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # rollout else: rnn_input = states.view(-1, 1, states.shape[-1]) # (N, L, Hin): N=num_envs, L=1 rnn_output, rnn_states = self.lstm(rnn_input, (hidden_states, cell_states)) # flatten the RNN output rnn_output = torch.flatten(rnn_output, start_dim=0, end_dim=1) # (N, L, D ∗ Hout) -> (N * L, D ∗ Hout) x = self.fc1(rnn_output) x = F.relu(x) x = self.fc2(x) x = F.relu(x) x = self.fc3(x) return torch.tanh(x), self.log_std_parameter, {"rnn": [rnn_states[0], rnn_states[1]]} # instantiate the model (assumes there is a wrapped environment: env) policy = LSTM(observation_space=env.observation_space, action_space=env.action_space, device=env.device, clip_actions=True, clip_log_std=True, min_log_std=-20, max_log_std=2, reduction="sum", num_envs=env.num_envs, num_layers=1, hidden_size=64, sequence_length=10) # [end-lstm-functional-torch]
41,575
Python
41.038423
146
0.566133
Toni-SM/skrl/docs/source/snippets/model_mixin.py
# [start-model-torch] from typing import Optional, Union, Mapping, Sequence, Tuple, Any import gym, gymnasium import torch from skrl.models.torch import Model class CustomModel(Model): def __init__(self, observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space], action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space], device: Optional[Union[str, torch.device]] = None) -> None: """Custom model :param observation_space: Observation/state space or shape. The ``num_observations`` property will contain the size of that space :type observation_space: int, sequence of int, gym.Space, gymnasium.Space :param action_space: Action space or shape. The ``num_actions`` property will contain the size of that space :type action_space: int, sequence of int, gym.Space, gymnasium.Space :param device: Device on which a torch tensor is or will be allocated (default: ``None``). If None, the device will be either ``"cuda:0"`` if available or ``"cpu"`` :type device: str or torch.device, optional """ super().__init__(observation_space, action_space, device) # ===================================== # - define custom attributes and others # ===================================== def act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Act according to the specified behavior :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function for stochastic models or None for deterministic models. The third component is a dictionary containing extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dictionary """ # ============================== # - act in response to the state # ============================== # [end-model-torch] # [start-model-jax] from typing import Optional, Union, Mapping, Sequence, Tuple, Any import gym, gymnasium import flax import jaxlib import jax.numpy as jnp from skrl.models.jax import Model class CustomModel(Model): def __init__(self, observation_space: Union[int, Sequence[int], gym.Space, gymnasium.Space], action_space: Union[int, Sequence[int], gym.Space, gymnasium.Space], device: Optional[Union[str, jaxlib.xla_extension.Device]] = None, parent: Optional[Any] = None, name: Optional[str] = None) -> None: """Custom model :param observation_space: Observation/state space or shape. The ``num_observations`` property will contain the size of that space :type observation_space: int, sequence of int, gym.Space, gymnasium.Space :param action_space: Action space or shape. The ``num_actions`` property will contain the size of that space :type action_space: int, sequence of int, gym.Space, gymnasium.Space :param device: Device on which a jax array is or will be allocated (default: ``None``). If None, the device will be either ``"cuda:0"`` if available or ``"cpu"`` :type device: str or jaxlib.xla_extension.Device, optional :param parent: The parent Module of this Module (default: ``None``).
It is a Flax reserved attribute :type parent: str, optional :param name: The name of this Module (default: ``None``). It is a Flax reserved attribute :type name: str, optional """ Model.__init__(self, observation_space, action_space, device, parent, name) # ===================================== # - define custom attributes and others # ===================================== flax.linen.Module.__post_init__(self) def act(self, inputs: Mapping[str, Union[jnp.ndarray, Any]], role: str = "", params: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, Union[jnp.ndarray, None], Mapping[str, Union[jnp.ndarray, Any]]]: """Act according to the specified behavior :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically jnp.ndarray :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. The third component is a dictionary containing the mean actions ``"mean_actions"`` and extra output values :rtype: tuple of jnp.ndarray, jnp.ndarray or None, and dictionary """ # ============================== # - act in response to the state # ============================== # [end-model-jax] # ============================================================================= # [start-mixin-torch] from typing import Union, Mapping, Tuple, Any import torch class CustomMixin: def __init__(self, role: str = "") -> None: """Custom mixin :param role: Role play by the model (default: ``""``) :type role: str, optional """ # ===================================== # - define custom attributes and others # ===================================== def act(self, inputs: Mapping[str, Union[torch.Tensor, Any]], role: str = "") -> Tuple[torch.Tensor, Union[torch.Tensor, None], Mapping[str, Union[torch.Tensor, Any]]]: """Act according to the specified behavior :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically torch.Tensor :param role: Role play by the model (default: ``""``) :type role: str, optional :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function for stochastic models or None for deterministic models. 
The third component is a dictionary containing extra output values :rtype: tuple of torch.Tensor, torch.Tensor or None, and dictionary """ # ============================== # - act in response to the state # ============================== # [end-mixin-torch] # [start-mixin-jax] from typing import Optional, Union, Mapping, Tuple, Any import flax import jax.numpy as jnp class CustomMixin: def __init__(self, role: str = "") -> None: """Custom mixin :param role: Role play by the model (default: ``""``) :type role: str, optional """ # ===================================== # - define custom attributes and others # ===================================== flax.linen.Module.__post_init__(self) def act(self, inputs: Mapping[str, Union[jnp.ndarray, Any]], role: str = "", params: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, Union[jnp.ndarray, None], Mapping[str, Union[jnp.ndarray, Any]]]: """Act according to the specified behavior :param inputs: Model inputs. The most common keys are: - ``"states"``: state of the environment used to make the decision - ``"taken_actions"``: actions taken by the policy for the given states :type inputs: dict where the values are typically jnp.ndarray :param role: Role play by the model (default: ``""``) :type role: str, optional :param params: Parameters used to compute the output (default: ``None``). If ``None``, internal parameters will be used :type params: jnp.array :return: Model output. The first component is the action to be taken by the agent. The second component is the log of the probability density function. The third component is a dictionary containing the mean actions ``"mean_actions"`` and extra output values :rtype: tuple of jnp.ndarray, jnp.ndarray or None, and dictionary """ # ============================== # - act in response to the state # ============================== # [end-mixin-jax]
9,778
Python
43.857798
137
0.562896
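# --- added sketch (not part of the original snippets): how a custom mixin such as the
# CustomMixin defined above is typically combined with Model through multiple inheritance,
# mirroring the GaussianMixin/DeterministicMixin pattern used throughout these snippets.
# The Policy class, its network and the wrapped environment <env> are illustrative assumptions.
import torch.nn as nn

from skrl.models.torch import Model


class Policy(CustomMixin, Model):
    def __init__(self, observation_space, action_space, device=None, role=""):
        Model.__init__(self, observation_space, action_space, device)
        CustomMixin.__init__(self, role)

        # hypothetical network to be used by the mixin's act() implementation
        self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
                                 nn.ReLU(),
                                 nn.Linear(32, self.num_actions))


# instantiate the model (assumes there is a wrapped environment: env)
policy = Policy(observation_space=env.observation_space,
                action_space=env.action_space,
                device=env.device)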
Toni-SM/skrl/docs/source/snippets/multi_agents_basic_usage.py
# [start-ippo-torch] # import the agent and its default configuration from skrl.multi_agents.torch.ippo import IPPO, IPPO_DEFAULT_CONFIG # instantiate the agent's models models = {} for agent_name in env.possible_agents: models[agent_name] = {} models[agent_name]["policy"] = ... models[agent_name]["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = IPPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memories <memories>) agent = IPPO(possible_agents=env.possible_agents, models=models, memories=memories, # only required during training cfg=cfg_agent, observation_spaces=env.observation_spaces, action_spaces=env.action_spaces, device=env.device) # [end-ippo-torch] # [start-ippo-jax] # import the agent and its default configuration from skrl.multi_agents.jax.ippo import IPPO, IPPO_DEFAULT_CONFIG # instantiate the agent's models models = {} for agent_name in env.possible_agents: models[agent_name] = {} models[agent_name]["policy"] = ... models[agent_name]["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = IPPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memories <memories>) agent = IPPO(possible_agents=env.possible_agents, models=models, memories=memories, # only required during training cfg=cfg_agent, observation_spaces=env.observation_spaces, action_spaces=env.action_spaces, device=env.device) # [end-ippo-jax] # [start-mappo-torch] # import the agent and its default configuration from skrl.multi_agents.torch.mappo import MAPPO, MAPPO_DEFAULT_CONFIG # instantiate the agent's models models = {} for agent_name in env.possible_agents: models[agent_name] = {} models[agent_name]["policy"] = ... models[agent_name]["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = MAPPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memories <memories>) agent = MAPPO(possible_agents=env.possible_agents, models=models, memories=memories, # only required during training cfg=cfg_agent, observation_spaces=env.observation_spaces, action_spaces=env.action_spaces, device=env.device, shared_observation_spaces=env.shared_observation_spaces) # [end-mappo-torch] # [start-mappo-jax] # import the agent and its default configuration from skrl.multi_agents.jax.mappo import MAPPO, MAPPO_DEFAULT_CONFIG # instantiate the agent's models models = {} for agent_name in env.possible_agents: models[agent_name] = {} models[agent_name]["policy"] = ... models[agent_name]["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = MAPPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memories <memories>) agent = MAPPO(possible_agents=env.possible_agents, models=models, memories=memories, # only required during training cfg=cfg_agent, observation_spaces=env.observation_spaces, action_spaces=env.action_spaces, device=env.device, shared_observation_spaces=env.shared_observation_spaces) # [end-mappo-jax]
3,674
Python
32.715596
70
0.66957
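# --- added sketch (not part of the original snippets): one possible way to build the
# per-agent <memories> dict assumed by the IPPO/MAPPO snippets above, using the
# RandomMemory class shown in the memories snippets. The memory size (1000) and the
# wrapped environment <env> are illustrative assumptions.
from skrl.memories.torch import RandomMemory

memories = {}
for agent_name in env.possible_agents:
    memories[agent_name] = RandomMemory(memory_size=1000, num_envs=env.num_envs, device=env.device)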
Toni-SM/skrl/docs/source/snippets/agents_basic_usage.py
# [torch-start-a2c] # import the agent and its default configuration from skrl.agents.torch.a2c import A2C, A2C_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = A2C_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = A2C(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-a2c] # [jax-start-a2c] # import the agent and its default configuration from skrl.agents.jax.a2c import A2C, A2C_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = A2C_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = A2C(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [jax-end-a2c] # [torch-start-a2c-rnn] # import the agent and its default configuration from skrl.agents.torch.a2c import A2C_RNN as A2C, A2C_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = A2C_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = A2C(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-a2c-rnn] # ============================================================================= # [torch-start-amp] # import the agent and its default configuration from skrl.agents.torch.amp import AMP, AMP_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training models["discriminator"] = ... # only required during training # adjust some configuration if necessary cfg_agent = AMP_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) # (assuming defined memories for motion <motion_dataset> and <reply_buffer>) # (assuming defined methods to collect motion <collect_reference_motions> and <collect_observation>) agent = AMP(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device, amp_observation_space=env.amp_observation_space, motion_dataset=motion_dataset, reply_buffer=reply_buffer, collect_reference_motions=collect_reference_motions, collect_observation=collect_observation) # [torch-end-amp] # ============================================================================= # [torch-start-cem] # import the agent and its default configuration from skrl.agents.torch.cem import CEM, CEM_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... # adjust some configuration if necessary cfg_agent = CEM_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... 
# instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = CEM(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-cem] # [jax-start-cem] # import the agent and its default configuration from skrl.agents.jax.cem import CEM, CEM_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... # adjust some configuration if necessary cfg_agent = CEM_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = CEM(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [jax-end-cem] # ============================================================================= # [torch-start-ddpg] # import the agent and its default configuration from skrl.agents.torch.ddpg import DDPG, DDPG_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["target_policy"] = ... # only required during training models["critic"] = ... # only required during training models["target_critic"] = ... # only required during training # adjust some configuration if necessary cfg_agent = DDPG_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = DDPG(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-ddpg] # [jax-start-ddpg] # import the agent and its default configuration from skrl.agents.jax.ddpg import DDPG, DDPG_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["target_policy"] = ... # only required during training models["critic"] = ... # only required during training models["target_critic"] = ... # only required during training # adjust some configuration if necessary cfg_agent = DDPG_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = DDPG(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [jax-end-ddpg] # [torch-start-ddpg-rnn] # import the agent and its default configuration from skrl.agents.torch.ddpg import DDPG_RNN as DDPG, DDPG_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["target_policy"] = ... # only required during training models["critic"] = ... # only required during training models["target_critic"] = ... # only required during training # adjust some configuration if necessary cfg_agent = DDPG_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = DDPG(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-ddpg-rnn] # ============================================================================= # [torch-start-ddqn] # import the agent and its default configuration from skrl.agents.torch.dqn import DDQN, DDQN_DEFAULT_CONFIG # instantiate the agent's models models = {} models["q_network"] = ... 
models["target_q_network"] = ... # only required during training # adjust some configuration if necessary cfg_agent = DDQN_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = DDQN(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-ddqn] # [jax-start-ddqn] # import the agent and its default configuration from skrl.agents.jax.dqn import DDQN, DDQN_DEFAULT_CONFIG # instantiate the agent's models models = {} models["q_network"] = ... models["target_q_network"] = ... # only required during training # adjust some configuration if necessary cfg_agent = DDQN_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = DDQN(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [jax-end-ddqn] # ============================================================================= # [torch-start-dqn] # import the agent and its default configuration from skrl.agents.torch.dqn import DQN, DQN_DEFAULT_CONFIG # instantiate the agent's models models = {} models["q_network"] = ... models["target_q_network"] = ... # only required during training # adjust some configuration if necessary cfg_agent = DQN_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = DQN(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-dqn] # [jax-start-dqn] # import the agent and its default configuration from skrl.agents.jax.dqn import DQN, DQN_DEFAULT_CONFIG # instantiate the agent's models models = {} models["q_network"] = ... models["target_q_network"] = ... # only required during training # adjust some configuration if necessary cfg_agent = DQN_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = DQN(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [jax-end-dqn] # ============================================================================= # [torch-start-ppo] # import the agent and its default configuration from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = PPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = PPO(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-ppo] # [jax-start-ppo] # import the agent and its default configuration from skrl.agents.jax.ppo import PPO, PPO_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = PPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... 
# instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = PPO(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [jax-end-ppo] # [torch-start-ppo-rnn] # import the agent and its default configuration from skrl.agents.torch.ppo import PPO_RNN as PPO, PPO_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = PPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = PPO(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-ppo-rnn] # ============================================================================= # [torch-start-q-learning] # import the agent and its default configuration from skrl.agents.torch.q_learning import Q_LEARNING, Q_LEARNING_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... # adjust some configuration if necessary cfg_agent = Q_LEARNING_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env>) agent = Q_LEARNING(models=models, memory=None, cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-q-learning] # ============================================================================= # [torch-start-rpo-with-rpo] class Policy(GaussianMixin, Model): ... def compute(self, inputs, role): # compute the mean actions using the neural network mean_actions = self.net(inputs["states"]) # perturb the mean actions by adding a randomized uniform sample rpo_alpha = inputs["alpha"] perturbation = torch.zeros_like(mean_actions).uniform_(-rpo_alpha, rpo_alpha) mean_actions += perturbation return mean_actions, self.log_std_parameter, {} # [torch-end-rpo-with-rpo] # [jax-start-rpo-with-rpo] class Policy(GaussianMixin, Model): ... def __call__(self, inputs, role): # compute the mean actions using the neural network mean_actions = ... log_std = ... # perturb the mean actions by adding a randomized uniform sample rpo_alpha = inputs["alpha"] perturbation = jax.random.uniform(inputs["key"], mean_actions.shape, minval=-rpo_alpha, maxval=rpo_alpha) mean_actions += perturbation return mean_actions, log_std, {} # [jax-end-rpo-with-rpo] # [torch-start-rpo-without-rpo] class Policy(GaussianMixin, Model): ... def compute(self, inputs, role): # compute the mean actions using the neural network mean_actions = self.net(inputs["states"]) # perturb the mean actions by adding a randomized uniform sample rpo_alpha = 0.5 perturbation = torch.zeros_like(mean_actions).uniform_(-rpo_alpha, rpo_alpha) mean_actions += perturbation return mean_actions, self.log_std_parameter, {} # [torch-end-rpo-without-rpo] # [jax-start-rpo-without-rpo] class Policy(GaussianMixin, Model): ... def __call__(self, inputs, role): # compute the mean actions using the neural network mean_actions = ... log_std = ... 
# perturb the mean actions by adding a randomized uniform sample rpo_alpha = 0.5 perturbation = jax.random.uniform(inputs["key"], mean_actions.shape, minval=-rpo_alpha, maxval=rpo_alpha) mean_actions += perturbation return mean_actions, log_std, {} # [jax-end-rpo-without-rpo] # [torch-start-rpo] # import the agent and its default configuration from skrl.agents.torch.rpo import RPO, RPO_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = RPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = RPO(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-rpo] # [jax-start-rpo] # import the agent and its default configuration from skrl.agents.jax.rpo import RPO, RPO_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = RPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = RPO(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [jax-end-rpo] # [torch-start-rpo-rnn] # import the agent and its default configuration from skrl.agents.torch.rpo import RPO_RNN as RPO, RPO_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = RPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = RPO(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-rpo-rnn] # ============================================================================= # [torch-start-sac] # import the agent and its default configuration from skrl.agents.torch.sac import SAC, SAC_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["critic_1"] = ... # only required during training models["critic_2"] = ... # only required during training models["target_critic_1"] = ... # only required during training models["target_critic_2"] = ... # only required during training # adjust some configuration if necessary cfg_agent = SAC_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = SAC(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-sac] # [jax-start-sac] # import the agent and its default configuration from skrl.agents.jax.sac import SAC, SAC_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["critic_1"] = ... # only required during training models["critic_2"] = ... # only required during training models["target_critic_1"] = ... # only required during training models["target_critic_2"] = ... 
# only required during training # adjust some configuration if necessary cfg_agent = SAC_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = SAC(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [jax-end-sac] # [torch-start-sac-rnn] # import the agent and its default configuration from skrl.agents.torch.sac import SAC_RNN as SAC, SAC_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["critic_1"] = ... # only required during training models["critic_2"] = ... # only required during training models["target_critic_1"] = ... # only required during training models["target_critic_2"] = ... # only required during training # adjust some configuration if necessary cfg_agent = SAC_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = SAC(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-sac-rnn] # ============================================================================= # [torch-start-sarsa] # import the agent and its default configuration from skrl.agents.torch.sarsa import SARSA, SARSA_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... # adjust some configuration if necessary cfg_agent = SARSA_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env>) agent = SARSA(models=models, memory=None, cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-sarsa] # ============================================================================= # [torch-start-td3] # import the agent and its default configuration from skrl.agents.torch.td3 import TD3, TD3_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["target_policy"] = ... # only required during training models["critic_1"] = ... # only required during training models["critic_2"] = ... # only required during training models["target_critic_1"] = ... # only required during training models["target_critic_2"] = ... # only required during training # adjust some configuration if necessary cfg_agent = TD3_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = TD3(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-td3] # [jax-start-td3] # import the agent and its default configuration from skrl.agents.jax.td3 import TD3, TD3_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["target_policy"] = ... # only required during training models["critic_1"] = ... # only required during training models["critic_2"] = ... # only required during training models["target_critic_1"] = ... # only required during training models["target_critic_2"] = ... # only required during training # adjust some configuration if necessary cfg_agent = TD3_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... 
# instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = TD3(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [jax-end-td3] # [torch-start-td3-rnn] # import the agent and its default configuration from skrl.agents.torch.td3 import TD3_RNN as TD3, TD3_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["target_policy"] = ... # only required during training models["critic_1"] = ... # only required during training models["critic_2"] = ... # only required during training models["target_critic_1"] = ... # only required during training models["target_critic_2"] = ... # only required during training # adjust some configuration if necessary cfg_agent = TD3_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = TD3(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-td3-rnn] # ============================================================================= # [torch-start-trpo] # import the agent and its default configuration from skrl.agents.torch.trpo import TRPO, TRPO_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = TRPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = TRPO(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-trpo] # [torch-start-trpo-rnn] # import the agent and its default configuration from skrl.agents.torch.trpo import TRPO_RNN as TRPO, TRPO_DEFAULT_CONFIG # instantiate the agent's models models = {} models["policy"] = ... models["value"] = ... # only required during training # adjust some configuration if necessary cfg_agent = TRPO_DEFAULT_CONFIG.copy() cfg_agent["<KEY>"] = ... # instantiate the agent # (assuming a defined environment <env> and memory <memory>) agent = TRPO(models=models, memory=memory, # only required during training cfg=cfg_agent, observation_space=env.observation_space, action_space=env.action_space, device=env.device) # [torch-end-trpo-rnn]
25,726
Python
30.840346
113
0.645378
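# --- added sketch (not part of the original snippets): the agent snippets above stop at
# instantiation. This is a minimal training sketch, assuming skrl's SequentialTrainer,
# a wrapped environment <env> and an <agent> created as in any snippet above; the
# timestep count is an arbitrary placeholder.
from skrl.trainers.torch import SequentialTrainer

trainer = SequentialTrainer(cfg={"timesteps": 100000, "headless": True}, env=env, agents=agent)
trainer.train()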
Toni-SM/skrl/docs/source/snippets/memories.py
# [start-base-class-torch] from typing import Union, Tuple, List import torch from skrl.memories.torch import Memory class CustomMemory(Memory): def __init__(self, memory_size: int, num_envs: int = 1, device: Union[str, torch.device] = "cuda:0") -> None: """Custom memory :param memory_size: Maximum number of elements in the first dimension of each internal storage :type memory_size: int :param num_envs: Number of parallel environments (default: 1) :type num_envs: int, optional :param device: Device on which a torch tensor is or will be allocated (default: "cuda:0") :type device: str or torch.device, optional """ super().__init__(memory_size, num_envs, device) def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1) -> List[List[torch.Tensor]]: """Sample a batch from memory :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param batch_size: Number of element to sample :type batch_size: int :param mini_batches: Number of mini-batches to sample (default: 1) :type mini_batches: int, optional :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (batch size, data size) :rtype: list of torch.Tensor list """ # ================================ # - sample a batch from memory. # It is possible to generate only the sampling indexes and call self.sample_by_index(...) # ================================ # [end-base-class-torch] # [start-base-class-jax] from typing import Optional, Union, Tuple, List import jaxlib import jax.numpy as jnp from skrl.memories.jax import Memory class CustomMemory(Memory): def __init__(self, memory_size: int, num_envs: int = 1, device: Optional[jaxlib.xla_extension.Device] = None) -> None: """Custom memory :param memory_size: Maximum number of elements in the first dimension of each internal storage :type memory_size: int :param num_envs: Number of parallel environments (default: 1) :type num_envs: int, optional :param device: Device on which an array is or will be allocated (default: None) :type device: jaxlib.xla_extension.Device, optional """ super().__init__(memory_size, num_envs, device) def sample(self, names: Tuple[str], batch_size: int, mini_batches: int = 1) -> List[List[jnp.ndarray]]: """Sample a batch from memory :param names: Tensors names from which to obtain the samples :type names: tuple or list of strings :param batch_size: Number of element to sample :type batch_size: int :param mini_batches: Number of mini-batches to sample (default: 1) :type mini_batches: int, optional :return: Sampled data from tensors sorted according to their position in the list of names. The sampled tensors will have the following shape: (batch size, data size) :rtype: list of jnp.ndarray list """ # ================================ # - sample a batch from memory. # It is possible to generate only the sampling indexes and call self.sample_by_index(...) 
# ================================ # [end-base-class-jax] # ============================================================================= # [start-random-torch] # import the memory class from skrl.memories.torch import RandomMemory # instantiate the memory (assumes there is a wrapped environment: env) memory = RandomMemory(memory_size=1000, num_envs=env.num_envs, device=env.device) # [end-random-torch] # [start-random-jax] # import the memory class from skrl.memories.jax import RandomMemory # instantiate the memory (assumes there is a wrapped environment: env) memory = RandomMemory(memory_size=1000, num_envs=env.num_envs, device=env.device) # [end-random-jax]
4,112
Python
38.171428
113
0.625486
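# --- added sketch (not part of the original snippets): a rough illustration of the approach
# suggested in the CustomMemory comment above, i.e. generating only the sampling indexes and
# delegating to self.sample_by_index(...). The uniform-random index generation, the index
# range (memory_size * num_envs) and the sample_by_index argument names are assumptions.
import torch

from skrl.memories.torch import Memory


class UniformSampleMemory(Memory):
    def __init__(self, memory_size, num_envs=1, device="cuda:0"):
        super().__init__(memory_size, num_envs, device)

    def sample(self, names, batch_size, mini_batches=1):
        # draw random flat indexes over the whole storage and let the base class gather the tensors
        indexes = torch.randint(0, self.memory_size * self.num_envs, (batch_size,))
        return self.sample_by_index(names=names, indexes=indexes, mini_batches=mini_batches)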
Toni-SM/skrl/docs/source/snippets/data.py
# [start-tensorboard-configuration]
DEFAULT_CONFIG = {
    # ...
    "experiment": {
        "directory": "",                    # experiment's parent directory
        "experiment_name": "",              # experiment name
        "write_interval": 250,              # TensorBoard writing interval (timesteps)
        "checkpoint_interval": 1000,        # interval for checkpoints (timesteps)
        "store_separately": False,          # whether to store checkpoints separately
        "wandb": False,                     # whether to use Weights & Biases
        "wandb_kwargs": {}                  # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
    }
}
# [end-tensorboard-configuration]


# [start-wandb-configuration]
DEFAULT_CONFIG = {
    # ...
    "experiment": {
        "directory": "",                    # experiment's parent directory
        "experiment_name": "",              # experiment name
        "write_interval": 250,              # TensorBoard writing interval (timesteps)
        "checkpoint_interval": 1000,        # interval for checkpoints (timesteps)
        "store_separately": False,          # whether to store checkpoints separately
        "wandb": False,                     # whether to use Weights & Biases
        "wandb_kwargs": {}                  # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
    }
}
# [end-wandb-configuration]


# [start-checkpoint-configuration]
DEFAULT_CONFIG = {
    # ...
    "experiment": {
        "directory": "",                    # experiment's parent directory
        "experiment_name": "",              # experiment name
        "write_interval": 250,              # TensorBoard writing interval (timesteps)
        "checkpoint_interval": 1000,        # interval for checkpoints (timesteps)
        "store_separately": False,          # whether to store checkpoints separately
        "wandb": False,                     # whether to use Weights & Biases
        "wandb_kwargs": {}                  # wandb kwargs (see https://docs.wandb.ai/ref/python/init)
    }
}
# [end-checkpoint-configuration]


# [start-checkpoint-load-agent-torch]
from skrl.agents.torch.ppo import PPO

# Instantiate the agent
agent = PPO(models=models,  # models dict
            memory=memory,  # memory instance, or None if not required
            cfg=agent_cfg,  # configuration dict (preprocessors, learning rate schedulers, etc.)
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device)

# Load the checkpoint
agent.load("./runs/22-09-29_22-48-49-816281_DDPG/checkpoints/agent_1200.pt")
# [end-checkpoint-load-agent-torch]


# [start-checkpoint-load-agent-jax]
from skrl.agents.jax.ppo import PPO

# Instantiate the agent
agent = PPO(models=models,  # models dict
            memory=memory,  # memory instance, or None if not required
            cfg=agent_cfg,  # configuration dict (preprocessors, learning rate schedulers, etc.)
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device)

# Load the checkpoint
agent.load("./runs/22-09-29_22-48-49-816281_DDPG/checkpoints/agent_1200.pickle")
# [end-checkpoint-load-agent-jax]


# [start-checkpoint-load-model-torch]
from skrl.models.torch import Model, DeterministicMixin

# Define the model
class Policy(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
                                 nn.ReLU(),
                                 nn.Linear(32, 32),
                                 nn.ReLU(),
                                 nn.Linear(32, self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}

# Instantiate the model
policy = Policy(env.observation_space, env.action_space, env.device, clip_actions=True)

# Load the checkpoint
policy.load("./runs/22-09-29_22-48-49-816281_DDPG/checkpoints/2500_policy.pt")
# [end-checkpoint-load-model-torch]


# [start-checkpoint-load-model-jax]
from skrl.models.jax import Model, DeterministicMixin

# Define the model
class Policy(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device=None, clip_actions=False, **kwargs):
        Model.__init__(self, observation_space, action_space, device, **kwargs)
        DeterministicMixin.__init__(self, clip_actions)

    @nn.compact  # marks the given module method allowing inlined submodules
    def __call__(self, inputs, role):
        x = nn.Dense(32)(inputs["states"])
        x = nn.relu(x)
        x = nn.Dense(32)(x)
        x = nn.relu(x)
        x = nn.Dense(self.num_actions)(x)
        return x, {}

# Instantiate the model
policy = Policy(env.observation_space, env.action_space, env.device, clip_actions=True)

# Load the checkpoint
policy.load("./runs/22-09-29_22-48-49-816281_DDPG/checkpoints/2500_policy.pickle")
# [end-checkpoint-load-model-jax]


# [start-checkpoint-load-huggingface-torch]
from skrl.agents.torch.ppo import PPO
from skrl.utils.huggingface import download_model_from_huggingface

# Instantiate the agent
agent = PPO(models=models,  # models dict
            memory=memory,  # memory instance, or None if not required
            cfg=agent_cfg,  # configuration dict (preprocessors, learning rate schedulers, etc.)
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device)

# Load the checkpoint from Hugging Face Hub
path = download_model_from_huggingface("skrl/OmniIsaacGymEnvs-Cartpole-PPO", filename="agent.pt")
agent.load(path)
# [end-checkpoint-load-huggingface-torch]


# [start-checkpoint-load-huggingface-jax]
from skrl.agents.jax.ppo import PPO
from skrl.utils.huggingface import download_model_from_huggingface

# Instantiate the agent
agent = PPO(models=models,  # models dict
            memory=memory,  # memory instance, or None if not required
            cfg=agent_cfg,  # configuration dict (preprocessors, learning rate schedulers, etc.)
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device)

# Load the checkpoint from Hugging Face Hub
path = download_model_from_huggingface("skrl/OmniIsaacGymEnvs-Cartpole-PPO", filename="agent.pickle")
agent.load(path)
# [end-checkpoint-load-huggingface-jax]


# [start-checkpoint-migrate-agent-torch]
from skrl.agents.torch.ppo import PPO

# Instantiate the agent
agent = PPO(models=models,  # models dict
            memory=memory,  # memory instance, or None if not required
            cfg=agent_cfg,  # configuration dict (preprocessors, learning rate schedulers, etc.)
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device)

# Migrate a rl_games checkpoint
agent.migrate(path="./runs/Cartpole/nn/Cartpole.pth")
# [end-checkpoint-migrate-agent-torch]


# [start-checkpoint-migrate-model-torch]
from skrl.models.torch import Model, DeterministicMixin

# Define the model
class Policy(DeterministicMixin, Model):
    def __init__(self, observation_space, action_space, device, clip_actions=False):
        Model.__init__(self, observation_space, action_space, device)
        DeterministicMixin.__init__(self, clip_actions)

        self.net = nn.Sequential(nn.Linear(self.num_observations, 32),
                                 nn.ReLU(),
                                 nn.Linear(32, 32),
                                 nn.ReLU(),
                                 nn.Linear(32, self.num_actions))

    def compute(self, inputs, role):
        return self.net(inputs["states"]), {}

# Instantiate the model
policy = Policy(env.observation_space, env.action_space, env.device, clip_actions=True)

# Migrate a rl_games checkpoint (only the model)
policy.migrate(path="./runs/Cartpole/nn/Cartpole.pth")

# or migrate a stable-baselines3 checkpoint
policy.migrate(path="./ddpg_pendulum.zip")

# or migrate a checkpoint of any other library
state_dict = torch.load("./external_model.pt")
policy.migrate(state_dict=state_dict)
# [end-checkpoint-migrate-model-torch]


# [start-export-memory-torch]
from skrl.memories.torch import RandomMemory

# Instantiate a memory and enable its export
memory = RandomMemory(memory_size=16,
                      num_envs=env.num_envs,
                      device=device,
                      export=True,
                      export_format="pt",
                      export_directory="./memories")
# [end-export-memory-torch]


# [start-export-memory-jax]
from skrl.memories.jax import RandomMemory

# Instantiate a memory and enable its export
memory = RandomMemory(memory_size=16,
                      num_envs=env.num_envs,
                      device=device,
                      export=True,
                      export_format="np",
                      export_directory="./memories")
# [end-export-memory-jax]
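
# Illustrative sketch (not one of the documented snippets above): how a policy loaded
# from a checkpoint, as in the "checkpoint-load-model" examples, could be rolled out
# for a quick evaluation. It assumes a wrapped environment `env` and the `policy`
# model loaded above; the act() call follows the model API (an inputs dict with
# "states", returning a tuple whose first element is the actions).
import torch

states, infos = env.reset()
for _ in range(1000):
    # deterministic action from the loaded policy
    with torch.no_grad():
        actions = policy.act({"states": states})[0]

    # step and render the environment
    next_states, rewards, terminated, truncated, infos = env.step(actions)
    env.render()

    # reset the environment when episodes finish
    if terminated.any() or truncated.any():
        states, infos = env.reset()
    else:
        states = next_states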
9,058
Python
35.091633
101
0.643851
Toni-SM/skrl/docs/source/snippets/isaacgym_utils.py
import math

from isaacgym import gymapi

from skrl.utils import isaacgym_utils


# create a web viewer instance
web_viewer = isaacgym_utils.WebViewer()

# configure and create simulation
sim_params = gymapi.SimParams()
sim_params.up_axis = gymapi.UP_AXIS_Z
sim_params.gravity = gymapi.Vec3(0.0, 0.0, -9.8)
sim_params.physx.solver_type = 1
sim_params.physx.num_position_iterations = 4
sim_params.physx.num_velocity_iterations = 1
sim_params.physx.use_gpu = True
sim_params.use_gpu_pipeline = True

gym = gymapi.acquire_gym()
sim = gym.create_sim(compute_device=0, graphics_device=0, type=gymapi.SIM_PHYSX, params=sim_params)

# setup num_envs and env's grid
num_envs = 1
spacing = 2.0
env_lower = gymapi.Vec3(-spacing, -spacing, 0.0)
env_upper = gymapi.Vec3(spacing, 0.0, spacing)

# add ground plane
plane_params = gymapi.PlaneParams()
plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)
gym.add_ground(sim, plane_params)

envs = []
cameras = []
for i in range(num_envs):
    # create env
    env = gym.create_env(sim, env_lower, env_upper, int(math.sqrt(num_envs)))

    # add sphere
    pose = gymapi.Transform()
    pose.p, pose.r = gymapi.Vec3(0.0, 0.0, 1.0), gymapi.Quat(0.0, 0.0, 0.0, 1.0)
    gym.create_actor(env, gym.create_sphere(sim, 0.2, None), pose, "sphere", i, 0)

    # add camera
    cam_props = gymapi.CameraProperties()
    cam_props.width, cam_props.height = 300, 300
    cam_handle = gym.create_camera_sensor(env, cam_props)
    gym.set_camera_location(cam_handle, env, gymapi.Vec3(1, 1, 1), gymapi.Vec3(0, 0, 0))

    envs.append(env)
    cameras.append(cam_handle)

# setup web viewer
web_viewer.setup(gym, sim, envs, cameras)

gym.prepare_sim(sim)


for i in range(100000):
    gym.simulate(sim)

    # render the scene
    web_viewer.render(fetch_results=True,
                      step_graphics=True,
                      render_all_camera_sensors=True,
                      wait_for_page_load=True)
1,927
Python
26.942029
99
0.677218
Toni-SM/skrl/docs/source/snippets/loaders.py
# [start-omniverse-isaac-gym-envs-parameters-torch] # import the environment loader from skrl.envs.loaders.torch import load_omniverse_isaacgym_env # load environment env = load_omniverse_isaacgym_env(task_name="Cartpole") # [end-omniverse-isaac-gym-envs-parameters-torch] # [start-omniverse-isaac-gym-envs-parameters-jax] # import the environment loader from skrl.envs.loaders.jax import load_omniverse_isaacgym_env # load environment env = load_omniverse_isaacgym_env(task_name="Cartpole") # [end-omniverse-isaac-gym-envs-parameters-jax] # [start-omniverse-isaac-gym-envs-cli-torch] # import the environment loader from skrl.envs.loaders.torch import load_omniverse_isaacgym_env # load environment env = load_omniverse_isaacgym_env() # [end-omniverse-isaac-gym-envs-cli-torch] # [start-omniverse-isaac-gym-envs-cli-jax] # import the environment loader from skrl.envs.loaders.jax import load_omniverse_isaacgym_env # load environment env = load_omniverse_isaacgym_env() # [end-omniverse-isaac-gym-envs-cli-jax] # [start-omniverse-isaac-gym-envs-multi-threaded-parameters-torch] import threading # import the environment loader from skrl.envs.loaders.torch import load_omniverse_isaacgym_env # load environment env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30) # ... # start training in a separate thread threading.Thread(target=trainer.train).start() # run the simulation in the main thread env.run() # [end-omniverse-isaac-gym-envs-multi-threaded-parameters-torch] # [start-omniverse-isaac-gym-envs-multi-threaded-parameters-jax] import threading # import the environment loader from skrl.envs.loaders.jax import load_omniverse_isaacgym_env # load environment env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30) # ... # start training in a separate thread threading.Thread(target=trainer.train).start() # run the simulation in the main thread env.run() # [end-omniverse-isaac-gym-envs-multi-threaded-parameters-jax] # [start-omniverse-isaac-gym-envs-multi-threaded-cli-torch] import threading # import the environment loader from skrl.envs.loaders.torch import load_omniverse_isaacgym_env # load environment env = load_omniverse_isaacgym_env(multi_threaded=True, timeout=30) # ... # start training in a separate thread threading.Thread(target=trainer.train).start() # run the simulation in the main thread env.run() # [end-omniverse-isaac-gym-envs-multi-threaded-cli-torch] # [start-omniverse-isaac-gym-envs-multi-threaded-cli-jax] import threading # import the environment loader from skrl.envs.loaders.jax import load_omniverse_isaacgym_env # load environment env = load_omniverse_isaacgym_env(multi_threaded=True, timeout=30) # ... 
# start training in a separate thread threading.Thread(target=trainer.train).start() # run the simulation in the main thread env.run() # [end-omniverse-isaac-gym-envs-multi-threaded-cli-jax] # ============================================================================= # [start-isaac-orbit-envs-parameters-torch] # import the environment loader from skrl.envs.loaders.torch import load_isaac_orbit_env # load environment env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0") # [end-isaac-orbit-envs-parameters-torch] # [start-isaac-orbit-envs-parameters-jax] # import the environment loader from skrl.envs.loaders.jax import load_isaac_orbit_env # load environment env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0") # [end-isaac-orbit-envs-parameters-jax] # [start-isaac-orbit-envs-cli-torch] # import the environment loader from skrl.envs.loaders.torch import load_isaac_orbit_env # load environment env = load_isaac_orbit_env() # [end-isaac-orbit-envs-cli-torch] # [start-isaac-orbit-envs-cli-jax] # import the environment loader from skrl.envs.loaders.jax import load_isaac_orbit_env # load environment env = load_isaac_orbit_env() # [end-isaac-orbit-envs-cli-jax] # ============================================================================= # [start-isaac-gym-envs-preview-4-api] import isaacgymenvs env = isaacgymenvs.make(seed=0, task="Cartpole", num_envs=2000, sim_device="cuda:0", rl_device="cuda:0", graphics_device_id=0, headless=False) # [end-isaac-gym-envs-preview-4-api] # [start-isaac-gym-envs-preview-4-parameters-torch] # import the environment loader from skrl.envs.loaders.torch import load_isaacgym_env_preview4 # load environment env = load_isaacgym_env_preview4(task_name="Cartpole") # [end-isaac-gym-envs-preview-4-parameters-torch] # [start-isaac-gym-envs-preview-4-parameters-jax] # import the environment loader from skrl.envs.loaders.jax import load_isaacgym_env_preview4 # load environment env = load_isaacgym_env_preview4(task_name="Cartpole") # [end-isaac-gym-envs-preview-4-parameters-jax] # [start-isaac-gym-envs-preview-4-cli-torch] # import the environment loader from skrl.envs.loaders.torch import load_isaacgym_env_preview4 # load environment env = load_isaacgym_env_preview4() # [end-isaac-gym-envs-preview-4-cli-torch] # [start-isaac-gym-envs-preview-4-cli-jax] # import the environment loader from skrl.envs.loaders.jax import load_isaacgym_env_preview4 # load environment env = load_isaacgym_env_preview4() # [end-isaac-gym-envs-preview-4-cli-jax] # [start-isaac-gym-envs-preview-3-parameters-torch] # import the environment loader from skrl.envs.loaders.torch import load_isaacgym_env_preview3 # load environment env = load_isaacgym_env_preview3(task_name="Cartpole") # [end-isaac-gym-envs-preview-3-parameters-torch] # [start-isaac-gym-envs-preview-3-parameters-jax] # import the environment loader from skrl.envs.loaders.jax import load_isaacgym_env_preview3 # load environment env = load_isaacgym_env_preview3(task_name="Cartpole") # [end-isaac-gym-envs-preview-3-parameters-jax] # [start-isaac-gym-envs-preview-3-cli-torch] # import the environment loader from skrl.envs.loaders.torch import load_isaacgym_env_preview3 # load environment env = load_isaacgym_env_preview3() # [end-isaac-gym-envs-preview-3-cli-torch] # [start-isaac-gym-envs-preview-3-cli-jax] # import the environment loader from skrl.envs.loaders.jax import load_isaacgym_env_preview3 # load environment env = load_isaacgym_env_preview3() # [end-isaac-gym-envs-preview-3-cli-jax] # 
[start-isaac-gym-envs-preview-2-parameters-torch] # import the environment loader from skrl.envs.loaders.torch import load_isaacgym_env_preview2 # load environment env = load_isaacgym_env_preview2(task_name="Cartpole") # [end-isaac-gym-envs-preview-2-parameters-torch] # [start-isaac-gym-envs-preview-2-parameters-jax] # import the environment loader from skrl.envs.loaders.jax import load_isaacgym_env_preview2 # load environment env = load_isaacgym_env_preview2(task_name="Cartpole") # [end-isaac-gym-envs-preview-2-parameters-jax] # [start-isaac-gym-envs-preview-2-cli-torch] # import the environment loader from skrl.envs.loaders.torch import load_isaacgym_env_preview2 # load environment env = load_isaacgym_env_preview2() # [end-isaac-gym-envs-preview-2-cli-torch] # [start-isaac-gym-envs-preview-2-cli-jax] # import the environment loader from skrl.envs.loaders.jax import load_isaacgym_env_preview2 # load environment env = load_isaacgym_env_preview2() # [end-isaac-gym-envs-preview-2-cli-jax]
7,458
Python
26.625926
88
0.739877
Toni-SM/skrl/docs/source/snippets/agent.py
# [start-agent-base-class-torch] from typing import Union, Tuple, Dict, Any, Optional import gym, gymnasium import copy import torch from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.agents.torch import Agent CUSTOM_DEFAULT_CONFIG = { # ... "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } class CUSTOM(Agent): def __init__(self, models: Dict[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Custom agent :param models: Models used by the agent :type models: dictionary of skrl.models.torch.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.torch.Memory, list of skrl.memory.torch.Memory or None :param observation_space: Observation/state space or shape (default: None) :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None) :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a torch tensor is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict """ _cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # ======================================================================= # - get and process models from `self.models` # - populate `self.checkpoint_modules` dictionary for storing checkpoints # - parse configurations from `self.cfg` # - setup optimizers and learning rate scheduler # - set up preprocessors # ======================================================================= def init(self, trainer_cfg: Optional[Dict[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # ================================================================= # - create tensors in memory if required # - # create temporary variables needed for storage and computation # ================================================================= def act(self, states: torch.Tensor, timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # ====================================== # - sample random actions if required or # sample and return agent's actions # ====================================== def record_transition(self, states: torch.Tensor, actions: torch.Tensor, rewards: torch.Tensor, next_states: torch.Tensor, terminated: torch.Tensor, truncated: torch.Tensor, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: torch.Tensor :param actions: Actions taken by the agent :type actions: torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: torch.Tensor :param next_states: Next observations/states of the environment :type next_states: torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: torch.Tensor :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # ======================================== # - record agent's specific data in memory # ======================================== def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # ===================================== # - call `self.update(...)` if required # ===================================== def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param 
timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # ===================================== # - call `self.update(...)` if required # ===================================== # call parent's method for checkpointing and TensorBoard writing super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # =================================================== # - implement algorithm's update step # - record tracking data using `self.track_data(...)` # =================================================== # [end-agent-base-class-torch] # [start-agent-base-class-jax] from typing import Union, Tuple, Dict, Any, Optional import gym, gymnasium import copy import jaxlib import jax.numpy as jnp from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam from skrl.agents.jax import Agent CUSTOM_DEFAULT_CONFIG = { # ... "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } class CUSTOM(Agent): def __init__(self, models: Dict[str, Model], memory: Optional[Union[Memory, Tuple[Memory]]] = None, observation_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, action_space: Optional[Union[int, Tuple[int], gym.Space, gymnasium.Space]] = None, device: Optional[Union[str, jaxlib.xla_extension.Device]] = None, cfg: Optional[dict] = None) -> None: """Custom agent :param models: Models used by the agent :type models: dictionary of skrl.models.jax.Model :param memory: Memory to storage the transitions. If it is a tuple, the first element will be used for training and for the rest only the environment transitions will be added :type memory: skrl.memory.jax.Memory, list of skrl.memory.jax.Memory or None :param observation_space: Observation/state space or shape (default: None) :type observation_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param action_space: Action space or shape (default: None) :type action_space: int, tuple or list of integers, gym.Space, gymnasium.Space or None, optional :param device: Device on which a jax array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"`` :type device: str or jaxlib.xla_extension.Device, optional :param cfg: Configuration dictionary :type cfg: dict """ _cfg = CUSTOM_DEFAULT_CONFIG _cfg.update(cfg if cfg is not None else {}) super().__init__(models=models, memory=memory, observation_space=observation_space, action_space=action_space, device=device, cfg=_cfg) # ======================================================================= # - get and process models from `self.models` # - populate `self.checkpoint_modules` dictionary for storing checkpoints # - parse configurations from `self.cfg` # - setup optimizers and learning rate scheduler # - set up preprocessors # ======================================================================= def init(self, trainer_cfg: Optional[Dict[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # ================================================================= # - create tensors in memory if required # - # create temporary variables needed for storage and computation # - set up models for just-in-time compilation with XLA # ================================================================= def act(self, states: jnp.ndarray, timestep: int, timesteps: int) -> jnp.ndarray: """Process the environment's states to make a decision (actions) using the main policy :param states: Environment's states :type states: jnp.ndarray :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: jnp.ndarray """ # ====================================== # - sample random actions if required or # sample and return agent's actions # ====================================== def record_transition(self, states: jnp.ndarray, actions: jnp.ndarray, rewards: jnp.ndarray, next_states: jnp.ndarray, terminated: jnp.ndarray, truncated: jnp.ndarray, infos: Any, timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: jnp.ndarray :param actions: Actions taken by the agent :type actions: jnp.ndarray :param rewards: Instant rewards achieved by the current actions :type rewards: jnp.ndarray :param next_states: Next observations/states of the environment :type next_states: jnp.ndarray :param terminated: Signals to indicate that episodes have terminated :type terminated: jnp.ndarray :param truncated: Signals to indicate that episodes have been truncated :type truncated: jnp.ndarray :param infos: Additional information about the environment :type infos: Any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # ======================================== # - record agent's specific data in memory # ======================================== def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # ===================================== # - call `self.update(...)` if required # ===================================== def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the 
interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # ===================================== # - call `self.update(...)` if required # ===================================== # call parent's method for checkpointing and TensorBoard writing super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # =================================================== # - implement algorithm's update step # - record tracking data using `self.track_data(...)` # =================================================== # [end-agent-base-class-jax]
15,562
Python
42.472067
123
0.543182
Toni-SM/skrl/docs/source/snippets/tabular_model.py
# [start-definition-torch]
class TabularModel(TabularMixin, Model):
    def __init__(self, observation_space, action_space, device=None, num_envs=1):
        Model.__init__(self, observation_space, action_space, device)
        TabularMixin.__init__(self, num_envs)
# [end-definition-torch]

# =============================================================================

# [start-epsilon-greedy-torch]
import torch

from skrl.models.torch import Model, TabularMixin


# define the model
class EpsilonGreedyPolicy(TabularMixin, Model):
    def __init__(self, observation_space, action_space, device, num_envs=1, epsilon=0.1):
        Model.__init__(self, observation_space, action_space, device)
        TabularMixin.__init__(self, num_envs)

        self.epsilon = epsilon
        self.q_table = torch.ones((num_envs, self.num_observations, self.num_actions), dtype=torch.float32)

    def compute(self, inputs, role):
        states = inputs["states"]
        actions = torch.argmax(self.q_table[torch.arange(self.num_envs).view(-1, 1), states],
                               dim=-1, keepdim=True).view(-1, 1)

        # choose random actions with probability epsilon
        indexes = (torch.rand(states.shape[0], device=self.device) < self.epsilon).nonzero().view(-1)
        if indexes.numel():
            actions[indexes] = torch.randint(self.num_actions, (indexes.numel(), 1), device=self.device)
        return actions, {}


# instantiate the model (assumes there is a wrapped environment: env)
policy = EpsilonGreedyPolicy(observation_space=env.observation_space,
                             action_space=env.action_space,
                             device=env.device,
                             num_envs=env.num_envs,
                             epsilon=0.15)
# [end-epsilon-greedy-torch]
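
# Illustrative sketch (not one of the documented snippets above): a generic tabular
# Q-learning update applied to the epsilon-greedy policy's q_table. The tensors
# `states`, `actions`, `rewards` and `next_states` are hypothetical integer/float
# tensors of shape (num_envs, 1) obtained from an environment step, and `lr` and
# `gamma` are made-up hyperparameters; the library's tabular agents perform their
# own update internally.
lr, gamma = 0.5, 0.99

env_ids = torch.arange(policy.num_envs)
s, a = states.view(-1), actions.view(-1)
s_next, r = next_states.view(-1), rewards.view(-1)

# TD target: r + gamma * max_a Q(s', a)
next_q = policy.q_table[env_ids, s_next].max(dim=-1)[0]
td_target = r + gamma * next_q

# tabular Q-learning update of Q(s, a)
policy.q_table[env_ids, s, a] += lr * (td_target - policy.q_table[env_ids, s, a])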
1,744
Python
39.581394
107
0.601491
Toni-SM/skrl/docs/source/snippets/multi_agent.py
# [start-multi-agent-base-class-torch] from typing import Union, Dict, Any, Optional, Sequence, Mapping import gym, gymnasium import copy import torch from skrl.memories.torch import Memory from skrl.models.torch import Model from skrl.multi_agents.torch import MultiAgent CUSTOM_DEFAULT_CONFIG = { # ... "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } class CUSTOM(MultiAgent): def __init__(self, possible_agents: Sequence[str], models: Dict[str, Model], memories: Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, device: Optional[Union[str, torch.device]] = None, cfg: Optional[dict] = None) -> None: """Custom multi-agent :param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.torch.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.torch.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a torch tensor is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"`` :type device: str or torch.device, optional :param cfg: Configuration dictionary :type cfg: dict """ _cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(possible_agents=possible_agents, models=models, memories=memories, observation_spaces=observation_spaces, action_spaces=action_spaces, device=device, cfg=_cfg) # ======================================================================= # - get and process models from `self.models` # - populate `self.checkpoint_modules` dictionary for storing checkpoints # - parse configurations from `self.cfg` # - setup optimizers and learning rate scheduler # - set up preprocessors # ======================================================================= def init(self, trainer_cfg: Optional[Dict[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # ================================================================= # - create tensors in memory if required # - # create temporary variables needed for storage and computation # ================================================================= def act(self, states: Mapping[str, torch.Tensor], timestep: int, timesteps: int) -> torch.Tensor: """Process the environment's states to make a decision (actions) using the main policies :param states: Environment's states :type states: dictionary of torch.Tensor :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: torch.Tensor """ # ====================================== # - sample random actions if required or # sample and return agent's actions # ====================================== def record_transition(self, states: Mapping[str, torch.Tensor], actions: Mapping[str, torch.Tensor], rewards: Mapping[str, torch.Tensor], next_states: Mapping[str, torch.Tensor], terminated: Mapping[str, torch.Tensor], truncated: Mapping[str, torch.Tensor], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: dictionary of torch.Tensor :param actions: Actions taken by the agent :type actions: dictionary of torch.Tensor :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of torch.Tensor :param next_states: Next observations/states of the environment :type next_states: dictionary of torch.Tensor :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of torch.Tensor :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of torch.Tensor :param infos: Additional information about the environment :type infos: dictionary of any supported type :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # ======================================== # - record agent's specific data in memory # ======================================== def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # 
===================================== # - call `self.update(...)` if required # ===================================== def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # ===================================== # - call `self.update(...)` if required # ===================================== # call parent's method for checkpointing and TensorBoard writing super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # =================================================== # - implement algorithm's update step # - record tracking data using `self.track_data(...)` # =================================================== # [end-multi-agent-base-class-torch] # [start-multi-agent-base-class-jax] from typing import Union, Dict, Any, Optional, Sequence, Mapping import gym, gymnasium import copy import jaxlib import jax.numpy as jnp from skrl.memories.jax import Memory from skrl.models.jax import Model from skrl.resources.optimizers.jax import Adam from skrl.multi_agents.jax import MultiAgent CUSTOM_DEFAULT_CONFIG = { # ... "experiment": { "directory": "", # experiment's parent directory "experiment_name": "", # experiment name "write_interval": 250, # TensorBoard writing interval (timesteps) "checkpoint_interval": 1000, # interval for checkpoints (timesteps) "store_separately": False, # whether to store checkpoints separately "wandb": False, # whether to use Weights & Biases "wandb_kwargs": {} # wandb kwargs (see https://docs.wandb.ai/ref/python/init) } } class CUSTOM(MultiAgent): def __init__(self, possible_agents: Sequence[str], models: Dict[str, Model], memories: Optional[Mapping[str, Memory]] = None, observation_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, action_spaces: Optional[Union[Mapping[str, int], Mapping[str, gym.Space], Mapping[str, gymnasium.Space]]] = None, device: Optional[Union[str, jaxlib.xla_extension.Device]] = None, cfg: Optional[dict] = None) -> None: """Custom multi-agent :param possible_agents: Name of all possible agents the environment could generate :type possible_agents: list of str :param models: Models used by the agents. External keys are environment agents' names. Internal keys are the models required by the algorithm :type models: nested dictionary of skrl.models.torch.Model :param memories: Memories to storage the transitions. :type memories: dictionary of skrl.memory.torch.Memory, optional :param observation_spaces: Observation/state spaces or shapes (default: ``None``) :type observation_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param action_spaces: Action spaces or shapes (default: ``None``) :type action_spaces: dictionary of int, sequence of int, gym.Space or gymnasium.Space, optional :param device: Device on which a jax array is or will be allocated (default: ``None``). 
If None, the device will be either ``"cuda:0"`` if available or ``"cpu"`` :type device: str or jaxlib.xla_extension.Device, optional :param cfg: Configuration dictionary :type cfg: dict """ _cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) super().__init__(possible_agents=possible_agents, models=models, memories=memories, observation_spaces=observation_spaces, action_spaces=action_spaces, device=device, cfg=_cfg) # ======================================================================= # - get and process models from `self.models` # - populate `self.checkpoint_modules` dictionary for storing checkpoints # - parse configurations from `self.cfg` # - setup optimizers and learning rate scheduler # - set up preprocessors # ======================================================================= def init(self, trainer_cfg: Optional[Dict[str, Any]] = None) -> None: """Initialize the agent """ super().init(trainer_cfg=trainer_cfg) self.set_mode("eval") # ================================================================= # - create tensors in memory if required # - # create temporary variables needed for storage and computation # ================================================================= def act(self, states: Mapping[str, jnp.ndarray], timestep: int, timesteps: int) -> jnp.ndarray: """Process the environment's states to make a decision (actions) using the main policies :param states: Environment's states :type states: dictionary of jnp.ndarray :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int :return: Actions :rtype: jnp.ndarray """ # ====================================== # - sample random actions if required or # sample and return agent's actions # ====================================== def record_transition(self, states: Mapping[str, jnp.ndarray], actions: Mapping[str, jnp.ndarray], rewards: Mapping[str, jnp.ndarray], next_states: Mapping[str, jnp.ndarray], terminated: Mapping[str, jnp.ndarray], truncated: Mapping[str, jnp.ndarray], infos: Mapping[str, Any], timestep: int, timesteps: int) -> None: """Record an environment transition in memory :param states: Observations/states of the environment used to make the decision :type states: dictionary of jnp.ndarray :param actions: Actions taken by the agent :type actions: dictionary of jnp.ndarray :param rewards: Instant rewards achieved by the current actions :type rewards: dictionary of jnp.ndarray :param next_states: Next observations/states of the environment :type next_states: dictionary of jnp.ndarray :param terminated: Signals to indicate that episodes have terminated :type terminated: dictionary of jnp.ndarray :param truncated: Signals to indicate that episodes have been truncated :type truncated: dictionary of jnp.ndarray :param infos: Additional information about the environment :type infos: dictionary of any type supported by the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ super().record_transition(states, actions, rewards, next_states, terminated, truncated, infos, timestep, timesteps) # ======================================== # - record agent's specific data in memory # ======================================== def pre_interaction(self, timestep: int, timesteps: int) -> None: """Callback called before the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int 
""" # ===================================== # - call `self.update(...)` if required # ===================================== def post_interaction(self, timestep: int, timesteps: int) -> None: """Callback called after the interaction with the environment :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # ===================================== # - call `self.update(...)` if required # ===================================== # call parent's method for checkpointing and TensorBoard writing super().post_interaction(timestep, timesteps) def _update(self, timestep: int, timesteps: int) -> None: """Algorithm's main update step :param timestep: Current timestep :type timestep: int :param timesteps: Number of timesteps :type timesteps: int """ # =================================================== # - implement algorithm's update step # - record tracking data using `self.track_data(...)` # =================================================== # [end-multi-agent-base-class-jax]
16,574
Python
44.661157
135
0.556715
Toni-SM/skrl/docs/source/snippets/wrapping.py
# [pytorch-start-omniverse-isaacgym] # import the environment wrapper and loader from skrl.envs.wrappers.torch import wrap_env from skrl.envs.loaders.torch import load_omniverse_isaacgym_env # load the environment env = load_omniverse_isaacgym_env(task_name="Cartpole") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="omniverse-isaacgym")' # [pytorch-end-omniverse-isaacgym] # [jax-start-omniverse-isaacgym] # import the environment wrapper and loader from skrl.envs.wrappers.jax import wrap_env from skrl.envs.loaders.jax import load_omniverse_isaacgym_env # load the environment env = load_omniverse_isaacgym_env(task_name="Cartpole") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="omniverse-isaacgym")' # [jax-end-omniverse-isaacgym] # [pytorch-start-omniverse-isaacgym-mt] # import the environment wrapper and loader from skrl.envs.wrappers.torch import wrap_env from skrl.envs.loaders.torch import load_omniverse_isaacgym_env # load the multi-threaded environment env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30) # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="omniverse-isaacgym")' # [pytorch-end-omniverse-isaacgym-mt] # [jax-start-omniverse-isaacgym-mt] # import the environment wrapper and loader from skrl.envs.wrappers.jax import wrap_env from skrl.envs.loaders.jax import load_omniverse_isaacgym_env # load the multi-threaded environment env = load_omniverse_isaacgym_env(task_name="Cartpole", multi_threaded=True, timeout=30) # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="omniverse-isaacgym")' # [jax-end-omniverse-isaacgym-mt] # [pytorch-start-isaac-orbit] # import the environment wrapper and loader from skrl.envs.wrappers.torch import wrap_env from skrl.envs.loaders.torch import load_isaac_orbit_env # load the environment env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaac-orbit")' # [pytorch-end-isaac-orbit] # [jax-start-isaac-orbit] # import the environment wrapper and loader from skrl.envs.wrappers.jax import wrap_env from skrl.envs.loaders.jax import load_isaac_orbit_env # load the environment env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaac-orbit")' # [jax-end-isaac-orbit] # [pytorch-start-isaacgym-preview4-make] import isaacgymenvs # import the environment wrapper from skrl.envs.wrappers.torch import wrap_env # create/load the environment using the easy-to-use API from NVIDIA env = isaacgymenvs.make(seed=0, task="Cartpole", num_envs=512, sim_device="cuda:0", rl_device="cuda:0", graphics_device_id=0, headless=False) # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview4")' # [pytorch-end-isaacgym-preview4-make] # [jax-start-isaacgym-preview4-make] import isaacgymenvs # import the environment wrapper from skrl.envs.wrappers.jax import wrap_env # create/load the environment using the easy-to-use API from NVIDIA env = isaacgymenvs.make(seed=0, task="Cartpole", num_envs=512, sim_device="cuda:0", rl_device="cuda:0", graphics_device_id=0, headless=False) # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview4")' # [jax-end-isaacgym-preview4-make] # [pytorch-start-isaacgym-preview4] # import the environment wrapper and loader from skrl.envs.wrappers.torch import wrap_env from 
skrl.envs.loaders.torch import load_isaacgym_env_preview4 # load the environment env = load_isaacgym_env_preview4(task_name="Cartpole") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview4")' # [pytorch-end-isaacgym-preview4] # [jax-start-isaacgym-preview4] # import the environment wrapper and loader from skrl.envs.wrappers.jax import wrap_env from skrl.envs.loaders.jax import load_isaacgym_env_preview4 # load the environment env = load_isaacgym_env_preview4(task_name="Cartpole") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview4")' # [jax-end-isaacgym-preview4] # [pytorch-start-isaacgym-preview3] # import the environment wrapper and loader from skrl.envs.wrappers.torch import wrap_env from skrl.envs.loaders.torch import load_isaacgym_env_preview3 # load the environment env = load_isaacgym_env_preview3(task_name="Cartpole") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview3")' # [pytorch-end-isaacgym-preview3] # [jax-start-isaacgym-preview3] # import the environment wrapper and loader from skrl.envs.wrappers.jax import wrap_env from skrl.envs.loaders.jax import load_isaacgym_env_preview3 # load the environment env = load_isaacgym_env_preview3(task_name="Cartpole") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview3")' # [jax-end-isaacgym-preview3] # [pytorch-start-isaacgym-preview2] # import the environment wrapper and loader from skrl.envs.wrappers.torch import wrap_env from skrl.envs.loaders.torch import load_isaacgym_env_preview2 # load the environment env = load_isaacgym_env_preview2(task_name="Cartpole") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview2")' # [pytorch-end-isaacgym-preview2] # [jax-start-isaacgym-preview2] # import the environment wrapper and loader from skrl.envs.wrappers.jax import wrap_env from skrl.envs.loaders.jax import load_isaacgym_env_preview2 # load the environment env = load_isaacgym_env_preview2(task_name="Cartpole") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="isaacgym-preview2")' # [jax-end-isaacgym-preview2] # [pytorch-start-gym] # import the environment wrapper and gym from skrl.envs.wrappers.torch import wrap_env import gym # load the environment env = gym.make('Pendulum-v1') # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gym")' # [pytorch-end-gym] # [jax-start-gym] # import the environment wrapper and gym from skrl.envs.wrappers.jax import wrap_env import gym # load the environment env = gym.make('Pendulum-v1') # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gym")' # [jax-end-gym] # [pytorch-start-gym-vectorized] # import the environment wrapper and gym from skrl.envs.wrappers.torch import wrap_env import gym # load a vectorized environment env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False) # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gym")' # [pytorch-end-gym-vectorized] # [jax-start-gym-vectorized] # import the environment wrapper and gym from skrl.envs.wrappers.jax import wrap_env import gym # load a vectorized environment env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False) # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gym")' # [jax-end-gym-vectorized] # [pytorch-start-gymnasium] # import the environment wrapper and gymnasium from 
skrl.envs.wrappers.torch import wrap_env import gymnasium as gym # load the environment env = gym.make('Pendulum-v1') # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")' # [pytorch-end-gymnasium] # [jax-start-gymnasium] # import the environment wrapper and gymnasium from skrl.envs.wrappers.jax import wrap_env import gymnasium as gym # load the environment env = gym.make('Pendulum-v1') # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")' # [jax-end-gymnasium] # [pytorch-start-gymnasium-vectorized] # import the environment wrapper and gymnasium from skrl.envs.wrappers.torch import wrap_env import gymnasium as gym # load a vectorized environment env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False) # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")' # [pytorch-end-gymnasium-vectorized] # [jax-start-gymnasium-vectorized] # import the environment wrapper and gymnasium from skrl.envs.wrappers.jax import wrap_env import gymnasium as gym # load a vectorized environment env = gym.vector.make("Pendulum-v1", num_envs=10, asynchronous=False) # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")' # [jax-end-gymnasium-vectorized] # [pytorch-start-shimmy] # import the environment wrapper and gymnasium from skrl.envs.wrappers.torch import wrap_env import gymnasium as gym # load the environment (API conversion) env = gym.make("ALE/Pong-v5") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")' # [pytorch-end-shimmy] # [jax-start-shimmy] # import the environment wrapper and gymnasium from skrl.envs.wrappers.jax import wrap_env import gymnasium as gym # load the environment (API conversion) env = gym.make("ALE/Pong-v5") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="gymnasium")' # [jax-end-shimmy] # [pytorch-start-deepmind] # import the environment wrapper and the deepmind suite from skrl.envs.wrappers.torch import wrap_env from dm_control import suite # load the environment env = suite.load(domain_name="cartpole", task_name="swingup") # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="dm")' # [pytorch-end-deepmind] # [pytorch-start-robosuite] # import the environment wrapper from skrl.envs.wrappers.torch import wrap_env # import the robosuite wrapper import robosuite from robosuite.controllers import load_controller_config # load the environment controller_config = load_controller_config(default_controller="OSC_POSE") env = robosuite.make("TwoArmLift", robots=["Sawyer", "Panda"], # load a Sawyer robot and a Panda robot gripper_types="default", # use default grippers per robot arm controller_configs=controller_config, # each arm is controlled using OSC env_configuration="single-arm-opposed", # (two-arm envs only) arms face each other has_renderer=True, # on-screen rendering render_camera="frontview", # visualize the "frontview" camera has_offscreen_renderer=False, # no off-screen rendering control_freq=20, # 20 hz control for applied actions horizon=200, # each episode terminates after 200 steps use_object_obs=True, # provide object observations to agent use_camera_obs=False, # don't provide image observations to agent reward_shaping=True) # use a dense reward signal for learning # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="robosuite")' # [pytorch-end-robosuite] # [start-bidexhands-torch] # import the environment 
wrapper and loader from skrl.envs.wrappers.torch import wrap_env from skrl.envs.loaders.torch import load_bidexhands_env # load the environment env = load_bidexhands_env(task_name="ShadowHandOver") # wrap the environment env = wrap_env(env, wrapper="bidexhands") # [end-bidexhands-torch] # [start-bidexhands-jax] # import the environment wrapper and loader from skrl.envs.wrappers.jax import wrap_env from skrl.envs.loaders.jax import load_bidexhands_env # load the environment env = load_bidexhands_env(task_name="ShadowHandOver") # wrap the environment env = wrap_env(env, wrapper="bidexhands") # [end-bidexhands-jax] # [start-pettingzoo-torch] # import the environment wrapper from skrl.envs.wrappers.torch import wrap_env # import a PettingZoo environment from pettingzoo.sisl import multiwalker_v9 # load the environment env = multiwalker_v9.parallel_env() # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="pettingzoo")' # [end-pettingzoo-torch] # [start-pettingzoo-jax] # import the environment wrapper from skrl.envs.wrappers.jax import wrap_env # import a PettingZoo environment from pettingzoo.sisl import multiwalker_v9 # load the environment env = multiwalker_v9.parallel_env() # wrap the environment env = wrap_env(env) # or 'env = wrap_env(env, wrapper="pettingzoo")' # [end-pettingzoo-jax]
12,845
Python
29.440758
104
0.712028
Toni-SM/skrl/docs/source/snippets/trainer.py
# [pytorch-start-base] from typing import Union, List, Optional import copy from skrl.envs.wrappers.torch import Wrapper from skrl.agents.torch import Agent from skrl.trainers.torch import Trainer CUSTOM_DEFAULT_CONFIG = { "timesteps": 100000, # number of timesteps to train for "headless": False, # whether to use headless mode (no rendering) "disable_progressbar": False, # whether to disable the progressbar. If None, disable on non-TTY "close_environment_at_exit": True, # whether to close the environment on normal program termination } class CustomTrainer(Trainer): def __init__(self, env: Wrapper, agents: Union[Agent, List[Agent], List[List[Agent]]], agents_scope: Optional[List[int]] = None, cfg: Optional[dict] = None) -> None: """ :param env: Environment to train on :type env: skrl.envs.wrappers.torch.Wrapper :param agents: Agents to train :type agents: Union[Agent, List[Agent]] :param agents_scope: Number of environments for each agent to train on (default: []) :type agents_scope: tuple or list of integers :param cfg: Configuration dictionary :type cfg: dict, optional """ _cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) agents_scope = agents_scope if agents_scope is not None else [] super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg) # ================================ # - init agents # ================================ def train(self) -> None: """Train the agents """ # ================================ # - run training loop # + call agents.pre_interaction(...) # + compute actions using agents.act(...) # + step environment using env.step(...) # + render scene using env.render(...) # + record environment transition in memory using agents.record_transition(...) # + call agents.post_interaction(...) # + reset environment using env.reset(...) # ================================ def eval(self) -> None: """Evaluate the agents """ # ================================ # - run evaluation loop # + compute actions using agents.act(...) # + step environment using env.step(...) # + render scene using env.render(...) # + call agents.post_interaction(...) parent method to write data to TensorBoard # + reset environment using env.reset(...) # ================================ # [pytorch-end-base] # [jax-start-base] from typing import Union, List, Optional import copy from skrl.envs.wrappers.jax import Wrapper from skrl.agents.jax import Agent from skrl.trainers.jax import Trainer CUSTOM_DEFAULT_CONFIG = { "timesteps": 100000, # number of timesteps to train for "headless": False, # whether to use headless mode (no rendering) "disable_progressbar": False, # whether to disable the progressbar. 
If None, disable on non-TTY "close_environment_at_exit": True, # whether to close the environment on normal program termination } class CustomTrainer(Trainer): def __init__(self, env: Wrapper, agents: Union[Agent, List[Agent], List[List[Agent]]], agents_scope: Optional[List[int]] = None, cfg: Optional[dict] = None) -> None: """ :param env: Environment to train on :type env: skrl.envs.wrappers.jax.Wrapper :param agents: Agents to train :type agents: Union[Agent, List[Agent]] :param agents_scope: Number of environments for each agent to train on (default: []) :type agents_scope: tuple or list of integers :param cfg: Configuration dictionary :type cfg: dict, optional """ _cfg = copy.deepcopy(CUSTOM_DEFAULT_CONFIG) _cfg.update(cfg if cfg is not None else {}) agents_scope = agents_scope if agents_scope is not None else [] super().__init__(env=env, agents=agents, agents_scope=agents_scope, cfg=_cfg) # ================================ # - init agents # ================================ def train(self) -> None: """Train the agents """ # ================================ # - run training loop # + call agents.pre_interaction(...) # + compute actions using agents.act(...) # + step environment using env.step(...) # + render scene using env.render(...) # + record environment transition in memory using agents.record_transition(...) # + call agents.post_interaction(...) # + reset environment using env.reset(...) # ================================ def eval(self) -> None: """Evaluate the agents """ # ================================ # - run evaluation loop # + compute actions using agents.act(...) # + step environment using env.step(...) # + render scene using env.render(...) # + call agents.post_interaction(...) parent method to write data to TensorBoard # + reset environment using env.reset(...) 
# ================================ # [jax-end-base] # ============================================================================= # [pytorch-start-sequential] from skrl.trainers.torch import SequentialTrainer # assuming there is an environment called 'env' # and an agent or a list of agents called 'agents' # create a sequential trainer cfg = {"timesteps": 50000, "headless": False} trainer = SequentialTrainer(env=env, agents=agents, cfg=cfg) # train the agent(s) trainer.train() # evaluate the agent(s) trainer.eval() # [pytorch-end-sequential] # [jax-start-sequential] from skrl.trainers.jax import SequentialTrainer # assuming there is an environment called 'env' # and an agent or a list of agents called 'agents' # create a sequential trainer cfg = {"timesteps": 50000, "headless": False} trainer = SequentialTrainer(env=env, agents=agents, cfg=cfg) # train the agent(s) trainer.train() # evaluate the agent(s) trainer.eval() # [jax-end-sequential] # ============================================================================= # [pytorch-start-parallel] from skrl.trainers.torch import ParallelTrainer # assuming there is an environment called 'env' # and an agent or a list of agents called 'agents' # create a sequential trainer cfg = {"timesteps": 50000, "headless": False} trainer = ParallelTrainer(env=env, agents=agents, cfg=cfg) # train the agent(s) trainer.train() # evaluate the agent(s) trainer.eval() # [pytorch-end-parallel] # ============================================================================= # [pytorch-start-step] from skrl.trainers.torch import StepTrainer # assuming there is an environment called 'env' # and an agent or a list of agents called 'agents' # create a sequential trainer cfg = {"timesteps": 50000, "headless": False} trainer = StepTrainer(env=env, agents=agents, cfg=cfg) # train the agent(s) for timestep in range(cfg["timesteps"]): trainer.train(timestep=timestep) # evaluate the agent(s) for timestep in range(cfg["timesteps"]): trainer.eval(timestep=timestep) # [pytorch-end-step] # [jax-start-step] from skrl.trainers.jax import StepTrainer # assuming there is an environment called 'env' # and an agent or a list of agents called 'agents' # create a sequential trainer cfg = {"timesteps": 50000, "headless": False} trainer = StepTrainer(env=env, agents=agents, cfg=cfg) # train the agent(s) for timestep in range(cfg["timesteps"]): trainer.train(timestep=timestep) # evaluate the agent(s) for timestep in range(cfg["timesteps"]): trainer.eval(timestep=timestep) # [jax-end-step] # ============================================================================= # [pytorch-start-manual-training] # [pytorch-end-manual-training] # [pytorch-start-manual-evaluation] # assuming there is an environment named 'env' # and an agent named 'agents' (or a state-preprocessor and a policy) states, infos = env.reset() for i in range(1000): # state-preprocessor + policy with torch.no_grad(): states = state_preprocessor(states) actions = policy.act({"states": states})[0] # step the environment next_states, rewards, terminated, truncated, infos = env.step(actions) # render the environment env.render() # check for termination/truncation if terminated.any() or truncated.any(): states, infos = env.reset() else: states = next_states # [pytorch-end-manual-evaluation] # [jax-start-manual-training] # [jax-end-manual-training] # [jax-start-manual-evaluation] # [jax-end-manual-evaluation]
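
# Illustrative sketch (the manual-training markers above are left empty in this file):
# a manual training loop assembled from the steps listed in the trainer base class
# (pre_interaction, act, env.step, record_transition, post_interaction, reset).
# It assumes a wrapped environment `env`, a single agent `agent` and a timestep
# budget `timesteps`; concrete agents are assumed to return a tuple whose first
# element holds the actions (drop the [0] if a custom agent returns them directly).
import torch

agent.init()
states, infos = env.reset()

for timestep in range(timesteps):
    # pre-interaction step (e.g. learning rate / noise scheduling)
    agent.pre_interaction(timestep=timestep, timesteps=timesteps)

    # compute actions and step the environment
    with torch.no_grad():
        actions = agent.act(states, timestep=timestep, timesteps=timesteps)[0]
    next_states, rewards, terminated, truncated, infos = env.step(actions)

    # record the transition and run the agent's update/bookkeeping logic
    agent.record_transition(states=states, actions=actions, rewards=rewards,
                            next_states=next_states, terminated=terminated,
                            truncated=truncated, infos=infos,
                            timestep=timestep, timesteps=timesteps)
    agent.post_interaction(timestep=timestep, timesteps=timesteps)

    # reset the environment when episodes finish
    if terminated.any() or truncated.any():
        states, infos = env.reset()
    else:
        states = next_states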
8,996
Python
31.132143
105
0.587372
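A minimal sketch of how the commented training-loop steps in the custom trainer snippet above could be filled in for a single agent. The attribute names (self.env, self.agents, self.cfg, self.headless), the "timesteps" config key and the keyword arguments of the agent methods are assumptions inferred from the snippet's comments and the base-class pattern, so they should be checked against the installed skrl version.

class MinimalTrainer(CustomTrainer):
    """Single-agent trainer sketch that fills in the commented steps of the snippet above"""
    def train(self) -> None:
        timesteps = self.cfg.get("timesteps", 100000)  # assumed config key
        states, infos = self.env.reset()
        for timestep in range(timesteps):
            # pre-interaction (e.g. schedule or exploration updates)
            self.agents.pre_interaction(timestep=timestep, timesteps=timesteps)
            # compute actions and step the environment
            actions = self.agents.act(states, timestep=timestep, timesteps=timesteps)[0]
            next_states, rewards, terminated, truncated, infos = self.env.step(actions)
            # render the scene if not running headless
            if not self.headless:
                self.env.render()
            # record the environment transition in the agent's memory
            self.agents.record_transition(states=states, actions=actions, rewards=rewards,
                                          next_states=next_states, terminated=terminated,
                                          truncated=truncated, infos=infos,
                                          timestep=timestep, timesteps=timesteps)
            # post-interaction (agent update and logging)
            self.agents.post_interaction(timestep=timestep, timesteps=timesteps)
            # reset the environment when any sub-environment terminates or is truncated
            if terminated.any() or truncated.any():
                states, infos = self.env.reset()
            else:
                states = next_states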
Toni-SM/skrl/docs/source/api/resources.rst
Resources ========= .. toctree:: :hidden: Noises <resources/noises> Preprocessors <resources/preprocessors> Learning rate schedulers <resources/schedulers> Optimizers <resources/optimizers> Resources groups a variety of components that may be used to improve the agents' performance. .. raw:: html <br><hr> Available resources are :doc:`noises <resources/noises>`, input :doc:`preprocessors <resources/preprocessors>`, learning rate :doc:`schedulers <resources/schedulers>` and :doc:`optimizers <resources/optimizers>` (this last one only for JAX). .. list-table:: :header-rows: 1 * - Noises - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`Gaussian <resources/noises/gaussian>` noise - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Ornstein-Uhlenbeck <resources/noises/ornstein_uhlenbeck>` noise |_2| - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` .. list-table:: :header-rows: 1 * - Preprocessors - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`Running standard scaler <resources/preprocessors/running_standard_scaler>` |_4| - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` .. list-table:: :header-rows: 1 * - Learning rate schedulers - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`KL Adaptive <resources/schedulers/kl_adaptive>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` .. list-table:: :header-rows: 1 * - Optimizers - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`Adam <resources/optimizers/adam>`\ |_5| |_5| |_5| |_5| |_5| |_5| |_3| - .. centered:: :math:`\scriptscriptstyle \texttt{PyTorch}` - .. centered:: :math:`\blacksquare`
1,974
reStructuredText
30.854838
241
0.591692
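As a quick illustration of how one of the resources listed above is used, the sketch below samples from a Gaussian noise (PyTorch variant). The constructor arguments (mean, std, device) and the sample(size) call follow the pattern of the noise documentation but should be treated as assumptions to verify there.

from skrl.resources.noises.torch import GaussianNoise

# zero-mean Gaussian noise, typically added to deterministic actions for exploration
noise = GaussianNoise(mean=0.0, std=0.2, device="cpu")

# sample a batch of noise with the same shape as a batch of actions
samples = noise.sample((4096, 8))
print(samples.shape)  # expected: torch.Size([4096, 8])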
Toni-SM/skrl/docs/source/api/multi_agents.rst
Multi-agents ============ .. toctree:: :hidden: IPPO <multi_agents/ippo> MAPPO <multi_agents/mappo> Multi-agents are autonomous entities that interact with the environment to learn and improve their behavior. Multi-agents' goal is to learn optimal policies, which are correspondences between states and actions that maximize the cumulative reward received from the environment over time. .. raw:: html <br><hr> .. list-table:: :header-rows: 1 * - Multi-agents - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`Independent Proximal Policy Optimization <multi_agents/ippo>` (**IPPO**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Multi-Agent Proximal Policy Optimization <multi_agents/mappo>` (**MAPPO**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` Base class ---------- .. note:: This is the base class for all multi-agents and provides only basic functionality that is not tied to any implementation of the optimization algorithms. **It is not intended to be used directly**. .. raw:: html <br> Basic inheritance usage ^^^^^^^^^^^^^^^^^^^^^^^ .. tabs:: .. tab:: Inheritance .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../snippets/multi_agent.py :language: python :start-after: [start-multi-agent-base-class-torch] :end-before: [end-multi-agent-base-class-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../snippets/multi_agent.py :language: python :start-after: [start-multi-agent-base-class-jax] :end-before: [end-multi-agent-base-class-jax] .. raw:: html <br> API (PyTorch) ^^^^^^^^^^^^^ .. autoclass:: skrl.multi_agents.torch.base.MultiAgent :undoc-members: :show-inheritance: :inherited-members: :private-members: _update, _empty_preprocessor, _get_internal_value, _as_dict :members: .. automethod:: __init__ .. automethod:: __str__ .. raw:: html <br> API (JAX) ^^^^^^^^^ .. autoclass:: skrl.multi_agents.jax.base.MultiAgent :undoc-members: :show-inheritance: :inherited-members: :private-members: _update, _empty_preprocessor, _get_internal_value, _as_dict :members: .. automethod:: __init__ .. automethod:: __str__
2,510
reStructuredText
24.886598
286
0.586853
Toni-SM/skrl/docs/source/api/models.rst
Models ====== .. toctree:: :hidden: Tabular <models/tabular> Categorical <models/categorical> Multi-Categorical <models/multicategorical> Gaussian <models/gaussian> Multivariate Gaussian <models/multivariate_gaussian> Deterministic <models/deterministic> Shared model <models/shared_model> Models (or agent models) refer to a representation of the agent's policy, value function, etc. that the agent uses to make decisions. Agents can have one or more models, and their parameters are adjusted by the optimization algorithms. .. raw:: html <br><hr> .. list-table:: :header-rows: 1 * - Models - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`Tabular model <models/tabular>` (discrete domain) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` * - :doc:`Categorical model <models/categorical>` (discrete domain) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Multi-Categorical model <models/multicategorical>` (discrete domain) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Gaussian model <models/gaussian>` (continuous domain) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Multivariate Gaussian model <models/multivariate_gaussian>` (continuous domain) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` * - :doc:`Deterministic model <models/deterministic>` (continuous domain) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Shared model <models/shared_model>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` Base class ---------- .. note:: This is the base class for all models in this module and provides only basic functionality that is not tied to any specific implementation. **It is not intended to be used directly**. .. raw:: html <br> Mixin and inheritance ^^^^^^^^^^^^^^^^^^^^^ .. tabs:: .. tab:: Mixin .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../snippets/model_mixin.py :language: python :start-after: [start-mixin-torch] :end-before: [end-mixin-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../snippets/model_mixin.py :language: python :start-after: [start-mixin-jax] :end-before: [end-mixin-jax] .. tab:: Model inheritance .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../snippets/model_mixin.py :language: python :start-after: [start-model-torch] :end-before: [end-model-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../snippets/model_mixin.py :language: python :start-after: [start-model-jax] :end-before: [end-model-jax] .. raw:: html <br> .. _models_base_class: API (PyTorch) ^^^^^^^^^^^^^ .. autoclass:: skrl.models.torch.base.Model :undoc-members: :show-inheritance: :private-members: _get_space_size :members: .. automethod:: __init__ .. py:property:: device Device to be used for the computations .. py:property:: observation_space Observation/state space. It is a replica of the class constructor parameter of the same name .. py:property:: action_space Action space. It is a replica of the class constructor parameter of the same name .. py:property:: num_observations Number of elements in the observation/state space .. py:property:: num_actions Number of elements in the action space .. raw:: html <br> API (JAX) ^^^^^^^^^ .. autoclass:: skrl.models.jax.base.Model :undoc-members: :show-inheritance: :private-members: _get_space_size :members: .. automethod:: __init__ .. 
py:property:: device Device to be used for the computations .. py:property:: observation_space Observation/state space. It is a replica of the class constructor parameter of the same name .. py:property:: action_space Action space. It is a replica of the class constructor parameter of the same name .. py:property:: num_observations Number of elements in the observation/state space .. py:property:: num_actions Number of elements in the action space
4,733
reStructuredText
26.364162
235
0.587788
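The mixin pattern described above is typically combined as shown in the following PyTorch sketch of a Gaussian policy. The constructor and compute() signatures mirror the documented model pattern, but they are reproduced from memory and should be checked against the model pages before use.

import torch
import torch.nn as nn

from skrl.models.torch import Model, GaussianMixin


class Policy(GaussianMixin, Model):
    def __init__(self, observation_space, action_space, device="cpu",
                 clip_actions=False, clip_log_std=True, min_log_std=-20, max_log_std=2):
        Model.__init__(self, observation_space, action_space, device)
        GaussianMixin.__init__(self, clip_actions, clip_log_std, min_log_std, max_log_std)

        # simple MLP mapping observations to mean actions
        self.net = nn.Sequential(nn.Linear(self.num_observations, 32), nn.ELU(),
                                 nn.Linear(32, 32), nn.ELU(),
                                 nn.Linear(32, self.num_actions))
        # state-independent log standard deviation
        self.log_std_parameter = nn.Parameter(torch.zeros(self.num_actions))

    def compute(self, inputs, role):
        # return the mean actions, the log standard deviation and an (empty) extra-outputs dict
        return self.net(inputs["states"]), self.log_std_parameter, {}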
Toni-SM/skrl/docs/source/api/trainers.rst
Trainers ======== .. toctree:: :hidden: Sequential <trainers/sequential> Parallel <trainers/parallel> Step <trainers/step> Manual training <trainers/manual> Trainers are responsible for orchestrating and managing the training/evaluation of agents and their interactions with the environment. .. raw:: html <br><hr> .. list-table:: :header-rows: 1 * - Trainers - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`Sequential trainer <trainers/sequential>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Parallel trainer <trainers/parallel>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` * - :doc:`Step trainer <trainers/step>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Manual training <trainers/manual>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` Base class ---------- .. note:: This is the base class for all the other classes in this module. It provides the basic functionality for the other classes. **It is not intended to be used directly**. .. raw:: html <br> Basic inheritance usage ^^^^^^^^^^^^^^^^^^^^^^^ .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../snippets/trainer.py :language: python :start-after: [pytorch-start-base] :end-before: [pytorch-end-base] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../snippets/trainer.py :language: python :start-after: [jax-start-base] :end-before: [jax-end-base] .. raw:: html <br> API (PyTorch) ^^^^^^^^^^^^^ .. autoclass:: skrl.trainers.torch.base.Trainer :undoc-members: :show-inheritance: :inherited-members: :private-members: _setup_agents :members: .. automethod:: __init__ .. automethod:: __str__ .. raw:: html <br> API (JAX) ^^^^^^^^^ .. autoclass:: skrl.trainers.jax.base.Trainer :undoc-members: :show-inheritance: :inherited-members: :private-members: _setup_agents :members: .. automethod:: __init__ .. automethod:: __str__
2,279
reStructuredText
21.352941
134
0.573936
Toni-SM/skrl/docs/source/api/envs.rst
Environments ============ .. toctree:: :hidden: Wrapping (single-agent) <envs/wrapping> Wrapping (multi-agents) <envs/multi_agents_wrapping> Isaac Gym environments <envs/isaac_gym> Isaac Orbit environments <envs/isaac_orbit> Omniverse Isaac Gym environments <envs/omniverse_isaac_gym> The environment plays a fundamental and crucial role in defining the RL setup. It is the place where the agent interacts, and it is responsible for providing the agent with information about its current state, as well as the rewards/penalties associated with each action. .. raw:: html <br><hr> Grouped in this section you will find how to load environments from NVIDIA Isaac Gym, Isaac Orbit and Omniverse Isaac Gym with a simple function. In addition, you will be able to :doc:`wrap single-agent <envs/wrapping>` and :doc:`multi-agent <envs/multi_agents_wrapping>` RL environment interfaces. .. list-table:: :header-rows: 1 * - Loaders - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`Isaac Gym environments <envs/isaac_gym>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Isaac Orbit environments <envs/isaac_orbit>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Omniverse Isaac Gym environments <envs/omniverse_isaac_gym>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` .. list-table:: :header-rows: 1 * - Wrappers - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - Bi-DexHands - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - DeepMind - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` * - Gym - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - Gymnasium - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - Isaac Gym (previews) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - Isaac Orbit - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - Omniverse Isaac Gym |_5| |_5| |_5| |_5| |_2| - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - PettingZoo - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - robosuite - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` * - Shimmy - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare`
2,701
reStructuredText
35.026666
271
0.599408
Toni-SM/skrl/docs/source/api/utils.rst
Utils and configurations ======================== .. toctree:: :hidden: ML frameworks configuration <config/frameworks> Random seed <utils/seed> Memory and Tensorboard file post-processing <utils/postprocessing> Model instantiators <utils/model_instantiators> Hugging Face integration <utils/huggingface> Isaac Gym utils <utils/isaacgym_utils> Omniverse Isaac Gym utils <utils/omniverse_isaacgym_utils> A set of utilities and configurations for managing an RL setup is provided as part of the library. .. raw:: html <br><hr> .. list-table:: :header-rows: 1 * - Configurations - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`ML frameworks <config/frameworks>` configuration |_5| |_5| |_5| |_5| |_5| |_2| - .. centered:: :math:`\square` - .. centered:: :math:`\blacksquare` .. list-table:: :header-rows: 1 * - Utils - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`Random seed <utils/seed>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - Memory and Tensorboard :doc:`file post-processing <utils/postprocessing>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Model instantiators <utils/model_instantiators>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Hugging Face integration <utils/huggingface>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Isaac Gym utils <utils/isaacgym_utils>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Omniverse Isaac Gym utils <utils/omniverse_isaacgym_utils>` - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare`
1,902
reStructuredText
33.599999
98
0.599895
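A small example of the random-seed utility mentioned above (the import path follows the "Random seed" page referenced in the toctree; treat it as an assumption if the package layout differs).

from skrl.utils import set_seed

# seed the pseudo-random generators used by the library and the active ML framework
set_seed(42)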
Toni-SM/skrl/docs/source/api/agents.rst
Agents ====== .. toctree:: :hidden: A2C <agents/a2c> AMP <agents/amp> CEM <agents/cem> DDPG <agents/ddpg> DDQN <agents/ddqn> DQN <agents/dqn> PPO <agents/ppo> Q-learning <agents/q_learning> RPO <agents/rpo> SAC <agents/sac> SARSA <agents/sarsa> TD3 <agents/td3> TRPO <agents/trpo> Agents are autonomous entities that interact with the environment to learn and improve their behavior. Agents' goal is to learn an optimal policy, which is a correspondence between states and actions that maximizes the cumulative reward received from the environment over time. .. raw:: html <br><hr> .. list-table:: :header-rows: 1 * - Agents - .. centered:: |_4| |pytorch| |_4| - .. centered:: |_4| |jax| |_4| * - :doc:`Advantage Actor Critic <agents/a2c>` (**A2C**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Adversarial Motion Priors <agents/amp>` (**AMP**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` * - :doc:`Cross-Entropy Method <agents/cem>` (**CEM**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Deep Deterministic Policy Gradient <agents/ddpg>` (**DDPG**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Double Deep Q-Network <agents/ddqn>` (**DDQN**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Deep Q-Network <agents/dqn>` (**DQN**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Proximal Policy Optimization <agents/ppo>` (**PPO**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Q-learning <agents/q_learning>` (**Q-learning**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` * - :doc:`Robust Policy Optimization <agents/rpo>` (**RPO**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Soft Actor-Critic <agents/sac>` (**SAC**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`State Action Reward State Action <agents/sarsa>` (**SARSA**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` * - :doc:`Twin-Delayed DDPG <agents/td3>` (**TD3**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\blacksquare` * - :doc:`Trust Region Policy Optimization <agents/trpo>` (**TRPO**) - .. centered:: :math:`\blacksquare` - .. centered:: :math:`\square` Base class ---------- .. note:: This is the base class for all agents in this module and provides only basic functionality that is not tied to any implementation of the optimization algorithms. **It is not intended to be used directly**. .. raw:: html <br> Basic inheritance usage ^^^^^^^^^^^^^^^^^^^^^^^ .. tabs:: .. tab:: Inheritance .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../snippets/agent.py :language: python :start-after: [start-agent-base-class-torch] :end-before: [end-agent-base-class-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../snippets/agent.py :language: python :start-after: [start-agent-base-class-jax] :end-before: [end-agent-base-class-jax] .. raw:: html <br> API (PyTorch) ^^^^^^^^^^^^^ .. autoclass:: skrl.agents.torch.base.Agent :undoc-members: :show-inheritance: :inherited-members: :private-members: _update, _empty_preprocessor, _get_internal_value :members: .. automethod:: __init__ .. automethod:: __str__ .. raw:: html <br> API (JAX) ^^^^^^^^^ .. 
autoclass:: skrl.agents.jax.base.Agent :undoc-members: :show-inheritance: :inherited-members: :private-members: _update, _empty_preprocessor, _get_internal_value :members: .. automethod:: __init__ .. automethod:: __str__
4,230
reStructuredText
29.007092
277
0.564775
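As a hedged illustration of how one of the agents listed above is configured, the sketch below sets up PPO (PyTorch). The import paths and configuration keys follow skrl's documented examples but should be verified against the agent page; env, policy_model and value_model are placeholders assumed to be created elsewhere (e.g. with the models and wrappers shown in the other sections).

from skrl.agents.torch.ppo import PPO, PPO_DEFAULT_CONFIG
from skrl.memories.torch import RandomMemory

# rollout memory shared by the agent (placeholder sizes)
memory = RandomMemory(memory_size=1024, num_envs=env.num_envs, device=env.device)

# start from the default configuration and override a few values
cfg = PPO_DEFAULT_CONFIG.copy()
cfg["rollouts"] = 1024
cfg["learning_epochs"] = 8

agent = PPO(models={"policy": policy_model, "value": value_model},
            memory=memory,
            cfg=cfg,
            observation_space=env.observation_space,
            action_space=env.action_space,
            device=env.device)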
Toni-SM/skrl/docs/source/api/envs/omniverse_isaac_gym.rst
Omniverse Isaac Gym environments ================================ .. image:: ../../_static/imgs/example_omniverse_isaacgym.png :width: 100% :align: center :alt: Omniverse Isaac Gym environments .. raw:: html <br><br><hr> Environments ------------ The repository https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs provides the example reinforcement learning environments for Omniverse Isaac Gym. These environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments (see OmniIsaacGymEnvs's `configuration-and-command-line-arguments <https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs#configuration-and-command-line-arguments>`_) or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`). Additionally, multi-threaded environments can be loaded. These are designed to isolate the RL policy in a new thread, separate from the main simulation and rendering thread. Read more about it in the OmniIsaacGymEnvs framework documentation: `Multi-Threaded Environment Wrapper <https://github.com/NVIDIA-Omniverse/OmniIsaacGymEnvs/blob/220d34c6b68d3f7518c4aa008ae009d13cc60c03/docs/framework.md#multi-threaded-environment-wrapper>`_. .. note:: The command line arguments has priority over the function parameters. .. note:: Only the configuration related to the environment will be used. The configuration related to RL algorithms are discarded since they do not belong to this library. .. note:: Omniverse Isaac Gym environments implement a functionality to get their configuration from the command line. Setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`python script.py headless=True`. .. raw:: html <br> Usage ^^^^^ .. raw:: html <br> Common environments """"""""""""""""""" In this approach, the RL algorithm maintains the main execution loop. .. tabs:: .. group-tab:: Function parameters .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-omniverse-isaac-gym-envs-parameters-torch] :end-before: [end-omniverse-isaac-gym-envs-parameters-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-omniverse-isaac-gym-envs-parameters-jax] :end-before: [end-omniverse-isaac-gym-envs-parameters-jax] .. group-tab:: Command line arguments (priority) .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-omniverse-isaac-gym-envs-cli-torch] :end-before: [end-omniverse-isaac-gym-envs-cli-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-omniverse-isaac-gym-envs-cli-jax] :end-before: [end-omniverse-isaac-gym-envs-cli-jax] Run the main script passing the configuration as command line arguments. For example: .. code-block:: python main.py task=Cartpole .. raw:: html <br> Multi-threaded environments """"""""""""""""""""""""""" In this approach, the RL algorithm is executed on a secondary thread while the simulation and rendering is executed on the main thread. .. tabs:: .. 
group-tab:: Function parameters .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 1, 4, 7, 12, 15 :start-after: [start-omniverse-isaac-gym-envs-multi-threaded-parameters-torch] :end-before: [end-omniverse-isaac-gym-envs-multi-threaded-parameters-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 1, 4, 7, 12, 15 :start-after: [start-omniverse-isaac-gym-envs-multi-threaded-parameters-jax] :end-before: [end-omniverse-isaac-gym-envs-multi-threaded-parameters-jax] .. group-tab:: Command line arguments (priority) .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 1, 4, 7, 12, 15 :start-after: [start-omniverse-isaac-gym-envs-multi-threaded-cli-torch] :end-before: [end-omniverse-isaac-gym-envs-multi-threaded-cli-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 1, 4, 7, 12, 15 :start-after: [start-omniverse-isaac-gym-envs-multi-threaded-cli-jax] :end-before: [end-omniverse-isaac-gym-envs-multi-threaded-cli-jax] Run the main script passing the configuration as command line arguments. For example: .. code-block:: python main.py task=Cartpole .. raw:: html <br> API ^^^ .. autofunction:: skrl.envs.loaders.torch.load_omniverse_isaacgym_env
6,034
reStructuredText
36.02454
488
0.606397
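A brief sketch of the loader documented on this page, using function parameters instead of command line arguments (which, as noted above, take priority when present). The task name "Cartpole" mirrors the example in the text; the environment count is arbitrary.

from skrl.envs.loaders.torch import load_omniverse_isaacgym_env
from skrl.envs.wrappers.torch import wrap_env

# load the Omniverse Isaac Gym environment and wrap it for use with skrl
env = load_omniverse_isaacgym_env(task_name="Cartpole", num_envs=64, headless=True)
env = wrap_env(env)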
Toni-SM/skrl/docs/source/api/envs/isaac_orbit.rst
Isaac Orbit environments ======================== .. image:: ../../_static/imgs/example_isaac_orbit.png :width: 100% :align: center :alt: Isaac Orbit environments .. raw:: html <br><br><hr> Environments ------------ The repository https://github.com/NVIDIA-Omniverse/Orbit provides the example reinforcement learning environments for Isaac Orbit. These environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments (see Isaac Orbit's `Running an RL environment <https://isaac-orbit.github.io/orbit/source/tutorials_envs/00_gym_env.html>`_) or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`). .. note:: The command line arguments have priority over the function parameters. .. note:: Isaac Orbit environments implement functionality to get their configuration from the command line. Setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`orbit -p script.py --headless`. .. raw:: html <br> Usage ^^^^^ .. tabs:: .. tab:: Function parameters .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-orbit-envs-parameters-torch] :end-before: [end-isaac-orbit-envs-parameters-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-orbit-envs-parameters-jax] :end-before: [end-isaac-orbit-envs-parameters-jax] .. tab:: Command line arguments (priority) .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-orbit-envs-cli-torch] :end-before: [end-isaac-orbit-envs-cli-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-orbit-envs-cli-jax] :end-before: [end-isaac-orbit-envs-cli-jax] Run the main script passing the configuration as command line arguments. For example: .. code-block:: orbit -p main.py --task Isaac-Cartpole-v0 .. raw:: html <br> API ^^^ .. autofunction:: skrl.envs.loaders.torch.load_isaac_orbit_env
3,041
reStructuredText
32.428571
448
0.591911
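Similarly, a brief sketch of loading an Isaac Orbit environment through the documented loader, using the function parameters named above (the task name mirrors the example command in the text).

from skrl.envs.loaders.torch import load_isaac_orbit_env
from skrl.envs.wrappers.torch import wrap_env

# load the Isaac Orbit environment and wrap it for use with skrl
env = load_isaac_orbit_env(task_name="Isaac-Cartpole-v0", num_envs=64, headless=True)
env = wrap_env(env)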
Toni-SM/skrl/docs/source/api/envs/multi_agents_wrapping.rst
:tocdepth: 3 Wrapping (multi-agents) ======================= .. raw:: html <br><hr> This library works with a common API to interact with the following RL multi-agent environments: * Farama `PettingZoo <https://pettingzoo.farama.org>`_ (parallel API) * `Bi-DexHands <https://github.com/PKU-MARL/DexterousHands>`_ To operate with them and to support interoperability between these non-compatible interfaces, a **wrapping mechanism is provided** as shown in the diagram below .. raw:: html <br> .. image:: ../../_static/imgs/multi_agent_wrapping-light.svg :width: 100% :align: center :class: only-light :alt: Environment wrapping .. image:: ../../_static/imgs/multi_agent_wrapping-dark.svg :width: 100% :align: center :class: only-dark :alt: Environment wrapping .. raw:: html <br> Usage ----- .. tabs:: .. tab:: PettingZoo .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [start-pettingzoo-torch] :end-before: [end-pettingzoo-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [start-pettingzoo-jax] :end-before: [end-pettingzoo-jax] .. tab:: Bi-DexHands .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [start-bidexhands-torch] :end-before: [end-bidexhands-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [start-bidexhands-jax] :end-before: [end-bidexhands-jax] .. raw:: html <br> API (PyTorch) ------------- .. autofunction:: skrl.envs.wrappers.torch.wrap_env .. raw:: html <br> API (JAX) --------- .. autofunction:: skrl.envs.wrappers.jax.wrap_env .. raw:: html <br> Internal API (PyTorch) ---------------------- .. autoclass:: skrl.envs.wrappers.torch.MultiAgentEnvWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. py:property:: device The device used by the environment If the wrapped environment does not have the ``device`` property, the value of this property will be ``"cuda:0"`` or ``"cpu"`` depending on the device availability .. py:property:: possible_agents A list of all possible_agents the environment could generate .. autoclass:: skrl.envs.wrappers.torch.BiDexHandsWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.torch.PettingZooWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. raw:: html <br> Internal API (JAX) ------------------ .. autoclass:: skrl.envs.wrappers.jax.MultiAgentEnvWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. py:property:: device The device used by the environment If the wrapped environment does not have the ``device`` property, the value of this property will be ``"cuda:0"`` or ``"cpu"`` depending on the device availability .. py:property:: possible_agents A list of all possible_agents the environment could generate .. autoclass:: skrl.envs.wrappers.jax.BiDexHandsWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.jax.PettingZooWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__
3,917
reStructuredText
21.912281
171
0.581057
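A short sketch of wrapping a PettingZoo (parallel API) environment as described above. The specific MPE module and version suffix are only an example and may differ across PettingZoo releases; the multi-agent wrapper is assumed to be selected automatically by wrap_env.

from pettingzoo.mpe import simple_spread_v3

from skrl.envs.wrappers.torch import wrap_env

# create a parallel-API PettingZoo environment and wrap it
env = simple_spread_v3.parallel_env()
env = wrap_env(env)

print(env.possible_agents)  # e.g. ['agent_0', 'agent_1', 'agent_2']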
Toni-SM/skrl/docs/source/api/envs/isaac_gym.rst
Isaac Gym environments ====================== .. image:: ../../_static/imgs/example_isaacgym.png :width: 100% :align: center :alt: Omniverse Isaac Gym environments .. raw:: html <br><br><hr> Environments (preview 4) ------------------------ The repository https://github.com/NVIDIA-Omniverse/IsaacGymEnvs provides the example reinforcement learning environments for Isaac Gym (preview 4). With the release of Isaac Gym (preview 4), NVIDIA developers provide an easy-to-use API for creating/loading preset vectorized environments (see IsaacGymEnvs's `creating-an-environment <https://github.com/NVIDIA-Omniverse/IsaacGymEnvs#creating-an-environment>`_). .. tabs:: .. tab:: Easy-to-use API from NVIDIA .. literalinclude:: ../../snippets/loaders.py :language: python :start-after: [start-isaac-gym-envs-preview-4-api] :end-before: [end-isaac-gym-envs-preview-4-api] Nevertheless, in order to maintain the loading style of previous versions, **skrl** provides its own implementation for loading such environments. The environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments (see IsaacGymEnvs's `configuration-and-command-line-arguments <https://github.com/NVIDIA-Omniverse/IsaacGymEnvs#configuration-and-command-line-arguments>`_) or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`). .. note:: Only the configuration related to the environment will be used. The configuration related to RL algorithms are discarded since they do not belong to this library. .. note:: Isaac Gym environments implement a functionality to get their configuration from the command line. Setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`python script.py headless=True`. .. raw:: html <br> Usage ^^^^^ .. tabs:: .. group-tab:: Function parameters .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-4-parameters-torch] :end-before: [end-isaac-gym-envs-preview-4-parameters-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-4-parameters-jax] :end-before: [end-isaac-gym-envs-preview-4-parameters-jax] .. group-tab:: Command line arguments (priority) .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-4-cli-torch] :end-before: [end-isaac-gym-envs-preview-4-cli-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-4-cli-jax] :end-before: [end-isaac-gym-envs-preview-4-cli-jax] Run the main script passing the configuration as command line arguments. For example: .. code-block:: python main.py task=Cartpole .. raw:: html <br> API ^^^ .. autofunction:: skrl.envs.loaders.torch.load_isaacgym_env_preview4 .. 
raw:: html <br><hr> Environments (preview 3) ------------------------ The repository https://github.com/NVIDIA-Omniverse/IsaacGymEnvs provides the example reinforcement learning environments for Isaac Gym (preview 3). These environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments (see IsaacGymEnvs's `configuration-and-command-line-arguments <https://github.com/NVIDIA-Omniverse/IsaacGymEnvs#configuration-and-command-line-arguments>`_) or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`). .. note:: Only the configuration related to the environment will be used. The configuration related to RL algorithms are discarded since they do not belong to this library. .. note:: Isaac Gym environments implement a functionality to get their configuration from the command line. Setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`python script.py headless=True`. .. raw:: html <br> Usage ^^^^^ .. tabs:: .. group-tab:: Function parameters .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-3-parameters-torch] :end-before: [end-isaac-gym-envs-preview-3-parameters-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-3-parameters-jax] :end-before: [end-isaac-gym-envs-preview-3-parameters-jax] .. group-tab:: Command line arguments (priority) .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-3-cli-torch] :end-before: [end-isaac-gym-envs-preview-3-cli-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-3-cli-jax] :end-before: [end-isaac-gym-envs-preview-3-cli-jax] Run the main script passing the configuration as command line arguments. For example: .. code-block:: python main.py task=Cartpole .. raw:: html <br> API ^^^ .. autofunction:: skrl.envs.loaders.torch.load_isaacgym_env_preview3 .. raw:: html <br><hr> Environments (preview 2) ------------------------ The example reinforcement learning environments for Isaac Gym (preview 2) are located within the same package (in the :code:`python/rlgpu` directory). These environments can be easily loaded and configured by calling a single function provided with this library. This function also makes it possible to configure the environment from the command line arguments or from its parameters (:literal:`task_name`, :literal:`num_envs`, :literal:`headless`, and :literal:`cli_args`). .. note:: Isaac Gym environments implement a functionality to get their configuration from the command line. Setting the :literal:`headless` option from the trainer configuration will not work. In this case, it is necessary to set the load function's :literal:`headless` argument to True or to invoke the scripts as follows: :literal:`python script.py --headless`. .. raw:: html <br> Usage ^^^^^ .. tabs:: .. group-tab:: Function parameters .. 
tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-2-parameters-torch] :end-before: [end-isaac-gym-envs-preview-2-parameters-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-2-parameters-jax] :end-before: [end-isaac-gym-envs-preview-2-parameters-jax] .. group-tab:: Command line arguments (priority) .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-2-cli-torch] :end-before: [end-isaac-gym-envs-preview-2-cli-torch] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/loaders.py :language: python :emphasize-lines: 2, 5 :start-after: [start-isaac-gym-envs-preview-2-cli-jax] :end-before: [end-isaac-gym-envs-preview-2-cli-jax] Run the main script passing the configuration as command line arguments. For example: .. code-block:: python main.py --task Cartpole .. raw:: html <br> API ^^^ .. autofunction:: skrl.envs.loaders.torch.load_isaacgym_env_preview2
9,804
reStructuredText
36.140151
625
0.609037
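A brief sketch of the preview 4 loader documented above, as an alternative to the isaacgymenvs.make API (the configuration could equally be passed on the command line, which takes priority).

from skrl.envs.loaders.torch import load_isaacgym_env_preview4
from skrl.envs.wrappers.torch import wrap_env

# load the Isaac Gym (preview 4) environment and wrap it for use with skrl
env = load_isaacgym_env_preview4(task_name="Cartpole", num_envs=512, headless=True)
env = wrap_env(env)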
Toni-SM/skrl/docs/source/api/envs/wrapping.rst
:tocdepth: 3 Wrapping (single-agent) ======================= .. raw:: html <br><hr> This library works with a common API to interact with the following RL environments: * OpenAI `Gym <https://www.gymlibrary.dev>`_ / Farama `Gymnasium <https://gymnasium.farama.org/>`_ (single and vectorized environments) * `Farama Shimmy <https://shimmy.farama.org/>`_ * `DeepMind <https://github.com/deepmind/dm_env>`_ * `robosuite <https://robosuite.ai/>`_ * `NVIDIA Isaac Gym <https://developer.nvidia.com/isaac-gym>`_ (preview 2, 3 and 4) * `NVIDIA Isaac Orbit <https://isaac-orbit.github.io/orbit/index.html>`_ * `NVIDIA Omniverse Isaac Gym <https://docs.omniverse.nvidia.com/isaacsim/latest/tutorial_gym_isaac_gym.html>`_ To operate with them and to support interoperability between these non-compatible interfaces, a **wrapping mechanism is provided** as shown in the diagram below .. raw:: html <br> .. image:: ../../_static/imgs/wrapping-light.svg :width: 100% :align: center :class: only-light :alt: Environment wrapping .. image:: ../../_static/imgs/wrapping-dark.svg :width: 100% :align: center :class: only-dark :alt: Environment wrapping .. raw:: html <br> Usage ----- .. tabs:: .. tab:: Omniverse Isaac Gym .. tabs:: .. tab:: Common environment .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-omniverse-isaacgym] :end-before: [pytorch-end-omniverse-isaacgym] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-omniverse-isaacgym] :end-before: [jax-end-omniverse-isaacgym] .. tab:: Multi-threaded environment .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-omniverse-isaacgym-mt] :end-before: [pytorch-end-omniverse-isaacgym-mt] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-omniverse-isaacgym-mt] :end-before: [jax-end-omniverse-isaacgym-mt] .. tab:: Isaac Orbit .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-isaac-orbit] :end-before: [pytorch-end-isaac-orbit] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-isaac-orbit] :end-before: [jax-end-isaac-orbit] .. tab:: Isaac Gym .. tabs:: .. tab:: Preview 4 (isaacgymenvs.make) .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-isaacgym-preview4-make] :end-before: [pytorch-end-isaacgym-preview4-make] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-isaacgym-preview4-make] :end-before: [jax-end-isaacgym-preview4-make] .. tab:: Preview 4 .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-isaacgym-preview4] :end-before: [pytorch-end-isaacgym-preview4] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-isaacgym-preview4] :end-before: [jax-end-isaacgym-preview4] .. tab:: Preview 3 .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-isaacgym-preview3] :end-before: [pytorch-end-isaacgym-preview3] .. 
group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-isaacgym-preview3] :end-before: [jax-end-isaacgym-preview3] .. tab:: Preview 2 .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-isaacgym-preview2] :end-before: [pytorch-end-isaacgym-preview2] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-isaacgym-preview2] :end-before: [jax-end-isaacgym-preview2] .. tab:: Gym / Gymnasium .. tabs:: .. tab:: Gym .. tabs:: .. tab:: Single environment .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-gym] :end-before: [pytorch-end-gym] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-gym] :end-before: [jax-end-gym] .. tab:: Vectorized environment Visit the Gym documentation (`Vector <https://www.gymlibrary.dev/api/vector>`__) for more information about the creation and usage of vectorized environments .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-gym-vectorized] :end-before: [pytorch-end-gym-vectorized] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-gym-vectorized] :end-before: [jax-end-gym-vectorized] .. tab:: Gymnasium .. tabs:: .. tab:: Single environment .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-gymnasium] :end-before: [pytorch-end-gymnasium] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-gymnasium] :end-before: [jax-end-gymnasium] .. tab:: Vectorized environment Visit the Gymnasium documentation (`Vector <https://gymnasium.farama.org/api/vector>`__) for more information about the creation and usage of vectorized environments .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-gymnasium-vectorized] :end-before: [pytorch-end-gymnasium-vectorized] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-gymnasium-vectorized] :end-before: [jax-end-gymnasium-vectorized] .. tab:: Shimmy .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-shimmy] :end-before: [pytorch-end-shimmy] .. group-tab:: |_4| |jax| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [jax-start-shimmy] :end-before: [jax-end-shimmy] .. tab:: DeepMind .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-deepmind] :end-before: [pytorch-end-deepmind] .. .. group-tab:: |_4| |jax| |_4| .. .. literalinclude:: ../../snippets/wrapping.py .. :language: python .. :start-after: [jax-start-deepmind] .. :end-before: [jax-end-deepmind] .. tab:: robosuite .. tabs:: .. group-tab:: |_4| |pytorch| |_4| .. literalinclude:: ../../snippets/wrapping.py :language: python :start-after: [pytorch-start-robosuite] :end-before: [pytorch-end-robosuite] .. .. group-tab:: |_4| |jax| |_4| .. .. 
literalinclude:: ../../snippets/wrapping.py .. :language: python .. :start-after: [jax-start-robosuite] .. :end-before: [jax-end-robosuite] .. raw:: html <br> API (PyTorch) ------------- .. autofunction:: skrl.envs.wrappers.torch.wrap_env .. raw:: html <br> API (JAX) --------- .. autofunction:: skrl.envs.wrappers.jax.wrap_env .. raw:: html <br> Internal API (PyTorch) ---------------------- .. autoclass:: skrl.envs.wrappers.torch.Wrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. py:property:: device The device used by the environment If the wrapped environment does not have the ``device`` property, the value of this property will be ``"cuda:0"`` or ``"cpu"`` depending on the device availability .. autoclass:: skrl.envs.wrappers.torch.OmniverseIsaacGymWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.torch.IsaacOrbitWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.torch.IsaacGymPreview3Wrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.torch.IsaacGymPreview2Wrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.torch.GymWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.torch.GymnasiumWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.torch.DeepMindWrapper :undoc-members: :show-inheritance: :private-members: _spec_to_space, _observation_to_tensor, _tensor_to_action :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.torch.RobosuiteWrapper :undoc-members: :show-inheritance: :private-members: _spec_to_space, _observation_to_tensor, _tensor_to_action :members: .. automethod:: __init__ .. raw:: html <br> Internal API (JAX) ------------------ .. autoclass:: skrl.envs.wrappers.jax.Wrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. py:property:: device The device used by the environment If the wrapped environment does not have the ``device`` property, the value of this property will be ``"cuda"`` or ``"cpu"`` depending on the device availability .. autoclass:: skrl.envs.wrappers.jax.OmniverseIsaacGymWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.jax.IsaacOrbitWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.jax.IsaacGymPreview3Wrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.jax.IsaacGymPreview2Wrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__ .. autoclass:: skrl.envs.wrappers.jax.GymnasiumWrapper :undoc-members: :show-inheritance: :members: .. automethod:: __init__
14,676
reStructuredText
30.029598
189
0.474448
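Finally, a minimal sketch of the single-agent wrapping mechanism described above, using a Gymnasium environment (PyTorch variant). The wrapper type is assumed to be auto-detected by wrap_env.

import gymnasium as gym

from skrl.envs.wrappers.torch import wrap_env

# create and wrap a Gymnasium environment
env = wrap_env(gym.make("Pendulum-v1"))

# the wrapper exposes a common interface (device, spaces, vectorized reset/step)
print(env.device, env.num_envs, env.observation_space, env.action_space)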