# === coax-main/coax/proba_dists/_normal.py ===
import warnings
import jax
import jax.numpy as jnp
import numpy as onp
from gymnasium.spaces import Box
from ..utils import clipped_logit, jit
from ._base import BaseProbaDist
__all__ = (
'NormalDist',
)
class NormalDist(BaseProbaDist):
r"""
A differentiable normal distribution.
The input ``dist_params`` to each of the functions is expected to be of the form:
.. code:: python
dist_params = {'mu': array([...]), 'logvar': array([...])}
which represent the (conditional) distribution parameters. Here, ``mu`` is the mean :math:`\mu`
and ``logvar`` is the log-variance :math:`\log(\sigma^2)`.
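As a rough usage sketch (the space, shapes and values below are illustrative only):
.. code:: python

    import jax
    import coax
    from gymnasium.spaces import Box

    space = Box(low=-1.0, high=1.0, shape=(2,))
    dist = coax.proba_dists.NormalDist(space)

    dist_params = dist.default_priors                     # mu=0, logvar=0, batch_size=1
    X = dist.sample(dist_params, jax.random.PRNGKey(0))   # shape: (1, 2)
    logp = dist.log_proba(dist_params, X)                 # shape: (1,)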
Parameters
----------
space : gymnasium.spaces.Box
The gymnasium-style space that specifies the domain of the distribution.
clip_box : pair of floats, optional
The range of values to allow for *clean* (compact) variates. This is mainly to ensure
reasonable values when one or more dimensions of the Box space have very large ranges, while
in reality only a small part of that range is occupied.
clip_reals : pair of floats, optional
The range of values to allow for *raw* (decompactified) variates, the *reals*, used
internally. This range is set for numeric stability. Namely, the :attr:`postprocess_variate`
method compactifies the reals to a closed interval (Box) by applying a logistic sigmoid.
Setting a finite range for :code:`clip_reals` ensures that the sigmoid doesn't fully
saturate.
clip_logvar : pair of floats, optional
The range of values to allow for the log-variance of the distribution.
"""
def __init__(self, space,
clip_box=(-256., 256.),
clip_reals=(-30., 30.),
clip_logvar=(-20., 20.)):
if not isinstance(space, Box):
raise TypeError(f"{self.__class__.__name__} can only be defined over Box spaces")
super().__init__(space)
self.clip_box = clip_box
self.clip_reals = clip_reals
self.clip_logvar = clip_logvar
self._low = onp.maximum(onp.expand_dims(self.space.low, axis=0), self.clip_box[0])
self._high = onp.minimum(onp.expand_dims(self.space.high, axis=0), self.clip_box[1])
onp.testing.assert_array_less(
self._low, self._high,
"Box clipping resulted in inconsistent boundaries: "
f"low={self._low}, high={self._high}; please specify proper clipping values, "
"e.g. NormalDist(space, clip_box=(-1000., 1000.))")
if onp.any(self._low > self.space.low) or onp.any(self._high < self.space.high):
with onp.printoptions(precision=1):
warnings.warn(
f"one or more dimensions of Box(low={self.space.low}, high={self.space.high}) "
f"will be clipped to Box(low={self._low[0]}, high={self._high[0]})")
log_2pi = onp.asarray(1.8378770664093453) # abbreviation
def check_shape(x, name, flatten):
if not isinstance(x, jnp.ndarray):
raise TypeError(f"expected an jax.numpy.ndarray, got: {type(x)}")
if not (x.ndim == len(space.shape) + 1 and x.shape[1:] == space.shape):
expected = ', '.join(f'{i:d}' for i in space.shape)
raise ValueError(f"expected {name}.shape: (?, {expected}), got: {x.shape}")
if flatten:
x = x.reshape(x.shape[0], -1) # batch-flatten
if name.startswith("logvar"):
x = jnp.clip(x, *self.clip_logvar)
return x
def sample(dist_params, rng):
mu = check_shape(dist_params['mu'], name='mu', flatten=True)
logvar = check_shape(dist_params['logvar'], name='logvar', flatten=True)
X = mu + jnp.exp(logvar / 2) * jax.random.normal(rng, mu.shape)
return X.reshape(-1, *self.space.shape)
def mean(dist_params):
mu = check_shape(dist_params['mu'], name='mu', flatten=False)
return mu
def mode(dist_params):
return mean(dist_params)
def log_proba(dist_params, X):
X = check_shape(X, name='X', flatten=True)
mu = check_shape(dist_params['mu'], name='mu', flatten=True)
logvar = check_shape(dist_params['logvar'], name='logvar', flatten=True)
n = logvar.shape[-1]
logdetvar = jnp.sum(logvar, axis=-1) # log(det(M)) = tr(log(M))
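# Mahalanobis term for the diagonal covariance, (x - mu)^T diag(sigma^2)^{-1} (x - mu), per batch row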
quadratic = jnp.einsum('ij,ij->i', jnp.square(X - mu), jnp.exp(-logvar))
logp = -0.5 * (n * log_2pi + logdetvar + quadratic)
return logp
def entropy(dist_params):
logvar = check_shape(dist_params['logvar'], name='logvar', flatten=True)
assert logvar.ndim == 2 # check if flattened
logdetvar = jnp.sum(logvar, axis=-1) # log(det(M)) = tr(log(M))
n = logvar.shape[-1]
return 0.5 * (n * log_2pi + logdetvar + n)
def cross_entropy(dist_params_p, dist_params_q):
mu1 = check_shape(dist_params_p['mu'], name='mu_p', flatten=True)
mu2 = check_shape(dist_params_q['mu'], name='mu_q', flatten=True)
logvar1 = check_shape(dist_params_p['logvar'], name='logvar_p', flatten=True)
logvar2 = check_shape(dist_params_q['logvar'], name='logvar_q', flatten=True)
n = mu1.shape[-1]
assert n == mu2.shape[-1] == logvar1.shape[-1] == logvar2.shape[-1]
var1 = jnp.exp(logvar1)
var2_inv = jnp.exp(-logvar2)
logdetvar2 = jnp.sum(logvar2, axis=-1) # log(det(M)) = tr(log(M))
quadratic = jnp.einsum('ij,ij->i', var1 + jnp.square(mu1 - mu2), var2_inv)
return 0.5 * (n * log_2pi + logdetvar2 + quadratic)
def kl_divergence(dist_params_p, dist_params_q):
mu1 = check_shape(dist_params_p['mu'], name='mu_p', flatten=True)
mu2 = check_shape(dist_params_q['mu'], name='mu_q', flatten=True)
logvar1 = check_shape(dist_params_p['logvar'], name='logvar_p', flatten=True)
logvar2 = check_shape(dist_params_q['logvar'], name='logvar_q', flatten=True)
n = mu1.shape[-1]
assert n == mu2.shape[-1] == logvar1.shape[-1] == logvar2.shape[-1]
var1 = jnp.exp(logvar1)
var2_inv = jnp.exp(-logvar2)
logdetvar1 = jnp.sum(logvar1, axis=-1) # log(det(M)) = tr(log(M))
logdetvar2 = jnp.sum(logvar2, axis=-1) # log(det(M)) = tr(log(M))
quadratic = jnp.einsum('ij,ij->i', var1 + jnp.square(mu1 - mu2), var2_inv)
return 0.5 * (logdetvar2 - logdetvar1 + quadratic - n)
def affine_transform_func(dist_params, scale, shift, value_transform=None):
if value_transform is None:
f = f_inv = lambda x: x
else:
f, f_inv = value_transform
mu = check_shape(dist_params['mu'], name='mu', flatten=False)
logvar = check_shape(dist_params['logvar'], name='logvar', flatten=False)
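# apply the affine transform in the untransformed space: pull mu and the variance back through
# f_inv, shift the mean and scale the variance, then map both forward through f again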
var_new = f(f_inv(jnp.exp(logvar)) * jnp.square(scale))
return {'mu': f(f_inv(mu) + shift), 'logvar': jnp.log(var_new)}
self._sample_func = jit(sample)
self._mean_func = jit(mean)
self._mode_func = jit(mode)
self._log_proba_func = jit(log_proba)
self._entropy_func = jit(entropy)
self._cross_entropy_func = jit(cross_entropy)
self._kl_divergence_func = jit(kl_divergence)
self._affine_transform_func = jit(affine_transform_func, static_argnums=(3,))
@property
def default_priors(self):
shape = (1, *self.space.shape) # include batch axis
return {'mu': jnp.zeros(shape), 'logvar': jnp.zeros(shape)}
def preprocess_variate(self, rng, X):
X = jnp.asarray(X, dtype=self.space.dtype) # ensure ndarray
X = jnp.reshape(X, (-1, *self.space.shape)) # ensure batch axis
X = jnp.clip(X, self._low, self._high) # clip to be safe
X = clipped_logit((X - self._low) / (self._high - self._low)) # closed intervals->reals
return X
def postprocess_variate(self, rng, X, index=0, batch_mode=False):
X = jnp.asarray(X, dtype=self.space.dtype) # ensure ndarray
X = jnp.reshape(X, (-1, *self.space.shape)) # ensure correct shape
X = jnp.clip(X, *self.clip_reals) # clip for stability
X = self._low + (self._high - self._low) * jax.nn.sigmoid(X) # reals->closed interval
return X if batch_mode else onp.asanyarray(X[index])
@property
def sample(self):
r"""
JIT-compiled function that generates differentiable variates using the reparametrization
trick, i.e. :math:`x\sim\mathcal{N}(\mu,\sigma^2)` is implemented as
.. math::
\varepsilon\ &\sim\ \mathcal{N}(0,1) \\
x\ &=\ \mu + \sigma\,\varepsilon
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
rng : PRNGKey
A key for seeding the pseudo-random number generator.
Returns
-------
X : ndarray
A batch of differentiable variates.
"""
return self._sample_func
@property
def mean(self):
r"""
JIT-compiled function that generates differentiable means of the distribution, in this case
simply :math:`\mu`.
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
Returns
-------
X : ndarray
A batch of differentiable variates.
"""
return self._mean_func
@property
def mode(self):
r"""
JIT-compiled function that generates differentiable modes of the distribution, which for a
normal distribution is the same as the :attr:`mean`.
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
Returns
-------
X : ndarray
A batch of differentiable variates.
"""
return self._mode_func
@property
def log_proba(self):
r"""
JIT-compiled function that evaluates log-probabilities.
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
X : ndarray
A batch of variates, e.g. a batch of actions :math:`a` collected from experience.
Returns
-------
logP : ndarray of floats
A batch of log-probabilities associated with the provided variates.
"""
return self._log_proba_func
@property
def entropy(self):
r"""
JIT-compiled function that computes the entropy of the distribution.
.. math::
H\ =\ -\mathbb{E}_p \log p
\ =\ \frac12\left( \log(2\pi\sigma^2) + 1\right)
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
Returns
-------
H : ndarray of floats
A batch of entropy values.
"""
return self._entropy_func
@property
def cross_entropy(self):
r"""
JIT-compiled function that computes the cross-entropy of a distribution :math:`q` relative
to another distribution :math:`p`:
.. math::
\text{CE}[p,q]\ =\ -\mathbb{E}_p \log q
\ =\ \frac12\left(
\log(2\pi\sigma_q^2)
+ \frac{(\mu_p-\mu_q)^2+\sigma_p^2}{\sigma_q^2}
\right)
Parameters
----------
dist_params_p : pytree with ndarray leaves
The distribution parameters of the *base* distribution :math:`p`.
dist_params_q : pytree with ndarray leaves
The distribution parameters of the *auxiliary* distribution :math:`q`.
"""
return self._cross_entropy_func
@property
def kl_divergence(self):
r"""
JIT-compiled function that computes the Kullback-Leibler divergence of a distribution
:math:`q` relative to another distribution :math:`p`:
.. math::
\text{KL}[p,q]\ = -\mathbb{E}_p \left(\log q -\log p\right)
\ =\ \frac12\left(
\log(\sigma_q^2) - \log(\sigma_p^2)
+ \frac{(\mu_p-\mu_q)^2+\sigma_p^2}{\sigma_q^2}
- 1
\right)
Parameters
----------
dist_params_p : pytree with ndarray leaves
The distribution parameters of the *base* distribution :math:`p`.
dist_params_q : pytree with ndarray leaves
The distribution parameters of the *auxiliary* distribution :math:`q`.
"""
return self._kl_divergence_func
# === coax-main/coax/proba_dists/_normal_test.py ===
import gymnasium
import jax
import haiku as hk
from .._base.test_case import TestCase
from ._normal import NormalDist
class TestNormalDist(TestCase):
decimal = 5
def setUp(self):
self.rngs = hk.PRNGSequence(13)
def tearDown(self):
del self.rngs
def test_kl_divergence(self):
dist = NormalDist(gymnasium.spaces.Box(low=0, high=1, shape=(7,)))
params_p = {
'mu': jax.random.normal(next(self.rngs), shape=(3, 7)),
'logvar': jax.random.normal(next(self.rngs), shape=(3, 7))}
params_q = {
'mu': jax.random.normal(next(self.rngs), shape=(3, 7)),
'logvar': jax.random.normal(next(self.rngs), shape=(3, 7))}
# params_q = {k: v + 0.001 for k, v in params_p.items()}
kl_div_direct = dist.kl_divergence(params_p, params_q)
kl_div_from_ce = dist.cross_entropy(params_p, params_q) - dist.entropy(params_p)
self.assertArrayAlmostEqual(kl_div_direct, kl_div_from_ce)
def test_box_clip(self):
msg = (
r"one or more dimensions of Box\(low=.*, high=.*\) "
r"will be clipped to Box\(low=.*, high=.*\)"
)
with self.assertWarnsRegex(UserWarning, msg):
dist = NormalDist(gymnasium.spaces.Box(low=-1000, high=10000000, shape=(1,)))
self.assertGreater(dist._low[0, 0], dist.space.low[0])
self.assertLess(dist._high[0, 0], dist.space.high[0])
# === coax-main/coax/proba_dists/_squashed_normal.py ===
import jax
import jax.numpy as jnp
import numpy as onp
from ._base import BaseProbaDist
from ._normal import NormalDist
class SquashedNormalDist(BaseProbaDist):
r"""
A differentiable squashed normal distribution.
The input ``dist_params`` to each of the functions is expected to be of the form:
.. code:: python
dist_params = {'mu': array([...]), 'logvar': array([...])}
which represent the (conditional) distribution parameters. Here, ``mu`` is the mean :math:`\mu`
and ``logvar`` is the log-variance :math:`\log(\sigma^2)`.
Parameters
----------
space : gymnasium.spaces.Box
The gymnasium-style space that specifies the domain of the distribution.
clip_logvar : pair of floats, optional
The range of values to allow for the log-variance of the distribution.
"""
def __init__(self, space, clip_logvar=None):
super().__init__(space)
self._normal_dist = NormalDist(space=space, clip_logvar=clip_logvar)
self._scale = (space.high - space.low) / 2.0
self._offset = (space.high + space.low) / 2.0
def sample(dist_params, rng):
X = self._normal_dist.sample(dist_params, rng)
return jnp.tanh(X) * self._scale + self._offset
def mean(dist_params):
mu = self._normal_dist.mean(dist_params)
return jnp.tanh(mu) * self._scale + self._offset
def mode(dist_params):
return mean(dist_params)
arctanh_eps = 1e-7 # avoid arctanh(1) = inf and arctanh(-1) = -inf
def log_proba(dist_params, X):
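# change of variables for Y = tanh(X):
#   log p_Y(y) = log p_X(arctanh(y)) - sum_i log(1 - tanh(x_i)^2)
# where the Jacobian term log(1 - tanh(x)^2) is evaluated in a numerically stable way as
#   2 * (log(2) - x - log(1 + exp(-2x)))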
X = jnp.arctanh(jnp.clip(X, a_min=-1.0 + arctanh_eps, a_max=1.0 - arctanh_eps))
logp = self._normal_dist.log_proba(dist_params, X)
return logp - jnp.sum(2 * (jnp.log(2) - X - jnp.log(1 + jnp.exp(-2 * X))), axis=-1)
self._sample_func = jax.jit(sample)
self._mean_func = jax.jit(mean)
self._mode_func = jax.jit(mode)
self._log_proba_func = jax.jit(log_proba)
self._affine_transform_func = self._normal_dist.affine_transform
@property
def default_priors(self):
return self._normal_dist.default_priors
def preprocess_variate(self, rng, X):
X = jnp.asarray(X, dtype=self.space.dtype) # ensure ndarray
X = jnp.reshape(X, (-1, *self.space.shape)) # ensure batch axis
return X
def postprocess_variate(self, rng, X, index=0, batch_mode=False):
X = jnp.asarray(X, dtype=self.space.dtype) # ensure ndarray
X = jnp.reshape(X, (-1, *self.space.shape)) # ensure correct shape
return X if batch_mode else onp.asanyarray(X[index])
@property
def sample(self):
r"""
JIT-compiled function that generates differentiable variates using the reparametrization
trick, i.e. :math:`x\sim\tanh(\mathcal{N}(\mu,\sigma^2))` is implemented as
.. math::
\varepsilon\ &\sim\ \mathcal{N}(0,1) \\
x\ &=\ \tanh(\mu + \sigma\,\varepsilon)
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
rng : PRNGKey
A key for seeding the pseudo-random number generator.
Returns
-------
X : ndarray
A batch of differentiable variates.
"""
return self._sample_func
@property
def mean(self):
r"""
JIT-compiled function that generates differentiable means of the distribution, in this case
simply :math:`\tanh(\mu)`.
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
Returns
-------
X : ndarray
A batch of differentiable variates.
"""
return self._mean_func
@property
def mode(self):
r"""
JIT-compiled function that generates differentiable modes of the distribution, which for a
normal distribution is the same as the :attr:`mean`.
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
Returns
-------
X : ndarray
A batch of differentiable variates.
"""
return self._mode_func
@property
def log_proba(self):
r"""
JIT-compiled function that evaluates log-probabilities.
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
X : ndarray
A batch of variates, e.g. a batch of actions :math:`a` collected from experience.
Returns
-------
logP : ndarray of floats
A batch of log-probabilities associated with the provided variates.
"""
return self._log_proba_func
@property
def entropy(self):
r"""
JIT-compiled function that computes the entropy of the distribution.
.. math::
H\ =\ -\mathbb{E}_p \log p
\ =\ \frac12\left( \log(2\pi\sigma^2) + 1\right)
Parameters
----------
dist_params : pytree with ndarray leaves
A batch of distribution parameters.
Returns
-------
H : ndarray of floats
A batch of entropy values.
"""
return self._normal_dist.entropy
@property
def cross_entropy(self):
r"""
JIT-compiled function that computes the cross-entropy of a distribution :math:`q` relative
to another distribution :math:`p`:
.. math::
\text{CE}[p,q]\ =\ -\mathbb{E}_p \log q
\ =\ \frac12\left(
\log(2\pi\sigma_q^2)
+ \frac{(\mu_p-\mu_q)^2+\sigma_p^2}{\sigma_q^2}
\right)
Parameters
----------
dist_params_p : pytree with ndarray leaves
The distribution parameters of the *base* distribution :math:`p`.
dist_params_q : pytree with ndarray leaves
The distribution parameters of the *auxiliary* distribution :math:`q`.
"""
return self._normal_dist.cross_entropy
@property
def kl_divergence(self):
r"""
JIT-compiled function that computes the Kullback-Leibler divergence of a distribution
:math:`q` relative to another distribution :math:`p`:
.. math::
\text{KL}[p,q]\ = -\mathbb{E}_p \left(\log q -\log p\right)
\ =\ \frac12\left(
\log(\sigma_q^2) - \log(\sigma_p^2)
+ \frac{(\mu_p-\mu_q)^2+\sigma_p^2}{\sigma_q^2}
- 1
\right)
Parameters
----------
dist_params_p : pytree with ndarray leaves
The distribution parameters of the *base* distribution :math:`p`.
dist_params_q : pytree with ndarray leaves
The distribution parameters of the *auxiliary* distribution :math:`q`.
"""
return self._normal_dist.kl_divergence
# === coax-main/coax/regularizers/__init__.py ===
r"""
Regularizers
============
.. autosummary::
:nosignatures:
coax.regularizers.EntropyRegularizer
coax.regularizers.KLDivRegularizer
----
This is a collection of regularizers that can be used to put soft constraints on stochastic function
approximators. These are typically added to the loss/objective to avoid premature exploitation of a
policy.
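As a rough sketch of how a regularizer is typically attached to an updater (here ``pi`` is assumed
to be a :class:`coax.Policy` and ``VanillaPG`` is used purely as an example of an updater that
accepts a ``regularizer`` argument):
.. code:: python

    import coax
    import optax

    ent_reg = coax.regularizers.EntropyRegularizer(pi, beta=0.01)
    vanilla_pg = coax.policy_objectives.VanillaPG(
        pi, optimizer=optax.adam(1e-3), regularizer=ent_reg)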
Object Reference
----------------
.. autoclass:: coax.regularizers.EntropyRegularizer
.. autoclass:: coax.regularizers.KLDivRegularizer
"""
from ._entropy import Regularizer
from ._entropy import EntropyRegularizer
from ._kl_div import KLDivRegularizer
from ._nstep_entropy import NStepEntropyRegularizer
__all__ = (
'Regularizer',
'EntropyRegularizer',
'KLDivRegularizer',
'NStepEntropyRegularizer'
)
# === coax-main/coax/regularizers/_base.py ===
import haiku as hk
from ..utils import is_stochastic, jit
from .._core.base_stochastic_func_type1 import BaseStochasticFuncType1
from .._core.base_stochastic_func_type2 import BaseStochasticFuncType2
class Regularizer:
r"""
Abstract base class for policy regularizers. Check out
:class:`coax.regularizers.EntropyRegularizer` for a specific example.
Parameters
----------
f : stochastic function approximator
The stochastic function approximator (e.g. :class:`coax.Policy`) to regularize.
"""
def __init__(self, f):
if not is_stochastic(f):
raise TypeError(f"proba_dist must be a stochastic function, got {type(f)}")
self.f = f
@property
def hyperparams(self):
return {}
@property
def function(self):
r"""
JIT-compiled function that returns the values for the regularization term.
Parameters
----------
dist_params : pytree with ndarray leaves
The distribution parameters of the (conditional) probability distribution.
\*\*hyperparams
Hyperparameters specific to the regularizer, see :attr:`hyperparams`.
"""
return self._function
@property
def metrics_func(self):
r"""
JIT-compiled function that returns the performance metrics for the regularization term.
Parameters
----------
dist_params : pytree with ndarray leaves
The distribution parameters of the (conditional) probability distribution
:math:`\pi(a|s)`.
\*\*hyperparams
Hyperparameters specific to the regularizer, see :attr:`hyperparams`.
"""
return self._metrics_func
@property
def batch_eval(self):
if not hasattr(self, '_batch_eval_func'):
def batch_eval_func(params, hyperparams, state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
if isinstance(self.f, BaseStochasticFuncType1):
S = self.f.observation_preprocessor(next(rngs), transition_batch.S)
A = self.f.action_preprocessor(next(rngs), transition_batch.A)
dist_params, _ = self.f.function(params, state, next(rngs), S, A, False)
elif isinstance(self.f, BaseStochasticFuncType2):
S = self.f.observation_preprocessor(next(rngs), transition_batch.S)
dist_params, _ = self.f.function(params, state, next(rngs), S, False)
else:
raise TypeError(
"f must be derived from BaseStochasticFuncType1 or BaseStochasticFuncType2")
return self.function(dist_params, **hyperparams), self.metrics_func(dist_params,
**hyperparams)
self._batch_eval_func = jit(batch_eval_func)
return self._batch_eval_func
# === coax-main/coax/regularizers/_entropy.py ===
import jax.numpy as jnp
from ..utils import jit
from ._base import Regularizer
class EntropyRegularizer(Regularizer):
r"""
Policy regularization term based on the entropy of the policy.
The regularization term is to be added to the loss function:
.. math::
\text{loss}(\theta; s,a)\ =\ -J(\theta; s,a) - \beta\,H[\pi_\theta(.|s)]
where :math:`J(\theta)` is the bare policy objective.
Parameters
----------
f : stochastic function approximator
The stochastic function approximator (e.g. :class:`coax.Policy`) to regularize.
beta : non-negative float
The coefficient that determines the strength of the overall regularization term.
"""
def __init__(self, f, beta=0.001):
super().__init__(f)
self.beta = beta
def function(dist_params, beta):
entropy = self.f.proba_dist.entropy(dist_params)
return -beta * entropy
def metrics(dist_params, beta):
entropy = self.f.proba_dist.entropy(dist_params)
return {
'EntropyRegularizer/beta': beta,
'EntropyRegularizer/entropy': jnp.mean(entropy)}
self._function = jit(function)
self._metrics_func = jit(metrics)
@property
def hyperparams(self):
return {'beta': self.beta}
@property
def function(self):
r"""
JIT-compiled function that returns the values for the regularization term.
Parameters
----------
dist_params : pytree with ndarray leaves
The distribution parameters of the (conditional) probability distribution.
beta : non-negative float
The coefficient that determines the strength of the overall regularization term.
"""
return self._function
@property
def metrics_func(self):
r"""
JIT-compiled function that returns the performance metrics for the regularization term.
Parameters
----------
dist_params : pytree with ndarray leaves
The distribution parameters of the (conditional) probability distribution.
beta : non-negative float
The coefficient that determines the strength of the overall regularization term.
"""
return self._metrics_func
# === coax-main/coax/regularizers/_kl_div.py ===
import jax.numpy as jnp
from ..utils import jit
from ._base import Regularizer
class KLDivRegularizer(Regularizer):
r"""
Policy regularization term based on the Kullback-Leibler divergence of the policy relative to a
given set of priors.
The regularization term is to be added to the loss function:
.. math::
\text{loss}(\theta; s,a)\ =\
-J(\theta; s,a)
+ \beta\,KL[\pi_\theta, \pi_\text{prior}]
where :math:`J(\theta)` is the bare policy objective. Also, in order to unclutter the notation
we abbreviated :math:`\pi(.|s)` by :math:`\pi`.
Parameters
----------
f : stochastic function approximator
The stochastic function approximator (e.g. :class:`coax.Policy`) to regularize.
beta : non-negative float
The coefficient that determines the strength of the overall regularization term.
priors : pytree with ndarray leaves, optional
The distribution parameters that correspond to the priors. If left unspecified, we'll use
:attr:`proba_dist.default_priors`, see e.g. :attr:`NormalDist.default_priors
<coax.proba_dists.NormalDist.default_priors>`.
"""
def __init__(self, f, beta=0.001, priors=None):
super().__init__(f)
self.beta = beta
self.priors = self.f.proba_dist.default_priors if priors is None else priors
def function(dist_params, priors, beta):
kl_div = self.f.proba_dist.kl_divergence(dist_params, priors)
return beta * kl_div
def metrics(dist_params, priors, beta):
kl_div = self.f.proba_dist.kl_divergence(dist_params, priors)
return {
'KLDivRegularizer/beta': beta,
'KLDivRegularizer/kl_div': jnp.mean(kl_div)}
self._function = jit(function)
self._metrics_func = jit(metrics)
@property
def hyperparams(self):
return {'beta': self.beta, 'priors': self.priors}
@property
def function(self):
r"""
JIT-compiled function that returns the values for the regularization term.
Parameters
----------
dist_params : pytree with ndarray leaves
The distribution parameters of the (conditional) probability distribution.
beta : non-negative float
The coefficient that determines the strength of the overall regularization term.
priors : pytree with ndarray leaves
The distribution parameters that correspond to the priors.
"""
return self._function
@property
def metrics_func(self):
r"""
JIT-compiled function that returns the performance metrics for the regularization term.
Parameters
----------
dist_params : pytree with ndarray leaves
The distribution parameters of the (conditional) probability distribution.
beta : non-negative float
The coefficient that determines the strength of the overall regularization term.
priors : pytree with ndarray leaves
The distribution parameters that correspond to the priors.
"""
return self._metrics_func
# === coax-main/coax/regularizers/_nstep_entropy.py ===
import haiku as hk
import jax
import jax.numpy as jnp
from .._core.base_stochastic_func_type2 import BaseStochasticFuncType2
from ..utils import jit
from ._entropy import EntropyRegularizer
class NStepEntropyRegularizer(EntropyRegularizer):
r"""
Policy regularization term based on the n-step entropy of the policy.
The regularization term is to be added to the loss function:
.. math::
\text{loss}(\theta; s,a)\ =\ -J(\theta; s,a) - \beta\,H[\pi_\theta(.|s)]
where :math:`J(\theta)` is the bare policy objective.
Parameters
----------
f : stochastic function approximator
The stochastic function approximator (e.g. :class:`coax.Policy`) to regularize.
n : tuple(int), list(int), ndarray
Time indices of the steps (counted from the current state at time `t`)
to include in the regularization. For example `n = [2, 3]` adds an entropy bonus for the
policy at the states t + 2 and t + 3 to the objective.
beta : non-negative float
The coefficient that determines the strength of the overall regularization term.
gamma : float between 0 and 1
The amount by which to discount the entropy bonuses.
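For example, a minimal sketch that adds discounted entropy bonuses for the states at :code:`t + 1`
and :code:`t + 2` (``pi`` is assumed to be a :class:`coax.Policy`; the transitions fed to the
updater must come from an :class:`NStep <coax.reward_tracing.NStep>` tracer with
:code:`record_extra_info=True`):
.. code:: python

    reg = coax.regularizers.NStepEntropyRegularizer(pi, n=[1, 2], beta=0.01, gamma=0.9)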
"""
def __init__(self, f, n, beta=0.001, gamma=0.99):
super().__init__(f)
if not isinstance(n, (tuple, list, jnp.ndarray)):
raise TypeError(f"n must be a list, an ndarray or a tuple, got: {type(n)}")
if len(n) == 0:
raise ValueError("n cannot be empty")
self.n = n
self._n = jnp.array(n)
self.beta = beta
self.gamma = gamma
self._gammas = jnp.take(jnp.power(self.gamma, jnp.arange(self.n[-1] + 1)), self._n)
def entropy(dist_params, dones):
valid = self.valid_from_done(dones)
vf = jax.vmap(lambda p, v, gamma: gamma * self.f.proba_dist.entropy(p) * v)
return jnp.sum(vf(dist_params, valid, self._gammas), axis=0)
def function(dist_params, dones, beta):
assert len(dist_params) == 2
return -beta * entropy(dist_params, dones)
def metrics(dist_params, dones, beta):
assert len(dist_params) == 2
valid = self.valid_from_done(dones)
return {
'EntropyRegularizer/beta': beta,
'EntropyRegularizer/entropy': jnp.mean(entropy(dist_params, dones) /
jnp.sum(valid, axis=0))
}
self._function = jit(function)
self._metrics_func = jit(metrics)
@property
def batch_eval(self):
if not hasattr(self, '_batch_eval_func'):
def batch_eval_func(params, hyperparams, state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
if not isinstance(transition_batch.extra_info, dict):
raise TypeError(
'TransitionBatch.extra_info has to be a dict containing "states" and' +
' "dones" for the n-step entropy regularization. Make sure to set the' +
' record_extra_info flag in the NStep tracer.')
if isinstance(self.f, BaseStochasticFuncType2):
def f(s_next):
return self.f.function(params,
state, next(rngs),
self.f.observation_preprocessor(
next(rngs), s_next), True)
n_states = transition_batch.extra_info['states']
dist_params, _ = jax.vmap(f)(jax.tree_util.tree_map(
lambda *t: jnp.stack(t), *n_states))
dist_params = jax.tree_util.tree_map(
lambda t: jnp.take(t, self._n, axis=0), dist_params)
else:
raise TypeError(
"f must be derived from BaseStochasticFuncType2")
dones = jnp.stack(transition_batch.extra_info['dones'])
dones = jnp.take(dones, self._n, axis=0)
return self.function(dist_params,
dones,
**hyperparams), self.metrics_func(dist_params, dones,
**hyperparams)
self._batch_eval_func = jit(batch_eval_func)
return self._batch_eval_func
def valid_from_done(self, dones):
"""
Generates a mask that filters all time steps after a done signal has been reached.
Parameters
----------
dones : ndarray
Array of boolean entries indicating whether the episode has ended.
Returns
-------
valid : ndarray
Mask that filters all entries after a done=True has been reached.
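As a small illustration (``reg`` is assumed to be an instance of this class):
.. code:: python

    dones = jnp.array([False, False, True, False])
    valid = reg.valid_from_done(dones)   # -> [1., 1., 1., 0.]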
"""
valid = jnp.ones_like(dones, dtype=jnp.float32)
return valid.at[1:].set(1 - jnp.clip(jnp.cumsum(dones[:-1], axis=0), a_max=1))
# === coax-main/coax/reward_tracing/__init__.py ===
r"""
Reward Tracing
==============
.. autosummary::
:nosignatures:
coax.reward_tracing.NStep
coax.reward_tracing.MonteCarlo
coax.reward_tracing.TransitionBatch
----
The term **reward tracing** refers to the process of turning raw experience into
:class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` objects. These
:class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` objects are then used to learn, i.e.
to update our function approximators.
Reward tracing typically entails keeping some episodic cache in order to relate a state :math:`S_t`
or state-action pair :math:`(S_t, A_t)` to a collection of objects that can be used to construct a
target (feedback signal):
.. math::
\left(R^{(n)}_t, I^{(n)}_t, S_{t+n}, A_{t+n}\right)
where
.. math::
R^{(n)}_t\ &=\ \sum_{k=0}^{n-1}\gamma^kR_{t+k} \\
I^{(n)}_t\ &=\ \left\{\begin{matrix}
0 & \text{if $S_{t+n}$ is a terminal state} \\
\gamma^n & \text{otherwise}
\end{matrix}\right.
For example, in :math:`n`-step SARSA the target is constructed as:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t + I^{(n)}_t\,q(S_{t+n}, A_{t+n})
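As a rough sketch of how a tracer is typically used inside a training loop (``env``, ``pi``,
``max_num_steps`` and the downstream update step are assumed to exist):
.. code:: python

    tracer = coax.reward_tracing.NStep(n=5, gamma=0.9)

    s, info = env.reset()
    for t in range(max_num_steps):
        a, logp = pi(s, return_logp=True)
        s_next, r, terminated, truncated, info = env.step(a)

        tracer.add(s, a, r, terminated or truncated, logp)
        while tracer:
            transition_batch = tracer.pop()
            ...  # e.g. feed into an updater or a replay buffer

        if terminated or truncated:
            break
        s = s_next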
Object Reference
----------------
.. autoclass:: coax.reward_tracing.NStep
.. autoclass:: coax.reward_tracing.MonteCarlo
.. autoclass:: coax.reward_tracing.TransitionBatch
"""
from ._transition import TransitionBatch
from ._montecarlo import MonteCarlo
from ._nstep import NStep
__all__ = (
'TransitionBatch',
'MonteCarlo',
'NStep',
)
# === coax-main/coax/reward_tracing/_base.py ===
from abc import ABC, abstractmethod
import jax
import numpy as onp
from .._base.errors import InsufficientCacheError
__all__ = (
'BaseRewardTracer',
)
class BaseRewardTracer(ABC):
@abstractmethod
def reset(self):
r"""
Reset the cache to the initial state.
"""
pass
@abstractmethod
def add(self, s, a, r, done, logp=0.0, w=1.0):
r"""
Add a transition to the experience cache.
Parameters
----------
s : state observation
A single state observation.
a : action
A single action.
r : float
A single observed reward.
done : bool
Whether the episode has finished.
logp : float, optional
The log-propensity :math:`\log\pi(a|s)`.
w : float, optional
Sample weight associated with the given state-action pair.
"""
pass
@abstractmethod
def pop(self):
r"""
Pop a single transition from the cache.
Returns
-------
transition : TransitionBatch
A :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` object with
``batch_size=1``.
"""
pass
def flush(self):
r"""
Flush all transitions from the cache.
Returns
-------
transitions : TransitionBatch
A :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` object.
"""
if not self:
raise InsufficientCacheError(
"cache needs to receive more transitions before it can be flushed")
transitions = []
while self:
transitions.append(self.pop())
return jax.tree_map(lambda *leaves: onp.concatenate(leaves, axis=0), *transitions)
# === coax-main/coax/reward_tracing/_montecarlo.py ===
from .._base.errors import InsufficientCacheError, EpisodeDoneError
from ._base import BaseRewardTracer
from ._transition import TransitionBatch
__all__ = (
'MonteCarlo',
)
class MonteCarlo(BaseRewardTracer):
r"""
A short-term cache for episodic Monte Carlo sampling.
Parameters
----------
gamma : float between 0 and 1
The amount by which to discount future rewards.
"""
def __init__(self, gamma):
self.gamma = float(gamma)
self.reset()
def reset(self):
self._list = []
self._done = False
self._g = 0 # accumulator for return
def add(self, s, a, r, done, logp=0.0, w=1.0):
if self._done and len(self):
raise EpisodeDoneError(
"please flush cache (or repeatedly pop) before appending new transitions")
self._list.append((s, a, r, logp, w))
self._done = bool(done)
if self._done:
self._g = 0. # init return
def __len__(self):
return len(self._list)
def __bool__(self):
return bool(len(self)) and self._done
def pop(self):
if not self:
if not len(self):
raise InsufficientCacheError(
"cache needs to receive more transitions before it can be popped from")
else:
raise InsufficientCacheError(
"cannot pop from cache before before receiving done=True")
# pop state-action (propensities) pair
s, a, r, logp, w = self._list.pop()
# update return
self._g = r + self.gamma * self._g
return TransitionBatch.from_single(
s=s, a=a, logp=logp, r=self._g, done=True, gamma=self.gamma, # no bootstrapping
s_next=s, a_next=a, logp_next=logp, w=w) # dummy values for *_next
# === coax-main/coax/reward_tracing/_montecarlo_test.py ===
from itertools import islice
import pytest
import gymnasium
import jax.numpy as jnp
from numpy.testing import assert_array_almost_equal
from .._base.errors import InsufficientCacheError
from ..utils import check_array
from ._montecarlo import MonteCarlo
class MockEnv:
action_space = gymnasium.spaces.Discrete(10)
class TestMonteCarlo:
env = MockEnv()
gamma = 0.85
S = jnp.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
A = jnp.array([6, 3, 7, 4, 6, 9, 2, 6, 7, 4, 3, 7, 7])
# P = jnp.array([
# [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # a=6
# [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # a=3
# [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # a=7
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], # a=4
# [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # a=6
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], # a=9
# [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], # a=2
# [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # a=6
# [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # a=7
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], # a=4
# [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # a=3
# [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # a=7
# [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # a=7
# ])
R = jnp.array(
[-0.48, 0.16, 0.23, 0.11, 1.46, 1.53, -2.43, 0.60, -0.25, -0.16, -1.47, 1.48, -0.02])
D = jnp.array([False] * 12 + [True])
G = jnp.zeros_like(R)
for i, r in enumerate(R[::-1]):
G = G.at[i].set(r + gamma * G[i - 1])
G = G[::-1]
episode = list(zip(S, A, R, D))
def test_append_pop_too_soon(self):
cache = MonteCarlo(self.gamma)
for s, a, r, done in self.episode:
cache.add(s, a, r, done)
break
with pytest.raises(InsufficientCacheError):
cache.pop()
def test_append_pop_expected(self):
cache = MonteCarlo(self.gamma)
for i, (s, a, r, done) in enumerate(self.episode, 1):
cache.add(s, a, r, done)
assert len(cache) == i
assert cache
assert len(cache) == 13
for i in range(13):
assert cache
transition = cache.pop()
check_array(transition.S, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.A, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.Rn, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.In, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.S_next, ndim=1, axis_size=1, axis=0, except_np=True)
assert_array_almost_equal(transition.S[0], self.S[12 - i])
assert_array_almost_equal(transition.A[0], self.A[12 - i])
assert_array_almost_equal(transition.Rn[0], self.G[12 - i])
assert_array_almost_equal(transition.In[0], 0.)
assert not cache
def test_append_flush_too_soon(self):
cache = MonteCarlo(self.gamma)
for i, (s, a, r, done) in islice(enumerate(self.episode, 1), 4):
cache.add(s, a, r, done)
assert len(cache) == i
with pytest.raises(InsufficientCacheError):
cache.flush()
def test_append_flush_expected(self):
cache = MonteCarlo(self.gamma)
for i, (s, a, r, done) in enumerate(self.episode, 1):
cache.add(s, a, r, done)
assert len(cache) == i
transitions = cache.flush()
assert_array_almost_equal(transitions.S, self.S[::-1])
assert_array_almost_equal(transitions.A, self.A[::-1])
assert_array_almost_equal(transitions.Rn, self.G[::-1])
assert_array_almost_equal(transitions.In, jnp.zeros(i))
# === coax-main/coax/reward_tracing/_nstep.py ===
from collections import deque
from itertools import islice
import numpy as onp
from .._base.errors import InsufficientCacheError, EpisodeDoneError
from ._base import BaseRewardTracer
from ._transition import TransitionBatch
__all__ = (
'NStep',
)
class NStep(BaseRewardTracer):
r"""
A short-term cache for :math:`n`-step bootstrapping.
Parameters
----------
n : positive int
The number of steps over which to bootstrap.
gamma : float between 0 and 1
The amount by which to discount future rewards.
record_extra_info : bool, optional
Store all states, actions and rewards in the `extra_info` field
of the `TransitionBatch`, e.g. for :code:`coax.regularizers.NStepEntropyRegularizer`.
"""
def __init__(self, n, gamma, record_extra_info=False):
self.n = int(n)
self.gamma = float(gamma)
self.record_extra_info = record_extra_info
self.reset()
def reset(self):
self._deque_s = deque([])
self._deque_r = deque([])
self._done = False
self._gammas = onp.power(self.gamma, onp.arange(self.n))
self._gamman = onp.power(self.gamma, self.n)
def add(self, s, a, r, done, logp=0.0, w=1.0):
if self._done and len(self):
raise EpisodeDoneError(
"please flush cache (or repeatedly call popleft) before appending new transitions")
self._deque_s.append((s, a, logp, w))
self._deque_r.append(r)
self._done = bool(done)
def __len__(self):
return len(self._deque_s)
def __bool__(self):
return bool(len(self)) and (self._done or len(self) > self.n)
def pop(self):
if not self:
raise InsufficientCacheError(
"cache needs to receive more transitions before it can be popped from")
# pop state-action (propensities) pair
s, a, logp, w = self._deque_s.popleft()
# n-step partial return
zipped = zip(self._gammas, self._deque_r)
rn = sum(x * r for x, r in islice(zipped, self.n))
r = self._deque_r.popleft()
# keep in mind that we've already popped (s, a, logp)
if len(self) >= self.n:
s_next, a_next, logp_next, _ = self._deque_s[self.n - 1]
done = False
else:
# no more bootstrapping
s_next, a_next, logp_next, done = s, a, logp, True
extra_info = self._extra_info(
s, a, r, len(self) == 0, logp, w) if self.record_extra_info else None
return TransitionBatch.from_single(
s=s, a=a, logp=logp, r=rn, done=done, gamma=self._gamman,
s_next=s_next, a_next=a_next, logp_next=logp_next, w=w, extra_info=extra_info)
def _extra_info(self, s, a, r, done, logp, w):
last_s = s
last_a = a
last_r = r
last_done = done
last_logp = logp
last_w = w
states = []
actions = []
rewards = []
dones = []
log_props = []
weights = []
for i in range(self.n + 1):
states.append(last_s)
actions.append(last_a)
rewards.append(last_r)
dones.append(last_done)
log_props.append(last_logp)
weights.append(last_w)
if i < len(self._deque_s):
last_s, last_a, last_logp, last_w = self._deque_s[i]
last_r = self._deque_r[i]
if done or (i == len(self._deque_s) - 1 and self._done):
last_done = True
else:
last_done = False
else:
last_done = True
assert len(states) == len(actions) == len(
rewards) == len(dones) == len(log_props) == len(weights) == self.n + 1
extra_info = {'states': states, 'actions': actions,
'rewards': rewards, 'dones': dones,
'log_props': log_props, 'weights': weights}
return {k: tuple(v) for k, v in extra_info.items()}
# === coax-main/coax/reward_tracing/_nstep_test.py ===
from itertools import islice
import pytest
import gymnasium
import jax.numpy as jnp
from numpy.testing import assert_array_almost_equal
from .._base.errors import InsufficientCacheError, EpisodeDoneError
from ..utils import check_array
from ._nstep import NStep
class MockEnv:
action_space = gymnasium.spaces.Discrete(10)
class TestNStep:
env = MockEnv()
gamma = 0.85
n = 5
# rnd = jnp.random.RandomState(42)
# S = jnp.arange(13)
# A = rnd.randint(10, size=13)
# R = rnd.randn(13)
# D = jnp.zeros(13, dtype='bool')
# D[-1] = True
# In = (gamma ** n) * jnp.ones(13, dtype='bool')
# In[-n:] = 0
S = jnp.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
A = jnp.array([6, 3, 7, 4, 6, 9, 2, 6, 7, 4, 3, 7, 7])
# P = jnp.array([
# [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # a=6
# [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # a=3
# [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # a=7
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], # a=4
# [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # a=6
# [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], # a=9
# [0, 0, 1, 0, 0, 0, 0, 0, 0, 0], # a=2
# [0, 0, 0, 0, 0, 0, 1, 0, 0, 0], # a=6
# [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # a=7
# [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], # a=4
# [0, 0, 0, 1, 0, 0, 0, 0, 0, 0], # a=3
# [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # a=7
# [0, 0, 0, 0, 0, 0, 0, 1, 0, 0], # a=7
# ])
R = jnp.array(
[-0.48, 0.16, 0.23, 0.11, 1.46, 1.53, -2.43, 0.60, -0.25, -0.16, -1.47, 1.48, -0.02])
D = jnp.array([False] * 12 + [True])
In = jnp.array([0.44370531249999995] * 8 + [0.0] * 5)
episode = list(zip(S, A, R, D))
@property
def Rn(self):
Rn_ = jnp.zeros_like(self.R)
gammas = jnp.power(self.gamma, jnp.arange(13))
for i in range(len(Rn_)):
Rn_ = Rn_.at[i].set(
self.R[i:(i + self.n)].dot(gammas[:len(self.R[i:(i + self.n)])]))
return Rn_
def test_append_done_twice(self):
cache = NStep(self.n, gamma=self.gamma)
for i, (s, a, r, done) in enumerate(self.episode, 1):
if i == 1:
cache.add(s, a, r, True)
else:
with pytest.raises(EpisodeDoneError):
cache.add(s, a, r, True)
def test_append_done_one(self):
cache = NStep(self.n, gamma=self.gamma)
for i, (s, a, r, done) in enumerate(self.episode, 1):
if i == 1:
cache.add(s, a, r, True)
else:
break
assert cache
transitions = cache.flush()
assert_array_almost_equal(transitions.S, self.S[:1])
assert_array_almost_equal(transitions.A, self.A[:1])
assert_array_almost_equal(transitions.Rn, self.R[:1])
assert_array_almost_equal(transitions.In, [0])
def test_pop(self):
cache = NStep(self.n, gamma=self.gamma)
for i, (s, a, r, done) in enumerate(self.episode, 1):
cache.add(s, a, r, done)
assert len(cache) == i
if i <= self.n:
assert not cache
if i > self.n:
assert cache
i = 0
while cache:
transition = cache.pop()
check_array(transition.S, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.A, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.Rn, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.In, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.S_next, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.A_next, ndim=1, axis_size=1, axis=0, except_np=True)
assert_array_almost_equal(transition.S[0], self.S[i])
assert_array_almost_equal(transition.A[0], self.A[i])
assert_array_almost_equal(transition.Rn[0], self.Rn[i])
assert_array_almost_equal(transition.In[0], self.In[i])
if i < 13 - self.n:
assert_array_almost_equal(
transition.S_next[0], self.S[i + self.n])
assert_array_almost_equal(
transition.A_next[0], self.A[i + self.n])
i += 1
def test_pop_eager(self):
cache = NStep(self.n, gamma=self.gamma)
for i, (s, a, r, done) in enumerate(self.episode):
cache.add(s, a, r, done)
assert len(cache) == min(i + 1, self.n + 1)
if cache:
assert i + 1 > self.n
transition = cache.pop()
check_array(transition.S, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.A, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.Rn, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.In, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.S_next, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.A_next, ndim=1, axis_size=1, axis=0, except_np=True)
assert_array_almost_equal(transition.S[0], self.S[i - self.n])
assert_array_almost_equal(transition.A[0], self.A[i - self.n])
assert_array_almost_equal(
transition.Rn[0], self.Rn[i - self.n])
assert_array_almost_equal(
transition.In[0], self.In[i - self.n])
assert_array_almost_equal(transition.S_next[0], self.S[i])
assert_array_almost_equal(transition.A_next[0], self.A[i])
else:
assert i + 1 <= self.n
i = 13 - self.n
while cache:
transition = cache.pop()
check_array(transition.S, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.A, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.Rn, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.In, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.S_next, ndim=1, axis_size=1, axis=0, except_np=True)
check_array(transition.A_next, ndim=1, axis_size=1, axis=0, except_np=True)
assert_array_almost_equal(transition.S[0], self.S[i])
assert_array_almost_equal(transition.A[0], self.A[i])
assert_array_almost_equal(transition.Rn[0], self.Rn[i])
assert_array_almost_equal(transition.In[0], self.In[i])
if i < 13 - self.n:
assert_array_almost_equal(
transition.S_next[0], self.S[i + self.n])
assert_array_almost_equal(
transition.A_next[0], self.A[i + self.n])
i += 1
def test_flush(self):
cache = NStep(self.n, gamma=self.gamma)
for i, (s, a, r, done) in enumerate(self.episode, 1):
cache.add(s, a, r, done)
assert len(cache) == i
if i <= self.n:
assert not cache
if i > self.n:
assert cache
transitions = cache.flush()
assert_array_almost_equal(transitions.S, self.S)
assert_array_almost_equal(transitions.A, self.A)
assert_array_almost_equal(transitions.Rn, self.Rn)
assert_array_almost_equal(transitions.In, self.In)
assert_array_almost_equal(
transitions.S_next[:-self.n], self.S[self.n:])
assert_array_almost_equal(
transitions.A_next[:-self.n], self.A[self.n:])
def test_flush_eager(self):
cache = NStep(self.n, gamma=self.gamma)
for i, (s, a, r, done) in enumerate(self.episode):
cache.add(s, a, r, done)
assert len(cache) == min(i + 1, self.n + 1)
if cache:
assert i + 1 > self.n
transitions = cache.flush()
if i == 12:
slc = slice(i - self.n, None)
assert_array_almost_equal(transitions.S, self.S[slc])
assert_array_almost_equal(transitions.A, self.A[slc])
assert_array_almost_equal(transitions.Rn, self.Rn[slc])
assert_array_almost_equal(transitions.In, self.In[slc])
assert_array_almost_equal(
transitions.S_next.shape, (self.n + 1,))
assert_array_almost_equal(
transitions.A_next.shape, (self.n + 1,))
else:
slc = slice(i - self.n, i - self.n + 1)
slc_next = slice(i, i + 1)
assert_array_almost_equal(transitions.S, self.S[slc])
assert_array_almost_equal(transitions.A, self.A[slc])
assert_array_almost_equal(transitions.Rn, self.Rn[slc])
assert_array_almost_equal(transitions.In, self.In[slc])
assert_array_almost_equal(transitions.S_next, self.S[slc_next])
assert_array_almost_equal(transitions.A_next, self.A[slc_next])
else:
assert i + 1 <= self.n
i = 13 - self.n
while cache:
transition = cache.flush()
assert transition.S == self.S[i]
assert_array_almost_equal(transition.A, self.A[i])
assert transition.Rn == self.Rn[i]
assert transition.In == self.In[i]
if i < 13 - self.n:
assert transition.S_next == self.S[i + self.n]
assert transition.A_next == self.A[i + self.n]
i += 1
def test_flush_insufficient(self):
cache = NStep(self.n, gamma=self.gamma)
for i, (s, a, r, done) in islice(enumerate(self.episode, 1), 4):
cache.add(s, a, r, done)
with pytest.raises(InsufficientCacheError):
cache.flush()
def test_flush_empty(self):
cache = NStep(self.n, gamma=self.gamma)
with pytest.raises(InsufficientCacheError):
cache.flush()
def test_extra_info(self):
cache = NStep(self.n, gamma=self.gamma, record_extra_info=True)
for i, (s, a, r, done) in enumerate(self.episode, 1):
cache.add(s, a, r, done)
assert len(cache) == i
if i <= self.n:
assert not cache
if i > self.n:
assert cache
transitions = cache.flush()
assert type(transitions.extra_info) == dict
states = jnp.stack(transitions.extra_info['states'])
actions = jnp.stack(transitions.extra_info['actions'])
assert_array_almost_equal(states[0], transitions.S)
assert_array_almost_equal(actions[0], transitions.A)
def test_extra_info_dones(self):
cache = NStep(self.n, gamma=self.gamma, record_extra_info=True)
for i, (s, a, r, done) in enumerate(self.episode, 1):
if i == self.n + 2:
cache.add(s, a, r, True)
break
else:
cache.add(s, a, r, False)
assert cache
transitions = cache.flush()
assert type(transitions.extra_info) == dict
dones = jnp.stack(transitions.extra_info['dones'])
for i in range(self.n + 2):
assert dones[:, i].sum() == i
# === coax-main/coax/reward_tracing/_transition.py ===
from functools import partial
import jax
import jax.numpy as jnp
import numpy as onp
from .._base.mixins import CopyMixin
from ..utils import pretty_repr
__all__ = (
'TransitionBatch',
)
class TransitionBatch(CopyMixin):
r"""
A container object for a batch of MDP transitions.
Parameters
----------
S : pytree with ndarray leaves
A batch of state observations :math:`S_t`.
A : ndarray
A batch of actions :math:`A_t`.
logP : ndarray
A batch of log-propensities :math:`\log\pi(A_t|S_t)`.
Rn : ndarray
A batch of partial (:math:`\gamma`-discounted) returns. For instance,
in :math:`n`-step bootstrapping these are given by:
.. math::
R^{(n)}_t\ &=\ \sum_{k=0}^{n-1}\gamma^kR_{t+k} \\
In other words, it's the part of the :math:`n`-step return *without*
the bootstrapping term.
In : ndarray
A batch of bootstrap factors. For instance, in :math:`n`-step
bootstrapping these are given by :math:`I^{(n)}_t=\gamma^n` when
bootstrapping and :math:`I^{(n)}_t=0` otherwise. Bootstrap factors are
used in constructing the :math:`n`-step bootstrapped target:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t + I^{(n)}_t\,Q(S_{t+n}, A_{t+n})
S_next : pytree with ndarray leaves
A batch of next-state observations :math:`S_{t+n}`. This is typically
used to construct the TD target in :math:`n`-step bootstrapping.
A_next : ndarray, optional
A batch of next-actions :math:`A_{t+n}`. This is typically used to
construct the TD target in :math:`n`-step bootstrapping when using SARSA
updates.
logP_next : ndarray, optional
A batch of log-propensities :math:`\log\pi(A_{t+n}|S_{t+n})`.
W : ndarray, optional
A batch of importance weights associated with the sampling procedure that generated each
transition. For example, we need these values when we sample transitions from a
:class:`PrioritizedReplayBuffer <coax.experience_replay.PrioritizedReplayBuffer>`.
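As a small sketch, a single transition can be wrapped into a batch of size 1 via
:attr:`from_single` (the variables ``s``, ``a``, ``s_next`` and ``a_next`` are illustrative):
.. code:: python

    transition_batch = TransitionBatch.from_single(
        s=s, a=a, logp=-0.3, r=1.0, done=False, gamma=0.9,
        s_next=s_next, a_next=a_next, logp_next=-0.5)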
"""
__slots__ = ('S', 'A', 'logP', 'Rn', 'In', 'S_next',
'A_next', 'logP_next', 'W', 'idx', 'extra_info')
def __init__(self, S, A, logP, Rn, In, S_next, A_next=None, logP_next=None, W=None, idx=None,
extra_info=None):
self.S = S
self.A = A
self.logP = logP
self.Rn = Rn
self.In = In
self.S_next = S_next
self.A_next = A_next
self.logP_next = logP_next
self.W = onp.ones_like(Rn) if W is None else W
self.idx = onp.arange(Rn.shape[0], dtype='int32') if idx is None else idx
self.extra_info = extra_info
@classmethod
def from_single(
cls, s, a, logp, r, done, gamma,
s_next=None, a_next=None, logp_next=None, w=1, idx=None, extra_info=None):
r"""
Create a TransitionBatch (with batch_size=1) from a single transition.
Parameters
----------
s : state observation
A single state observation :math:`S_t`.
a : action
A single action :math:`A_t`.
logp : non-positive float
The log-propensity :math:`\log\pi(A_t|S_t)`.
r : float or array of floats
A single reward :math:`R_t`.
done : bool
Whether the episode has finished.
gamma : float between 0 and 1
The amount by which to discount the bootstrapped value, i.e. the bootstrap factor
becomes ``gamma * (1 - done)``.
s_next : state observation
A single next-state observation :math:`S_{t+1}`.
a_next : action
A single next-action :math:`A_{t+1}`.
logp_next : non-positive float
The log-propensity :math:`\log\pi(A_{t+1}|S_{t+1})`.
w : positive float, optional
The importance weight associated with the sampling procedure that generated this
transition.
idx : int, optional
The identifier of this particular transition.
"""
# check types
array = (int, float, onp.ndarray, jnp.ndarray)
if not (isinstance(logp, array) and onp.all(logp <= 0)):
raise TypeError(f"logp must be non-positive float(s), got: {logp}")
if not isinstance(r, array):
raise TypeError(f"r must be a scalar or an array, got: {r}")
if not isinstance(done, bool):
raise TypeError(f"done must be a bool, got: {done}")
if not (isinstance(gamma, (float, int)) and 0 <= gamma <= 1):
raise TypeError(f"gamma must be a float in the unit interval [0, 1], got: {gamma}")
if not (logp_next is None or (isinstance(logp_next, array) and onp.all(logp_next <= 0))):
raise TypeError(f"logp_next must be None or non-positive float(s), got: {logp_next}")
if not (isinstance(w, (float, int)) and w > 0):
raise TypeError(f"w must be a positive float, got: {w}")
return cls(
S=_single_to_batch(s),
A=_single_to_batch(a),
logP=_single_to_batch(logp),
Rn=_single_to_batch(r),
In=_single_to_batch(float(gamma) * (1. - bool(done))),
S_next=_single_to_batch(s_next) if s_next is not None else None,
A_next=_single_to_batch(a_next) if a_next is not None else None,
logP_next=_single_to_batch(logp_next) if logp_next is not None else None,
W=_single_to_batch(float(w)),
idx=_single_to_batch(idx) if idx is not None else None,
extra_info=_single_to_batch(extra_info) if extra_info is not None else None
)
@property
def batch_size(self):
return onp.shape(self.Rn)[0]
def to_singles(self):
r"""
Get an iterator of single transitions.
Returns
-------
transition_batches : iterator of TransitionBatch
An iterator of :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>` objects
with ``batch_size=1``.
**Note:** The iterator walks through the individual transitions *in reverse order*.
"""
if self.batch_size == 1:
yield self
return # break out of generator
def lookup(i, pytree):
s = slice(i, i + 1) # ndim-preserving lookup
return jax.tree_map(lambda leaf: leaf[s], pytree)
for i in range(self.batch_size):
yield TransitionBatch(*map(partial(lookup, i), self))
def items(self):
for k in self.__slots__:
yield k, getattr(self, k)
def _asdict(self):
return dict(self.items())
def __repr__(self):
return pretty_repr(self)
def __iter__(self):
return (getattr(self, a) for a in self.__slots__)
def __getitem__(self, int_or_slice):
return tuple(self).__getitem__(int_or_slice)
def __eq__(self, other):
return (type(self) is type(other)) and all(
onp.allclose(a, b) if isinstance(a, (onp.ndarray, jnp.ndarray))
else (a is b if a is None else a == b)
for a, b in zip(jax.tree_util.tree_leaves(self), jax.tree_util.tree_leaves(other)))
def _single_to_batch(pytree):
# notice that we're pulling everything out of jax.numpy and into ordinary numpy land
return jax.tree_map(lambda arr: onp.expand_dims(arr, axis=0), pytree)
jax.tree_util.register_pytree_node(
TransitionBatch,
lambda tn: (tuple(tn), None),
lambda treedef, leaves: TransitionBatch(*leaves))
# === coax-main/coax/td_learning/__init__.py ===
r"""
TD Learning
===========
.. autosummary::
:nosignatures:
coax.td_learning.SimpleTD
coax.td_learning.Sarsa
coax.td_learning.ExpectedSarsa
coax.td_learning.QLearning
coax.td_learning.DoubleQLearning
coax.td_learning.SoftQLearning
coax.td_learning.ClippedDoubleQLearning
coax.td_learning.SoftClippedDoubleQLearning
----
This is a collection of objects that are used to update value functions via *Temporal Difference*
(TD) learning. A state value function :class:`coax.V` is using :class:`coax.td_learning.SimpleTD`.
To update a state-action value function :class:`coax.Q`, there are multiple options available. The
difference between the options are the manner in which the TD-target is constructed.
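For example, a minimal sketch of constructing and applying one of these updaters (``q`` is assumed
to be a :class:`coax.Q` and ``transition_batch`` a batch of transitions produced by a reward
tracer or replay buffer):
.. code:: python

    import optax

    qlearning = coax.td_learning.QLearning(q, optimizer=optax.adam(1e-3))
    metrics = qlearning.update(transition_batch)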
Object Reference
----------------
.. autoclass:: coax.td_learning.SimpleTD
.. autoclass:: coax.td_learning.Sarsa
.. autoclass:: coax.td_learning.ExpectedSarsa
.. autoclass:: coax.td_learning.QLearning
.. autoclass:: coax.td_learning.DoubleQLearning
.. autoclass:: coax.td_learning.SoftQLearning
.. autoclass:: coax.td_learning.ClippedDoubleQLearning
.. autoclass:: coax.td_learning.SoftClippedDoubleQLearning
"""
from ._simple_td import SimpleTD
from ._sarsa import Sarsa
from ._expectedsarsa import ExpectedSarsa
from ._qlearning import QLearning
from ._doubleqlearning import DoubleQLearning
from ._softqlearning import SoftQLearning
from ._clippeddoubleqlearning import ClippedDoubleQLearning
from ._softclippeddoubleqlearning import SoftClippedDoubleQLearning
__all__ = (
'SimpleTD',
'Sarsa',
'ExpectedSarsa',
'QLearning',
'DoubleQLearning',
'SoftQLearning',
'ClippedDoubleQLearning',
'SoftClippedDoubleQLearning'
)

# ---- coax-main/coax/td_learning/_base.py ----

from abc import ABC, abstractmethod
import jax
import jax.numpy as jnp
import haiku as hk
import optax
import chex
from .._base.mixins import RandomStateMixin
from ..utils import get_grads_diagnostics, is_policy, is_stochastic, is_qfunction, is_vfunction, jit
from ..value_losses import huber, quantile_huber
from ..regularizers import Regularizer
from ..proba_dists import DiscretizedIntervalDist, EmpiricalQuantileDist
__all__ = (
'BaseTDLearningV',
'BaseTDLearningQ',
'BaseTDLearningQWithTargetPolicy'
)
class BaseTDLearning(ABC, RandomStateMixin):
def __init__(self, f, f_targ=None, optimizer=None, loss_function=None, policy_regularizer=None):
self._f = f
self._f_targ = f if f_targ is None else f_targ
self.loss_function = huber if loss_function is None else loss_function
if not isinstance(policy_regularizer, (Regularizer, type(None))):
raise TypeError(
f"policy_regularizer must be a Regularizer, got: {type(policy_regularizer)}")
self.policy_regularizer = policy_regularizer
# optimizer
self._optimizer = optax.adam(1e-3) if optimizer is None else optimizer
self._optimizer_state = self.optimizer.init(self._f.params)
def apply_grads_func(opt, opt_state, params, grads):
updates, new_opt_state = opt.update(grads, opt_state, params)
new_params = optax.apply_updates(params, updates)
return new_opt_state, new_params
self._apply_grads_func = jit(apply_grads_func, static_argnums=0)
@abstractmethod
def target_func(self, target_params, target_state, rng, transition_batch):
pass
@property
@abstractmethod
def target_params(self):
pass
@property
@abstractmethod
def target_function_state(self):
pass
def update(self, transition_batch, return_td_error=False):
r"""
Update the model parameters (weights) of the underlying function approximator.
Parameters
----------
transition_batch : TransitionBatch
A batch of transitions.
return_td_error : bool, optional
Whether to return the TD-errors.
Returns
-------
metrics : dict of scalar ndarrays
The structure of the metrics dict is ``{name: score}``.
td_error : ndarray, optional
The non-aggregated TD-errors, :code:`shape == (batch_size,)`. This is only returned if
we set :code:`return_td_error=True`.
"""
grads, function_state, metrics, td_error = self.grads_and_metrics(transition_batch)
if any(jnp.any(jnp.isnan(g)) for g in jax.tree_util.tree_leaves(grads)):
raise RuntimeError(f"found nan's in grads: {grads}")
self.apply_grads(grads, function_state)
return (metrics, td_error) if return_td_error else metrics
def apply_grads(self, grads, function_state):
r"""
Update the model parameters (weights) of the underlying function approximator given
pre-computed gradients.
        This method is useful in situations in which computation of the gradients is delegated to a
separate (remote) process.
Parameters
----------
grads : pytree with ndarray leaves
A batch of gradients, generated by the :attr:`grads` method.
function_state : pytree
The internal state of the forward-pass function. See :attr:`Q.function_state
<coax.Q.function_state>` and :func:`haiku.transform_with_state` for more details.
"""
self._f.function_state = function_state
self.optimizer_state, self._f.params = \
self._apply_grads_func(self.optimizer, self.optimizer_state, self._f.params, grads)
def grads_and_metrics(self, transition_batch):
r"""
Compute the gradients associated with a batch of transitions.
Parameters
----------
transition_batch : TransitionBatch
A batch of transitions.
Returns
-------
grads : pytree with ndarray leaves
A batch of gradients.
function_state : pytree
The internal state of the forward-pass function. See :attr:`Q.function_state
<coax.Q.function_state>` and :func:`haiku.transform_with_state` for more details.
metrics : dict of scalar ndarrays
The structure of the metrics dict is ``{name: score}``.
td_error : ndarray
The non-aggregated TD-errors, :code:`shape == (batch_size,)`.
"""
return self._grads_and_metrics_func(
self._f.params, self.target_params, self._f.function_state, self.target_function_state,
self._f.rng, transition_batch)
def td_error(self, transition_batch):
r"""
Compute the TD-errors associated with a batch of transitions. We define the TD-error as the
negative gradient of the :attr:`loss_function` with respect to the predicted value:
.. math::
\text{td_error}_i\ =\ -\frac{\partial L(y, \hat{y})}{\partial \hat{y}_i}
Note that this reduces to the ordinary definition :math:`\text{td_error}=y-\hat{y}` when we
        use the :func:`coax.value_losses.mse` loss function.
Parameters
----------
transition_batch : TransitionBatch
A batch of transitions.
Returns
-------
td_errors : ndarray, shape: [batch_size]
A batch of TD-errors.
"""
return self._td_error_func(
self._f.params, self.target_params, self._f.function_state, self.target_function_state,
self._f.rng, transition_batch)
@property
def optimizer(self):
return self._optimizer
@optimizer.setter
def optimizer(self, new_optimizer):
new_optimizer_state_structure = jax.tree_util.tree_structure(
new_optimizer.init(self._f.params))
if new_optimizer_state_structure != jax.tree_util.tree_structure(self.optimizer_state):
raise AttributeError("cannot set optimizer attr: mismatch in optimizer_state structure")
self._optimizer = new_optimizer
@property
def optimizer_state(self):
return self._optimizer_state
@optimizer_state.setter
def optimizer_state(self, new_optimizer_state):
tree_structure = jax.tree_util.tree_structure(self.optimizer_state)
new_tree_structure = jax.tree_util.tree_structure(new_optimizer_state)
if new_tree_structure != tree_structure:
raise AttributeError("cannot set optimizer_state attr: mismatch in tree structure")
self._optimizer_state = new_optimizer_state
class BaseTDLearningV(BaseTDLearning):
def __init__(self, v, v_targ=None, optimizer=None, loss_function=None, policy_regularizer=None):
if not is_vfunction(v):
raise TypeError(f"v must be a v-function, got: {type(v)}")
if not (v_targ is None or is_vfunction(v_targ)):
raise TypeError(f"v_targ must be a v-function or None, got: {type(v_targ)}")
super().__init__(
f=v,
f_targ=v_targ,
optimizer=optimizer,
loss_function=loss_function,
policy_regularizer=policy_regularizer)
def loss_func(params, target_params, state, target_state, rng, transition_batch):
"""
In this function we tie together all the pieces, which is why it's a bit long.
The main structure to watch for is calls to self.target_func(...), which is defined
downstream. All other code is essentially boilerplate to tie this target to the
predictions, i.e. to construct a feedback signal for training.
One of the things we might change here is not to handle both the stochastic and
deterministic cases in the same function.
-kris
"""
rngs = hk.PRNGSequence(rng)
S = self.v.observation_preprocessor(next(rngs), transition_batch.S)
W = jnp.clip(transition_batch.W, 0.1, 10.) # clip importance weights to reduce variance
metrics = {}
# regularization term
if self.policy_regularizer is None:
regularizer = 0.
else:
regularizer, regularizer_metrics = self.policy_regularizer.batch_eval(
target_params['reg'], target_params['reg_hparams'], target_state['reg'],
next(rngs), transition_batch)
metrics.update({f'{self.__class__.__name__}/{k}': v for k,
v in regularizer_metrics.items()})
if is_stochastic(self.v):
dist_params, state_new = self.v.function(params, state, next(rngs), S, True)
dist_params_target = \
self.target_func(target_params, target_state, rng, transition_batch)
if self.policy_regularizer is not None:
dist_params_target = self.v.proba_dist.affine_transform(
dist_params_target, 1., -regularizer, self.v.value_transform)
if isinstance(self.v.proba_dist, DiscretizedIntervalDist):
loss = jnp.mean(self.v.proba_dist.cross_entropy(dist_params_target,
dist_params))
elif isinstance(self.v.proba_dist, EmpiricalQuantileDist):
loss = quantile_huber(dist_params_target['values'],
dist_params['values'],
dist_params['quantile_fractions'], W)
# the rest here is only needed for metrics dict
V = self.v.proba_dist.mean(dist_params)
V = self.v.proba_dist.postprocess_variate(next(rngs), V, batch_mode=True)
G = self.v.proba_dist.mean(dist_params_target)
G = self.v.proba_dist.postprocess_variate(next(rngs), G, batch_mode=True)
dist_params_v_targ, _ = self.v.function(
target_params['v_targ'], target_state['v_targ'], next(rngs), S, False)
V_targ = self.v.proba_dist.mean(dist_params_v_targ)
V_targ = self.v.proba_dist.postprocess_variate(next(rngs), V_targ, batch_mode=True)
else:
V, state_new = self.v.function(params, state, next(rngs), S, True)
G = self.target_func(target_params, target_state, next(rngs), transition_batch)
# flip sign (typical example: regularizer = -beta * entropy)
G -= regularizer
loss = self.loss_function(G, V, W)
# only needed for metrics dict
V_targ, _ = self.v.function(
target_params['v_targ'], target_state['v_targ'], next(rngs), S, False)
chex.assert_equal_shape([G, V, V_targ, W])
chex.assert_rank([G, V, V_targ, W], 1)
dLoss_dV = jax.grad(self.loss_function, argnums=1)
td_error = -V.shape[0] * dLoss_dV(G, V) # e.g. (G - V) if loss function is MSE
chex.assert_equal_shape([td_error, W])
metrics.update({
f'{self.__class__.__name__}/loss': loss,
f'{self.__class__.__name__}/td_error': jnp.mean(W * td_error),
f'{self.__class__.__name__}/td_error_targ': jnp.mean(-dLoss_dV(V, V_targ, W)),
})
return loss, (td_error, state_new, metrics)
def grads_and_metrics_func(
params, target_params, state, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
grads, (td_error, state_new, metrics) = jax.grad(loss_func, has_aux=True)(
params, target_params, state, target_state, next(rngs), transition_batch)
# add some diagnostics about the gradients
metrics.update(get_grads_diagnostics(grads, f'{self.__class__.__name__}/grads_'))
return grads, state_new, metrics, td_error
def td_error_func(params, target_params, state, target_state, rng, transition_batch):
loss, (td_error, state_new, metrics) =\
loss_func(params, target_params, state, target_state, rng, transition_batch)
return td_error
self._grads_and_metrics_func = jit(grads_and_metrics_func)
self._td_error_func = jit(td_error_func)
@property
def v(self):
return self._f
@property
def v_targ(self):
return self._f_targ
@property
def target_params(self):
return hk.data_structures.to_immutable_dict({
'v': self.v.params,
'v_targ': self.v_targ.params,
'reg': getattr(getattr(self.policy_regularizer, 'f', None), 'params', None),
'reg_hparams': getattr(self.policy_regularizer, 'hyperparams', None)})
@property
def target_function_state(self):
return hk.data_structures.to_immutable_dict({
'v': self.v.function_state,
'v_targ': self.v_targ.function_state,
'reg': getattr(getattr(self.policy_regularizer, 'f', None), 'function_state', None)})
def _get_target_dist_params(self, params, state, rng, transition_batch):
r"""
        This method applies techniques from the Distributional RL paper (arxiv:1707.06887) to
update StochasticQ / StochasticV.
"""
rngs = hk.PRNGSequence(rng)
S_next = self.v_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
scale, shift = transition_batch.In, transition_batch.Rn # defines affine transformation
dist_params_next, _ = self.v_targ.function(params, state, next(rngs), S_next, False)
dist_params_target = self.v_targ.proba_dist.affine_transform(
dist_params_next, scale, shift, self.v_targ.value_transform)
return dist_params_target
class BaseTDLearningQ(BaseTDLearning):
def __init__(self, q, q_targ=None, optimizer=None, loss_function=None, policy_regularizer=None):
if not is_qfunction(q):
raise TypeError(f"q must be a q-function, got: {type(q)}")
if not (q_targ is None or isinstance(q_targ, (list, tuple)) or is_qfunction(q_targ)):
raise TypeError(f"q_targ must be a q-function or None, got: {type(q_targ)}")
super().__init__(
f=q,
f_targ=q_targ,
optimizer=optimizer,
loss_function=loss_function,
policy_regularizer=policy_regularizer)
def loss_func(params, target_params, state, target_state, rng, transition_batch):
"""
In this function we tie together all the pieces, which is why it's a bit long.
The main structure to watch for is calls to self.target_func(...), which is defined
downstream. All other code is essentially boilerplate to tie this target to the
predictions, i.e. to construct a feedback signal for training.
One of the things we might change here is not to handle both the stochastic and
deterministic cases in the same function.
-kris
"""
rngs = hk.PRNGSequence(rng)
S = self.q.observation_preprocessor(next(rngs), transition_batch.S)
A = self.q.action_preprocessor(next(rngs), transition_batch.A)
W = jnp.clip(transition_batch.W, 0.1, 10.) # clip importance weights to reduce variance
metrics = {}
# regularization term
if self.policy_regularizer is None:
regularizer = 0.
else:
regularizer, regularizer_metrics = self.policy_regularizer.batch_eval(
target_params['reg'], target_params['reg_hparams'], target_state['reg'],
next(rngs), transition_batch)
metrics.update({f'{self.__class__.__name__}/{k}': v for k,
v in regularizer_metrics.items()})
if is_stochastic(self.q):
dist_params, state_new = \
self.q.function_type1(params, state, next(rngs), S, A, True)
dist_params_target = \
self.target_func(target_params, target_state, rng, transition_batch)
if self.policy_regularizer is not None:
dist_params_target = self.q.proba_dist.affine_transform(
dist_params_target, 1., -regularizer, self.q.value_transform)
if isinstance(self.q.proba_dist, DiscretizedIntervalDist):
loss = jnp.mean(self.q.proba_dist.cross_entropy(dist_params_target,
dist_params))
elif isinstance(self.q.proba_dist, EmpiricalQuantileDist):
loss = quantile_huber(dist_params_target['values'],
dist_params['values'],
dist_params['quantile_fractions'], W)
# the rest here is only needed for metrics dict
Q = self.q.proba_dist.mean(dist_params)
Q = self.q.proba_dist.postprocess_variate(next(rngs), Q, batch_mode=True)
G = self.q.proba_dist.mean(dist_params_target)
G = self.q.proba_dist.postprocess_variate(next(rngs), G, batch_mode=True)
dist_params_q_targ, _ = self.q.function_type1(
target_params['q_targ'], target_state['q_targ'], next(rngs), S, A, False)
Q_targ = self.q.proba_dist.mean(dist_params_q_targ)
Q_targ = self.q.proba_dist.postprocess_variate(next(rngs), Q_targ, batch_mode=True)
else:
Q, state_new = self.q.function_type1(params, state, next(rngs), S, A, True)
G = self.target_func(target_params, target_state, next(rngs), transition_batch)
# flip sign (typical example: regularizer = -beta * entropy)
G -= regularizer
loss = self.loss_function(G, Q, W)
# only needed for metrics dict
Q_targ, _ = self.q.function_type1(
target_params['q_targ'], target_state['q_targ'], next(rngs), S, A, False)
chex.assert_equal_shape([G, Q, Q_targ, W])
chex.assert_rank([G, Q, Q_targ, W], 1)
dLoss_dQ = jax.grad(self.loss_function, argnums=1)
td_error = -Q.shape[0] * dLoss_dQ(G, Q) # e.g. (G - Q) if loss function is MSE
chex.assert_equal_shape([td_error, W])
metrics.update({
f'{self.__class__.__name__}/loss': loss,
f'{self.__class__.__name__}/td_error': jnp.mean(W * td_error),
f'{self.__class__.__name__}/td_error_targ': jnp.mean(-dLoss_dQ(Q, Q_targ, W)),
})
return loss, (td_error, state_new, metrics)
def grads_and_metrics_func(
params, target_params, state, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
grads, (td_error, state_new, metrics) = jax.grad(loss_func, has_aux=True)(
params, target_params, state, target_state, next(rngs), transition_batch)
# add some diagnostics about the gradients
metrics.update(get_grads_diagnostics(grads, f'{self.__class__.__name__}/grads_'))
return grads, state_new, metrics, td_error
def td_error_func(params, target_params, state, target_state, rng, transition_batch):
loss, (td_error, state_new, metrics) =\
loss_func(params, target_params, state, target_state, rng, transition_batch)
return td_error
self._grads_and_metrics_func = jit(grads_and_metrics_func)
self._td_error_func = jit(td_error_func)
@property
def q(self):
return self._f
@property
def q_targ(self):
return self._f_targ
@property
def target_params(self):
return hk.data_structures.to_immutable_dict({
'q': self.q.params,
'q_targ': self.q_targ.params,
'reg': getattr(getattr(self.policy_regularizer, 'f', None), 'params', None),
'reg_hparams': getattr(self.policy_regularizer, 'hyperparams', None)})
@property
def target_function_state(self):
return hk.data_structures.to_immutable_dict({
'q': self.q.function_state,
'q_targ': self.q_targ.function_state,
'reg': getattr(getattr(self.policy_regularizer, 'f', None), 'function_state', None)})
def _get_target_dist_params(self, params, state, rng, transition_batch, A_next):
r"""
        This method applies techniques from the Distributional RL paper (arxiv:1707.06887) to
update StochasticQ / StochasticV.
"""
rngs = hk.PRNGSequence(rng)
S_next = self.q_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
scale, shift = transition_batch.In, transition_batch.Rn # defines affine transformation
dist_params_next, _ = self.q_targ.function_type1(
params, state, next(rngs), S_next, A_next, False)
dist_params_target = self.q_targ.proba_dist.affine_transform(
dist_params_next, scale, shift, self.q_targ.value_transform)
return dist_params_target
class BaseTDLearningQWithTargetPolicy(BaseTDLearningQ):
def __init__(
self, q, pi_targ, q_targ=None, optimizer=None,
loss_function=None, policy_regularizer=None):
if pi_targ is not None and not is_policy(pi_targ):
raise TypeError(f"pi_targ must be a Policy, got: {type(pi_targ)}")
self.pi_targ = pi_targ
super().__init__(
q=q,
q_targ=q_targ,
optimizer=optimizer,
loss_function=loss_function,
policy_regularizer=policy_regularizer)
@property
def target_params(self):
return hk.data_structures.to_immutable_dict({
'q': self.q.params,
'q_targ': self.q_targ.params,
'pi_targ': getattr(self.pi_targ, 'params', None),
'reg': getattr(getattr(self.policy_regularizer, 'f', None), 'params', None),
'reg_hparams': getattr(self.policy_regularizer, 'hyperparams', None)})
@property
def target_function_state(self):
return hk.data_structures.to_immutable_dict({
'q': self.q.function_state,
'q_targ': self.q_targ.function_state,
'pi_targ': getattr(self.pi_targ, 'function_state', None),
'reg':
getattr(getattr(self.policy_regularizer, 'f', None), 'function_state', None)})
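
if __name__ == '__main__':
    # Quick sanity check (illustrative only, not part of coax): the td_error convention used in
    # the loss functions above, td_error = -n * dL/dy_pred, reduces to the familiar
    # (y_true - y_pred) when the loss is a mean-squared error with a 1/2 factor (the convention
    # assumed here for coax.value_losses.mse).
    import jax
    import jax.numpy as jnp

    def mse(y_true, y_pred):                       # assumed convention: 0.5 * mean(squared error)
        return 0.5 * jnp.mean((y_pred - y_true) ** 2)

    y_true = jnp.array([1.0, 2.0, 3.0])
    y_pred = jnp.array([0.5, 2.5, 2.0])
    dL_dy_pred = jax.grad(mse, argnums=1)(y_true, y_pred)
    td_error = -y_true.shape[0] * dL_dy_pred       # same construction as in loss_func above
    print(td_error)                                # [ 0.5 -0.5  1. ]
    print(y_true - y_pred)                         # identical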

# ---- coax-main/coax/td_learning/_clippeddoubleqlearning.py ----

import warnings
import jax
import jax.numpy as jnp
import haiku as hk
import chex
from gymnasium.spaces import Discrete
from ..proba_dists import DiscretizedIntervalDist, EmpiricalQuantileDist
from ..utils import (get_grads_diagnostics, is_policy, is_qfunction,
is_stochastic, jit, single_to_batch, batch_to_single, stack_trees)
from ..value_losses import quantile_huber
from ._base import BaseTDLearningQ
class ClippedDoubleQLearning(BaseTDLearningQ): # TODO(krholshe): make this less ugly
r"""
TD-learning with `TD3 <https://arxiv.org/abs/1802.09477>`_ style double q-learning updates, in
which the target network is only used in selecting the would-be next action.
For discrete actions, the :math:`n`-step bootstrapped target is constructed as:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t + I^{(n)}_t\,\min_{i,j}q_i(S_{t+n}, \arg\max_a q_j(S_{t+n}, a))
where :math:`q_i(s,a)` is the :math:`i`-th target q-function provided in :code:`q_targ_list`.
Similarly, for non-discrete actions, the target is constructed as:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t + I^{(n)}_t\,\min_{i,j}q_i(S_{t+n}, a_j(S_{t+n}))
where :math:`a_i(s)` is the **mode** of the :math:`i`-th target policy provided in
:code:`pi_targ_list`.
where
.. math::
R^{(n)}_t\ &=\ \sum_{k=0}^{n-1}\gamma^kR_{t+k} \\
I^{(n)}_t\ &=\ \left\{\begin{matrix}
0 & \text{if $S_{t+n}$ is a terminal state} \\
\gamma^n & \text{otherwise}
\end{matrix}\right.
Parameters
----------
q : Q
The main q-function to update.
pi_targ_list : list of Policy, optional
The list of policies that are used for constructing the TD-target. This is ignored if the
action space is discrete and *required* otherwise.
q_targ_list : list of Q
The list of q-functions that are used for constructing the TD-target.
optimizer : optax optimizer, optional
An optax-style optimizer. The default optimizer is :func:`optax.adam(1e-3)
<optax.adam>`.
loss_function : callable, optional
The loss function that will be used to regress to the (bootstrapped) target. The loss
function is expected to be of the form:
.. math::
L(y_\text{true}, y_\text{pred}, w)\in\mathbb{R}
where :math:`w>0` are sample weights. If left unspecified, this defaults to
:func:`coax.value_losses.huber`. Check out the :mod:`coax.value_losses` module for other
predefined loss functions.
policy_regularizer : Regularizer, optional
If provided, this policy regularizer is added to the TD-target. A typical example is to use
an :class:`coax.regularizers.EntropyRegularizer`, which adds the policy entropy to
the target. In this case, we minimize the following loss shifted by the entropy term:
.. math::
L(y_\text{true} + \beta\,H[\pi], y_\text{pred})
Note that the coefficient :math:`\beta` plays the role of the temperature in SAC-style
agents.
"""
def __init__(
self, q, pi_targ_list=None, q_targ_list=None,
optimizer=None, loss_function=None, policy_regularizer=None):
super().__init__(
q=q,
q_targ=None,
optimizer=optimizer,
loss_function=loss_function,
policy_regularizer=policy_regularizer)
self._check_input_lists(pi_targ_list, q_targ_list)
self.q_targ_list = q_targ_list
self.pi_targ_list = [] if pi_targ_list is None else pi_targ_list
# consistency check
if isinstance(self.q.action_space, Discrete):
if len(self.q_targ_list) < 2:
raise ValueError("len(q_targ_list) must be at least 2")
elif len(self.q_targ_list) * len(self.pi_targ_list) < 2:
raise ValueError("len(q_targ_list) * len(pi_targ_list) must be at least 2")
def loss_func(params, target_params, state, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
S = self.q.observation_preprocessor(next(rngs), transition_batch.S)
A = self.q.action_preprocessor(next(rngs), transition_batch.A)
W = jnp.clip(transition_batch.W, 0.1, 10.) # clip importance weights to reduce variance
metrics = {}
# regularization term
if self.policy_regularizer is None:
regularizer = 0.
else:
regularizer, regularizer_metrics = self.policy_regularizer.batch_eval(
target_params['reg'], target_params['reg_hparams'], target_state['reg'],
next(rngs), transition_batch)
metrics.update({f'{self.__class__.__name__}/{k}': v for k,
v in regularizer_metrics.items()})
if is_stochastic(self.q):
dist_params, state_new = \
self.q.function_type1(params, state, next(rngs), S, A, True)
dist_params_target = \
self.target_func(target_params, target_state, rng, transition_batch)
if self.policy_regularizer is not None:
dist_params_target = self.q.proba_dist.affine_transform(
dist_params_target, 1., -regularizer, self.q.value_transform)
if isinstance(self.q.proba_dist, DiscretizedIntervalDist):
loss = jnp.mean(self.q.proba_dist.cross_entropy(dist_params_target,
dist_params))
elif isinstance(self.q.proba_dist, EmpiricalQuantileDist):
loss = quantile_huber(dist_params_target['values'],
dist_params['values'],
dist_params['quantile_fractions'], W)
# the rest here is only needed for metrics dict
Q = self.q.proba_dist.mean(dist_params)
Q = self.q.proba_dist.postprocess_variate(next(rngs), Q, batch_mode=True)
G = self.q.proba_dist.mean(dist_params_target)
G = self.q.proba_dist.postprocess_variate(next(rngs), G, batch_mode=True)
else:
Q, state_new = self.q.function_type1(params, state, next(rngs), S, A, True)
G = self.target_func(target_params, target_state, next(rngs), transition_batch)
# flip sign (typical example: regularizer = -beta * entropy)
G -= regularizer
loss = self.loss_function(G, Q, W)
dLoss_dQ = jax.grad(self.loss_function, argnums=1)
td_error = -Q.shape[0] * dLoss_dQ(G, Q) # e.g. (G - Q) if loss function is MSE
# target-network estimate (is this worth computing?)
Q_targ_list = []
qs = list(zip(self.q_targ_list, target_params['q_targ'], target_state['q_targ']))
for q, pm, st in qs:
if is_stochastic(q):
Q_targ = q.mean_func_type1(pm, st, next(rngs), S, A)
Q_targ = q.proba_dist.postprocess_variate(next(rngs), Q_targ, batch_mode=True)
else:
Q_targ, _ = q.function_type1(pm, st, next(rngs), S, A, False)
assert Q_targ.ndim == 1, f"bad shape: {Q_targ.shape}"
Q_targ_list.append(Q_targ)
Q_targ_list = jnp.stack(Q_targ_list, axis=-1)
assert Q_targ_list.ndim == 2, f"bad shape: {Q_targ_list.shape}"
Q_targ = jnp.min(Q_targ_list, axis=-1)
chex.assert_equal_shape([td_error, W, Q_targ])
metrics.update({
f'{self.__class__.__name__}/loss': loss,
f'{self.__class__.__name__}/td_error': jnp.mean(W * td_error),
f'{self.__class__.__name__}/td_error_targ': jnp.mean(-dLoss_dQ(Q, Q_targ, W)),
})
return loss, (td_error, state_new, metrics)
def grads_and_metrics_func(
params, target_params, state, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
grads, (td_error, state_new, metrics) = jax.grad(loss_func, has_aux=True)(
params, target_params, state, target_state, next(rngs), transition_batch)
# add some diagnostics about the gradients
metrics.update(get_grads_diagnostics(grads, f'{self.__class__.__name__}/grads_'))
return grads, state_new, metrics, td_error
def td_error_func(params, target_params, state, target_state, rng, transition_batch):
loss, (td_error, state_new, metrics) =\
loss_func(params, target_params, state, target_state, rng, transition_batch)
return td_error
self._grads_and_metrics_func = jit(grads_and_metrics_func)
self._td_error_func = jit(td_error_func)
@property
def target_params(self):
return hk.data_structures.to_immutable_dict({
'q': self.q.params,
'q_targ': [q.params for q in self.q_targ_list],
'pi_targ': [pi.params for pi in self.pi_targ_list],
'reg': getattr(getattr(self.policy_regularizer, 'f', None), 'params', None),
'reg_hparams': getattr(self.policy_regularizer, 'hyperparams', None)})
@property
def target_function_state(self):
return hk.data_structures.to_immutable_dict({
'q': self.q.function_state,
'q_targ': [q.function_state for q in self.q_targ_list],
'pi_targ': [pi.function_state for pi in self.pi_targ_list],
'reg': getattr(getattr(self.policy_regularizer, 'f', None), 'function_state', None)})
def target_func(self, target_params, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
# collect list of q-values
if isinstance(self.q.action_space, Discrete):
Q_sa_next_list = []
A_next_list = []
qs = list(zip(self.q_targ_list, target_params['q_targ'], target_state['q_targ']))
# compute A_next from q_i
for q_i, params_i, state_i in qs:
S_next = q_i.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(q_i):
Q_s_next = q_i.mean_func_type2(params_i, state_i, next(rngs), S_next)
Q_s_next = q_i.proba_dist.postprocess_variate(
next(rngs), Q_s_next, batch_mode=True)
else:
Q_s_next, _ = q_i.function_type2(params_i, state_i, next(rngs), S_next, False)
assert Q_s_next.ndim == 2, f"bad shape: {Q_s_next.shape}"
A_next = (Q_s_next == Q_s_next.max(axis=1, keepdims=True)).astype(Q_s_next.dtype)
A_next /= A_next.sum(axis=1, keepdims=True) # there may be ties
# evaluate on q_j
for q_j, params_j, state_j in qs:
S_next = q_j.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(q_j):
Q_sa_next = q_j.mean_func_type1(
params_j, state_j, next(rngs), S_next, A_next)
Q_sa_next = q_j.proba_dist.postprocess_variate(
next(rngs), Q_sa_next, batch_mode=True)
else:
Q_sa_next, _ = q_j.function_type1(
params_j, state_j, next(rngs), S_next, A_next, False)
assert Q_sa_next.ndim == 1, f"bad shape: {Q_sa_next.shape}"
f_inv = q_j.value_transform.inverse_func
Q_sa_next_list.append(f_inv(Q_sa_next))
A_next_list.append(A_next)
else:
Q_sa_next_list = []
A_next_list = []
qs = list(zip(self.q_targ_list, target_params['q_targ'], target_state['q_targ']))
pis = list(zip(self.pi_targ_list, target_params['pi_targ'], target_state['pi_targ']))
# compute A_next from pi_i
for pi_i, params_i, state_i in pis:
S_next = pi_i.observation_preprocessor(next(rngs), transition_batch.S_next)
dist_params, _ = pi_i.function(params_i, state_i, next(rngs), S_next, False)
A_next = pi_i.proba_dist.mode(dist_params) # greedy action
# evaluate on q_j
for q_j, params_j, state_j in qs:
S_next = q_j.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(q_j):
Q_sa_next = q_j.mean_func_type1(
params_j, state_j, next(rngs), S_next, A_next)
Q_sa_next = q_j.proba_dist.postprocess_variate(
next(rngs), Q_sa_next, batch_mode=True)
else:
Q_sa_next, _ = q_j.function_type1(
params_j, state_j, next(rngs), S_next, A_next, False)
assert Q_sa_next.ndim == 1, f"bad shape: {Q_sa_next.shape}"
f_inv = q_j.value_transform.inverse_func
Q_sa_next_list.append(f_inv(Q_sa_next))
A_next_list.append(A_next)
# take the min to mitigate over-estimation
A_next_list = jnp.stack(A_next_list, axis=1)
Q_sa_next_list = jnp.stack(Q_sa_next_list, axis=-1)
assert Q_sa_next_list.ndim == 2, f"bad shape: {Q_sa_next_list.shape}"
if is_stochastic(self.q):
Q_sa_next_argmin = jnp.argmin(Q_sa_next_list, axis=-1)
Q_sa_next_argmin_q = Q_sa_next_argmin % len(self.q_targ_list)
def target_dist_params(A_next_idx, q_targ_idx, p, s, t, A_next_list):
return self._get_target_dist_params(batch_to_single(p, q_targ_idx),
batch_to_single(s, q_targ_idx),
next(rngs),
single_to_batch(t),
single_to_batch(batch_to_single(A_next_list,
A_next_idx)))
def tile_parameters(params, state, reps):
return jax.tree_util.tree_map(lambda t: jnp.tile(t, [reps, *([1] * (t.ndim - 1))]),
stack_trees(params, state))
# stack and tile q-function params to select the argmin for the target dist params
tiled_target_params, tiled_target_state = tile_parameters(
target_params['q_targ'], target_state['q_targ'], reps=len(self.q_targ_list))
vtarget_dist_params = jax.vmap(target_dist_params, in_axes=(0, 0, None, None, 0, 0))
dist_params = vtarget_dist_params(
Q_sa_next_argmin,
Q_sa_next_argmin_q,
tiled_target_params,
tiled_target_state,
transition_batch,
A_next_list)
# unwrap dist params computed for single batches
return jax.tree_util.tree_map(lambda t: jnp.squeeze(t, axis=1), dist_params)
Q_sa_next = jnp.min(Q_sa_next_list, axis=-1)
assert Q_sa_next.ndim == 1, f"bad shape: {Q_sa_next.shape}"
f = self.q.value_transform.transform_func
return f(transition_batch.Rn + transition_batch.In * Q_sa_next)
def _check_input_lists(self, pi_targ_list, q_targ_list):
# check input: pi_targ_list
if isinstance(self.q.action_space, Discrete):
if pi_targ_list is not None:
warnings.warn("pi_targ_list is ignored, because action space is discrete")
else:
if pi_targ_list is None:
raise TypeError("pi_targ_list must be provided if action space is not discrete")
if not isinstance(pi_targ_list, (tuple, list)):
raise TypeError(
f"pi_targ_list must be a list or a tuple, got: {type(pi_targ_list)}")
if len(pi_targ_list) < 1:
raise ValueError("pi_targ_list cannot be empty")
for pi_targ in pi_targ_list:
if not is_policy(pi_targ):
raise TypeError(
f"all pi_targ in pi_targ_list must be a policies, got: {type(pi_targ)}")
# check input: q_targ_list
if not isinstance(q_targ_list, (tuple, list)):
raise TypeError(f"q_targ_list must be a list or a tuple, got: {type(q_targ_list)}")
if not q_targ_list:
raise ValueError("q_targ_list cannot be empty")
for q_targ in q_targ_list:
if not is_qfunction(q_targ):
raise TypeError(f"all q_targ in q_targ_list must be a coax.Q, got: {type(q_targ)}")

# ---- coax-main/coax/td_learning/_clippeddoubleqlearning_test.py ----

from copy import deepcopy
from optax import sgd
from .._base.test_case import TestCase
from .._core.q import Q
from .._core.stochastic_q import StochasticQ
from .._core.policy import Policy
from ..utils import get_transition_batch
from ._clippeddoubleqlearning import ClippedDoubleQLearning
class TestClippedDoubleQLearning(TestCase):
def setUp(self):
self.transition_discrete = get_transition_batch(self.env_discrete, random_seed=42)
self.transition_boxspace = get_transition_batch(self.env_boxspace, random_seed=42)
def test_update_discrete_type1(self):
env = self.env_discrete
func_q = self.func_q_type1
transition_batch = self.transition_discrete
q1 = Q(func_q, env)
q2 = Q(func_q, env)
q_targ1 = q1.copy()
q_targ2 = q2.copy()
updater1 = ClippedDoubleQLearning(q1, q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
updater2 = ClippedDoubleQLearning(q2, q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
params1 = deepcopy(q1.params)
params2 = deepcopy(q2.params)
function_state1 = deepcopy(q1.function_state)
function_state2 = deepcopy(q2.function_state)
updater1.update(transition_batch)
updater2.update(transition_batch)
self.assertPytreeNotEqual(params1, q1.params)
self.assertPytreeNotEqual(params2, q2.params)
self.assertPytreeNotEqual(function_state1, q1.function_state)
self.assertPytreeNotEqual(function_state2, q2.function_state)
def test_update_discrete_stochastic_type1(self):
env = self.env_discrete
func_q = self.func_q_stochastic_type1
transition_batch = self.transition_discrete
q1 = StochasticQ(func_q, env, value_range=(0, 1))
q2 = StochasticQ(func_q, env, value_range=(0, 1))
q_targ1 = q1.copy()
q_targ2 = q2.copy()
updater1 = ClippedDoubleQLearning(q1, q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
updater2 = ClippedDoubleQLearning(q2, q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
params1 = deepcopy(q1.params)
params2 = deepcopy(q2.params)
function_state1 = deepcopy(q1.function_state)
function_state2 = deepcopy(q2.function_state)
updater1.update(transition_batch)
updater2.update(transition_batch)
self.assertPytreeNotEqual(params1, q1.params)
self.assertPytreeNotEqual(params2, q2.params)
self.assertPytreeNotEqual(function_state1, q1.function_state)
self.assertPytreeNotEqual(function_state2, q2.function_state)
def test_update_discrete_type2(self):
env = self.env_discrete
func_q = self.func_q_type2
transition_batch = self.transition_discrete
q1 = Q(func_q, env)
q2 = Q(func_q, env)
q_targ1 = q1.copy()
q_targ2 = q2.copy()
updater1 = ClippedDoubleQLearning(q1, q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
updater2 = ClippedDoubleQLearning(q2, q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
params1 = deepcopy(q1.params)
params2 = deepcopy(q2.params)
function_state1 = deepcopy(q1.function_state)
function_state2 = deepcopy(q2.function_state)
updater1.update(transition_batch)
updater2.update(transition_batch)
self.assertPytreeNotEqual(params1, q1.params)
self.assertPytreeNotEqual(params2, q2.params)
self.assertPytreeNotEqual(function_state1, q1.function_state)
self.assertPytreeNotEqual(function_state2, q2.function_state)
def test_update_discrete_stochastic_type2(self):
env = self.env_discrete
func_q = self.func_q_stochastic_type2
transition_batch = self.transition_discrete
q1 = StochasticQ(func_q, env, value_range=(0, 1))
q2 = StochasticQ(func_q, env, value_range=(0, 1))
q_targ1 = q1.copy()
q_targ2 = q2.copy()
updater1 = ClippedDoubleQLearning(q1, q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
updater2 = ClippedDoubleQLearning(q2, q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
params1 = deepcopy(q1.params)
params2 = deepcopy(q2.params)
function_state1 = deepcopy(q1.function_state)
function_state2 = deepcopy(q2.function_state)
updater1.update(transition_batch)
updater2.update(transition_batch)
self.assertPytreeNotEqual(params1, q1.params)
self.assertPytreeNotEqual(params2, q2.params)
self.assertPytreeNotEqual(function_state1, q1.function_state)
self.assertPytreeNotEqual(function_state2, q2.function_state)
def test_update_boxspace(self):
env = self.env_boxspace
func_q = self.func_q_type1
func_pi = self.func_pi_boxspace
transition_batch = self.transition_boxspace
q1 = Q(func_q, env)
q2 = Q(func_q, env)
pi1 = Policy(func_pi, env)
pi2 = Policy(func_pi, env)
q_targ1 = q1.copy()
q_targ2 = q2.copy()
updater1 = ClippedDoubleQLearning(
q1, pi_targ_list=[pi1, pi2], q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
updater2 = ClippedDoubleQLearning(
q2, pi_targ_list=[pi1, pi2], q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
params1 = deepcopy(q1.params)
params2 = deepcopy(q2.params)
function_state1 = deepcopy(q1.function_state)
function_state2 = deepcopy(q2.function_state)
updater1.update(transition_batch)
updater2.update(transition_batch)
self.assertPytreeNotEqual(params1, q1.params)
self.assertPytreeNotEqual(params2, q2.params)
self.assertPytreeNotEqual(function_state1, q1.function_state)
self.assertPytreeNotEqual(function_state2, q2.function_state)
def test_update_boxspace_stochastic(self):
env = self.env_boxspace
func_q = self.func_q_stochastic_type1
func_pi = self.func_pi_boxspace
transition_batch = self.transition_boxspace
q1 = StochasticQ(func_q, env, value_range=(0, 1))
q2 = StochasticQ(func_q, env, value_range=(0, 1))
pi1 = Policy(func_pi, env)
pi2 = Policy(func_pi, env)
q_targ1 = q1.copy()
q_targ2 = q2.copy()
updater1 = ClippedDoubleQLearning(
q1, pi_targ_list=[pi1, pi2], q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
updater2 = ClippedDoubleQLearning(
q2, pi_targ_list=[pi1, pi2], q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
params1 = deepcopy(q1.params)
params2 = deepcopy(q2.params)
function_state1 = deepcopy(q1.function_state)
function_state2 = deepcopy(q2.function_state)
updater1.update(transition_batch)
updater2.update(transition_batch)
self.assertPytreeNotEqual(params1, q1.params)
self.assertPytreeNotEqual(params2, q2.params)
self.assertPytreeNotEqual(function_state1, q1.function_state)
self.assertPytreeNotEqual(function_state2, q2.function_state)
def test_discrete_with_pi(self):
env = self.env_discrete
func_q = self.func_q_type1
func_pi = self.func_pi_discrete
q1 = Q(func_q, env)
q2 = Q(func_q, env)
pi1 = Policy(func_pi, env)
pi2 = Policy(func_pi, env)
q_targ1 = q1.copy()
q_targ2 = q2.copy()
msg = r"pi_targ_list is ignored, because action space is discrete"
with self.assertWarnsRegex(UserWarning, msg):
ClippedDoubleQLearning(
q1, pi_targ_list=[pi1, pi2], q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
def test_boxspace_without_pi(self):
env = self.env_boxspace
func_q = self.func_q_type1
q1 = Q(func_q, env)
q2 = Q(func_q, env)
q_targ1 = q1.copy()
q_targ2 = q2.copy()
msg = r"pi_targ_list must be provided if action space is not discrete"
with self.assertRaisesRegex(TypeError, msg):
ClippedDoubleQLearning(q1, q_targ_list=[q_targ1, q_targ2], optimizer=sgd(1.0))
def test_update_discrete_nogrid(self):
env = self.env_discrete
func_q = self.func_q_type1
q = Q(func_q, env)
q_targ = q.copy()
msg = r"len\(q_targ_list\) must be at least 2"
with self.assertRaisesRegex(ValueError, msg):
ClippedDoubleQLearning(q, q_targ_list=[q_targ], optimizer=sgd(1.0))
def test_update_boxspace_nogrid(self):
env = self.env_boxspace
func_q = self.func_q_type1
func_pi = self.func_pi_boxspace
q = Q(func_q, env)
pi = Policy(func_pi, env)
q_targ = q.copy()
msg = r"len\(q_targ_list\) \* len\(pi_targ_list\) must be at least 2"
with self.assertRaisesRegex(ValueError, msg):
ClippedDoubleQLearning(q, pi_targ_list=[pi], q_targ_list=[q_targ], optimizer=sgd(1.0))

# ---- coax-main/coax/td_learning/_doubleqlearning.py ----

import warnings
import haiku as hk
import chex
from gymnasium.spaces import Discrete
from ..utils import is_stochastic
from ._base import BaseTDLearningQWithTargetPolicy
class DoubleQLearning(BaseTDLearningQWithTargetPolicy):
r"""
TD-learning with `Double-DQN <https://arxiv.org/abs/1509.06461>`_ style double q-learning
updates, in which the target network is only used in selecting the would-be next action. The
:math:`n`-step bootstrapped target is thus constructed as:
.. math::
a_\text{greedy}\ &=\ \arg\max_a q_\text{targ}(S_{t+n}, a) \\
G^{(n)}_t\ &=\ R^{(n)}_t + I^{(n)}_t\,q(S_{t+n}, a_\text{greedy})
where
.. math::
R^{(n)}_t\ &=\ \sum_{k=0}^{n-1}\gamma^kR_{t+k} \\
I^{(n)}_t\ &=\ \left\{\begin{matrix}
0 & \text{if $S_{t+n}$ is a terminal state} \\
\gamma^n & \text{otherwise}
\end{matrix}\right.
Parameters
----------
q : Q
The main q-function to update.
pi_targ : Policy, optional
The policy that is used for constructing the TD-target. This is ignored if the action space
is discrete and *required* otherwise.
q_targ : Q, optional
The q-function that is used for constructing the TD-target. If this is left unspecified, we
set ``q_targ = q`` internally.
optimizer : optax optimizer, optional
An optax-style optimizer. The default optimizer is :func:`optax.adam(1e-3)
<optax.adam>`.
loss_function : callable, optional
The loss function that will be used to regress to the (bootstrapped) target. The loss
function is expected to be of the form:
.. math::
L(y_\text{true}, y_\text{pred}, w)\in\mathbb{R}
where :math:`w>0` are sample weights. If left unspecified, this defaults to
:func:`coax.value_losses.huber`. Check out the :mod:`coax.value_losses` module for other
predefined loss functions.
policy_regularizer : Regularizer, optional
If provided, this policy regularizer is added to the TD-target. A typical example is to use
an :class:`coax.regularizers.EntropyRegularizer`, which adds the policy entropy to
the target. In this case, we minimize the following loss shifted by the entropy term:
.. math::
L(y_\text{true} + \beta\,H[\pi], y_\text{pred})
Note that the coefficient :math:`\beta` plays the role of the temperature in SAC-style
agents.
"""
def __init__(
self, q, pi_targ=None, q_targ=None,
optimizer=None, loss_function=None, policy_regularizer=None):
super().__init__(
q=q,
pi_targ=pi_targ,
q_targ=q_targ,
optimizer=optimizer,
loss_function=loss_function,
policy_regularizer=policy_regularizer)
# consistency checks
if self.pi_targ is None and not isinstance(self.q.action_space, Discrete):
raise TypeError("pi_targ must be provided if action space is not discrete")
if self.pi_targ is not None and isinstance(self.q.action_space, Discrete):
warnings.warn("pi_targ is ignored, because action space is discrete")
def target_func(self, target_params, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
if isinstance(self.q.action_space, Discrete):
params, state = target_params['q_targ'], target_state['q_targ']
S_next = self.q_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(self.q):
Q_s = self.q_targ.mean_func_type2(params, state, next(rngs), S_next)
Q_s = self.q_targ.proba_dist.postprocess_variate(next(rngs), Q_s, batch_mode=True)
else:
Q_s, _ = self.q_targ.function_type2(params, state, next(rngs), S_next, False)
chex.assert_rank(Q_s, 2)
assert Q_s.shape[1] == self.q_targ.action_space.n
# get greedy action as the argmax over q_targ
A_next = (Q_s == Q_s.max(axis=1, keepdims=True)).astype(Q_s.dtype)
A_next /= A_next.sum(axis=1, keepdims=True) # there may be ties
else:
# get greedy action as the mode of pi_targ
params, state = target_params['pi_targ'], target_state['pi_targ']
S_next = self.pi_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
A_next = self.pi_targ.mode_func(params, state, next(rngs), S_next)
# evaluate on q (instead of q_targ)
params, state = target_params['q'], target_state['q']
S_next = self.q_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(self.q):
return self._get_target_dist_params(params, state, next(rngs), transition_batch, A_next)
Q_sa_next, _ = self.q.function_type1(params, state, next(rngs), S_next, A_next, False)
f, f_inv = self.q.value_transform.transform_func, self.q_targ.value_transform.inverse_func
return f(transition_batch.Rn + transition_batch.In * f_inv(Q_sa_next))
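
if __name__ == '__main__':
    # Illustrative sketch (not part of coax): the Double-DQN style target used above decouples
    # action selection from action evaluation -- the *target* network picks the greedy next
    # action and the *online* network scores it. All numbers are made up.
    import numpy as onp

    Rn, In = 0.5, 0.9
    Q_online_s_next = onp.array([1.0, 0.2, 0.7])   # q(s', .)
    Q_targ_s_next = onp.array([0.1, 0.8, 0.6])     # q_targ(s', .)

    a_greedy = onp.argmax(Q_targ_s_next)           # selection: target network
    G = Rn + In * Q_online_s_next[a_greedy]        # evaluation: online network
    print(G)                                       # 0.5 + 0.9 * 0.2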

# ---- coax-main/coax/td_learning/_doubleqlearning_test.py ----

from copy import deepcopy
from optax import sgd
from .._base.test_case import TestCase
from .._core.q import Q
from .._core.policy import Policy
from ..utils import get_transition_batch
from ._doubleqlearning import DoubleQLearning
class TestDoubleQLearning(TestCase):
def setUp(self):
self.transition_discrete = get_transition_batch(self.env_discrete, random_seed=42)
self.transition_boxspace = get_transition_batch(self.env_boxspace, random_seed=42)
def test_update_discrete_type1(self):
env = self.env_discrete
func_q = self.func_q_type1
q = Q(func_q, env)
q_targ = q.copy()
updater = DoubleQLearning(q, q_targ=q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(self.transition_discrete)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_update_discrete_type2(self):
env = self.env_discrete
func_q = self.func_q_type2
q = Q(func_q, env)
q_targ = q.copy()
updater = DoubleQLearning(q, q_targ=q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(self.transition_discrete)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_update_boxspace(self):
env = self.env_boxspace
func_q = self.func_q_type1
func_pi = self.func_pi_boxspace
q = Q(func_q, env)
pi = Policy(func_pi, env)
q_targ = q.copy()
updater = DoubleQLearning(q, pi, q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(self.transition_boxspace)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_discrete_with_pi(self):
env = self.env_discrete
func_q = self.func_q_type1
func_pi = self.func_pi_discrete
q = Q(func_q, env)
pi = Policy(func_pi, env)
q_targ = q.copy()
msg = r"pi_targ is ignored, because action space is discrete"
with self.assertWarnsRegex(UserWarning, msg):
DoubleQLearning(q, pi, q_targ)
def test_boxspace_without_pi(self):
env = self.env_boxspace
func_q = self.func_q_type1
q = Q(func_q, env)
q_targ = q.copy()
msg = r"pi_targ must be provided if action space is not discrete"
with self.assertRaisesRegex(TypeError, msg):
DoubleQLearning(q, q_targ=q_targ)
def test_mistake_q_for_pi(self):
env = self.env_discrete
func_q = self.func_q_type1
q = Q(func_q, env)
q_targ = q.copy()
msg = r"pi_targ must be a Policy, got: .*"
with self.assertRaisesRegex(TypeError, msg):
DoubleQLearning(q, q_targ)

# ---- coax-main/coax/td_learning/_expectedsarsa.py ----

import gymnasium
import jax
import haiku as hk
from ..utils import is_stochastic
from ._base import BaseTDLearningQWithTargetPolicy
class ExpectedSarsa(BaseTDLearningQWithTargetPolicy):
r"""
TD-learning with expected-SARSA updates. The :math:`n`-step bootstrapped target is constructed
as:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t
+ I^{(n)}_t\,\mathop{\mathbb{E}}_{a\sim\pi_\text{targ}(.|S_{t+n})}\,
q_\text{targ}\left(S_{t+n}, a\right)
Note that ordinary :class:`SARSA <coax.td_learning.Sarsa>` target is the sampled estimate of the
above target.
Also, as usual, the :math:`n`-step reward and indicator are defined as:
.. math::
R^{(n)}_t\ &=\ \sum_{k=0}^{n-1}\gamma^kR_{t+k} \\
I^{(n)}_t\ &=\ \left\{\begin{matrix}
0 & \text{if $S_{t+n}$ is a terminal state} \\
\gamma^n & \text{otherwise}
\end{matrix}\right.
Parameters
----------
q : Q
The main q-function to update.
pi_targ : Policy
The policy that is used for constructing the TD-target.
q_targ : Q, optional
The q-function that is used for constructing the TD-target. If this is left unspecified, we
set ``q_targ = q`` internally.
optimizer : optax optimizer, optional
An optax-style optimizer. The default optimizer is :func:`optax.adam(1e-3)
<optax.adam>`.
loss_function : callable, optional
The loss function that will be used to regress to the (bootstrapped) target. The loss
function is expected to be of the form:
.. math::
L(y_\text{true}, y_\text{pred}, w)\in\mathbb{R}
where :math:`w>0` are sample weights. If left unspecified, this defaults to
:func:`coax.value_losses.huber`. Check out the :mod:`coax.value_losses` module for other
predefined loss functions.
policy_regularizer : Regularizer, optional
If provided, this policy regularizer is added to the TD-target. A typical example is to use
an :class:`coax.regularizers.EntropyRegularizer`, which adds the policy entropy to
the target. In this case, we minimize the following loss shifted by the entropy term:
.. math::
L(y_\text{true} + \beta\,H[\pi], y_\text{pred})
Note that the coefficient :math:`\beta` plays the role of the temperature in SAC-style
agents.
"""
def __init__(
self, q, pi_targ, q_targ=None, optimizer=None,
loss_function=None, policy_regularizer=None):
if not isinstance(q.action_space, gymnasium.spaces.Discrete):
raise NotImplementedError(
f"{self.__class__.__name__} class is only implemented for discrete actions spaces")
if pi_targ is None:
raise TypeError("pi_targ must be provided")
super().__init__(
q=q,
pi_targ=pi_targ,
q_targ=q_targ,
optimizer=optimizer,
loss_function=loss_function,
policy_regularizer=policy_regularizer)
def target_func(self, target_params, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
# action propensities
params, state = target_params['pi_targ'], target_state['pi_targ']
S_next = self.pi_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
dist_params, _ = self.pi_targ.function(params, state, next(rngs), S_next, False)
A_next = jax.nn.softmax(dist_params['logits'], axis=-1) # only works for Discrete actions
# evaluate on q_targ
params, state = target_params['q_targ'], target_state['q_targ']
S_next = self.q_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(self.q):
return self._get_target_dist_params(params, state, next(rngs), transition_batch, A_next)
Q_sa_next, _ = self.q_targ.function_type1(params, state, next(rngs), S_next, A_next, False)
f, f_inv = self.q.value_transform.transform_func, self.q_targ.value_transform.inverse_func
return f(transition_batch.Rn + transition_batch.In * f_inv(Q_sa_next))
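
if __name__ == '__main__':
    # Illustrative sketch (not part of coax): the expected-SARSA target replaces the sampled
    # next action by an expectation over the target policy's action propensities. All numbers
    # are made up.
    import numpy as onp

    Rn, In = 1.0, 0.9
    pi_s_next = onp.array([0.2, 0.5, 0.3])         # pi_targ(.|s')
    Q_targ_s_next = onp.array([0.4, 1.0, 0.1])     # q_targ(s', .)

    G = Rn + In * onp.dot(pi_s_next, Q_targ_s_next)
    print(G)                                       # 1.0 + 0.9 * (0.2*0.4 + 0.5*1.0 + 0.3*0.1)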

# ---- coax-main/coax/td_learning/_expectedsarsa_test.py ----

from copy import deepcopy
from optax import sgd
from .._base.test_case import TestCase
from .._core.q import Q
from .._core.policy import Policy
from ..utils import get_transition_batch
from ..regularizers import EntropyRegularizer
from ._expectedsarsa import ExpectedSarsa
class TestExpectedSarsa(TestCase):
def setUp(self):
self.transition_discrete = get_transition_batch(self.env_discrete, random_seed=42)
self.transition_boxspace = get_transition_batch(self.env_boxspace, random_seed=42)
def test_update_discrete_type1(self):
env = self.env_discrete
func_q = self.func_q_type1
func_pi = self.func_pi_discrete
q = Q(func_q, env)
pi = Policy(func_pi, env)
q_targ = q.copy()
updater = ExpectedSarsa(q, pi, q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(self.transition_discrete)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_update_discrete_type2(self):
env = self.env_discrete
func_q = self.func_q_type2
func_pi = self.func_pi_discrete
q = Q(func_q, env)
pi = Policy(func_pi, env)
q_targ = q.copy()
updater = ExpectedSarsa(q, pi, q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(self.transition_discrete)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_nondiscrete(self):
env = self.env_boxspace
func_q = self.func_q_type1
func_pi = self.func_pi_boxspace
q = Q(func_q, env)
pi = Policy(func_pi, env)
q_targ = q.copy()
msg = r"ExpectedSarsa class is only implemented for discrete actions spaces"
with self.assertRaisesRegex(NotImplementedError, msg):
ExpectedSarsa(q, pi, q_targ)
def test_missing_pi(self):
env = self.env_discrete
func_q = self.func_q_type1
q = Q(func_q, env)
q_targ = q.copy()
msg = r"pi_targ must be provided"
with self.assertRaisesRegex(TypeError, msg):
ExpectedSarsa(q, None, q_targ)
def test_policyreg(self):
env = self.env_discrete
func_q = self.func_q_type1
func_pi = self.func_pi_discrete
transition_batch = self.transition_discrete
q = Q(func_q, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
q_targ = q.copy()
params_init = deepcopy(q.params)
function_state_init = deepcopy(q.function_state)
# first update without policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = ExpectedSarsa(q, pi, q_targ, optimizer=sgd(1.0))
updater.update(transition_batch)
params_without_reg = deepcopy(q.params)
function_state_without_reg = deepcopy(q.function_state)
self.assertPytreeNotEqual(params_without_reg, params_init)
self.assertPytreeNotEqual(function_state_without_reg, function_state_init)
# reset weights
q.params = deepcopy(params_init)
q.function_state = deepcopy(function_state_init)
self.assertPytreeAlmostEqual(params_init, q.params)
self.assertPytreeAlmostEqual(function_state_init, q.function_state)
# then update with policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = ExpectedSarsa(q, pi, q_targ, optimizer=sgd(1.0), policy_regularizer=policy_reg)
print('updater.target_params:', updater.target_params)
print('updater.target_function_state:', updater.target_function_state)
updater.update(transition_batch)
params_with_reg = deepcopy(q.params)
function_state_with_reg = deepcopy(q.function_state)
self.assertPytreeNotEqual(params_with_reg, params_init)
self.assertPytreeNotEqual(function_state_with_reg, function_state_init)
self.assertPytreeNotEqual(params_with_reg, params_without_reg)
self.assertPytreeAlmostEqual(function_state_with_reg, function_state_without_reg) # same!

# ---- coax-main/coax/td_learning/_qlearning.py ----

import warnings
import haiku as hk
import chex
from gymnasium.spaces import Discrete
from ..utils import is_stochastic
from ._base import BaseTDLearningQWithTargetPolicy
class QLearning(BaseTDLearningQWithTargetPolicy):
r"""
TD-learning with Q-Learning updates.
The :math:`n`-step bootstrapped target for discrete actions is constructed as:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t + I^{(n)}_t\,\max_aq_\text{targ}\left(S_{t+n}, a\right)
For non-discrete action spaces, this uses a DDPG-style target:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t + I^{(n)}_t\,q_\text{targ}\left(
S_{t+n}, a_\text{targ}(S_{t+n})\right)
where :math:`a_\text{targ}(s)` is the **mode** of the underlying conditional probability
distribution :math:`\pi_\text{targ}(.|s)`. Even though these two formulations of the q-learning
target are equivalent, the implementation of the latter does require additional input, namely
:code:`pi_targ`.
The :math:`n`-step reward and indicator (referenced above) are defined as:
.. math::
R^{(n)}_t\ &=\ \sum_{k=0}^{n-1}\gamma^kR_{t+k} \\
I^{(n)}_t\ &=\ \left\{\begin{matrix}
0 & \text{if $S_{t+n}$ is a terminal state} \\
\gamma^n & \text{otherwise}
\end{matrix}\right.
Parameters
----------
q : Q
The main q-function to update.
pi_targ : Policy, optional
The policy that is used for constructing the TD-target. This is ignored if the action space
is discrete and *required* otherwise.
q_targ : Q, optional
The q-function that is used for constructing the TD-target. If this is left unspecified, we
set ``q_targ = q`` internally.
optimizer : optax optimizer, optional
An optax-style optimizer. The default optimizer is :func:`optax.adam(1e-3)
<optax.adam>`.
loss_function : callable, optional
The loss function that will be used to regress to the (bootstrapped) target. The loss
function is expected to be of the form:
.. math::
L(y_\text{true}, y_\text{pred}, w)\in\mathbb{R}
where :math:`w>0` are sample weights. If left unspecified, this defaults to
:func:`coax.value_losses.huber`. Check out the :mod:`coax.value_losses` module for other
predefined loss functions.
policy_regularizer : Regularizer, optional
If provided, this policy regularizer is added to the TD-target. A typical example is to use
an :class:`coax.regularizers.EntropyRegularizer`, which adds the policy entropy to
the target. In this case, we minimize the following loss shifted by the entropy term:
.. math::
L(y_\text{true} + \beta\,H[\pi], y_\text{pred})
Note that the coefficient :math:`\beta` plays the role of the temperature in SAC-style
agents.
"""
def __init__(
self, q, pi_targ=None, q_targ=None,
optimizer=None, loss_function=None, policy_regularizer=None):
super().__init__(
q=q,
pi_targ=pi_targ,
q_targ=q_targ,
optimizer=optimizer,
loss_function=loss_function,
policy_regularizer=policy_regularizer)
# consistency checks
if self.pi_targ is None and not isinstance(self.q.action_space, Discrete):
raise TypeError("pi_targ must be provided if action space is not discrete")
if self.pi_targ is not None and isinstance(self.q.action_space, Discrete):
warnings.warn("pi_targ is ignored, because action space is discrete")
def target_func(self, target_params, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
if isinstance(self.q.action_space, Discrete):
params, state = target_params['q_targ'], target_state['q_targ']
S_next = self.q_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(self.q):
Q_s = self.q_targ.mean_func_type2(params, state, next(rngs), S_next)
Q_s = self.q_targ.proba_dist.postprocess_variate(next(rngs), Q_s, batch_mode=True)
else:
Q_s, _ = self.q_targ.function_type2(params, state, next(rngs), S_next, False)
chex.assert_rank(Q_s, 2)
assert Q_s.shape[1] == self.q_targ.action_space.n
# get greedy action as the argmax over q_targ
A_next = (Q_s == Q_s.max(axis=1, keepdims=True)).astype(Q_s.dtype)
A_next /= A_next.sum(axis=1, keepdims=True) # there may be ties
else:
# get greedy action as the mode of pi_targ
params, state = target_params['pi_targ'], target_state['pi_targ']
S_next = self.pi_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
A_next = self.pi_targ.mode_func(params, state, next(rngs), S_next)
# evaluate on q_targ
params, state = target_params['q_targ'], target_state['q_targ']
S_next = self.q_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(self.q):
return self._get_target_dist_params(params, state, next(rngs), transition_batch, A_next)
Q_sa_next, _ = self.q.function_type1(params, state, next(rngs), S_next, A_next, False)
f, f_inv = self.q.value_transform.transform_func, self.q_targ.value_transform.inverse_func
return f(transition_batch.Rn + transition_batch.In * f_inv(Q_sa_next))
| 5,556 | 36.802721 | 100 | py |
null | coax-main/coax/td_learning/_qlearning_test.py | from copy import deepcopy
from optax import sgd
from .._base.test_case import TestCase
from .._core.q import Q
from .._core.policy import Policy
from ..utils import get_transition_batch
from ._qlearning import QLearning
class TestQLearning(TestCase):
def setUp(self):
self.transition_discrete = get_transition_batch(self.env_discrete, random_seed=42)
self.transition_boxspace = get_transition_batch(self.env_boxspace, random_seed=42)
def test_update_discrete_type1(self):
env = self.env_discrete
func_q = self.func_q_type1
q = Q(func_q, env)
q_targ = q.copy()
updater = QLearning(q, q_targ=q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(self.transition_discrete)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_update_discrete_type2(self):
env = self.env_discrete
func_q = self.func_q_type2
q = Q(func_q, env)
q_targ = q.copy()
updater = QLearning(q, q_targ=q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(self.transition_discrete)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_update_boxspace(self):
env = self.env_boxspace
func_q = self.func_q_type1
func_pi = self.func_pi_boxspace
q = Q(func_q, env)
pi = Policy(func_pi, env)
q_targ = q.copy()
updater = QLearning(q, pi, q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(self.transition_boxspace)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_discrete_with_pi(self):
env = self.env_discrete
func_q = self.func_q_type1
func_pi = self.func_pi_discrete
q = Q(func_q, env)
pi = Policy(func_pi, env)
q_targ = q.copy()
msg = r"pi_targ is ignored, because action space is discrete"
with self.assertWarnsRegex(UserWarning, msg):
QLearning(q, pi, q_targ)
def test_boxspace_without_pi(self):
env = self.env_boxspace
func_q = self.func_q_type1
q = Q(func_q, env)
q_targ = q.copy()
msg = r"pi_targ must be provided if action space is not discrete"
with self.assertRaisesRegex(TypeError, msg):
QLearning(q, q_targ=q_targ)
def test_mistake_q_for_pi(self):
env = self.env_discrete
func_q = self.func_q_type1
q = Q(func_q, env)
q_targ = q.copy()
msg = r"pi_targ must be a Policy, got: .*"
with self.assertRaisesRegex(TypeError, msg):
QLearning(q, q_targ)
| 3,016 | 28.578431 | 90 | py |
null | coax-main/coax/td_learning/_sarsa.py | import haiku as hk
from ..utils import is_stochastic
from ._base import BaseTDLearningQ
class Sarsa(BaseTDLearningQ):
r"""
TD-learning with SARSA updates. The :math:`n`-step bootstrapped target is constructed as:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t + I^{(n)}_t\,q_\text{targ}(S_{t+n}, A_{t+n})
where :math:`A_{t+n}` is sampled from experience and
.. math::
R^{(n)}_t\ &=\ \sum_{k=0}^{n-1}\gamma^kR_{t+k} \\
I^{(n)}_t\ &=\ \left\{\begin{matrix}
0 & \text{if $S_{t+n}$ is a terminal state} \\
\gamma^n & \text{otherwise}
\end{matrix}\right.
Parameters
----------
q : Q
The main q-function to update.
q_targ : Q, optional
The q-function that is used for constructing the TD-target. If this is left unspecified, we
set ``q_targ = q`` internally.
optimizer : optax optimizer, optional
An optax-style optimizer. The default optimizer is :func:`optax.adam(1e-3)
<optax.adam>`.
loss_function : callable, optional
The loss function that will be used to regress to the (bootstrapped) target. The loss
function is expected to be of the form:
.. math::
L(y_\text{true}, y_\text{pred}, w)\in\mathbb{R}
where :math:`w>0` are sample weights. If left unspecified, this defaults to
:func:`coax.value_losses.huber`. Check out the :mod:`coax.value_losses` module for other
predefined loss functions.
policy_regularizer : Regularizer, optional
If provided, this policy regularizer is added to the TD-target. A typical example is to use
an :class:`coax.regularizers.EntropyRegularizer`, which adds the policy entropy to
the target. In this case, we minimize the following loss shifted by the entropy term:
.. math::
L(y_\text{true} + \beta\,H[\pi], y_\text{pred})
Note that the coefficient :math:`\beta` plays the role of the temperature in SAC-style
agents.
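    Example
    -------
    A minimal usage sketch; :code:`func`, :code:`env` and the traced :code:`transition_batch`
    (which must contain the next action :math:`A_{t+n}`) are assumed to be defined elsewhere:
    .. code:: python
        import coax
        import optax
        q = coax.Q(func, env)
        q_targ = q.copy()  # separate target network
        sarsa = coax.td_learning.Sarsa(q, q_targ, optimizer=optax.adam(1e-3))
        # inside the training loop:
        metrics = sarsa.update(transition_batch)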
"""
def target_func(self, target_params, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
params, state = target_params['q_targ'], target_state['q_targ']
S_next = self.q_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
A_next = self.q_targ.action_preprocessor(next(rngs), transition_batch.A_next)
if is_stochastic(self.q):
return self._get_target_dist_params(params, state, next(rngs), transition_batch, A_next)
Q_sa_next, _ = self.q_targ.function_type1(params, state, next(rngs), S_next, A_next, False)
f, f_inv = self.q.value_transform.transform_func, self.q_targ.value_transform.inverse_func
return f(transition_batch.Rn + transition_batch.In * f_inv(Q_sa_next))
| 2,826 | 33.901235 | 100 | py |
null | coax-main/coax/td_learning/_sarsa_test.py | from copy import deepcopy
from optax import sgd
from .._base.test_case import TestCase
from .._core.q import Q
from .._core.policy import Policy
from ..utils import get_transition_batch
from ..regularizers import EntropyRegularizer
from ._sarsa import Sarsa
class TestSarsa(TestCase):
def setUp(self):
self.transition_discrete = get_transition_batch(self.env_discrete, random_seed=42)
self.transition_boxspace = get_transition_batch(self.env_boxspace, random_seed=42)
def test_update_discrete_type1(self):
env = self.env_discrete
func_q = self.func_q_type1
transition_batch = self.transition_discrete
q = Q(func_q, env, random_seed=11)
q_targ = q.copy()
updater = Sarsa(q, q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(transition_batch)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_update_discrete_type2(self):
env = self.env_discrete
func_q = self.func_q_type2
transition_batch = self.transition_discrete
q = Q(func_q, env, random_seed=11)
q_targ = q.copy()
updater = Sarsa(q, q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(transition_batch)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_update_boxspace(self):
env = self.env_boxspace
func_q = self.func_q_type1
transition_batch = self.transition_boxspace
q = Q(func_q, env, random_seed=11)
q_targ = q.copy()
updater = Sarsa(q, q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(transition_batch)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_policyreg_discrete(self):
env = self.env_discrete
func_q = self.func_q_type1
func_pi = self.func_pi_discrete
transition_batch = self.transition_discrete
q = Q(func_q, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
q_targ = q.copy()
params_init = deepcopy(q.params)
function_state_init = deepcopy(q.function_state)
# first update without policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = Sarsa(q, q_targ, optimizer=sgd(1.0))
updater.update(transition_batch)
params_without_reg = deepcopy(q.params)
function_state_without_reg = deepcopy(q.function_state)
self.assertPytreeNotEqual(params_without_reg, params_init)
self.assertPytreeNotEqual(function_state_without_reg, function_state_init)
# reset weights
q = Q(func_q, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
q_targ = q.copy()
self.assertPytreeAlmostEqual(params_init, q.params)
self.assertPytreeAlmostEqual(function_state_init, q.function_state)
# then update with policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = Sarsa(q, q_targ, optimizer=sgd(1.0), policy_regularizer=policy_reg)
print('updater.target_params:', updater.target_params)
print('updater.target_function_state:', updater.target_function_state)
updater.update(transition_batch)
params_with_reg = deepcopy(q.params)
function_state_with_reg = deepcopy(q.function_state)
self.assertPytreeNotEqual(params_with_reg, params_init)
self.assertPytreeNotEqual(function_state_with_reg, function_state_init)
self.assertPytreeNotEqual(params_with_reg, params_without_reg)
self.assertPytreeAlmostEqual(function_state_with_reg, function_state_without_reg) # same!
def test_policyreg_boxspace(self):
env = self.env_boxspace
func_q = self.func_q_type1
func_pi = self.func_pi_boxspace
transition_batch = self.transition_boxspace
q = Q(func_q, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
q_targ = q.copy()
params_init = deepcopy(q.params)
function_state_init = deepcopy(q.function_state)
# first update without policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = Sarsa(q, q_targ, optimizer=sgd(1.0))
updater.update(transition_batch)
params_without_reg = deepcopy(q.params)
function_state_without_reg = deepcopy(q.function_state)
self.assertPytreeNotEqual(params_without_reg, params_init)
self.assertPytreeNotEqual(function_state_without_reg, function_state_init)
# reset weights
q = Q(func_q, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
q_targ = q.copy()
self.assertPytreeAlmostEqual(params_init, q.params, decimal=10)
self.assertPytreeAlmostEqual(function_state_init, q.function_state, decimal=10)
# then update with policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = Sarsa(q, q_targ, optimizer=sgd(1.0), policy_regularizer=policy_reg)
updater.update(transition_batch)
params_with_reg = deepcopy(q.params)
function_state_with_reg = deepcopy(q.function_state)
self.assertPytreeNotEqual(params_with_reg, params_init)
self.assertPytreeNotEqual(params_with_reg, params_without_reg) # <--- important
self.assertPytreeNotEqual(function_state_with_reg, function_state_init)
self.assertPytreeAlmostEqual(function_state_with_reg, function_state_without_reg) # same!
| 5,906 | 38.119205 | 98 | py |
null | coax-main/coax/td_learning/_simple_td.py | import haiku as hk
from ..utils import is_stochastic
from ._base import BaseTDLearningV
class SimpleTD(BaseTDLearningV):
r"""
TD-learning for state value functions :math:`v(s)`. The :math:`n`-step bootstrapped target is
constructed as:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t + I^{(n)}_t\,v_\text{targ}(S_{t+n})
where
.. math::
R^{(n)}_t\ &=\ \sum_{k=0}^{n-1}\gamma^kR_{t+k} \\
I^{(n)}_t\ &=\ \left\{\begin{matrix}
0 & \text{if $S_{t+n}$ is a terminal state} \\
\gamma^n & \text{otherwise}
\end{matrix}\right.
Parameters
----------
v : V
The main state value function to update.
v_targ : V, optional
The state value function that is used for constructing the TD-target. If this is left
unspecified, we set ``v_targ = v`` internally.
optimizer : optax optimizer, optional
An optax-style optimizer. The default optimizer is :func:`optax.adam(1e-3)
<optax.adam>`.
loss_function : callable, optional
The loss function that will be used to regress to the (bootstrapped) target. The loss
function is expected to be of the form:
.. math::
L(y_\text{true}, y_\text{pred}, w)\in\mathbb{R}
where :math:`w>0` are sample weights. If left unspecified, this defaults to
:func:`coax.value_losses.huber`. Check out the :mod:`coax.value_losses` module for other
predefined loss functions.
policy_regularizer : Regularizer, optional
If provided, this policy regularizer is added to the TD-target. A typical example is to use
an :class:`coax.regularizers.EntropyRegularizer`, which adds the policy entropy to
the target. In this case, we minimize the following loss shifted by the entropy term:
.. math::
L(y_\text{true} + \beta\,H[\pi], y_\text{pred})
Note that the coefficient :math:`\beta` plays the role of the temperature in SAC-style
agents.
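    Example
    -------
    A minimal usage sketch; :code:`func`, :code:`env` and the traced :code:`transition_batch`
    are assumed to be defined elsewhere:
    .. code:: python
        import coax
        import optax
        v = coax.V(func, env)
        v_targ = v.copy()  # separate target network
        simple_td = coax.td_learning.SimpleTD(v, v_targ, optimizer=optax.adam(1e-3))
        # inside the training loop:
        metrics = simple_td.update(transition_batch)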
"""
def target_func(self, target_params, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
params, state = target_params['v_targ'], target_state['v_targ']
S_next = self.v_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(self.v):
return self._get_target_dist_params(params, state, next(rngs), transition_batch)
V_next, _ = self.v_targ.function(params, state, next(rngs), S_next, False)
f, f_inv = self.v.value_transform.transform_func, self.v_targ.value_transform.inverse_func
return f(transition_batch.Rn + transition_batch.In * f_inv(V_next))
| 2,704 | 31.987805 | 99 | py |
null | coax-main/coax/td_learning/_simple_td_test.py | from copy import deepcopy
from optax import sgd
from .._base.test_case import TestCase
from .._core.v import V
from .._core.policy import Policy
from ..utils import get_transition_batch
from ..regularizers import EntropyRegularizer
from ..value_transforms import LogTransform
from ._simple_td import SimpleTD
class TestSimpleTD(TestCase):
def setUp(self):
self.transition_discrete = get_transition_batch(self.env_discrete, random_seed=42)
self.transition_boxspace = get_transition_batch(self.env_boxspace, random_seed=42)
def test_update_discrete(self):
env = self.env_discrete
func_v = self.func_v
v = V(func_v, env, random_seed=11)
v_targ = v.copy()
updater = SimpleTD(v, v_targ, optimizer=sgd(1.0))
params = deepcopy(v.params)
function_state = deepcopy(v.function_state)
updater.update(self.transition_discrete)
self.assertPytreeNotEqual(params, v.params)
self.assertPytreeNotEqual(function_state, v.function_state)
def test_update_boxspace(self):
env = self.env_boxspace
func_v = self.func_v
v = V(func_v, env, random_seed=11)
v_targ = v.copy()
updater = SimpleTD(v, v_targ, optimizer=sgd(1.0))
params = deepcopy(v.params)
function_state = deepcopy(v.function_state)
updater.update(self.transition_boxspace)
self.assertPytreeNotEqual(params, v.params)
self.assertPytreeNotEqual(function_state, v.function_state)
def test_policyreg_discrete(self):
env = self.env_discrete
func_v = self.func_v
func_pi = self.func_pi_discrete
transition_batch = self.transition_discrete
v = V(func_v, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
v_targ = v.copy()
params_init = deepcopy(v.params)
function_state_init = deepcopy(v.function_state)
# first update without policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = SimpleTD(v, v_targ, optimizer=sgd(1.0))
updater.update(transition_batch)
params_without_reg = deepcopy(v.params)
function_state_without_reg = deepcopy(v.function_state)
self.assertPytreeNotEqual(params_without_reg, params_init)
self.assertPytreeNotEqual(function_state_without_reg, function_state_init)
# reset weights
v = V(func_v, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
v_targ = v.copy()
self.assertPytreeAlmostEqual(params_init, v.params)
self.assertPytreeAlmostEqual(function_state_init, v.function_state)
# then update with policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = SimpleTD(v, v_targ, optimizer=sgd(1.0), policy_regularizer=policy_reg)
updater.update(transition_batch)
params_with_reg = deepcopy(v.params)
function_state_with_reg = deepcopy(v.function_state)
self.assertPytreeNotEqual(params_with_reg, params_init)
self.assertPytreeNotEqual(function_state_with_reg, function_state_init)
self.assertPytreeNotEqual(params_with_reg, params_without_reg)
self.assertPytreeAlmostEqual(function_state_with_reg, function_state_without_reg) # same!
def test_policyreg_boxspace(self):
env = self.env_boxspace
func_v = self.func_v
func_pi = self.func_pi_boxspace
transition_batch = self.transition_boxspace
v = V(func_v, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
v_targ = v.copy()
params_init = deepcopy(v.params)
function_state_init = deepcopy(v.function_state)
# first update without policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = SimpleTD(v, v_targ, optimizer=sgd(1.0))
updater.update(transition_batch)
params_without_reg = deepcopy(v.params)
function_state_without_reg = deepcopy(v.function_state)
self.assertPytreeNotEqual(params_without_reg, params_init)
self.assertPytreeNotEqual(function_state_without_reg, function_state_init)
# reset weights
v = V(func_v, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
v_targ = v.copy()
self.assertPytreeAlmostEqual(params_init, v.params)
self.assertPytreeAlmostEqual(function_state_init, v.function_state)
# then update with policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = SimpleTD(v, v_targ, optimizer=sgd(1.0), policy_regularizer=policy_reg)
updater.update(transition_batch)
params_with_reg = deepcopy(v.params)
function_state_with_reg = deepcopy(v.function_state)
self.assertPytreeNotEqual(params_with_reg, params_init)
self.assertPytreeNotEqual(function_state_with_reg, function_state_init)
self.assertPytreeNotEqual(params_with_reg, params_without_reg)
self.assertPytreeAlmostEqual(function_state_with_reg, function_state_without_reg) # same!
def test_value_transform(self):
env = self.env_discrete
func_v = self.func_v
transition_batch = self.transition_discrete
v = V(func_v, env, random_seed=11)
params_init = deepcopy(v.params)
function_state_init = deepcopy(v.function_state)
# first update without value transform
updater = SimpleTD(v, optimizer=sgd(1.0))
updater.update(transition_batch)
params_without_reg = deepcopy(v.params)
function_state_without_reg = deepcopy(v.function_state)
self.assertPytreeNotEqual(params_without_reg, params_init)
self.assertPytreeNotEqual(function_state_without_reg, function_state_init)
# reset weights
v = V(func_v, env, value_transform=LogTransform(), random_seed=11)
self.assertPytreeAlmostEqual(params_init, v.params)
self.assertPytreeAlmostEqual(function_state_init, v.function_state)
# then update with value transform
updater = SimpleTD(v, optimizer=sgd(1.0))
updater.update(transition_batch)
params_with_reg = deepcopy(v.params)
function_state_with_reg = deepcopy(v.function_state)
self.assertPytreeNotEqual(params_with_reg, params_init)
self.assertPytreeNotEqual(function_state_with_reg, function_state_init)
self.assertPytreeNotEqual(params_with_reg, params_without_reg)
self.assertPytreeAlmostEqual(function_state_with_reg, function_state_without_reg) # same!
| 6,622 | 39.384146 | 98 | py |
null | coax-main/coax/td_learning/_softclippeddoubleqlearning.py | import haiku as hk
import jax
import jax.numpy as jnp
from gymnasium.spaces import Discrete
from ..utils import (batch_to_single, is_stochastic, single_to_batch,
stack_trees)
from ._clippeddoubleqlearning import ClippedDoubleQLearning
class SoftClippedDoubleQLearning(ClippedDoubleQLearning):
def target_func(self, target_params, target_state, rng, transition_batch):
"""
This does almost the same as `ClippedDoubleQLearning.target_func` except that
the action for the next state is sampled instead of taking the mode.
"""
rngs = hk.PRNGSequence(rng)
# collect list of q-values
if isinstance(self.q.action_space, Discrete):
Q_sa_next_list = []
A_next_list = []
qs = list(zip(self.q_targ_list, target_params['q_targ'], target_state['q_targ']))
# compute A_next from q_i
for q_i, params_i, state_i in qs:
S_next = q_i.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(q_i):
Q_s_next = q_i.mean_func_type2(params_i, state_i, next(rngs), S_next)
Q_s_next = q_i.proba_dist.postprocess_variate(
next(rngs), Q_s_next, batch_mode=True)
else:
Q_s_next, _ = q_i.function_type2(params_i, state_i, next(rngs), S_next, False)
assert Q_s_next.ndim == 2, f"bad shape: {Q_s_next.shape}"
A_next = (Q_s_next == Q_s_next.max(axis=1, keepdims=True)).astype(Q_s_next.dtype)
A_next /= A_next.sum(axis=1, keepdims=True) # there may be ties
# evaluate on q_j
for q_j, params_j, state_j in qs:
S_next = q_j.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(q_j):
Q_sa_next = q_j.mean_func_type1(
params_j, state_j, next(rngs), S_next, A_next)
Q_sa_next = q_j.proba_dist.postprocess_variate(
next(rngs), Q_sa_next, batch_mode=True)
else:
Q_sa_next, _ = q_j.function_type1(
params_j, state_j, next(rngs), S_next, A_next, False)
assert Q_sa_next.ndim == 1, f"bad shape: {Q_sa_next.shape}"
f_inv = q_j.value_transform.inverse_func
Q_sa_next_list.append(f_inv(Q_sa_next))
A_next_list.append(A_next)
else:
Q_sa_next_list = []
A_next_list = []
qs = list(zip(self.q_targ_list, target_params['q_targ'], target_state['q_targ']))
pis = list(zip(self.pi_targ_list, target_params['pi_targ'], target_state['pi_targ']))
# compute A_next from pi_i
for pi_i, params_i, state_i in pis:
S_next = pi_i.observation_preprocessor(next(rngs), transition_batch.S_next)
dist_params, _ = pi_i.function(params_i, state_i, next(rngs), S_next, False)
A_next = pi_i.proba_dist.sample(dist_params, next(rngs)) # sample instead of mode
# evaluate on q_j
for q_j, params_j, state_j in qs:
S_next = q_j.observation_preprocessor(next(rngs), transition_batch.S_next)
if is_stochastic(q_j):
Q_sa_next = q_j.mean_func_type1(
params_j, state_j, next(rngs), S_next, A_next)
Q_sa_next = q_j.proba_dist.postprocess_variate(
next(rngs), Q_sa_next, batch_mode=True)
else:
Q_sa_next, _ = q_j.function_type1(
params_j, state_j, next(rngs), S_next, A_next, False)
assert Q_sa_next.ndim == 1, f"bad shape: {Q_sa_next.shape}"
f_inv = q_j.value_transform.inverse_func
Q_sa_next_list.append(f_inv(Q_sa_next))
A_next_list.append(A_next)
# take the min to mitigate over-estimation
A_next_list = jnp.stack(A_next_list, axis=1)
Q_sa_next_list = jnp.stack(Q_sa_next_list, axis=-1)
assert Q_sa_next_list.ndim == 2, f"bad shape: {Q_sa_next_list.shape}"
if is_stochastic(self.q):
Q_sa_next_argmin = jnp.argmin(Q_sa_next_list, axis=-1)
Q_sa_next_argmin_q = Q_sa_next_argmin % len(self.q_targ_list)
def target_dist_params(A_next_idx, q_targ_idx, p, s, t, A_next_list):
return self._get_target_dist_params(batch_to_single(p, q_targ_idx),
batch_to_single(s, q_targ_idx),
next(rngs),
single_to_batch(t),
single_to_batch(batch_to_single(A_next_list,
A_next_idx)))
def tile_parameters(params, state, reps):
return jax.tree_util.tree_map(lambda t: jnp.tile(t, [reps, *([1] * (t.ndim - 1))]),
stack_trees(params, state))
# stack and tile q-function params to select the argmin for the target dist params
tiled_target_params, tiled_target_state = tile_parameters(
target_params['q_targ'], target_state['q_targ'], reps=len(self.q_targ_list))
vtarget_dist_params = jax.vmap(target_dist_params, in_axes=(0, 0, None, None, 0, 0))
dist_params = vtarget_dist_params(
Q_sa_next_argmin,
Q_sa_next_argmin_q,
tiled_target_params,
tiled_target_state,
transition_batch,
A_next_list)
# unwrap dist params computed for single batches
return jax.tree_util.tree_map(lambda t: jnp.squeeze(t, axis=1), dist_params)
Q_sa_next = jnp.min(Q_sa_next_list, axis=-1)
assert Q_sa_next.ndim == 1, f"bad shape: {Q_sa_next.shape}"
f = self.q.value_transform.transform_func
return f(transition_batch.Rn + transition_batch.In * Q_sa_next)
| 6,368 | 51.204918 | 99 | py |
null | coax-main/coax/td_learning/_softqlearning.py | import haiku as hk
from jax.scipy.special import logsumexp
from gymnasium.spaces import Discrete
from ..utils import is_stochastic
from ._base import BaseTDLearningQ
class SoftQLearning(BaseTDLearningQ):
r"""
TD-learning with soft Q-learning updates. The :math:`n`-step bootstrapped target is constructed
as:
.. math::
G^{(n)}_t\ =\ R^{(n)}_t
+ I^{(n)}_t\,\tau\log\sum_{a'}\exp\left(q_\text{targ}(S_{t+n}, a') / \tau\right)
where
.. math::
R^{(n)}_t\ &=\ \sum_{k=0}^{n-1}\gamma^kR_{t+k} \\
I^{(n)}_t\ &=\ \left\{\begin{matrix}
0 & \text{if $S_{t+n}$ is a terminal state} \\
\gamma^n & \text{otherwise}
\end{matrix}\right.
Parameters
----------
q : Q
The main q-function to update.
q_targ : Q, optional
The q-function that is used for constructing the TD-target. If this is left unspecified, we
set ``q_targ = q`` internally.
optimizer : optax optimizer, optional
An optax-style optimizer. The default optimizer is :func:`optax.adam(1e-3)
<optax.adam>`.
loss_function : callable, optional
The loss function that will be used to regress to the (bootstrapped) target. The loss
function is expected to be of the form:
.. math::
L(y_\text{true}, y_\text{pred}, w)\in\mathbb{R}
where :math:`w>0` are sample weights. If left unspecified, this defaults to
:func:`coax.value_losses.huber`. Check out the :mod:`coax.value_losses` module for other
predefined loss functions.
policy_regularizer : Regularizer, optional
If provided, this policy regularizer is added to the TD-target. A typical example is to use
an :class:`coax.regularizers.EntropyRegularizer`, which adds the policy entropy to
the target. In this case, we minimize the following loss shifted by the entropy term:
.. math::
L(y_\text{true} + \beta\,H[\pi], y_\text{pred})
Note that the coefficient :math:`\beta` plays the role of the temperature in SAC-style
agents.
temperature : float, optional
The Boltzmann temperature :math:`\tau>0`.
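    Example
    -------
    A minimal usage sketch; :code:`func`, :code:`env` and the traced :code:`transition_batch`
    are assumed to be defined elsewhere, and the temperature is illustrative only:
    .. code:: python
        import coax
        import optax
        q = coax.Q(func, env)
        q_targ = q.copy()  # separate target network
        soft_qlearning = coax.td_learning.SoftQLearning(
            q, q_targ, optimizer=optax.adam(1e-3), temperature=0.1)
        # inside the training loop:
        metrics = soft_qlearning.update(transition_batch)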
"""
def __init__(
self, q, q_targ=None, optimizer=None, loss_function=None, policy_regularizer=None,
temperature=1.0):
if not isinstance(q.action_space, Discrete):
raise NotImplementedError(
                f"{self.__class__.__name__} class is only implemented for discrete action spaces")
if is_stochastic(q):
            raise NotImplementedError(
                f"{type(self).__name__} is not yet implemented for StochasticQ")
self.temperature = temperature
super().__init__(
q=q,
q_targ=q_targ,
optimizer=optimizer,
loss_function=loss_function,
policy_regularizer=policy_regularizer)
def target_func(self, target_params, target_state, rng, transition_batch):
rngs = hk.PRNGSequence(rng)
params, state = target_params['q_targ'], target_state['q_targ']
S_next = self.q_targ.observation_preprocessor(next(rngs), transition_batch.S_next)
Q_s_next, _ = self.q_targ.function_type2(params, state, next(rngs), S_next, False)
assert Q_s_next.ndim == 2
assert Q_s_next.shape[1] == self.q.action_space.n
Q_sa_next = self.temperature * logsumexp(Q_s_next / self.temperature, axis=-1)
assert Q_sa_next.ndim == 1
f, f_inv = self.q.value_transform.transform_func, self.q_targ.value_transform.inverse_func
return f(transition_batch.Rn + transition_batch.In * f_inv(Q_sa_next))
| 3,698 | 33.570093 | 100 | py |
null | coax-main/coax/td_learning/_softqlearning_test.py | from copy import deepcopy
from optax import sgd
from .._base.test_case import TestCase
from .._core.q import Q
from .._core.policy import Policy
from ..utils import get_transition_batch
from ..regularizers import EntropyRegularizer
from ._softqlearning import SoftQLearning
class TestSoftQLearning(TestCase):
def setUp(self):
self.transition_discrete = get_transition_batch(self.env_discrete, random_seed=42)
self.transition_boxspace = get_transition_batch(self.env_boxspace, random_seed=42)
def test_update_discrete_type1(self):
env = self.env_discrete
func_q = self.func_q_type1
transition_batch = self.transition_discrete
q = Q(func_q, env, random_seed=11)
q_targ = q.copy()
updater = SoftQLearning(q, q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(transition_batch)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_update_discrete_type2(self):
env = self.env_discrete
func_q = self.func_q_type2
transition_batch = self.transition_discrete
q = Q(func_q, env, random_seed=11)
q_targ = q.copy()
updater = SoftQLearning(q, q_targ, optimizer=sgd(1.0))
params = deepcopy(q.params)
function_state = deepcopy(q.function_state)
updater.update(transition_batch)
self.assertPytreeNotEqual(params, q.params)
self.assertPytreeNotEqual(function_state, q.function_state)
def test_update_boxspace(self):
env = self.env_boxspace
func_q = self.func_q_type1
q = Q(func_q, env, random_seed=11)
q_targ = q.copy()
        msg = r"SoftQLearning class is only implemented for discrete action spaces"
with self.assertRaisesRegex(NotImplementedError, msg):
SoftQLearning(q, q_targ, optimizer=sgd(1.0))
def test_policyreg_discrete(self):
env = self.env_discrete
func_q = self.func_q_type1
func_pi = self.func_pi_discrete
transition_batch = self.transition_discrete
q = Q(func_q, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
q_targ = q.copy()
params_init = deepcopy(q.params)
function_state_init = deepcopy(q.function_state)
# first update without policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = SoftQLearning(q, q_targ, optimizer=sgd(1.0))
updater.update(transition_batch)
params_without_reg = deepcopy(q.params)
function_state_without_reg = deepcopy(q.function_state)
self.assertPytreeNotEqual(params_without_reg, params_init)
self.assertPytreeNotEqual(function_state_without_reg, function_state_init)
# reset weights
q = Q(func_q, env, random_seed=11)
pi = Policy(func_pi, env, random_seed=17)
q_targ = q.copy()
self.assertPytreeAlmostEqual(params_init, q.params)
self.assertPytreeAlmostEqual(function_state_init, q.function_state)
# then update with policy regularizer
policy_reg = EntropyRegularizer(pi, beta=1.0)
updater = SoftQLearning(q, q_targ, optimizer=sgd(1.0), policy_regularizer=policy_reg)
print('updater.target_params:', updater.target_params)
print('updater.target_function_state:', updater.target_function_state)
updater.update(transition_batch)
params_with_reg = deepcopy(q.params)
function_state_with_reg = deepcopy(q.function_state)
self.assertPytreeNotEqual(params_with_reg, params_init)
self.assertPytreeNotEqual(function_state_with_reg, function_state_init)
self.assertPytreeNotEqual(params_with_reg, params_without_reg)
self.assertPytreeAlmostEqual(function_state_with_reg, function_state_without_reg) # same!
| 3,958 | 36.704762 | 98 | py |
null | coax-main/coax/utils/__init__.py | r"""
Utilities
=========
This is a collection of utility (helper) functions used throughout the package.
.. autosummary::
:nosignatures:
coax.utils.OrnsteinUhlenbeckNoise
coax.utils.StepwiseLinearFunction
coax.utils.SegmentTree
coax.utils.SumTree
coax.utils.MinTree
coax.utils.MaxTree
coax.utils.argmax
coax.utils.argmin
coax.utils.batch_to_single
coax.utils.check_array
coax.utils.check_preprocessors
coax.utils.clipped_logit
coax.utils.default_preprocessor
coax.utils.diff_transform
coax.utils.diff_transform_matrix
coax.utils.docstring
coax.utils.double_relu
coax.utils.dump
coax.utils.dumps
coax.utils.enable_logging
coax.utils.generate_gif
coax.utils.get_env_attr
coax.utils.get_grads_diagnostics
coax.utils.get_magnitude_quantiles
coax.utils.get_transition_batch
coax.utils.has_env_attr
coax.utils.idx
coax.utils.is_policy
coax.utils.is_qfunction
coax.utils.is_reward_function
coax.utils.is_stochastic
coax.utils.is_transition_model
coax.utils.is_vfunction
coax.utils.isscalar
coax.utils.jit
coax.utils.load
coax.utils.loads
coax.utils.make_dmc
coax.utils.merge_dicts
coax.utils.pretty_print
coax.utils.pretty_repr
coax.utils.quantiles
coax.utils.quantiles_uniform
coax.utils.quantile_cos_embedding
coax.utils.reload_recursive
coax.utils.render_episode
coax.utils.safe_sample
coax.utils.single_to_batch
coax.utils.stack_trees
coax.utils.sync_shared_params
coax.utils.tree_ravel
coax.utils.unvectorize
Object Reference
----------------
.. autoclass:: coax.utils.OrnsteinUhlenbeckNoise
.. autoclass:: coax.utils.StepwiseLinearFunction
.. autoclass:: coax.utils.SegmentTree
.. autoclass:: coax.utils.SumTree
.. autoclass:: coax.utils.MinTree
.. autoclass:: coax.utils.MaxTree
.. autofunction:: coax.utils.argmax
.. autofunction:: coax.utils.argmin
.. autofunction:: coax.utils.batch_to_single
.. autofunction:: coax.utils.check_array
.. autofunction:: coax.utils.check_preprocessors
.. autofunction:: coax.utils.clipped_logit
.. autofunction:: coax.utils.default_preprocessor
.. autofunction:: coax.utils.diff_transform
.. autofunction:: coax.utils.diff_transform_matrix
.. autofunction:: coax.utils.docstring
.. autofunction:: coax.utils.double_relu
.. autofunction:: coax.utils.dump
.. autofunction:: coax.utils.dumps
.. autofunction:: coax.utils.enable_logging
.. autofunction:: coax.utils.generate_gif
.. autofunction:: coax.utils.get_env_attr
.. autofunction:: coax.utils.get_grads_diagnostics
.. autofunction:: coax.utils.get_magnitude_quantiles
.. autofunction:: coax.utils.get_transition_batch
.. autofunction:: coax.utils.has_env_attr
.. autofunction:: coax.utils.idx
.. autofunction:: coax.utils.is_policy
.. autofunction:: coax.utils.is_qfunction
.. autofunction:: coax.utils.is_reward_function
.. autofunction:: coax.utils.is_stochastic
.. autofunction:: coax.utils.is_transition_model
.. autofunction:: coax.utils.is_vfunction
.. autofunction:: coax.utils.isscalar
.. autofunction:: coax.utils.jit
.. autofunction:: coax.utils.load
.. autofunction:: coax.utils.loads
.. autofunction:: coax.utils.make_dmc
.. autofunction:: coax.utils.merge_dicts
.. autofunction:: coax.utils.pretty_print
.. autofunction:: coax.utils.pretty_repr
.. autofunction:: coax.utils.quantiles
.. autofunction:: coax.utils.quantiles_uniform
.. autofunction:: coax.utils.quantile_cos_embedding
.. autofunction:: coax.utils.reload_recursive
.. autofunction:: coax.utils.render_episode
.. autofunction:: coax.utils.safe_sample
.. autofunction:: coax.utils.single_to_batch
.. autofunction:: coax.utils.stack_trees
.. autofunction:: coax.utils.sync_shared_params
.. autofunction:: coax.utils.tree_ravel
.. autofunction:: coax.utils.unvectorize
"""
from ._action_noise import OrnsteinUhlenbeckNoise
from ._array import (
StepwiseLinearFunction,
argmax,
argmin,
batch_to_single,
check_array,
check_preprocessors,
chunks_pow2,
clipped_logit,
default_preprocessor,
diff_transform,
diff_transform_matrix,
double_relu,
get_grads_diagnostics,
get_magnitude_quantiles,
get_transition_batch,
idx,
isscalar,
merge_dicts,
safe_sample,
single_to_batch,
stack_trees,
sync_shared_params,
tree_ravel,
unvectorize,
)
from ._jit import jit
from ._misc import (
docstring,
dump,
dumps,
enable_logging,
generate_gif,
get_env_attr,
has_env_attr,
is_policy,
is_qfunction,
is_reward_function,
is_stochastic,
is_transition_model,
is_vfunction,
load,
loads,
pretty_print,
pretty_repr,
reload_recursive,
render_episode,
)
from ._segment_tree import SegmentTree, SumTree, MinTree, MaxTree
from ._quantile_funcs import quantiles, quantiles_uniform, quantile_cos_embedding
from ._dmc_gym import make_dmc
__all__ = (
'StepwiseLinearFunction',
'OrnsteinUhlenbeckNoise',
'SegmentTree',
'SumTree',
'MinTree',
'MaxTree',
'argmax',
'argmin',
'batch_to_single',
'check_array',
'check_preprocessors',
'chunks_pow2',
'clipped_logit',
'default_preprocessor',
'diff_transform',
'diff_transform_matrix',
'docstring',
'double_relu',
'dump',
'dumps',
'enable_logging',
'generate_gif',
'get_env_attr',
'get_grads_diagnostics',
'get_magnitude_quantiles',
'get_transition_batch',
'has_env_attr',
'idx',
'is_policy',
'is_qfunction',
'is_reward_function',
'is_stochastic',
'is_transition_model',
'is_vfunction',
'isscalar',
'jit',
'load',
'loads',
'make_dmc',
'merge_dicts',
'pretty_print',
'pretty_repr',
'quantiles',
'quantiles_uniform',
'quantile_cos_embedding',
'reload_recursive',
'render_episode',
'safe_sample',
'single_to_batch',
'stack_trees',
'sync_shared_params',
'tree_ravel',
'unvectorize',
)
| 6,051 | 25.086207 | 81 | py |
null | coax-main/coax/utils/_action_noise.py | import numpy as onp
__all__ = (
'OrnsteinUhlenbeckNoise',
)
class OrnsteinUhlenbeckNoise:
r"""
Add `Ornstein-Uhlenbeck <https://en.wikipedia.org/wiki/Ornstein-Uhlenbeck_process>`_ noise to
continuous actions.
.. math::
A_t\ \mapsto\ \widetilde{A}_t = A_t + X_t
As a side effect, the Ornstein-Uhlenbeck noise :math:`X_t` is updated with every function call:
.. math::
X_t\ =\ X_{t-1} - \theta\,\left(X_{t-1} - \mu\right) + \sigma\,\varepsilon
where :math:`\varepsilon` is white noise, i.e. :math:`\varepsilon\sim\mathcal{N}(0,\mathbb{I})`.
The authors of the `DDPG paper <https://arxiv.org/abs/1509.02971>`_ chose to use
Ornstein-Uhlenbeck noise "*[...] in order to explore well in physical environments that have
momentum.*"
Parameters
----------
mu : float or ndarray, optional
The mean :math:`\mu` towards which the Ornstein-Uhlenbeck process should revert; must be
broadcastable with the input actions.
sigma : positive float or ndarray, optional
The spread of the noise :math:`\sigma>0` of the Ornstein-Uhlenbeck process; must be
broadcastable with the input actions.
theta : positive float or ndarray, optional
The (element-wise) dissipation rate :math:`\theta>0` of the Ornstein-Uhlenbeck process; must
be broadcastable with the input actions.
min_value : float or ndarray, optional
The lower bound used for clipping the output action; must be broadcastable with the input
actions.
max_value : float or ndarray, optional
The upper bound used for clipping the output action; must be broadcastable with the input
actions.
random_seed : int, optional
Sets the random state to get reproducible results.
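    Example
    -------
    A minimal sketch of adding exploration noise to a deterministic policy; :code:`pi`,
    :code:`env` and :code:`num_episodes` are assumed to be defined elsewhere and the noise
    settings are illustrative only:
    .. code:: python
        noise = coax.utils.OrnsteinUhlenbeckNoise(mu=0., sigma=0.2, theta=0.15, random_seed=13)
        for ep in range(num_episodes):
            s, info = env.reset()
            noise.reset()  # reset the noise process at the start of each episode
            for t in range(env.spec.max_episode_steps):
                a = noise(pi.mode(s))  # add exploration noise to the greedy action
                s_next, r, done, truncated, info = env.step(a)
                ...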
"""
def __init__(
self, mu=0., sigma=1., theta=0.15, min_value=None, max_value=None, random_seed=None):
self.mu = mu
self.theta = theta
self.sigma = sigma
self.min_value = -1e15 if min_value is None else min_value
self.max_value = 1e15 if max_value is None else max_value
self.random_seed = random_seed
self.rnd = onp.random.RandomState(self.random_seed)
self.reset()
def reset(self):
r"""
Reset the Ornstein-Uhlenbeck process.
"""
self._noise = None
def __call__(self, a):
r"""
Add some Ornstein-Uhlenbeck to a continuous action.
Parameters
----------
a : action
A single action :math:`A_t`.
Returns
-------
a_noisy : action
An action with noise added :math:`\widetilde{A}_t = A_t + X_t`.
"""
a = onp.asarray(a)
if self._noise is None:
self._noise = onp.ones_like(a) * self.mu
white_noise = onp.asarray(self.rnd.randn(*a.shape), dtype=a.dtype)
self._noise += self.theta * (self.mu - self._noise) + self.sigma * white_noise
self._noise = onp.clip(self._noise, self.min_value, self.max_value)
return a + self._noise
| 3,119 | 27.108108 | 100 | py |
null | coax-main/coax/utils/_action_noise_test.py | import jax.numpy as jnp
from .._base.test_case import TestCase
from ._action_noise import OrnsteinUhlenbeckNoise
class TestOrnsteinUhlenbeckNoise(TestCase):
def test_overall_mean_variance(self):
noise = OrnsteinUhlenbeckNoise(random_seed=13)
x = jnp.stack([noise(0.) for _ in range(1000)])
mu, sigma = jnp.mean(x), jnp.std(x)
self.assertLess(abs(mu), noise.theta)
self.assertGreater(sigma, noise.sigma)
self.assertLess(sigma, noise.sigma * 2)
| 498 | 32.266667 | 55 | py |
null | coax-main/coax/utils/_array.py | import warnings
from collections import Counter
from functools import partial
import chex
import gymnasium
import jax
import jax.numpy as jnp
import numpy as onp
import haiku as hk
from scipy.linalg import pascal
__all__ = (
'StepwiseLinearFunction',
'argmax',
'argmin',
'batch_to_single',
'check_array',
'check_preprocessors',
'chunks_pow2',
'clipped_logit',
'default_preprocessor',
'diff_transform',
'diff_transform_matrix',
'double_relu',
'get_grads_diagnostics',
'get_magnitude_quantiles',
'get_transition_batch',
'idx',
'isscalar',
'merge_dicts',
'single_to_batch',
'safe_sample',
'stack_trees',
'sync_shared_params',
'tree_ravel',
'tree_sample',
'unvectorize',
)
def argmax(rng, arr, axis=-1):
r"""
This is a little hack to ensure that argmax breaks ties randomly, which is
something that :func:`numpy.argmax` doesn't do.
Parameters
----------
rng : jax.random.PRNGKey
A pseudo-random number generator key.
arr : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
Returns
-------
index_array : ndarray of ints
        Array of indices into the array. It has the same shape as `arr.shape`
        with the dimension along `axis` removed.
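    Example
    -------
    A small illustration of the random tie-breaking; which of the tied indices is returned
    depends on the PRNG key:
    .. code:: python
        import jax
        import jax.numpy as jnp
        import coax
        rng = jax.random.PRNGKey(0)
        arr = jnp.array([3., 7., 7., 1.])
        coax.utils.argmax(rng, arr)  # either 1 or 2, chosen at random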
"""
if not isinstance(arr, jnp.ndarray):
arr = jnp.asarray(arr)
candidates = arr == jnp.max(arr, axis=axis, keepdims=True)
logits = (2 * candidates - 1) * 50. # log(max_float32) == 88.72284
logits = jnp.moveaxis(logits, axis, -1)
return jax.random.categorical(rng, logits)
def argmin(rng, arr, axis=-1):
r"""
This is a little hack to ensure that argmin breaks ties randomly, which is
something that :func:`numpy.argmin` doesn't do.
    *Note: this simply calls* ``argmax(rng, -arr, axis=axis)`` *under the hood, so ties are
    broken randomly in the same way as in* :func:`coax.utils.argmax`.
Parameters
----------
rng : jax.random.PRNGKey
A pseudo-random number generator key.
arr : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
Returns
-------
index_array : ndarray of ints
        Array of indices into the array. It has the same shape as `arr.shape`
        with the dimension along `axis` removed.
"""
return argmax(rng, -arr, axis=axis)
def batch_to_single(pytree, index=0):
r"""
Extract a single instance from a :external+jax:doc:`pytree <pytrees>` of array batches.
    This just does a ``leaf[index]`` on all leaf nodes of the :external+jax:doc:`pytree
<pytrees>`.
Parameters
----------
    pytree : pytree with ndarray leaves
        A pytree representing a batch.
    index : int, optional
        The index of the single instance to extract from each batched leaf; defaults to 0.
Returns
-------
pytree_single : pytree with ndarray leaves
A pytree representing e.g. a single state observation.
"""
return jax.tree_map(lambda arr: arr[index], pytree)
def check_array(
arr, ndim=None, ndim_min=None, ndim_max=None,
dtype=None, shape=None, axis_size=None, axis=None, except_np=False):
r"""
This helper function is mostly for internal use. It is used to check a few
common properties of a numpy array.
Raises
------
TypeError
If one of the checks fails.
"""
if not except_np and not isinstance(arr, jnp.ndarray):
raise TypeError(f"expected input to be a jnp.ndarray, got type: {type(arr)}")
if not isinstance(arr, (onp.ndarray, jnp.ndarray)):
raise TypeError(f"expected input to be an ndarray, got type: {type(arr)}")
check = ndim is not None
ndims = [ndim] if not isinstance(ndim, (list, tuple, set)) else ndim
if check and arr.ndim not in ndims:
raise TypeError(f"expected input with ndim(s) {ndim}, got ndim: {arr.ndim}")
check = ndim_min is not None
if check and arr.ndim < ndim_min:
raise TypeError(f"expected input with ndim at least {ndim_min}, got ndim: {arr.ndim}")
check = ndim_max is not None
if check and arr.ndim > ndim_max:
raise TypeError(f"expected input with ndim at most {ndim_max}, got ndim: {arr.ndim}")
check = dtype is not None
dtypes = [dtype] if not isinstance(dtype, (list, tuple, set)) else dtype
if check and arr.dtype not in dtypes:
raise TypeError(f"expected input with dtype(s) {dtype}, got dtype: {arr.dtype}")
check = shape is not None
if check and arr.shape != shape:
raise TypeError(f"expected input with shape {shape}, got shape: {arr.shape}")
check = axis_size is not None and axis is not None
sizes = [axis_size] if not isinstance(axis_size, (list, tuple, set)) else axis_size
if check and arr.shape[axis] not in sizes:
raise TypeError(
f"expected input with size(s) {axis_size} along axis {axis}, got shape: {arr.shape}")
def check_preprocessors(space, *preprocessors, num_samples=20, random_seed=None):
r"""
Check whether two preprocessors are the same.
Parameters
----------
space : gymnasium.Space
The domain of the prepocessors.
\*preprocessors
Preprocessor functions, which are functions with input signature: :code:`func(rng: PRNGKey,
x: Element[space]) -> Any`.
num_samples : positive int
        The number of samples on which to run the checks.
    random_seed : int, optional
        Seed for the pseudo-random number generator that is used to draw the samples.
Returns
-------
match : bool
Whether the preprocessors match.
"""
if len(preprocessors) < 2:
raise ValueError("need at least two preprocessors in order to run test")
def test_leaves(a, b):
assert type(a) is type(b)
return onp.testing.assert_allclose(onp.asanyarray(a), onp.asanyarray(b))
rngs = hk.PRNGSequence(onp.random.RandomState(random_seed).randint(jnp.iinfo('int32').max))
p0, *ps = preprocessors
with jax.disable_jit():
for _ in range(num_samples):
x = space.sample()
y0 = p0(next(rngs), x)
for p in ps:
y = p(next(rngs), x)
if jax.tree_util.tree_structure(y) != jax.tree_util.tree_structure(y0):
return False
try:
jax.tree_map(test_leaves, y, y0)
except AssertionError:
return False
return True
def chunks_pow2(transition_batch):
r"""
Split up a :class:`TransitionBatch <coax.reward_tracing.TransitionBatch>`
    into smaller batches with sizes equal to powers of 2. This is useful
    to reduce the overhead of repeated JIT compilation caused by varying batch sizes.
Yields
------
chunk : TransitionBatch
A smaller chunk with batch_size equal to a power of 2.
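    Example
    -------
    A small sketch in which a batch of size 7 is split into chunks of sizes 4, 2 and 1; the
    :code:`transition_batch` is assumed to come from a tracer or replay buffer:
    .. code:: python
        for chunk in coax.utils.chunks_pow2(transition_batch):  # batch_size == 7
            print(chunk.batch_size)  # prints 4, then 2, then 1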
"""
def leafslice(start, stop):
def func(leaf):
if leaf is None:
return None
return leaf[start:stop]
return func
binary = bin(transition_batch.batch_size).replace('0b', '')
start = 0
for i, b in enumerate(binary, 1):
if b == '0':
continue
stop = start + 2 ** (len(binary) - i)
yield jax.tree_map(leafslice(start, stop), transition_batch)
start = stop
def clipped_logit(x, epsilon=1e-15):
r"""
A safe implementation of the logit function :math:`x\mapsto\log(x/(1-x))`. It clips the
arguments of the log function from below so as to avoid evaluating it at 0:
.. math::
\text{logit}_\epsilon(x)\ =\
\log(\max(\epsilon, x)) - \log(\max(\epsilon, 1 - x))
Parameters
----------
x : ndarray
Input numpy array whose entries lie on the unit interval, :math:`x_i\in [0, 1]`.
epsilon : float, optional
The small number with which to clip the arguments of the logarithm from below.
Returns
-------
z : ndarray, dtype: float, shape: same as input
The output logits whose entries lie on the real line,
:math:`z_i\in\mathbb{R}`.
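    Example
    -------
    A quick sanity check; with the default :code:`epsilon` the extreme values saturate at
    roughly :math:`\pm\log(1/\epsilon)\approx\pm 34.5`:
    .. code:: python
        import jax.numpy as jnp
        import coax
        x = jnp.array([0.0, 0.5, 1.0])
        coax.utils.clipped_logit(x)  # approximately [-34.5, 0.0, 34.5]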
"""
return jnp.log(jnp.clip(x, epsilon, 1)) - jnp.log(jnp.clip(1 - x, epsilon, 1))
def default_preprocessor(space):
r"""
The default preprocessor for a given space.
Parameters
----------
space : gymnasium.Space
The domain of the prepocessor.
Returns
-------
    preprocessor : Callable[PRNGKey, Element[space], Any]
The preprocessor function. See :attr:`NormalDist.preprocess_variate
<coax.proba_dists.NormalDist.preprocess_variate>` for an example.
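    Example
    -------
    A small sketch for a discrete space; the output is one-hot encoded and gets a batch axis:
    .. code:: python
        import jax
        import gymnasium
        import coax
        space = gymnasium.spaces.Discrete(4)
        preprocess = coax.utils.default_preprocessor(space)
        preprocess(jax.random.PRNGKey(0), 2)  # [[0., 0., 1., 0.]]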
"""
if not isinstance(space, gymnasium.Space):
raise TypeError(f"space must a gymnasium.Space, got: {type(space)}")
if isinstance(space, gymnasium.spaces.Discrete):
def func(rng, X):
X = jnp.asarray(X)
X = jax.nn.one_hot(X, space.n) # one-hot encoding
X = jnp.reshape(X, (-1, space.n)) # ensure batch axis
return X
elif isinstance(space, gymnasium.spaces.Box):
def func(rng, X):
X = jnp.asarray(X, dtype=space.dtype) # ensure ndarray
X = jnp.reshape(X, (-1, *space.shape)) # ensure batch axis
X = jnp.clip(X, space.low, space.high) # clip to be safe
return X
elif isinstance(space, gymnasium.spaces.MultiDiscrete):
def func(rng, X):
rngs = jax.random.split(rng, len(space.nvec))
chex.assert_rank(X, {1, 2})
if X.ndim == 1:
X = jnp.expand_dims(X, axis=0)
return [
default_preprocessor(gymnasium.spaces.Discrete(n))(rng, X[:, i])
for i, (n, rng) in enumerate(zip(space.nvec, rngs))]
elif isinstance(space, gymnasium.spaces.MultiBinary):
def func(rng, X):
X = jnp.asarray(X, dtype=jnp.float32) # ensure ndarray
X = jnp.reshape(X, (-1, space.n)) # ensure batch axis
return X
elif isinstance(space, gymnasium.spaces.Tuple):
def func(rng, X):
rngs = hk.PRNGSequence(rng)
return tuple(
default_preprocessor(sp)(next(rngs), X[i]) for i, sp in enumerate(space.spaces))
elif isinstance(space, gymnasium.spaces.Dict):
def func(rng, X):
rngs = hk.PRNGSequence(rng)
return {k: default_preprocessor(sp)(next(rngs), X[k]) for k, sp in space.spaces.items()}
else:
raise TypeError(f"unsupported space: {space.__class__.__name__}")
return func
def diff_transform_matrix(num_frames, dtype='float32'):
r"""
A helper function that implements discrete differentiation for stacked
state observations.
Let's say we have a feature vector :math:`X` consisting of four stacked
frames, i.e. the shape would be: ``[batch_size, height, width, 4]``.
The corresponding diff-transform matrix with ``num_frames=4`` is a
:math:`4\times 4` matrix given by:
.. math::
M_\text{diff}^{(4)}\ =\ \begin{pmatrix}
-1 & 0 & 0 & 0 \\
3 & 1 & 0 & 0 \\
-3 & -2 & -1 & 0 \\
1 & 1 & 1 & 1
\end{pmatrix}
such that the diff-transformed feature vector is readily computed as:
.. math::
X_\text{diff}\ =\ X\, M_\text{diff}^{(4)}
The diff-transformation preserves the shape, but it reorganizes the frames
in such a way that they look more like canonical variables. You can think
of :math:`X_\text{diff}` as the stacked variables :math:`x`,
:math:`\dot{x}`, :math:`\ddot{x}`, etc. (in reverse order). These
represent the position, velocity, acceleration, etc. of pixels in a single
frame.
Parameters
----------
num_frames : positive int
The number of stacked frames in the original :math:`X`.
dtype : dtype, optional
The output data type.
Returns
-------
M : 2d-Tensor, shape: [num_frames, num_frames]
        A square matrix that is intended to be multiplied from the right, e.g.
        ``X_diff = jnp.dot(X_orig, M)``, where we assume that the frames are
stacked in ``axis=-1`` of ``X_orig``, in chronological order.
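    Example
    -------
    For three stacked frames the matrix works out as follows (cf. the four-frame matrix above,
    of which this is the lower-right block):
    .. code:: python
        import coax
        coax.utils.diff_transform_matrix(num_frames=3)
        # [[ 1.,  0.,  0.],
        #  [-2., -1.,  0.],
        #  [ 1.,  1.,  1.]]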
"""
assert isinstance(num_frames, int) and num_frames >= 1
s = jnp.diag(jnp.power(-1, jnp.arange(num_frames))) # alternating sign
m = s.dot(pascal(num_frames, kind='upper'))[::-1, ::-1]
return m.astype(dtype)
def diff_transform(X, dtype='float32'):
r"""
A helper function that implements discrete differentiation for stacked state observations. See
:func:`diff_transform_matrix` for a detailed description.
.. code:: python
M = diff_transform_matrix(num_frames=X.shape[-1])
X_transformed = np.dot(X, M)
Parameters
----------
X : ndarray
An array whose shape is such that the last axis is the frame-stack axis, i.e.
:code:`X.shape[-1] == num_frames`.
Returns
-------
X_transformed : ndarray
The shape is the same as the input shape, but the last axis are mixed to represent position,
velocity, acceleration, etc.
"""
M = diff_transform_matrix(num_frames=X.shape[-1])
return jnp.dot(X, M)
def double_relu(arr):
r"""
A double-ReLU, whose output is the concatenated result of :data:`-relu(-arr) <jax.nn.relu>` and
:data:`relu(arr) <jax.nn.relu>`.
This activation function has the advantage that no signal is lost between layers.
Parameters
----------
arr : ndarray
The input array, e.g. activations.
Returns
-------
doubled_arr
The output array, e.g. input for next layer.
Examples
--------
>>> import coax
>>> import jax.numpy as jnp
>>> x = jnp.array([[-11, -8],
... [ 17, 5],
... [-13, 7],
... [ 19, -3]])
...
>>> coax.utils.double_relu(x)
DeviceArray([[-11, -8, 0, 0],
[ 0, 0, 17, 5],
[-13, 0, 0, 7],
[ 0, -3, 19, 0]], dtype=int32)
There are two things we may observe from the above example. The first is that all components
from the original array are passed on as output. The second thing is that half of the output
components (along axis=1) are masked out, which means that the doubling of array size doesn't
result in doubling the amount of "activation" passed on to the next layer. It merely allows for
the neural net to learn conditional branches in its internal logic.
"""
return jnp.concatenate((-jax.nn.relu(-arr), jax.nn.relu(arr)), axis=-1)
def _get_leaf_diagnostics(leaf, key_prefix):
# update this to add more grads diagnostics
return {
f'{key_prefix}max': jnp.max(jnp.abs(leaf)),
f'{key_prefix}norm': jnp.linalg.norm(jnp.ravel(leaf)),
}
def get_grads_diagnostics(grads, key_prefix='', keep_tree_structure=False):
r"""
Given a :external+jax:doc:`pytree <pytrees>` of grads, return a dict that contains the quantiles
of the magnitudes of each individual component.
This is meant to be a high-level diagnostic. It first extracts the leaves of the pytree, then
flattens each leaf and then it computes the element-wise magnitude. Then, it concatenates all
magnitudes into one long flat array. The quantiles are computed on this array.
Parameters
----------
grads : a pytree with ndarray leaves
The gradients of some loss function with respect to the model parameters (weights).
key_prefix : str, optional
The prefix to add the output dict keys.
keep_tree_structure : bool, optional
Whether to keep the tree structure, i.e. to compute the grads diagnostics for each
individual leaf. If ``False`` (default), we only compute the global grads diagnostics.
Returns
-------
grads_diagnotics : dict<str, float>
A dict with structure ``{name: score}``.
"""
if keep_tree_structure:
return jax.tree_map(lambda g: _get_leaf_diagnostics(g, key_prefix), grads)
return _get_leaf_diagnostics(tree_ravel(grads), key_prefix)
def get_magnitude_quantiles(pytree, key_prefix=''):
r"""
Given a :external+jax:doc:`pytree <pytrees>`, return a dict that contains the quantiles of the
magnitudes of each individual component.
This is meant to be a high-level diagnostic. It first extracts the leaves of the pytree, then
flattens each leaf and then it computes the element-wise magnitude. Then, it concatenates all
magnitudes into one long flat array. The quantiles are computed on this array.
Parameters
----------
pytree : a pytree with ndarray leaves
A typical example is a pytree of model params (weights) or gradients with respect to such
model params.
key_prefix : str, optional
The prefix to add the output dict keys.
Returns
-------
magnitude_quantiles : dict
A dict with keys: ``['min', 'p25', 'p50', 'p75', 'max']``. The values of the dict are
non-negative floats that represent the magnitude quantiles.
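    Example
    -------
    A small sketch; :code:`params` is assumed to be a pytree of model weights, e.g.
    :code:`q.params`:
    .. code:: python
        coax.utils.get_magnitude_quantiles(params, key_prefix='weights/')
        # {'weights/min': ..., 'weights/p25': ..., ..., 'weights/max': ...}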
"""
quantiles = jnp.quantile(jnp.abs(tree_ravel(pytree)), jnp.array([0, 0.25, 0.5, 0.75, 1]))
quantile_names = (f'{key_prefix}{k}' for k in ('min', 'p25', 'p50', 'p75', 'max'))
return dict(zip(quantile_names, quantiles))
def get_transition_batch(env, batch_size=1, gamma=0.9, random_seed=None):
r"""
Generate a single transition from the environment.
This basically does a single step on the environment and then closes it.
Parameters
----------
env : gymnasium environment
A gymnasium-style environment.
batch_size : positive int, optional
        The desired batch size of the sample.
    gamma : float between 0 and 1, optional
        The discount factor; the generated bootstrapping factors ``In`` are sampled from
        :math:`\{0, \gamma\}`.
random_seed : int, optional
In order to generate the transition, we do some random sampling from the provided spaces.
This `random_seed` set the seed for the pseudo-random number generators.
Returns
-------
transition_batch : TransitionBatch
A batch of transitions.
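    Example
    -------
    A small sketch, assuming the gymnasium CartPole environment is available:
    .. code:: python
        import gymnasium
        import coax
        env = gymnasium.make('CartPole-v1')
        transition_batch = coax.utils.get_transition_batch(env, batch_size=4)
        assert transition_batch.batch_size == 4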
"""
# import inline to avoid circular dependencies
from ..reward_tracing import TransitionBatch
from ._array import safe_sample
# check types
if not (isinstance(batch_size, int) and batch_size > 0):
raise TypeError(f"batch_size must be a positive int, got: {batch_size}")
if not (isinstance(gamma, (int, float)) and 0 <= gamma <= 1):
raise TypeError(f"gamma must be a float in the unit interval [0,1], got: {gamma}")
rnd = onp.random.RandomState(random_seed)
def batch_sample(space):
max_seed = onp.iinfo('int32').max
X = [safe_sample(space, seed=rnd.randint(max_seed)) for _ in range(batch_size)]
return jax.tree_map(lambda *leaves: onp.stack(leaves, axis=0), *X)
return TransitionBatch(
S=batch_sample(env.observation_space),
A=batch_sample(env.action_space),
logP=onp.log(onp.clip(rnd.rand(batch_size), 0.01, 0.99)),
Rn=onp.clip(rnd.randn(batch_size), -5., 5.),
In=rnd.choice((0, gamma), batch_size),
S_next=batch_sample(env.observation_space),
A_next=batch_sample(env.action_space),
logP_next=onp.log(onp.clip(rnd.rand(batch_size), 0.01, 0.99)),
W=onp.clip(rnd.rand(batch_size) / rnd.rand(batch_size), 0.01, 100.),
)
def idx(arr, axis=0):
r"""
Given a numpy array, return its corresponding integer index array.
Parameters
----------
arr : array
Input array.
axis : int, optional
The axis along which we'd like to get an index.
Returns
-------
index : 1d array, shape: arr.shape[axis]
An index array `[0, 1, 2, ...]`.
"""
check_array(arr, ndim_min=1)
return jnp.arange(arr.shape[axis])
def isscalar(num):
r"""
This helper uses a slightly looser definition of scalar compared to :func:`numpy.isscalar` (and
:func:`jax.numpy.isscalar`) in that it also considers single-item arrays to be scalars as well.
Parameters
----------
num : number or ndarray
Input array.
Returns
-------
isscalar : bool
Whether the input number is either a number or a single-item array.
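    Example
    -------
    A few illustrative cases:
    .. code:: python
        import jax.numpy as jnp
        isscalar(13.0)              # True
        isscalar(jnp.array(13.0))   # True
        isscalar(jnp.array([13.0])) # True
        isscalar(jnp.ones(2))       # False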
"""
return jnp.isscalar(num) or (
isinstance(num, (jnp.ndarray, onp.ndarray)) and jnp.size(num) == 1)
def merge_dicts(*dicts):
r"""
Merge dicts into a single dict.
WARNING: duplicate keys are not resolved.
Parameters
----------
\*dicts : \*dict
Multiple dictionaries.
Returns
-------
merged : dict
A single dictionary.
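    Example
    -------
    A minimal sketch:
    .. code:: python
        merge_dicts({'a': 1}, {'b': 2}, {'c': 3})
        # {'a': 1, 'b': 2, 'c': 3}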
"""
merged = {}
for d in dicts:
overlap = set(d).intersection(merged)
if overlap:
warnings.warn(f"merge_dicts found overlapping keys: {tuple(overlap)}")
merged.update(d)
return merged
class StepwiseLinearFunction:
r"""
Stepwise linear function. The function remains flat outside of the regions defined by
:code:`steps`.
Parameters
----------
\*steps : sequence of tuples (int, float)
Each step :code:`(timestep, value)` fixes the output value at :code:`timestep` to the
provided :code:`value`.
Example
-------
Here's an example of the exploration schedule in a DQN agent:
.. code::
pi = coax.EpsilonGreedy(q, epsilon=1.0)
epsilon = StepwiseLinearFunction((0, 1.0), (1000000, 0.1), (2000000, 0.01))
for _ in range(num_episodes):
pi.epsilon = epsilon(T) # T is a global step counter
...
.. image:: /_static/img/piecewise_linear_function.svg
:alt: description
:width: 100%
:align: left
Notice that the function is flat outside the interpolation range provided by :code:`steps`.
"""
def __init__(self, *steps):
if len(steps) < 2:
raise TypeError("need at least two steps")
if not all(
isinstance(s, tuple) and len(s) == 2 # check if pair
and isinstance(s[0], int) and isinstance(s[1], (float, int)) # check types
for s in steps):
raise TypeError("all steps must be pairs (size-2 tuples) of (int, type(start_value))")
if not all(t1 < t2 for (t1, _), (t2, _) in zip(steps, steps[1:])): # check if consecutive
raise ValueError(
"steps [(t1, value), ..., (t2, value)] must be provided in ascending order, i.e. "
"0 < t1 < t2 < ... < tn")
self._start_value = float(steps[0][1])
self._final_value = float(steps[-1][1])
self._offsets = onp.array([t for t, _ in steps])
self._intercepts = onp.array([v for _, v in steps])
self._index = onp.arange(len(steps))
self._slopes = onp.array([
(v_next - v) / (t_next - t) for (t, v), (t_next, v_next) in zip(steps, steps[1:])])
def __call__(self, timestep):
r"""
Return the value according to the provided schedule.
"""
mask = self._offsets <= timestep
if not onp.any(mask):
return self._start_value
if onp.all(mask):
return self._final_value
i = onp.max(self._index[mask])
return self._intercepts[i] + self._slopes[i] * (timestep - self._offsets[i])
def _safe_sample(space, rnd):
if isinstance(space, gymnasium.spaces.Discrete):
return rnd.randint(space.n)
if isinstance(space, gymnasium.spaces.MultiDiscrete):
return onp.asarray([rnd.randint(n) for n in space.nvec])
if isinstance(space, gymnasium.spaces.MultiBinary):
return rnd.randint(2, size=space.n)
if isinstance(space, gymnasium.spaces.Box):
low = onp.clip(space.low, -1e9, 1e9)
high = onp.clip(space.high, -1e9, 1e9)
x = low + rnd.rand(*space.shape) * (high - low)
return onp.sign(x) * onp.log(1. + onp.abs(x)) # log transform to avoid very large numbers
if isinstance(space, gymnasium.spaces.Tuple):
return tuple(_safe_sample(sp, rnd) for sp in space.spaces)
if isinstance(space, gymnasium.spaces.Dict):
return {k: _safe_sample(space.spaces[k], rnd) for k in sorted(space.spaces)}
# fallback for non-supported spaces
return space.sample()
def safe_sample(space, seed=None):
r"""
Safely sample from a gymnasium-style space.
Parameters
----------
space : gymnasium.Space
A gymnasium-style space.
seed : int, optional
The seed for the pseudo-random number generator.
Returns
-------
    sample
        A single sample from the given ``space``.
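    Example
    -------
    A minimal sketch (the Discrete space is just an illustration):
    .. code:: python
        import gymnasium
        space = gymnasium.spaces.Discrete(7)
        a = safe_sample(space, seed=13)  # an int in {0, ..., 6}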
"""
if not isinstance(space, gymnasium.Space):
raise TypeError("space must be derived from gymnasium.Space")
rnd = seed if isinstance(seed, onp.random.RandomState) else onp.random.RandomState(seed)
return _safe_sample(space, rnd)
def single_to_batch(pytree):
r"""
Take a single instance and turn it into a batch of size 1.
This just does an ``np.expand_dims(leaf, axis=0)`` on all leaf nodes of the
:external+jax:doc:`pytree <pytrees>`.
Parameters
----------
pytree_single : pytree with ndarray leaves
A pytree representing e.g. a single state observation.
Returns
-------
pytree_batch : pytree with ndarray leaves
A pytree representing a batch with ``batch_size=1``.
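    Example
    -------
    A minimal sketch:
    .. code:: python
        import jax.numpy as jnp
        s = {'x': jnp.zeros(7), 'i': jnp.array(3)}
        s_batch = single_to_batch(s)
        # s_batch['x'].shape == (1, 7) and s_batch['i'].shape == (1,)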
"""
return jax.tree_map(lambda arr: jnp.expand_dims(arr, axis=0), pytree)
def sync_shared_params(*params, weights=None):
r"""
Synchronize shared params. See the :doc:`A2C stub </examples/stubs/a2c>` for example usage.
Parameters
----------
*params : multiple hk.Params objects
The parameter dicts that contain shared parameters.
weights : list of positive floats
The relative weights to use for averaging the shared params.
Returns
-------
params : tuple of hk.Params objects
Same as input ``*params`` but with synchronized shared params.
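    Example
    -------
    A minimal sketch with plain nested dicts standing in for hk.Params (all names are made up);
    the shared ``'torso'`` block is replaced by its (equally weighted) average:
    .. code:: python
        import jax.numpy as jnp
        params_pi = {'torso': {'w': jnp.ones(3)}, 'head_pi': {'w': jnp.zeros(3)}}
        params_v = {'torso': {'w': 3 * jnp.ones(3)}, 'head_v': {'w': jnp.zeros(3)}}
        params_pi, params_v = sync_shared_params(params_pi, params_v)
        # params_pi['torso']['w'] == params_v['torso']['w'] == 2 * jnp.ones(3)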
"""
if len(params) < 2:
return params
if weights is None:
weights = (1.0,) * len(params)
elif len(weights) != len(params):
raise ValueError(
f"len(weights) = {len(weights)} does not match the number of param "
f"dicts provided, which is {len(params)}")
# Ensure that the params are mutable.
params = [hk.data_structures.to_mutable_dict(p) for p in params]
# Count occurrence of top-level keys in all params.
key_counts = Counter(k for p in params for k in p.keys())
for k, count in key_counts.items():
# Skip if weights are not shared.
if count < 2:
continue
# Get shared params.
shared_params = [p[k] for p in params if k in p]
assert len(shared_params) > 1
chex.assert_trees_all_equal_structs(*shared_params)
# Get the relative weights.
relative_weights = [w for w, p in zip(weights, params) if k in p]
scale = sum(relative_weights)
assert scale > 0
relative_weights = [w / scale for w in relative_weights]
# Apply relative weights.
shared_params_weighted = [
jax.tree_util.tree_map(lambda x: w * x, p)
for w, p in zip(relative_weights, shared_params)]
# Compute the weighted average.
shared_params_agg = jax.tree_util.tree_map(
lambda *x: sum(x), *shared_params_weighted)
# Replace individual params by shared, averaged params.
for p in params:
if k in p:
p[k] = shared_params_agg
return tuple(hk.data_structures.to_haiku_dict(p) for p in params)
def tree_ravel(pytree):
r"""
Flatten and concatenate all leaves into a single flat ndarray.
Parameters
----------
pytree : a pytree with ndarray leaves
A typical example is a pytree of model parameters (weights) or gradients with respect to
such model params.
Returns
-------
arr : ndarray with ndim=1
A single flat array.
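    Example
    -------
    A minimal sketch:
    .. code:: python
        import jax.numpy as jnp
        pytree = {'b': jnp.zeros(2), 'w': jnp.zeros((2, 3))}
        tree_ravel(pytree).shape  # (8,)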
"""
return jnp.concatenate([jnp.ravel(leaf) for leaf in jax.tree_util.tree_leaves(pytree)])
def tree_sample(pytree, rng, n=1, replace=False, axis=0, p=None):
r"""
    Take a random sample along the batch axis (axis 0) of every leaf of a pytree.
Parameters
----------
pytree : a pytree with ndarray leaves
A typical example is a pytree of model parameters (weights) or gradients with respect to
such model params.
rng : jax.random.PRNGKey
A pseudo-random number generator key.
n : int, optional
        The sample size. Note that the sample size cannot exceed the batch size of the provided
        :code:`pytree` if :code:`replace=False`.
replace : bool, optional
Whether to sample with replacement.
axis : int, optional
The axis along which to sample.
p : 1d array, optional
The sampling propensities.
Returns
-------
    pytree_sample : pytree with ndarray leaves
        The sampled subtree, whose leaves have batch size :code:`n`.
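    Example
    -------
    A minimal sketch (the leaves only need to share the same batch axis):
    .. code:: python
        import jax
        import jax.numpy as jnp
        pytree = {'x': jnp.arange(10).reshape(5, 2), 'y': jnp.arange(5)}
        rng = jax.random.PRNGKey(42)
        subtree = tree_sample(pytree, rng, n=3)
        # subtree['x'].shape == (3, 2) and subtree['y'].shape == (3,)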
"""
batch_size = _check_leaf_batch_size(pytree)
ix = jax.random.choice(rng, batch_size, shape=(n,), replace=replace, p=p)
return jax.tree_map(lambda x: jnp.take(x, ix, axis=0), pytree)
def unvectorize(f, in_axes=0, out_axes=0):
"""
Apply a batched function on a single instance, which effectively does the
inverse of what :func:`jax.vmap` does.
Parameters
----------
f : callable
A batched function.
in_axes : int or tuple of ints, optional
        Specify the batch axes of the inputs of the function :code:`f`. If left unspecified, this
        defaults to axis 0 for all inputs.
    out_axes : int or tuple of ints, optional
        Specify the batch axes of the outputs of the function :code:`f`. These axes will be
        dropped by :func:`jnp.squeeze <jax.numpy.squeeze>`. If left unspecified, this defaults to
        axis 0 for all outputs.
Returns
-------
f_single : callable
The unvectorized version of :code:`f`.
Examples
--------
Haiku uses a batch-oriented design (although some components may be batch-agnostic). To create a
function that acts on a single instance, we can use :func:`unvectorize` as follows:
.. code:: python
import jax.numpy as jnp
import haiku as hk
import coax
def f(x_batch):
return hk.Linear(11)(x_batch)
rngs = hk.PRNGSequence(42)
x_batch = jnp.zeros(shape=(3, 5)) # batch of 3 instances
x_single = jnp.zeros(shape=(5,)) # single instance
init, f_batch = hk.transform(f)
params = init(next(rngs), x_batch)
y_batch = f_batch(params, next(rngs), x_batch)
assert y_batch.shape == (3, 11)
f_single = coax.unvectorize(f_batch, in_axes=(None, None, 0), out_axes=0)
y_single = f_single(params, next(rngs), x_single)
assert y_single.shape == (11,)
Alternatively, and perhaps more conveniently, we can unvectorize the function before doing the
Haiku transform:
.. code:: python
init, f_single = hk.transform(coax.unvectorize(f))
params = init(next(rngs), x_single)
y_single = f_single(params, next(rngs), x_single)
assert y_single.shape == (11,)
"""
def f_single(*args):
in_axes_ = in_axes
if in_axes is None or isinstance(in_axes, int):
in_axes_ = (in_axes,) * len(args)
if len(args) != len(in_axes_):
raise ValueError("number of in_axes must match the number of function inputs")
vargs = [
arg if axis is None else
jax.tree_map(partial(jnp.expand_dims, axis=axis), arg)
for arg, axis in zip(args, in_axes_)]
out = f(*vargs)
out_axes_ = out_axes
if isinstance(out, tuple):
if out_axes_ is None or isinstance(out_axes_, int):
out_axes_ = (out_axes_,) * len(out)
if len(out) != len(out_axes_):
raise ValueError("number of out_axes must match the number of function outputs")
out = tuple(
x if axis is None else
jax.tree_map(partial(jnp.squeeze, axis=axis), x)
for x, axis in zip(out, out_axes_))
elif out_axes_ is not None:
if not isinstance(out_axes_, int):
raise TypeError(
"out_axes must be an int for functions with a single output; "
f"got: out_axes={out_axes}")
out = jax.tree_map(partial(jnp.squeeze, axis=out_axes), out)
return out
return f_single
def _check_leaf_batch_size(pytree):
""" some boilerplate to extract the batch size with some consistency checks """
leaf, *leaves = jax.tree_util.tree_leaves(pytree)
    if not isinstance(leaf, (onp.ndarray, jnp.ndarray)):
raise TypeError(f"all leaves must be arrays; got type: {type(leaf)}")
if leaf.ndim < 1:
raise TypeError("all leaves must be at least 1d, i.e. (batch_size, ...)")
batch_size = leaf.shape[0]
for leaf in leaves:
        if not isinstance(leaf, (onp.ndarray, jnp.ndarray)):
raise TypeError(f"all leaves must be arrays; got type: {type(leaf)}")
if leaf.ndim < 1:
raise TypeError("all leaves must be at least 1d, i.e. (batch_size, ...)")
if leaf.shape[0] != batch_size:
raise TypeError("all leaves must have the same batch_size")
return batch_size
def stack_trees(*trees):
"""
Apply :func:`jnp.stack <jax.numpy.stack>` to the leaves of a pytree.
Parameters
----------
trees : sequence of pytrees with ndarray leaves
A typical example are pytrees containing the parameters and function states of
a model that should be used in a function which is vectorized by `jax.vmap`. The trees
have to have the same pytree structure.
Returns
-------
pytree : pytree with ndarray leaves
A tuple of pytrees.
"""
return jax.tree_util.tree_map(lambda *args: jnp.stack(args), *zip(*trees))
| 34,240 | 28.774783 | 100 | py |
null | coax-main/coax/utils/_array_test.py | import gymnasium
import jax
import jax.numpy as jnp
import numpy as onp
from haiku import PRNGSequence
from .._base.test_case import TestCase
from ..proba_dists import NormalDist
from ._array import (
argmax,
check_preprocessors,
chunks_pow2,
default_preprocessor,
get_transition_batch,
tree_sample,
)
class TestArrayUtils(TestCase):
def test_argmax_consistent(self):
rngs = PRNGSequence(13)
vec = jax.random.normal(next(rngs), shape=(5,))
mat = jax.random.normal(next(rngs), shape=(3, 5))
ten = jax.random.normal(next(rngs), shape=(3, 5, 7))
self.assertEqual(
argmax(next(rngs), vec), jnp.argmax(vec, axis=-1))
self.assertArrayAlmostEqual(
argmax(next(rngs), mat), jnp.argmax(mat, axis=-1))
self.assertArrayAlmostEqual(
argmax(next(rngs), mat, axis=0), jnp.argmax(mat, axis=0))
self.assertArrayAlmostEqual(
argmax(next(rngs), ten), jnp.argmax(ten, axis=-1))
self.assertArrayAlmostEqual(
argmax(next(rngs), ten, axis=0), jnp.argmax(ten, axis=0))
self.assertArrayAlmostEqual(
argmax(next(rngs), ten, axis=1), jnp.argmax(ten, axis=1))
def test_argmax_random_tiebreaking(self):
rngs = PRNGSequence(13)
vec = jnp.ones(shape=(5,))
mat = jnp.ones(shape=(3, 5))
self.assertEqual(argmax(next(rngs), vec), 2) # not zero
self.assertArrayAlmostEqual(argmax(next(rngs), mat), [1, 1, 3])
def test_check_preprocessors(self):
box = gymnasium.spaces.Box(
low=onp.finfo('float32').min, high=onp.finfo('float32').max, shape=[7])
p0 = NormalDist(box).preprocess_variate
p1 = default_preprocessor(box)
def p2(rng, x):
return 'garbage'
msg = r"need at least two preprocessors in order to run test"
with self.assertRaisesRegex(ValueError, msg):
check_preprocessors(box)
with self.assertRaisesRegex(ValueError, msg):
check_preprocessors(box, p0)
self.assertTrue(check_preprocessors(box, p0, p0, p0))
self.assertFalse(check_preprocessors(box, p0, p1))
self.assertFalse(check_preprocessors(box, p0, p2))
self.assertFalse(check_preprocessors(box, p1, p2))
def test_default_preprocessor(self):
rngs = PRNGSequence(13)
box = gymnasium.spaces.Box(low=0, high=1, shape=(2, 3))
dsc = gymnasium.spaces.Discrete(7)
mbn = gymnasium.spaces.MultiBinary(11)
mds = gymnasium.spaces.MultiDiscrete(nvec=[3, 5])
tup = gymnasium.spaces.Tuple((box, dsc, mbn, mds))
dct = gymnasium.spaces.Dict({'box': box, 'dsc': dsc, 'mbn': mbn, 'mds': mds})
self.assertArrayShape(default_preprocessor(box)(next(rngs), box.sample()), (1, 2, 3))
self.assertArrayShape(default_preprocessor(dsc)(next(rngs), dsc.sample()), (1, 7))
self.assertArrayShape(default_preprocessor(mbn)(next(rngs), mbn.sample()), (1, 11))
self.assertArrayShape(default_preprocessor(mds)(next(rngs), mds.sample())[0], (1, 3))
self.assertArrayShape(default_preprocessor(mds)(next(rngs), mds.sample())[1], (1, 5))
self.assertArrayShape(default_preprocessor(tup)(next(rngs), tup.sample())[0], (1, 2, 3))
self.assertArrayShape(default_preprocessor(tup)(next(rngs), tup.sample())[1], (1, 7))
self.assertArrayShape(default_preprocessor(tup)(next(rngs), tup.sample())[2], (1, 11))
self.assertArrayShape(default_preprocessor(tup)(next(rngs), tup.sample())[3][0], (1, 3))
self.assertArrayShape(default_preprocessor(tup)(next(rngs), tup.sample())[3][1], (1, 5))
self.assertArrayShape(default_preprocessor(dct)(next(rngs), dct.sample())['box'], (1, 2, 3))
self.assertArrayShape(default_preprocessor(dct)(next(rngs), dct.sample())['dsc'], (1, 7))
self.assertArrayShape(default_preprocessor(dct)(next(rngs), dct.sample())['mbn'], (1, 11))
self.assertArrayShape(default_preprocessor(dct)(next(rngs), dct.sample())['mds'][0], (1, 3))
self.assertArrayShape(default_preprocessor(dct)(next(rngs), dct.sample())['mds'][1], (1, 5))
mds_batch = jax.tree_map(lambda *x: jnp.stack(x), *(mds.sample() for _ in range(7)))
self.assertArrayShape(default_preprocessor(mds)(next(rngs), mds_batch)[0], (7, 3))
self.assertArrayShape(default_preprocessor(mds)(next(rngs), mds_batch)[1], (7, 5))
def test_chunks_pow2(self):
chunk_sizes = (2048, 1024, 512, 64, 32, 1)
tn = get_transition_batch(self.env_discrete, batch_size=sum(chunk_sizes))
for chunk, chunk_size in zip(chunks_pow2(tn), chunk_sizes):
self.assertEqual(chunk.batch_size, chunk_size)
def test_tree_sample(self):
rngs = PRNGSequence(42)
tn = get_transition_batch(self.env_discrete, batch_size=5)
tn_sample = tree_sample(tn, next(rngs), n=3)
assert tn_sample.batch_size == 3
tn_sample = tree_sample(tn, next(rngs), n=7, replace=True)
assert tn_sample.batch_size == 7
msg = r"Cannot take a larger sample than population when 'replace=False'"
with self.assertRaisesRegex(ValueError, msg):
tree_sample(tn, next(rngs), n=7, replace=False)
| 5,291 | 42.377049 | 100 | py |
null | coax-main/coax/utils/_array_test_unvectorize.py | import pytest
import jax
import haiku as hk
from ._array import unvectorize
@pytest.fixture
def rngs():
return hk.PRNGSequence(42)
@pytest.fixture
def x_batch():
rng = jax.random.PRNGKey(13)
return jax.random.normal(rng, shape=(7, 11))
@pytest.fixture
def x_single():
rng = jax.random.PRNGKey(17)
return jax.random.normal(rng, shape=(11,))
def test_unvectorize_single_output(rngs, x_batch, x_single):
def f_batch(X):
return hk.Linear(11)(X)
init, f_batch = hk.transform(f_batch)
params = init(next(rngs), x_batch)
y_batch = f_batch(params, next(rngs), x_batch)
assert y_batch.shape == (7, 11)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=0)
y_single = f_single(params, next(rngs), x_single)
assert y_single.shape == (11,)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0,))
msg = r"out_axes must be an int for functions with a single output; got: out_axes=\(0,\)"
with pytest.raises(TypeError, match=msg):
f_single(params, next(rngs), x_single)
f_single = unvectorize(f_batch, in_axes=(None, None, 0, 0), out_axes=(0,))
msg = r"number of in_axes must match the number of function inputs"
with pytest.raises(ValueError, match=msg):
f_single(params, next(rngs), x_single)
def test_unvectorize_multi_output(rngs, x_batch, x_single):
def f_batch(X):
return hk.Linear(11)(X), hk.Linear(13)(X)
init, f_batch = hk.transform(f_batch)
params = init(next(rngs), x_batch)
y_batch = f_batch(params, next(rngs), x_batch)
assert y_batch[0].shape == (7, 11)
assert y_batch[1].shape == (7, 13)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=0)
y_single = f_single(params, next(rngs), x_single)
assert y_single[0].shape == (11,)
assert y_single[1].shape == (13,)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0, None))
y_single = f_single(params, next(rngs), x_single)
assert y_single[0].shape == (11,)
assert y_single[1].shape == (1, 13)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=None)
y_single = f_single(params, next(rngs), x_single)
assert y_single[0].shape == (1, 11,)
assert y_single[1].shape == (1, 13)
f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0,))
msg = r"number of out_axes must match the number of function outputs"
with pytest.raises(ValueError, match=msg):
f_single(params, next(rngs), x_single)
| 2,542 | 31.602564 | 93 | py |
null | coax-main/coax/utils/_dmc_gym.py | """ Adapted from https://github.com/denisyarats/dmc2gym/blob/master/dmc2gym/wrappers.py """
import numpy as onp
from dm_control import suite
from dm_env import specs
from gymnasium import spaces, Env
from gymnasium.envs.registration import register, make, registry
def make_dmc(domain, task, seed=0, max_episode_steps=1000, height=84, width=84, camera_id=0):
"""
Create a Gym environment for a DeepMind Control suite task.
Parameters
----------
domain : str
Name of the domain.
task : str
Name of the task.
seed : int
Random seed.
max_episode_steps : int
Maximum number of steps per episode.
height : int
Height of the observation.
width : int
Width of the observation.
camera_id : int
Camera ID.
Returns
-------
env : gymnasium.Env
Gym environment.
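    Example
    -------
    A minimal sketch (assuming dm_control and its dependencies are installed; the domain/task
    names below come from the DeepMind Control suite):
    .. code:: python
        env = make_dmc('cartpole', 'swingup', seed=13)
        s, info = env.reset()
        s_next, r, terminated, truncated, info = env.step(env.action_space.sample())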
"""
env_id = f"{domain}_{task}-v1"
if env_id not in registry:
register(env_id, entry_point=DmcGymWrapper, kwargs=dict(
domain=domain, task=task, seed=seed, height=height, width=width, camera_id=camera_id),
max_episode_steps=max_episode_steps)
return make(env_id)
class DmcGymWrapper(Env):
    r"""
    Wrapper for DeepMind Control suite environments.
    """
    metadata = {'render_modes': ['rgb_array'], 'render_fps': 30}
def __init__(self, domain, task, seed, height, width, camera_id, render_mode='rgb_array'):
super().__init__()
self.domain = domain
self.task = task
self.seed = seed
self.height = height
self.width = width
self.camera_id = camera_id
self.render_mode = render_mode
self._make_env()
def _make_env(self):
self.dmc_env = suite.load(self.domain, self.task, task_kwargs=dict(random=self.seed))
self.action_space = spec_to_box(self.dmc_env.action_spec(), dtype=onp.float32)
self.observation_space = spec_to_box(
*self.dmc_env.observation_spec().values(), dtype=onp.float32)
def step(self, action):
timestep = self.dmc_env.step(action)
next_state, reward, terminated, truncated, info = flatten_obs(
timestep.observation), timestep.reward, timestep.last(), False, {}
return next_state, reward, terminated, truncated, info
def reset(self, seed=None, options=None):
if seed is not None:
self.seed = seed
self._make_env()
timestep = self.dmc_env.reset()
return flatten_obs(timestep.observation), {}
def render(self):
return self.dmc_env.physics.render(
height=self.height, width=self.width, camera_id=self.camera_id
)
def extract_min_max(s):
r"""
Extract min and max values from a dm_env.specs.ArraySpec.
"""
assert s.dtype == onp.float64 or s.dtype == onp.float32
dim = int(onp.prod(s.shape))
if type(s) == specs.Array:
bound = onp.inf * onp.ones(dim, dtype=onp.float32)
return -bound, bound
elif type(s) == specs.BoundedArray:
zeros = onp.zeros(dim, dtype=onp.float32)
return s.minimum + zeros, s.maximum + zeros
else:
        raise ValueError(f"unsupported spec type: {type(s)}")
def spec_to_box(*spec, dtype):
r"""
Convert dm_env.specs.ArraySpec to gymnasium.spaces.Box.
"""
mins, maxs = [], []
for s in spec:
mn, mx = extract_min_max(s)
mins.append(mn)
maxs.append(mx)
low = onp.concatenate(mins, axis=0).astype(dtype)
high = onp.concatenate(maxs, axis=0).astype(dtype)
assert low.shape == high.shape
return spaces.Box(low, high, dtype=dtype)
def flatten_obs(obs):
r"""
Flatten observation from a dictionary to a numpy array.
"""
obs_pieces = []
for v in obs.values():
flat = onp.array([v]) if onp.isscalar(v) else v.ravel()
obs_pieces.append(flat)
return onp.concatenate(obs_pieces, axis=0, dtype=onp.float32)
| 3,936 | 29.757813 | 98 | py |
null | coax-main/coax/utils/_jit.py |
from inspect import signature
import jax
__all__ = (
'JittedFunc',
'jit',
)
def jit(func, static_argnums=(), donate_argnums=()):
r"""
An alternative of :func:`jax.jit` that returns a picklable JIT-compiled function.
Note that :func:`jax.jit` produces non-picklable functions, because the JIT compilation depends
    on the :code:`device` and :code:`backend`. In order to facilitate serialization, this function
    does not allow the user to specify :code:`device` or :code:`backend`. Instead, :func:`jax.jit` is
called with the default: :code:`jax.jit(..., device=None, backend=None)`.
Check out the original :func:`jax.jit` docs for a more detailed description of the arguments.
Parameters
----------
func : function
Function to be JIT compiled.
static_argnums : int or tuple of ints
Arguments to exclude from JIT compilation.
donate_argnums : int or tuple of ints
To be donated arguments, see :func:`jax.jit`.
Returns
-------
jitted_func : JittedFunc
A picklable JIT-compiled function.
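    Example
    -------
    A minimal sketch; the resulting function can be pickled, e.g. with :func:`coax.utils.dumps`:
    .. code:: python
        import jax.numpy as jnp
        from coax.utils import jit, dumps, loads
        f = jit(lambda x: 13 * x)
        f_copy = loads(dumps(f))
        assert f_copy(jnp.ones(2)).tolist() == [13.0, 13.0]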
"""
return JittedFunc(func, static_argnums, donate_argnums)
class JittedFunc:
__slots__ = ('func', 'static_argnums', 'donate_argnums', '_jitted_func')
def __init__(self, func, static_argnums=(), donate_argnums=()):
self.func = func
self.static_argnums = static_argnums
self.donate_argnums = donate_argnums
self._init_jitted_func()
def __call__(self, *args, **kwargs):
return self._jitted_func(*args, **kwargs)
@property
def __signature__(self):
return signature(self.func)
def __repr__(self):
return self.__class__.__name__ + str(self.__signature__)
def __getstate__(self):
return self.func, self.static_argnums, self.donate_argnums
def __setstate__(self, state):
self.func, self.static_argnums, self.donate_argnums = state
self._init_jitted_func()
def _init_jitted_func(self):
self._jitted_func = jax.jit(
self.func,
static_argnums=self.static_argnums,
donate_argnums=self.donate_argnums)
| 2,163 | 26.05 | 99 | py |
null | coax-main/coax/utils/_misc.py | import os
import time
import logging
from importlib import reload, import_module
from types import ModuleType
import jax.numpy as jnp
import numpy as onp
import pandas as pd
import lz4.frame
import cloudpickle as pickle
from PIL import Image
__all__ = (
'docstring',
'enable_logging',
'dump',
'dumps',
'load',
'loads',
'generate_gif',
'get_env_attr',
'getattr_safe',
'has_env_attr',
'is_policy',
'is_qfunction',
'is_reward_function',
'is_stochastic',
'is_transition_model',
'is_vfunction',
'pretty_repr',
'pretty_print',
'reload_recursive',
'render_episode',
)
def docstring(obj):
r'''
A simple decorator that sets the ``__doc__`` attribute to ``obj.__doc__``
on the decorated object, see example below.
Parameters
----------
obj : object
The objects whose docstring you wish to copy onto the wrapped object.
Examples
--------
>>> def f(x):
... """Some docstring"""
... return x * x
...
>>> def g(x):
... return 13 - x
...
>>> g.__doc__ = f.__doc__
    This can be abbreviated by:
>>> @docstring(f)
... def g(x):
... return 13 - x
...
'''
def decorator(func):
func.__doc__ = obj.__doc__
return func
return decorator
def enable_logging(name=None, level=logging.INFO, output_filepath=None, output_level=None):
r"""
Enable logging output.
This executes the following two lines of code:
.. code:: python
import logging
logging.basicConfig(level=logging.INFO)
Parameters
----------
name : str, optional
Name of the process that is logging. This can be set to whatever you
like.
level : int, optional
Logging level for the default :py:class:`StreamHandler
<logging.StreamHandler>`. The default setting is ``level=logging.INFO``
(which is 20). If you'd like to see more verbose logging messages you
might set ``level=logging.DEBUG``.
output_filepath : str, optional
If provided, a :py:class:`FileHandler <logging.FileHandler>` will be
added to the root logger via:
.. code:: python
file_handler = logging.FileHandler(output_filepath)
logging.getLogger('').addHandler(file_handler)
output_level : int, optional
Logging level for the :py:class:`FileHandler <logging.FileHandler>`. If
left unspecified, this defaults to ``level``, i.e. the same level as
the default :py:class:`StreamHandler <logging.StreamHandler>`.
"""
if name is None:
fmt = '[%(name)s|%(levelname)s] %(message)s'
else:
fmt = f'[{name}|%(name)s|%(levelname)s] %(message)s'
logging.basicConfig(level=level, format=fmt)
if output_filepath is not None:
os.makedirs(os.path.dirname(output_filepath) or '.', exist_ok=True)
fh = logging.FileHandler(output_filepath)
fh.setLevel(level if output_level is None else output_level)
logging.getLogger('').addHandler(fh)
def dump(obj, filepath):
r"""
Save an object to disk.
Parameters
----------
obj : object
Any python object.
filepath : str
Where to store the instance.
Warning
-------
References between objects are only preserved if they are stored as part of a single object, for
example:
.. code:: python
# b has a reference to a
a = [13]
b = {'a': a}
# references preserved
dump((a, b), 'ab.pkl.lz4')
a_new, b_new = load('ab.pkl.lz4')
b_new['a'].append(7)
print(b_new) # {'a': [13, 7]}
print(a_new) # [13, 7] <-- updated
# references not preserved
dump(a, 'a.pkl.lz4')
dump(b, 'b.pkl.lz4')
a_new = load('a.pkl.lz4')
b_new = load('b.pkl.lz4')
b_new['a'].append(7)
print(b_new) # {'a': [13, 7]}
print(a_new) # [13] <-- not updated!!
Therefore, the safest way to create checkpoints is to store the entire state as a single object
like a dict or a tuple.
"""
dirpath = os.path.dirname(filepath)
if dirpath:
os.makedirs(dirpath, exist_ok=True)
with lz4.frame.open(filepath, 'wb') as f:
f.write(pickle.dumps(obj))
def dumps(obj):
r"""
Serialize an object to an lz4-compressed pickle byte-string.
Parameters
----------
obj : object
Any python object.
Returns
-------
s : bytes
An lz4-compressed pickle byte-string.
Warning
-------
References between objects are only preserved if they are stored as part of a single object, for
example:
.. code:: python
# b has a reference to a
a = [13]
b = {'a': a}
# references preserved
s = dumps((a, b))
a_new, b_new = loads(s)
b_new['a'].append(7)
print(b_new) # {'a': [13, 7]}
print(a_new) # [13, 7] <-- updated
# references not preserved
s_a = dumps(a)
s_b = dumps(b)
a_new = loads(s_a)
b_new = loads(s_b)
b_new['a'].append(7)
print(b_new) # {'a': [13, 7]}
print(a_new) # [13] <-- not updated!!
Therefore, the safest way to create checkpoints is to store the entire state as a single object
like a dict or a tuple.
"""
return lz4.frame.compress(pickle.dumps(obj))
def load(filepath):
r"""
Load an object from a file that was created by :func:`dump(obj, filepath) <dump>`.
Parameters
----------
filepath : str
File to load.
"""
with lz4.frame.open(filepath, 'rb') as f:
return pickle.loads(f.read())
def loads(s):
r"""
Load an object from a byte-string that was created by :func:`dumps(obj) <dumps>`.
Parameters
----------
s : str
An lz4-compressed pickle byte-string.
"""
return pickle.loads(lz4.frame.decompress(s))
def _reload(module, reload_all, reloaded, logger):
if isinstance(module, ModuleType):
module_name = module.__name__
elif isinstance(module, str):
module_name, module = module, import_module(module)
else:
raise TypeError(
"'module' must be either a module or str; "
f"got: {module.__class__.__name__}")
for attr_name in dir(module):
attr = getattr(module, attr_name)
check = (
# is it a module?
isinstance(attr, ModuleType)
# has it already been reloaded?
and attr.__name__ not in reloaded
# is it a proper submodule? (or just reload all)
and (reload_all or attr.__name__.startswith(module_name))
)
if check:
_reload(attr, reload_all, reloaded, logger)
logger.debug(f"reloading module: {module_name}")
reload(module)
reloaded.add(module_name)
def reload_recursive(module, reload_external_modules=False):
"""
Recursively reload a module (in order of dependence).
Parameters
----------
module : ModuleType or str
The module to reload.
reload_external_modules : bool, optional
Whether to reload all referenced modules, including external ones which
aren't submodules of ``module``.
"""
logger = logging.getLogger('coax.utils.reload_recursive')
_reload(module, reload_external_modules, set(), logger)
def render_episode(env, policy=None, step_delay_ms=0):
r"""
    Run a single episode, calling ``env.render()`` at each time step.
Parameters
----------
env : gymnasium environment
A gymnasium environment.
policy : callable, optional
        A policy object that is used to pick actions: ``a = policy(s)``. If left unspecified, we'll
just take random actions instead, i.e. ``a = env.action_space.sample()``.
step_delay_ms : non-negative float
The number of milliseconds to wait between consecutive timesteps. This can be used to slow
down the rendering.
"""
from ..wrappers import TrainMonitor
if isinstance(env, TrainMonitor):
env = env.env # unwrap to strip off TrainMonitor
    s, info = env.reset()
    env.render()
    for t in range(int(1e9)):
        a = env.action_space.sample() if policy is None else policy(s)
        s_next, r, done, truncated, info = env.step(a)
        env.render()
        time.sleep(step_delay_ms / 1e3)
        if done or truncated:
            break
        s = s_next
time.sleep(5 * step_delay_ms / 1e3)
def has_env_attr(env, attr, max_depth=100):
r"""
Check if a potentially wrapped environment has a given attribute.
Parameters
----------
env : gymnasium environment
A potentially wrapped environment.
attr : str
The attribute name.
max_depth : positive int, optional
The maximum depth of wrappers to traverse.
"""
e = env
for i in range(max_depth):
if hasattr(e, attr):
return True
if not hasattr(e, 'env'):
break
e = e.env
return False
def get_env_attr(env, attr, default='__ERROR__', max_depth=100):
r"""
Get the given attribute from a potentially wrapped environment.
Note that the wrapped envs are traversed from the outside in. Once the
attribute is found, the search stops. This means that an inner wrapped env
may carry the same (possibly conflicting) attribute. This situation is
*not* resolved by this function.
Parameters
----------
env : gymnasium environment
A potentially wrapped environment.
attr : str
The attribute name.
max_depth : positive int, optional
The maximum depth of wrappers to traverse.
"""
e = env
for i in range(max_depth):
if hasattr(e, attr):
return getattr(e, attr)
if not hasattr(e, 'env'):
break
e = e.env
if default == '__ERROR__':
raise AttributeError("env is missing attribute: {}".format(attr))
return default
def generate_gif(env, filepath, policy=None, resize_to=None, duration=50, max_episode_steps=None):
r"""
Store a gif from the episode frames.
Parameters
----------
env : gymnasium environment
The environment to record from.
filepath : str
Location of the output gif file.
policy : callable, optional
        A policy object that is used to pick actions: ``a = policy(s)``. If left unspecified, we'll
just take random actions instead, i.e. ``a = env.action_space.sample()``.
resize_to : tuple of ints, optional
The size of the output frames, ``(width, height)``. Notice the
ordering: first **width**, then **height**. This is the convention PIL
uses.
duration : float, optional
Time between frames in the animated gif, in milliseconds.
max_episode_steps : int, optional
        The maximum number of steps in the episode. If left unspecified, we'll
attempt to get the value from ``env.spec.max_episode_steps`` and if
that fails we default to 10000.
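    Example
    -------
    A minimal sketch (assuming a gymnasium environment created with ``render_mode='rgb_array'``;
    the output file path is just an illustration):
    .. code:: python
        import gymnasium
        env = gymnasium.make('CartPole-v1', render_mode='rgb_array')
        generate_gif(env, filepath='./data/cartpole.gif', resize_to=(320, 240))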
"""
logger = logging.getLogger('generate_gif')
max_episode_steps = max_episode_steps \
or getattr(getattr(env, 'spec'), 'max_episode_steps', 10000)
from ..wrappers import TrainMonitor
if isinstance(env, TrainMonitor):
env = env.env # unwrap to strip off TrainMonitor
s, info = env.reset()
    # check if render_mode is set to 'rgb_array'
if not (env.render_mode == 'rgb_array' or isinstance(env.render(), onp.ndarray)):
raise RuntimeError("Cannot generate GIF if env.render_mode != 'rgb_array'.")
# collect frames
frames = []
for t in range(max_episode_steps):
a = env.action_space.sample() if policy is None else policy(s)
s_next, r, done, truncated, info = env.step(a)
# store frame
frame = env.render()
frame = Image.fromarray(frame)
frame = frame.convert('P', palette=Image.ADAPTIVE)
if resize_to is not None:
if not (isinstance(resize_to, tuple) and len(resize_to) == 2):
raise TypeError(
"expected a tuple of size 2, resize_to=(w, h)")
frame = frame.resize(resize_to)
frames.append(frame)
if done or truncated:
break
s = s_next
# store last frame
frame = env.render()
frame = Image.fromarray(frame)
frame = frame.convert('P', palette=Image.ADAPTIVE)
if resize_to is not None:
frame = frame.resize(resize_to)
frames.append(frame)
# generate gif
os.makedirs(os.path.dirname(filepath) or '.', exist_ok=True)
frames[0].save(
fp=filepath, format='GIF', append_images=frames[1:], save_all=True,
duration=duration, loop=0)
logger.info("recorded episode to: {}".format(filepath))
def is_transition_model(obj):
r"""
Check whether an object is a dynamics model.
Parameters
----------
obj
Object to check.
Returns
-------
bool
Whether ``obj`` is a dynamics function.
"""
# import at runtime to avoid circular dependence
from .._core.transition_model import TransitionModel
from .._core.stochastic_transition_model import StochasticTransitionModel
return isinstance(obj, (TransitionModel, StochasticTransitionModel))
def is_reward_function(obj):
r"""
Check whether an object is a dynamics model.
Parameters
----------
obj
Object to check.
Returns
-------
bool
Whether ``obj`` is a dynamics function.
"""
# import at runtime to avoid circular dependence
from .._core.reward_function import RewardFunction
from .._core.stochastic_reward_function import StochasticRewardFunction
return isinstance(obj, (RewardFunction, StochasticRewardFunction))
def is_vfunction(obj):
r"""
Check whether an object is a :class:`state value function <coax.V>`, or V-function.
Parameters
----------
obj
Object to check.
Returns
-------
bool
Whether ``obj`` is a V-function.
"""
# import at runtime to avoid circular dependence
from .._core.v import V
from .._core.stochastic_v import StochasticV
return isinstance(obj, (V, StochasticV))
def is_qfunction(obj):
r"""
Check whether an object is a :class:`state-action value function <coax.Q>`, or Q-function.
Parameters
----------
obj
Object to check.
Returns
-------
bool
        Whether ``obj`` is a Q-function.
"""
# import at runtime to avoid circular dependence
from .._core.q import Q
from .._core.stochastic_q import StochasticQ
from .._core.successor_state_q import SuccessorStateQ
return isinstance(obj, (Q, StochasticQ, SuccessorStateQ))
def is_stochastic(obj):
r"""
Check whether an object is a stochastic function approximator.
Parameters
----------
obj
Object to check.
Returns
-------
bool
Whether ``obj`` is a stochastic function approximator.
"""
# import at runtime to avoid circular dependence
from .._core.policy import Policy
from .._core.stochastic_v import StochasticV
from .._core.stochastic_q import StochasticQ
from .._core.stochastic_reward_function import StochasticRewardFunction
from .._core.stochastic_transition_model import StochasticTransitionModel
return isinstance(obj, (
Policy, StochasticV, StochasticQ, StochasticRewardFunction,
StochasticTransitionModel))
def is_policy(obj):
r"""
Check whether an object is a :doc:`policy <policies>`.
Parameters
----------
obj
Object to check.
Returns
-------
bool
Whether ``obj`` is a policy.
"""
# import at runtime to avoid circular dependence
from .._core.policy import Policy
from .._core.value_based_policy import EpsilonGreedy, BoltzmannPolicy
return isinstance(obj, (Policy, EpsilonGreedy, BoltzmannPolicy))
def pretty_repr(o, d=0):
r"""
Generate pretty :func:`repr` (string representions).
Parameters
----------
o : object
Any object.
d : int, optional
The depth of the recursion. This is used to determine the indentation level in recursive
calls, so we typically keep this 0.
Returns
-------
pretty_repr : str
A nicely formatted string representation of :code:`object`.
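    Example
    -------
    A minimal sketch:
    .. code:: python
        import numpy as onp
        print(pretty_repr({'x': onp.zeros((3, 5)), 'y': [1, 2]}))
        # prints something along the lines of:
        # {
        #     'x': array(shape=(3, 5), dtype=float64, min=0, median=0, max=0),
        #     'y': [
        #         1,
        #         2]}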
"""
i = " " # indentation string
if isinstance(o, (jnp.ndarray, onp.ndarray, pd.Index)):
try:
summary = f", min={onp.min(o):.3g}, median={onp.median(o):.3g}, max={onp.max(o):.3g}"
except Exception:
summary = ""
return f"array(shape={o.shape}, dtype={str(o.dtype)}{summary:s})"
if isinstance(o, (pd.Series, pd.DataFrame)):
sep = ',\n' + i * (d + 1)
items = zip(('index', 'data'), (o.index, o.values))
body = sep + sep.join(f"{k}={pretty_repr(v, d + 1)}" for k, v in items)
return f"{type(o).__name__}({body})"
if hasattr(o, '_asdict'):
sep = '\n' + i * (d + 1)
body = sep + sep.join(f"{k}={pretty_repr(v, d + 1)}" for k, v in o._asdict().items())
return f"{type(o).__name__}({body})"
if isinstance(o, tuple):
sep = ',\n' + i * (d + 1)
body = '\n' + i * (d + 1) + sep.join(f"{pretty_repr(v, d + 1)}" for v in o)
return f"({body})"
if isinstance(o, list):
sep = ',\n' + i * (d + 1)
body = '\n' + i * (d + 1) + sep.join(f"{pretty_repr(v, d + 1)}" for v in o)
return f"[{body}]"
if hasattr(o, 'items'):
sep = ',\n' + i * (d + 1)
body = '\n' + i * (d + 1) + sep.join(
f"{repr(k)}: {pretty_repr(v, d + 1)}" for k, v in o.items())
return f"{{{body}}}"
return repr(o)
def pretty_print(obj):
r"""
Print :func:`pretty_repr(obj) <coax.utils.pretty_repr>`.
Parameters
----------
obj : object
Any object.
"""
print(pretty_repr(obj))
def getattr_safe(obj, name, default=None):
"""
A safe implementation of :func:`getattr <python3:getattr>`. If an attr
exists, but calling getattr raises an error, this implementation will
silence the error and return the ``default`` value.
    Parameters
    ----------
obj : object
Any object.
name : str
The name of the attribute.
default : object, optional
The default value to return if getattr fails.
Returns
-------
attr : object
The attribute ``obj.name`` or ``default``.
"""
attr = default
try:
attr = getattr(obj, name, default)
except Exception:
pass
return attr
| 18,988 | 23.470361 | 100 | py |
null | coax-main/coax/utils/_misc_test.py | import os
import tempfile
from ..utils import jit
from ._misc import dump, dumps, load, loads
def test_dump_load():
with tempfile.TemporaryDirectory() as d:
a = [13]
b = {'a': a}
# references preserved
dump((a, b), os.path.join(d, 'ab.pkl.lz4'))
a_new, b_new = load(os.path.join(d, 'ab.pkl.lz4'))
b_new['a'].append(7)
assert b_new['a'] == [13, 7]
assert a_new == [13, 7]
# references not preserved
dump(a, os.path.join(d, 'a.pkl.lz4'))
dump(b, os.path.join(d, 'b.pkl.lz4'))
a_new = load(os.path.join(d, 'a.pkl.lz4'))
b_new = load(os.path.join(d, 'b.pkl.lz4'))
b_new['a'].append(7)
assert b_new['a'] == [13, 7]
assert a_new == [13]
def test_dumps_loads():
a = [13]
b = {'a': a}
# references preserved
s = dumps((a, b))
a_new, b_new = loads(s)
b_new['a'].append(7)
assert b_new['a'] == [13, 7]
assert a_new == [13, 7]
# references not preserved
s_a = dumps(a)
s_b = dumps(b)
a_new = loads(s_a)
b_new = loads(s_b)
b_new['a'].append(7)
assert b_new['a'] == [13, 7]
assert a_new == [13]
def test_dumps_loads_jitted_function():
@jit
def f(x):
return 13 * x
# references preserved
s = dumps(f)
f_new = loads(s)
assert f_new(11) == f(11) == 143
| 1,377 | 21.966667 | 58 | py |
null | coax-main/coax/utils/_quantile_funcs.py | import haiku as hk
import jax
import jax.numpy as jnp
import numpy as onp
__all__ = (
'quantiles',
'quantiles_uniform',
'quantile_cos_embedding'
)
def quantiles_uniform(rng, batch_size, num_quantiles=32):
"""
Generate :code:`batch_size` quantile fractions that split the interval :math:`[0, 1]`
into :code:`num_quantiles` uniformly distributed fractions.
Parameters
----------
rng : jax.random.PRNGKey
A pseudo-random number generator key.
batch_size : int
The batch size for which the quantile fractions should be generated.
num_quantiles : int, optional
The number of quantile fractions. By default 32.
Returns
-------
quantile_fractions : ndarray
Array of quantile fractions.
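    Example
    -------
    A minimal sketch:
    .. code:: python
        import jax
        rng = jax.random.PRNGKey(13)
        taus = quantiles_uniform(rng, batch_size=2, num_quantiles=4)
        # taus.shape == (2, 4); each row is increasing and ends at (approximately) 1.0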
"""
rngs = hk.PRNGSequence(rng)
quantile_fractions = jax.random.uniform(next(rngs), shape=(batch_size, num_quantiles))
quantile_fraction_differences = quantile_fractions / \
jnp.sum(quantile_fractions, axis=-1, keepdims=True)
quantile_fractions = jnp.cumsum(quantile_fraction_differences, axis=-1)
return quantile_fractions
def quantiles(batch_size, num_quantiles=200):
r"""
Generate :code:`batch_size` quantile fractions that split the interval :math:`[0, 1]`
into :code:`num_quantiles` equally spaced fractions.
Parameters
----------
batch_size : int
The batch size for which the quantile fractions should be generated.
num_quantiles : int, optional
The number of quantile fractions. By default 200.
Returns
-------
quantile_fractions : ndarray
Array of quantile fractions.
"""
quantile_fractions = jnp.arange(num_quantiles, dtype=jnp.float32) / num_quantiles
quantile_fractions = jnp.tile(quantile_fractions[None, :], [batch_size, 1])
return quantile_fractions
def quantile_cos_embedding(quantile_fractions, n=64):
r"""
Embed the given quantile fractions :math:`\tau` in an `n` dimensional space
using cosine basis functions.
.. math::
        \phi_i(\tau) = \cos(i\pi\tau) \qquad 1 \leq i \leq n
Parameters
----------
quantile_fractions : ndarray
Array of quantile fractions :math:`\tau` to be embedded.
n : int
The dimensionality of the embedding. By default 64.
Returns
-------
quantile_embs : ndarray
        Array of quantile embeddings with shape `quantile_fractions.shape + (n,)`.
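    Example
    -------
    A minimal sketch:
    .. code:: python
        import jax.numpy as jnp
        taus = jnp.array([[0.25, 0.5, 0.75]])
        quantile_cos_embedding(taus, n=64).shape  # (1, 3, 64)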
"""
quantile_fractions = jnp.tile(quantile_fractions[..., None],
[1] * quantile_fractions.ndim + [n])
quantiles_emb = (
jnp.arange(1, n + 1, 1)
* onp.pi
* quantile_fractions)
quantiles_emb = jnp.cos(quantiles_emb)
return quantiles_emb
| 2,764 | 27.505155 | 90 | py |
null | coax-main/coax/utils/_rolling.py | from collections import deque
class RollingAverage:
def __init__(self, n=100):
self._value = 0.
self._deque = deque(maxlen=n)
@property
def value(self):
return self._value
def update(self, observed_value):
if len(self._deque) == self._deque.maxlen:
self._value += (observed_value - self._deque.popleft()) / self._deque.maxlen
self._deque.append(observed_value)
else:
self._deque.append(observed_value)
self._value += (observed_value - self._value) / len(self._deque)
return self._value
class ExponentialAverage:
def __init__(self, n=100):
self._value = 0.
self._len = 0
self._maxlen = n
@property
def value(self):
return self._value
def update(self, observed_value):
if self._len < self._maxlen:
self._len += 1
self._value += (observed_value - self._value) / self._len
return self._value
| 992 | 25.131579 | 88 | py |
null | coax-main/coax/utils/_segment_tree.py | import numpy as onp
__all__ = (
'SegmentTree',
'SumTree',
'MinTree',
'MaxTree',
)
class SegmentTree:
r"""
A `segment tree <https://en.wikipedia.org/wiki/Segment_tree>`_ data structure that allows
for batched updating and batched partial-range (segment) reductions.
Parameters
----------
capacity : positive int
Number of values to accommodate.
reducer : function
The reducer function: :code:`(float, float) -> float`.
init_value : float
The unit element relative to the reducer function. Some typical examples are: 0 if reducer
is :data:`add <numpy.add>`, 1 for :data:`multiply <numpy.multiply>`, :math:`-\infty` for
:data:`maximum <numpy.maximum>`, :math:`\infty` for :data:`minimum <numpy.minimum>`.
Warning
-------
The :attr:`values` attribute and square-bracket lookups (:code:`tree[level, index]`) return
references of the underlying storage array. Therefore, make sure that downstream code doesn't
update these values in-place, which would corrupt the segment tree structure.
"""
def __init__(self, capacity, reducer, init_value):
self.capacity = capacity
self.reducer = reducer
self.init_value = float(init_value)
self._height = int(onp.ceil(onp.log2(capacity))) + 1 # the +1 is for the values themselves
self._arr = onp.full(shape=(2 ** self.height - 1), fill_value=self.init_value)
@property
def height(self):
r""" The height of the tree :math:`h\sim\log(\text{capacity})`. """
return self._height
@property
def root_value(self):
r"""
The aggregated value, equivalent to
:func:`reduce(reducer, values, init_value) <functools.reduce>`.
"""
return self._arr[0]
@property
def values(self):
r""" The values stored at the leaves of the tree. """
start = 2 ** (self.height - 1) - 1
stop = start + self.capacity
return self._arr[start:stop]
def __getitem__(self, lookup):
if isinstance(lookup, int):
level_offset, level_size = self._check_level_lookup(lookup)
return self._arr[level_offset:(level_offset + level_size)]
if isinstance(lookup, tuple) and len(lookup) == 1:
level, = lookup
return self[level]
if isinstance(lookup, tuple) and len(lookup) == 2:
level, index = lookup
return self[level][index]
raise IndexError(
"tree lookup must be of the form: tree[level] or tree[level, index], "
"where 'level' is an int and 'index' is a 1d array lookup")
def set_values(self, idx, values):
r"""
Set or update the :attr:`values`.
Parameters
----------
idx : 1d array of ints
The indices of the values to be updated. If you wish to update all values use ellipses
instead, e.g. :code:`tree.set_values(..., values)`.
values : 1d array of floats
The new values.
"""
idx, level_offset, level_size = self._check_idx(idx)
# update leaf-node values
self._arr[level_offset + (idx % level_size)] = values
for level in range(self.height - 2, -1, -1):
idx = onp.unique(idx // 2)
left_child = level_offset + 2 * idx
right_child = left_child + 1
level_offset = 2 ** level - 1
parent = level_offset + idx
self._arr[parent] = self.reducer(self._arr[left_child], self._arr[right_child])
def partial_reduce(self, start=0, stop=None):
r"""
Reduce values over a partial range of indices. This is an efficient, batched implementation
        of :func:`reduce(reducer, values[start:stop], init_value) <functools.reduce>`.
Parameters
----------
start : int or array of ints
The lower bound of the range (inclusive).
stop : int or array of ints, optional
            The upper bound of the range (exclusive). If left unspecified, this defaults to
            :attr:`capacity`.
Returns
-------
value : float
The result of the partial reduction.
"""
# NOTE: This is an iterative implementation, which is a lot uglier than a recursive one.
# The reason why we use an iterative approach is that it's easier for batch-processing.
# i and j are 1d arrays (indices for self._arr)
i, j = self._check_start_stop_to_i_j(start, stop)
# trivial case
done = (i == j)
if done.all():
return self._arr[i]
# left/right accumulators (mask one of them to avoid over-counting if i == j)
a, b = self._arr[i], onp.where(done, self.init_value, self._arr[j])
# number of nodes in higher levels
level_offset = 2 ** (self.height - 1) - 1
# we start from the leaves and work up towards the root
for level in range(self.height - 2, -1, -1):
# get parent indices
level_offset_parent = 2 ** level - 1
i_parent = (i - level_offset) // 2 + level_offset_parent
j_parent = (j - level_offset) // 2 + level_offset_parent
# stop when we have a shared parent (possibly the root node, but not necessarily)
done |= (i_parent == j_parent)
if done.all():
return self.reducer(a, b)
# only accumulate right-child value if 'i' was a left child of 'i_parent'
a = onp.where((i % 2 == 1) & ~done, self.reducer(a, self._arr[i + 1]), a)
# only accumulate left-child value if 'j' was a right child of 'j_parent'
b = onp.where((j % 2 == 0) & ~done, self.reducer(b, self._arr[j - 1]), b)
# prepare for next loop
i, j, level_offset = i_parent, j_parent, level_offset_parent
assert False, 'this point should not be reached'
def __repr__(self):
s = ""
for level in range(self.height):
s += f"\n level={level} : {repr(self[level])}"
return f"{type(self).__name__}({s})"
def _check_level_lookup(self, level):
if not isinstance(level, int):
raise IndexError(f"level lookup must be an int, got: {type(level)}")
if not (-self.height <= level < self.height):
raise IndexError(f"level index {level} is out of bounds; tree height: {self.height}")
level %= self.height
level_offset = 2 ** level - 1
level_size = min(2 ** level, self.capacity)
return level_offset, level_size
def _check_level(self, level):
if level < -self.height or level >= self.height:
raise IndexError(f"tree level index {level} out of range; tree height: {self.height}")
return level % self.height
def _check_idx(self, idx):
""" some boiler plate to turn any compatible idx into a 1d integer array """
level_offset, level_size = self._check_level_lookup(self.height - 1)
if isinstance(idx, int):
idx = onp.asarray([idx], dtype='int32')
if idx is None or idx is Ellipsis:
idx = onp.arange(level_size, dtype='int32')
elif isinstance(idx, list) and all(isinstance(x, int) for x in idx):
idx = onp.asarray(idx, dtype='int32')
elif (isinstance(idx, onp.ndarray)
and onp.issubdtype(idx.dtype, onp.integer)
and idx.ndim <= 1):
idx = idx.reshape(-1)
else:
raise IndexError("idx must be an int or a 1d integer array")
if not onp.all((idx < level_size) & (idx >= -level_size)):
raise IndexError("one of more entries in idx are out or range")
return idx % level_size, level_offset, level_size
def _check_start_stop_to_i_j(self, start, stop):
""" some boiler plate to turn (start, stop) into left/right index arrays (i, j) """
start_orig, stop_orig = start, stop
# convert 'start' index to 1d array
if isinstance(start, int):
start = onp.array([start])
if not (isinstance(start, onp.ndarray)
and start.ndim == 1
and onp.issubdtype(start.dtype, onp.integer)):
raise TypeError("'start' must be an int or a 1d integer array")
# convert 'stop' index to 1d array
if stop is None:
stop = onp.full_like(start, self.capacity)
if isinstance(stop, int):
stop = onp.full_like(start, stop)
if not (isinstance(stop, onp.ndarray)
and stop.ndim == 1
and onp.issubdtype(stop.dtype, onp.integer)):
raise TypeError("'stop' must be an int or a 1d integer array")
# ensure that 'start' is the same size as 'stop'
if start.size == 1 and stop.size > 1:
start = onp.full_like(stop, start[0])
# check compatible shapes
if start.shape != stop.shape:
raise ValueError(
f"shapes must be equal, got: start.shape: {start.shape}, stop.shape: {stop.shape}")
# convert to (i, j), where j is the *inclusive* version of 'stop' (which is exclusive)
level_offset, level_size = self._check_level_lookup(self.height - 1)
i = level_offset + start % level_size
j = level_offset + (stop - 1) % level_size
# check consistency of ranges
if not onp.all((i >= level_offset) & (j < level_offset + level_size) & (i <= j)):
raise IndexError(
f"inconsistent ranges detected from (start, stop) = ({start_orig}, {stop_orig})")
return i, j
class SumTree(SegmentTree):
r"""
A sum-tree data structure that allows for batched updating and batched weighted sampling.
Both update and sampling operations have a time complexity of :math:`\mathcal{O}(\log N)` and a
memory footprint of :math:`\mathcal{O}(N)`, where :math:`N` is the length of the underlying
:attr:`values`.
Parameters
----------
capacity : positive int
Number of values to accommodate.
    random_seed : int, optional
        The seed for the internal pseudo-random number generator, which is used by the
        :attr:`sample` method.
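    Example
    -------
    A minimal sketch of weighted sampling (the values are illustrative):
    .. code:: python
        import numpy as onp
        tree = SumTree(capacity=4, random_seed=13)
        tree.set_values(..., onp.array([1., 2., 3., 4.]))
        tree.root_value   # 10.0
        tree.sample(n=5)  # 5 indices, where index i is drawn with probability values[i] / 10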
"""
def __init__(self, capacity, random_seed=None):
super().__init__(capacity=capacity, reducer=onp.add, init_value=0)
self.random_seed = random_seed
@property
def random_seed(self):
return self._random_seed
@random_seed.setter
def random_seed(self, new_random_seed):
self._rnd = onp.random.RandomState(new_random_seed)
self._random_seed = new_random_seed
def sample(self, n):
r"""
        Sample array indices using weighted sampling, where the sample weights are proportional to
the values stored in :attr:`values`.
Parameters
----------
n : positive int
The number of samples to return.
Returns
-------
idx : array of ints
The sampled indices, shape: (n,)
Warning
-------
This method presumes (but doesn't check) that all :attr:`values` stored in the tree are
non-negative.
"""
if not (isinstance(n, int) and n > 0):
raise TypeError("n must be a positive integer")
return self.inverse_cdf(self._rnd.rand(n))
def inverse_cdf(self, u):
r"""
Inverse of the cumulative distribution function (CDF) of the categorical distribution
:math:`\text{Cat}(p)`, where :math:`p` are the normalized values :math:`p_i=`
:attr:`values[i] / sum(values) <values>`.
This function provides the machinery for the :attr:`sample` method.
Parameters
----------
u : float or 1d array of floats
            One or more numbers :math:`u\in[0,1]`. These are typically sampled from
:math:`\text{Unif([0, 1])}`.
Returns
-------
idx : array of ints
The indices associated with :math:`u`, shape: (n,)
Warning
-------
This method presumes (but doesn't check) that all :attr:`values` stored in the tree are
non-negative.
"""
# NOTE: This is an iterative implementation, which is a lot uglier than a recursive one.
# The reason why we use an iterative approach is that it's easier for batch-processing.
if self.root_value <= 0:
raise RuntimeError("the root_value must be positive")
# init (will be updated in loop)
u, isscalar = self._check_u(u)
values = u * self.root_value
        idx = onp.zeros_like(values, dtype='int32')  # this is ultimately what we'll return
level_offset_parent = 0 # number of nodes in levels above parent
# iterate down, from the root to the leaves
for level in range(1, self.height):
# get child indices
level_offset = 2 ** level - 1
left_child_idx = (idx - level_offset_parent) * 2 + level_offset
right_child_idx = left_child_idx + 1
# update (idx, values, level_offset_parent)
left_child_values = self._arr[left_child_idx]
pick_left_child = left_child_values > values
idx = onp.where(pick_left_child, left_child_idx, right_child_idx)
values = onp.where(pick_left_child, values, values - left_child_values)
level_offset_parent = level_offset
idx = idx - level_offset_parent
return idx[0] if isscalar else idx
def _check_u(self, u):
""" some boilerplate to check validity of 'u' array """
isscalar = False
if isinstance(u, (float, int)):
u = onp.array([u], dtype='float')
isscalar = True
if isinstance(u, list) and all(isinstance(x, (float, int)) for x in u):
u = onp.asarray(u, dtype='float')
if not (isinstance(u, onp.ndarray)
and u.ndim == 1 and onp.issubdtype(u.dtype, onp.floating)):
raise TypeError("'u' must be a float or a 1d array of floats")
if onp.any(u > 1) or onp.any(u < 0):
raise ValueError("all values in 'u' must lie in the unit interval [0, 1]")
return u, isscalar
class MinTree(SegmentTree):
r"""
A min-tree data structure, which is a :class:`SegmentTree` whose reducer is :data:`minimum
<numpy.minimum>`.
Parameters
----------
capacity : positive int
Number of values to accommodate.
"""
def __init__(self, capacity):
super().__init__(capacity=capacity, reducer=onp.minimum, init_value=float('inf'))
class MaxTree(SegmentTree):
r"""
A max-tree data structure, which is a :class:`SegmentTree` whose reducer is :data:`maximum
<numpy.maximum>`.
Parameters
----------
capacity : positive int
Number of values to accommodate.
"""
def __init__(self, capacity):
super().__init__(capacity=capacity, reducer=onp.maximum, init_value=-float('inf'))
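# Illustrative sketch (not part of the coax API): a MinTree gives cheap access to the smallest
# stored value (e.g. the smallest priority in a prioritized replay buffer), and `partial_reduce`
# applies the reducer to a contiguous slice only. The numbers are arbitrary.
def _example_min_tree():
    tree = MinTree(capacity=8)
    tree.set_values(..., onp.array([13., 7., 11., 17., 19., 5., 3., 23.]))
    print(tree.root_value)            # 3.0, the global minimum
    print(tree.partial_reduce(0, 4))  # 7.0, the minimum of the first four entries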
| 15,361 | 33.290179 | 99 | py |
null | coax-main/coax/utils/_segment_tree_test.py | import pytest
import numpy as onp
import pandas as pd
from ._segment_tree import SumTree, MinTree
@pytest.fixture
def sum_tree():
return SumTree(capacity=14)
@pytest.fixture
def min_tree():
tr = MinTree(capacity=8)
tr.set_values(..., onp.array([13, 7, 11, 17, 19, 5, 3, 23]))
return tr
def test_sum_tree_basic(sum_tree):
assert sum_tree.values.shape == (14,)
assert sum_tree.height == 5
assert sum_tree.values.sum() == sum_tree.root_value == 0
def test_min_tree_basic(min_tree):
assert min_tree.values.shape == (8,)
assert min_tree.height == 4
assert min_tree.values.min() == min_tree.root_value == 3
def test_set_values_none(sum_tree):
values = onp.arange(1, 15)
sum_tree.set_values(None, values)
assert sum_tree.values.sum() == sum_tree.root_value == values.sum() > 0
def test_set_values_ellipsis(sum_tree):
values = onp.arange(1, 15)
sum_tree.set_values(..., values)
assert sum_tree.values.sum() == sum_tree.root_value == values.sum() > 0
def test_set_values_with_idx(sum_tree):
idx = onp.array([2, 6, 5, 12, 13])
values = onp.array([7., 13., 11., 17., 5.])
sum_tree.set_values(idx, values)
assert sum_tree.values.sum() == sum_tree.root_value == values.sum() > 0
def test_partial_reduce_empty_range(sum_tree):
msg = r'inconsistent ranges detected from \(start, stop\) = \(1, 1\)'
with pytest.raises(IndexError, match=msg):
sum_tree.partial_reduce(1, 1)
def test_partial_reduce_all(sum_tree):
values = onp.arange(1, 15)
sum_tree.set_values(..., values)
assert sum_tree.partial_reduce() == sum_tree.values.sum() == sum_tree.root_value == values.sum()
@pytest.mark.parametrize('i,j', [(1, 2), (13, 14), (3, 8), (0, None), (0, 3), (7, None)])
def test_partial_reduce(sum_tree, i, j):
values = onp.arange(100, 114)
sum_tree.set_values(..., values)
assert sum_tree.partial_reduce(i, j) == sum_tree.values[i:j].sum() == values[i:j].sum() > 0
def test_partial_reduce_array_sum(sum_tree):
i, j = onp.array([0, 8, 3, 0, 0]), onp.array([1, 13, 14, 5, -1])
values = onp.arange(100, 114)
sum_tree.set_values(..., values)
expected = onp.vectorize(lambda i, j: values[i:j].sum())
onp.testing.assert_allclose(sum_tree.partial_reduce(i, j), expected(i, j))
def test_partial_reduce_array_min(min_tree):
i, j = onp.array([1, 6, 0]), onp.array([8, 7, 5])
expected = onp.vectorize(lambda i, j: min_tree.values[i:j].min())
onp.testing.assert_allclose(min_tree.partial_reduce(i, j), expected(i, j))
@pytest.mark.parametrize('s', [slice(0, 1), [0, -1], slice(6, 9), slice(None), 0, 10, -1])
def test_inverse_cdf(s):
tr = SumTree(capacity=8)
tr.set_values(..., onp.array([13, 7, 11, 17, 19, 5, 3, 23]))
df = pd.DataFrame(
columns=['uniform', 'idx', 'value'],
data=onp.array([
[0, 0, 13],
[5, 0, 13],
[12, 0, 13],
[13, 1, 7],
[14, 1, 7],
[19, 1, 7],
[20, 2, 11],
[25, 2, 11],
[30, 2, 11],
[31, 3, 17],
[40, 3, 17],
[47, 3, 17],
[48, 4, 19],
[50, 4, 19],
[66, 4, 19],
[67, 5, 5],
[70, 5, 5],
[71, 5, 5],
[72, 6, 3],
[73, 6, 3],
[74, 6, 3],
[75, 7, 23],
[80, 7, 23],
[97, 7, 23],
[98, 7, 23],
]))
df['uniform'] /= df['uniform'].max() # normalize to unit interval [0, 1]
actual = tr.inverse_cdf(df['uniform'].values[s])
expected = df['idx'].values[s]
onp.testing.assert_allclose(actual, expected)
def test_sample_distribution():
tr = SumTree(capacity=8, random_seed=13)
tr.set_values(..., onp.array([0, 7, 0, 17, 19, 5, 3, 0]))
# this also demonstrates how fast the batched implementation is
idx = tr.sample(n=1000000)
compare = pd.merge(
pd.Series(tr.values / tr.values.sum()).rename('expected'),
pd.Series(idx).value_counts(normalize=True).rename('empirical'),
left_index=True, right_index=True, how='left').fillna(0.)
print(compare)
onp.testing.assert_allclose(compare.empirical, compare.expected, rtol=1e-2)
| 4,278 | 29.564286 | 100 | py |
null | coax-main/coax/value_losses/__init__.py | r"""
Value Losses
============
.. autosummary::
:nosignatures:
coax.value_losses.mse
coax.value_losses.huber
coax.value_losses.logloss
coax.value_losses.logloss_sign
coax.value_losses.quantile_huber
----
This is a collection of loss functions that may be used for learning a value function. They are just
ordinary loss functions known from supervised learning.
Object Reference
----------------
.. autofunction:: coax.value_losses.mse
.. autofunction:: coax.value_losses.huber
.. autofunction:: coax.value_losses.logloss
.. autofunction:: coax.value_losses.logloss_sign
.. autofunction:: coax.value_losses.quantile_huber
"""
from ._losses import mse, huber, logloss, logloss_sign, quantile_huber
__all__ = (
'mse',
'huber',
'logloss',
'logloss_sign',
'quantile_huber'
)
| 825 | 19.146341 | 100 | py |
null | coax-main/coax/value_losses/_losses.py | import jax
import jax.numpy as jnp
__all__ = (
'mse',
'huber',
'logloss',
'logloss_sign',
)
def mse(y_true, y_pred, w=None):
r"""
Ordinary mean-squared error loss function.
.. math::
L\ =\ \frac12(\hat{y} - y)^2
.. image:: /_static/img/mse.svg
:alt: Mean-Squared Error loss
:width: 320px
:align: center
Parameters
----------
y_true : ndarray
The target :math:`y\in\mathbb{R}`.
y_pred : ndarray
The predicted output :math:`\hat{y}\in\mathbb{R}`.
w : ndarray, optional
Sample weights.
Returns
-------
loss : scalar ndarray
The loss averaged over the batch.
"""
loss = 0.5 * jnp.square(y_pred - y_true)
return _mean_with_weights(loss, w)
def huber(y_true, y_pred, w=None, delta=1.0):
r"""
`Huber <https://en.wikipedia.org/wiki/Huber_loss>`_ loss function.
.. math::
L\ =\ \left\{\begin{matrix}
            \frac12(\hat{y} - y)^2
&\quad:\ |\hat{y} - y|\leq\delta \\
\delta\,|\hat{y} - y| - \frac{\delta^2}{2}
&\quad:\ |\hat{y} - y| > \delta
\end{matrix}\right.
.. image:: /_static/img/huber.svg
:alt: Huber loss
:width: 320px
:align: center
Parameters
----------
y_true : ndarray
The target :math:`y\in\mathbb{R}`.
y_pred : ndarray
The predicted output :math:`\hat{y}\in\mathbb{R}`.
w : ndarray, optional
Sample weights.
delta : float, optional
The scale of the quadratic-to-linear transition.
Returns
-------
loss : scalar ndarray
The loss averaged over the batch.
"""
err = jnp.abs(y_pred - y_true)
err_clipped = jnp.minimum(err, delta)
loss = 0.5 * jnp.square(err_clipped) + delta * (err - err_clipped)
return _mean_with_weights(loss, w)
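# Illustrative numeric check (not part of the library API): with the default delta=1.0, an error
# of 0.5 stays in the quadratic branch (0.5 * 0.5 ** 2 = 0.125) while an error of 3.0 falls in the
# linear branch (1.0 * 3.0 - 0.5 = 2.5). The numbers are arbitrary.
def _example_huber():
    y_true = jnp.array([0.0, 0.0])
    y_pred = jnp.array([0.5, 3.0])
    print(huber(y_true, y_pred))  # mean of (0.125, 2.5) = 1.3125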
def logloss(y_true, y_pred, w=None):
r"""
Logistic loss function for binary classification, `y_true` =
:math:`y\in\{0,1\}` and the model output is a probability `y_pred` =
:math:`\hat{y}\in[0,1]`:
.. math::
L\ =\ -y\log(\hat{y}) - (1 - y)\log(1 - \hat{y})
Parameters
----------
y_true : ndarray
The binary target, encoded as :math:`y\in\{0,1\}`.
y_pred : (ndarray of) float
        The predicted output, represented by a probability
:math:`\hat{y}\in[0,1]`.
w : ndarray, optional
Sample weights.
Returns
-------
loss : scalar ndarray
The loss averaged over the batch.
"""
loss = -y_true * jnp.log(y_pred) - (1. - y_true) * jnp.log(1. - y_pred)
return _mean_with_weights(loss, w)
def logloss_sign(y_true_sign, logits, w=None):
r"""
Logistic loss function specific to the case in which the target is a sign
:math:`y\in\{-1,1\}` and the model output is a logit
:math:`\hat{z}\in\mathbb{R}`.
.. math::
L\ =\ \log(1 + \exp(-y\,\hat{z}))
This version tends to be more numerically stable than the generic
implementation, because it avoids having to map the predicted logit to a
probability.
Parameters
----------
y_true_sign : ndarray
The binary target, encoded as :math:`y=\pm1`.
logits : ndarray
The predicted output, represented by a logit
:math:`\hat{z}\in\mathbb{R}`.
w : ndarray, optional
Sample weights.
Returns
-------
loss : scalar ndarray
The loss averaged over the batch.
"""
loss = jnp.log(1.0 + jnp.exp(-y_true_sign * logits))
return _mean_with_weights(loss, w)
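# Illustrative sketch (not part of the library API): logloss_sign(y, z) with y = ±1 agrees with
# logloss applied to sigmoid(z) and the {0, 1}-encoded target, but it never materializes the
# probability, which is what makes it better behaved for large |z|. The logits are arbitrary.
def _example_logloss_sign():
    logits = jnp.array([-2.0, 0.5, 3.0])
    y_sign = jnp.array([1.0, -1.0, 1.0])
    y_01 = (y_sign + 1.0) / 2.0
    print(logloss_sign(y_sign, logits))           # same value as the line below
    print(logloss(y_01, jax.nn.sigmoid(logits)))  # ...up to floating-point error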
def _mean_with_weights(loss, w):
if w is not None:
assert w.ndim == 1
assert loss.ndim >= 1
assert loss.shape[0] == w.shape[0]
loss = jax.vmap(jnp.multiply)(w, loss)
return jnp.mean(loss)
def quantile_huber(y_true, y_pred, quantiles, w=None, delta=1.0):
r"""
`Quantile Huber <https://arxiv.org/abs/1806.06923>`_ loss function.
.. math::
\delta_{ij} &= y_j - \hat{y}_i\\
\rho^\kappa_\tau(\delta_{ij}) &= |\tau - \mathbb{I}{\{ \delta_{ij} < 0 \}}| \
\frac{\mathcal{L}_\kappa(\delta_{ij})}{\kappa},\ \quad \text{with}\\
\mathcal{L}_\kappa(\delta_{ij}) &= \begin{cases}
\frac{1}{2} \delta_{ij}^2,\quad \ &\text{if } |\delta_{ij}| \le \kappa\\
\kappa (|\delta_{ij}| - \frac{1}{2}\kappa),\quad \ &\text{otherwise}
\end{cases}
Parameters
----------
    y_true : ndarray
        The target values :math:`y\in\mathbb{R}^{B\times N}`, where :math:`B` is the batch size
        and :math:`N` the number of target samples.
    y_pred : ndarray
        The predicted quantile values :math:`\hat{y}\in\mathbb{R}^{B\times M}`, where :math:`M`
        is the number of quantiles.
    quantiles : ndarray
        The quantile fractions :math:`\tau\in\mathbb{R}^{B\times M}` associated with the
        predictions :math:`\hat{y}`.
w : ndarray, optional
Sample weights.
delta : float, optional
The scale of the quadratic-to-linear transition.
Returns
-------
loss : scalar ndarray
The loss averaged over the batch.
"""
y_pred = y_pred[..., None]
y_true = y_true[..., None, :]
quantiles = quantiles[..., None]
td_error = y_true - y_pred
td_error_abs = jnp.abs(td_error)
err_clipped = jnp.minimum(td_error_abs, delta)
elementwise_huber_loss = 0.5 * jnp.square(err_clipped) + delta * (td_error_abs - err_clipped)
elementwise_quantile_huber_loss = jnp.abs(
quantiles - (td_error < 0)) * elementwise_huber_loss / delta
quantile_huber_loss = elementwise_quantile_huber_loss.sum(axis=-1)
return _mean_with_weights(quantile_huber_loss, w=w)
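# Illustrative sketch (not part of the library API): quantile_huber expects 2-d inputs, i.e.
# y_pred and quantiles of shape (batch_size, num_quantiles) and y_true of shape
# (batch_size, num_targets); it broadcasts them pairwise and returns a scalar loss. The shapes
# and values below are arbitrary.
def _example_quantile_huber():
    batch_size, num_quantiles = 4, 8
    quantiles = jnp.tile((jnp.arange(num_quantiles) + 0.5) / num_quantiles, (batch_size, 1))
    y_pred = jnp.zeros((batch_size, num_quantiles))
    y_true = jnp.ones((batch_size, num_quantiles))  # e.g. bootstrapped target values
    print(quantile_huber(y_true, y_pred, quantiles))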
| 5,578 | 21.864754 | 97 | py |
null | coax-main/coax/value_transforms/__init__.py | r"""
Value Transforms
================
.. autosummary::
:nosignatures:
coax.value_transforms.ValueTransform
coax.value_transforms.LogTransform
----
This module contains some useful **value transforms**. These are functions
that can be used to rescale or warp the returns for a more robust training
signal, see e.g. :class:`coax.value_transforms.LogTransform`.
Object Reference
----------------
.. autoclass:: coax.value_transforms.ValueTransform
.. autoclass:: coax.value_transforms.LogTransform
"""
from ._base import ValueTransform
from ._log_transform import LogTransform
__all__ = (
'ValueTransform',
'LogTransform',
)
| 659 | 18.411765 | 79 | py |
null | coax-main/coax/value_transforms/_base.py |
class ValueTransform:
r"""
Abstract base class for value transforms. See
:class:`coax.value_transforms.LogTransform` for a specific implementation.
"""
__slots__ = ('_transform_func', '_inverse_func')
def __init__(self, transform_func, inverse_func):
self._transform_func = transform_func
self._inverse_func = inverse_func
@property
def transform_func(self):
r"""
The transformation function :math:`x\mapsto y=f(x)`.
Parameters
----------
x : ndarray
The values in their original representation.
Returns
-------
y : ndarray
The values in their transformed representation.
"""
return self._transform_func
@property
def inverse_func(self):
r"""
The inverse transformation function :math:`y\mapsto x=f^{-1}(y)`.
Parameters
----------
y : ndarray
The values in their transformed representation.
Returns
-------
x : ndarray
The values in their original representation.
"""
return self._inverse_func
def __iter__(self):
return iter((self.transform_func, self.inverse_func))
| 1,263 | 20.423729 | 78 | py |
null | coax-main/coax/value_transforms/_log_transform.py | import jax.numpy as jnp
from ._base import ValueTransform
class LogTransform(ValueTransform):
r"""
A simple invertible log-transform.
.. math::
x\ \mapsto\ y\ =\ \lambda\,\text{sign}(x)\,
\log\left(1+\frac{|x|}{\lambda}\right)
with inverse:
.. math::
y\ \mapsto\ x\ =\ \lambda\,\text{sign}(y)\,
\left(\text{e}^{|y|/\lambda} - 1\right)
    This transform logarithmically suppresses large values :math:`|x|\gg1` and smoothly interpolates
to the identity transform for small values :math:`|x|\sim1` (see figure below).
.. image:: /_static/img/log_transform.svg
:alt: Invertible log-transform
:width: 640px
Parameters
----------
scale : positive float, optional
The scale :math:`\lambda>0` of the linear-to-log cross-over. Smaller
values for :math:`\lambda` translate into earlier onset of the
cross-over.
"""
__slots__ = ValueTransform.__slots__ + ('scale',)
def __init__(self, scale=1.0):
assert scale > 0
self.scale = scale
def transform_func(x):
return jnp.sign(x) * scale * jnp.log(1 + jnp.abs(x) / scale)
def inverse_func(x):
return jnp.sign(x) * scale * (jnp.exp(jnp.abs(x) / scale) - 1)
self._transform_func = transform_func
self._inverse_func = inverse_func
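# Illustrative sketch (not part of the coax API): the transform squashes large values while
# leaving small ones nearly untouched, and inverse_func undoes it exactly (up to float precision).
# The values are arbitrary.
def _example_log_transform():
    f = LogTransform(scale=1.0)
    x = jnp.array([-100., -1., 0., 1., 100.])
    y = f.transform_func(x)      # large |x| are compressed, small |x| barely change
    print(y, f.inverse_func(y))  # the second array recovers x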
| 1,385 | 24.666667 | 99 | py |
null | coax-main/coax/value_transforms/_log_transform_test.py | import jax.numpy as jnp
from .._base.test_case import TestCase
from ._log_transform import LogTransform
class TestLogTransform(TestCase):
decimal = 5
def test_inverse(self):
f = LogTransform(scale=7)
# some consistency checks
values = jnp.array([-100, -10, -1, 0, 1, 10, 100], dtype='float32')
self.assertArrayAlmostEqual(
f.inverse_func(f.transform_func(values)), values)
| 429 | 25.875 | 75 | py |
null | coax-main/coax/wrappers/__init__.py | r"""
Wrappers
========
.. autosummary::
:nosignatures:
coax.wrappers.TrainMonitor
coax.wrappers.FrameStacking
coax.wrappers.BoxActionsToReals
coax.wrappers.BoxActionsToDiscrete
coax.wrappers.MetaPolicyEnv
----
Gymnasium provides a nice modular interface to extend existing environments using
`environment wrappers <https://gymnasium.farama.org/api/wrappers/>`_.
Here we list some wrappers that are used throughout the **coax** package.
The most notable wrapper that you'll probably want to use is
:class:`coax.wrappers.TrainMonitor`. It wraps the environment in a way that we
can view our training logs easily. It uses both the standard :py:mod:`logging`
module as well as tensorboard through the `tensorboardX
<https://tensorboardx.readthedocs.io/>`_ package.
Object Reference
----------------
.. autoclass:: coax.wrappers.TrainMonitor
.. autoclass:: coax.wrappers.FrameStacking
.. autoclass:: coax.wrappers.BoxActionsToReals
.. autoclass:: coax.wrappers.BoxActionsToDiscrete
.. autoclass:: coax.wrappers.MetaPolicyEnv
"""
from ._train_monitor import TrainMonitor
from ._frame_stacking import FrameStacking
from ._box_spaces import BoxActionsToReals, BoxActionsToDiscrete
from ._meta_policy import MetaPolicyEnv
__all__ = (
'TrainMonitor',
'FrameStacking',
'BoxActionsToReals',
'BoxActionsToDiscrete',
'MetaPolicyEnv',
)
| 1,366 | 25.288462 | 78 | py |
null | coax-main/coax/wrappers/_box_spaces.py | import gymnasium
import numpy as onp
from scipy.special import expit as sigmoid
from .._base.mixins import AddOrigToInfoDictMixin
__all__ = (
'BoxActionsToReals',
'BoxActionsToDiscrete',
)
class BoxActionsToReals(gymnasium.Wrapper, AddOrigToInfoDictMixin):
r"""
This wrapper decompactifies a :class:`Box <gymnasium.spaces.Box>` action space to the reals.
This is required in order to be able to use a Gaussian policy.
In practice, the wrapped environment expects the input action
:math:`a_\text{real}\in\mathbb{R}^n` and then it compactifies it back to a Box of the right
size:
.. math::
a_\text{box}\ =\ \text{low} + (\text{high}-\text{low})\times\text{sigmoid}(a_\text{real})
Technically, the transformed space is still a Box, but that's only because we assume that the
values lie between large but finite bounds, :math:`a_\text{real}\in[-10^{15}, 10^{15}]^n`.
"""
def __init__(self, env):
super().__init__(env)
if not isinstance(self.action_space, gymnasium.spaces.Box):
raise NotImplementedError("BoxActionsToReals is only implemented for Box action spaces")
shape_flat = onp.prod(self.env.action_space.shape),
self.action_space = gymnasium.spaces.Box(
low=onp.full(shape_flat, -1e15, self.env.action_space.dtype),
high=onp.full(shape_flat, 1e15, self.env.action_space.dtype))
def step(self, a):
assert self.action_space.contains(a)
self._a_orig = self._compactify(a)
s_next, r, done, truncated, info = super().step(self._a_orig)
self._add_a_orig_to_info_dict(info)
return s_next, r, done, truncated, info
def _compactify(self, action):
hi, lo = self.env.action_space.high, self.env.action_space.low
action = onp.clip(action, -1e15, 1e15)
action = onp.reshape(action, self.env.action_space.shape)
return lo + (hi - lo) * sigmoid(action)
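# Illustrative sketch (not part of the coax API): wrap a Box-action environment and feed it an
# unbounded real-valued action; the wrapper squashes the action back into the original Box via
# the sigmoid mapping documented above. 'Pendulum-v1' is merely an arbitrary example environment.
def _example_box_actions_to_reals():
    env = BoxActionsToReals(gymnasium.make('Pendulum-v1'))
    s, info = env.reset()
    a_real = onp.array([3.7], dtype=env.action_space.dtype)  # any real number is now valid
    s_next, r, done, truncated, info = env.step(a_real)      # squashed internally to the Box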
class BoxActionsToDiscrete(gymnasium.Wrapper, AddOrigToInfoDictMixin):
r"""
This wrapper splits a :class:`Box <gymnasium.spaces.Box>` action space into bins. The resulting
action space is either :class:`Discrete <gymnasium.spaces.Discrete>` or :class:`MultiDiscrete
<gymnasium.spaces.MultiDiscrete>`, depending on the shape of the original action space.
Parameters
----------
num_bins : int or tuple of ints
        The number of bins to use. A multi-dimensional box requires a tuple of num_bins instead
of a single integer.
random_seed : int, optional
Sets the random state to get reproducible results.
"""
def __init__(self, env, num_bins, random_seed=None):
super().__init__(env)
if not isinstance(self.action_space, gymnasium.spaces.Box):
raise NotImplementedError(
"BoxActionsToDiscrete is only implemented for Box action spaces")
self._rnd = onp.random.RandomState(random_seed)
self._init_action_space(num_bins) # also sets self._nvec and self._size
def step(self, a):
assert self.action_space.contains(a)
self._a_orig = self._discrete_to_box(a)
s_next, r, done, truncated, info = super().step(self._a_orig)
self._add_a_orig_to_info_dict(info)
return s_next, r, done, truncated, info
def _discrete_to_box(self, a_discrete):
hi, lo = self.env.action_space.high, self.env.action_space.low
a_flat = (a_discrete + self._rnd.rand(self._size)) / self._nvec
a_reshaped = onp.reshape(a_flat, self.env.action_space.shape)
a_rescaled = lo + a_reshaped * (hi - lo)
return a_rescaled
def _init_action_space(self, num_bins):
self._size = onp.prod(self.env.action_space.shape)
if isinstance(num_bins, int):
self._nvec = [num_bins] * self._size
elif isinstance(num_bins, tuple) and all(isinstance(i, int) for i in num_bins):
if len(num_bins) != self._size:
raise ValueError(
"len(num_bins) must be equal to the number of non-trivial dimensions: "
f"{self._size}")
self._nvec = onp.asarray(num_bins)
else:
raise TypeError("num_bins must an int or tuple of ints")
if self._size == 1:
self.action_space = gymnasium.spaces.Discrete(self._nvec[0])
else:
self.action_space = gymnasium.spaces.MultiDiscrete(self._nvec)
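# Illustrative sketch (not part of the coax API): discretizing a one-dimensional Box action space
# into 9 bins yields a Discrete(9) action space, which makes the environment usable by
# discrete-action agents such as DQN. 'Pendulum-v1' is merely an arbitrary example environment.
def _example_box_actions_to_discrete():
    env = BoxActionsToDiscrete(gymnasium.make('Pendulum-v1'), num_bins=9, random_seed=13)
    print(env.action_space)                          # Discrete(9)
    s, info = env.reset()
    s_next, r, done, truncated, info = env.step(4)   # pick the middle bin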
| 4,466 | 38.184211 | 100 | py |
null | coax-main/coax/wrappers/_box_spaces_test.py | from itertools import combinations
import gymnasium
import numpy as onp
from .._base.test_case import TestCase
from ._box_spaces import BoxActionsToDiscrete
class TestBoxActionsToDiscrete(TestCase):
def test_inverse(self):
num_bins = 100
env = gymnasium.make('BipedalWalker-v3')
env = BoxActionsToDiscrete(env, num_bins)
hi, lo = env.env.action_space.high, env.env.action_space.low
a_orig = env.env.action_space.sample()
# create discrete action
a_orig_rescaled = (a_orig - lo) / (hi - lo)
a_orig_flat = onp.ravel(a_orig_rescaled)
a_discrete = onp.asarray(num_bins * a_orig_flat, dtype='int8')
# reconstruct continuous action
a_reconstructed = env._discrete_to_box(a_discrete)
diff = onp.abs(a_reconstructed - a_orig) / (hi - lo)
print(diff)
self.assertTrue(onp.all(diff < 1 / num_bins))
def test_not_all_same(self):
env = gymnasium.make('BipedalWalker-v3')
env = BoxActionsToDiscrete(env, num_bins=10, random_seed=13)
a_discrete = onp.zeros(4)
a_box = env._discrete_to_box(a_discrete)
print(a_box)
for x, y in combinations(a_box, 2):
self.assertNotAlmostEqual(x, y)
| 1,259 | 29 | 70 | py |
null | coax-main/coax/wrappers/_frame_stacking.py | from collections import deque
import gymnasium
class FrameStacking(gymnasium.Wrapper):
r"""
Wrapper that does frame stacking (see `DQN paper
<https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf>`_).
This implementation is different from most implementations in that it doesn't perform the
stacking itself. Instead, it just returns a tuple of frames (untouched), which may be stacked
downstream.
The benefit of this implementation is two-fold. First, it respects the :mod:`gymnasium.spaces`
API, where each observation is truly an element of the observation space (this is not true of
the gymnasium implementation, which uses a custom data class to maintain its minimal memory
    footprint). Second, this implementation is compatible with the :mod:`jax.tree_util` module,
which means that we can feed it into jit-compiled functions directly.
Example
-------
.. code::
import gymnasium
env = gymnasium.make('PongNoFrameskip-v0')
print(env.observation_space) # Box(210, 160, 3)
env = FrameStacking(env, num_frames=2)
print(env.observation_space) # Tuple((Box(210, 160, 3), Box(210, 160, 3)))
Parameters
----------
env : gymnasium-style environment
The original environment to be wrapped.
num_frames : positive int
Number of frames to stack.
"""
def __init__(self, env, num_frames):
if not (isinstance(num_frames, int) and num_frames > 0):
raise TypeError(f"num_frames must be a positive int, got: {num_frames}")
super().__init__(env)
self.observation_space = gymnasium.spaces.Tuple((self.env.observation_space,) * num_frames)
self._frames = deque(maxlen=num_frames)
def step(self, action):
observation, reward, done, truncated, info = self.env.step(action)
self._frames.append(observation)
return tuple(self._frames), reward, done, truncated, info
def reset(self, **kwargs):
observation, info = self.env.reset(**kwargs)
self._frames.extend(observation for _ in range(self._frames.maxlen))
return tuple(self._frames), info # shallow copy
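# Illustrative sketch (not part of the coax API): the wrapper returns a plain tuple of frames and
# leaves the actual stacking to the downstream function approximator, e.g. along a new trailing
# axis. 'CartPole-v1' is merely an arbitrary example environment.
def _example_frame_stacking():
    import numpy as onp  # local import to keep this sketch self-contained
    env = FrameStacking(gymnasium.make('CartPole-v1'), num_frames=3)
    s, info = env.reset()        # s is a tuple of 3 identical frames right after reset
    x = onp.stack(s, axis=-1)    # downstream stacking; shape (4, 3) for CartPole
    print(type(s), x.shape)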
| 2,186 | 34.274194 | 99 | py |
null | coax-main/coax/wrappers/_meta_policy.py | import inspect
import gymnasium
class MetaPolicyEnv(gymnasium.Wrapper):
r"""
Wrap a gymnasium-style environment such that it may be used by a meta-policy,
i.e. a bandit that selects a policy (an *arm*), which is then used to
sample a lower-level action and fed the original environment. In other
words, the actions that the :attr:`step` method expects are *meta-actions*,
selecting different *arms*. The lower-level actions (and their
log-propensities) that are sampled internally are stored in the ``info``
dict, returned by the :attr:`step` method.
Parameters
----------
env : gymnasium-style environment
The original environment to be wrapped into a meta-policy env.
\*arms : functions
Callable objects that take a state observation :math:`s` and return an
action :math:`a` (and optionally, log-propensity :math:`\log\pi(a|s)`).
See for example :attr:`coax.Policy.__call__` or
:attr:`coax.Policy.mode`.
"""
def __init__(self, env, *arms):
super().__init__(env)
self.arms = arms
self.action_space = gymnasium.spaces.Discrete(len(arms))
self._s = None
def reset(self):
self._s, info = self.env.reset()
return self._s, info
def step(self, a_meta):
assert self.action_space.contains(a_meta), "a_meta is invalid"
assert self._s is not None, "please call env.reset() first"
pi = self.arms[a_meta]
        if 'return_logp' in inspect.getfullargspec(pi).args:
a, logp = pi(self._s, return_logp=True)
else:
a, logp = pi(self._s), 0.
self._s, r, done, truncated, info = self.env.step(a)
info = info or {}
info.update({'a': a, 'logp': logp})
return self._s, r, done, truncated, info
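# Illustrative sketch (not part of the coax API): a meta-policy environment whose two "arms" are
# plain callables mapping an observation to a lower-level action. In practice the arms would be
# coax policies; the stand-ins below are made up so that the sketch stays self-contained.
def _example_meta_policy_env():
    env = gymnasium.make('CartPole-v1')
    def arm_left(s):
        return 0                              # hypothetical arm: always push left
    def arm_random(s):
        return env.action_space.sample()      # hypothetical arm: act uniformly at random
    meta_env = MetaPolicyEnv(env, arm_left, arm_random)
    s, info = meta_env.reset()
    s_next, r, done, truncated, info = meta_env.step(1)  # meta-action 1 selects arm_random
    print(info['a'], info['logp'])            # lower-level action and its log-propensity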
| 1,824 | 32.181818 | 81 | py |
null | coax-main/coax/wrappers/_train_monitor.py | import os
import re
import datetime
import logging
import time
from collections import deque
from typing import Mapping
import numpy as np
import lz4.frame
import cloudpickle as pickle
from gymnasium import Wrapper
from gymnasium.spaces import Discrete
from tensorboardX import SummaryWriter
from .._base.mixins import LoggerMixin
from ..utils import enable_logging
__all__ = (
'TrainMonitor',
)
class StreamingSample:
def __init__(self, maxlen, random_seed=None):
self._deque = deque(maxlen=maxlen)
self._count = 0
self._rnd = np.random.RandomState(random_seed)
def reset(self):
self._deque = deque(maxlen=self.maxlen)
self._count = 0
def append(self, obj):
self._count += 1
if len(self) < self.maxlen:
self._deque.append(obj)
elif self._rnd.rand() < self.maxlen / self._count:
i = self._rnd.randint(self.maxlen)
self._deque[i] = obj
@property
def values(self):
return list(self._deque) # shallow copy
@property
def maxlen(self):
return self._deque.maxlen
def __len__(self):
return len(self._deque)
def __bool__(self):
return bool(self._deque)
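# Illustrative sketch (not part of the coax API): StreamingSample is a reservoir sampler. Each of
# the n items seen so far is retained with probability maxlen / n, so at any point the buffer
# holds a uniform random subsample of the stream without ever storing the full stream. The
# numbers are arbitrary.
def _example_streaming_sample():
    sample = StreamingSample(maxlen=100, random_seed=13)
    for t in range(10000):
        sample.append(t)
    print(len(sample), min(sample.values), max(sample.values))  # 100 items spread over the stream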
class TrainMonitor(Wrapper, LoggerMixin):
r"""
Environment wrapper for monitoring the training process.
This wrapper logs some diagnostics at the end of each episode and it also gives us some handy
attributes (listed below).
Parameters
----------
env : gymnasium environment
A gymnasium environment.
tensorboard_dir : str, optional
If provided, TrainMonitor will log all diagnostics to be viewed in tensorboard. To view
these, point tensorboard to the same dir:
.. code:: bash
$ tensorboard --logdir {tensorboard_dir}
tensorboard_write_all : bool, optional
You may record your training metrics using the :attr:`record_metrics` method. Setting the
``tensorboard_write_all`` specifies whether to pass the metrics on to tensorboard
immediately (``True``) or to wait and average them across the episode (``False``). The
        default setting (``False``) prevents tensorboard from being flooded with logs.
log_all_metrics : bool, optional
Whether to log all metrics. If ``log_all_metrics=False``, only a reduced set of metrics are
logged.
smoothing : positive int, optional
The number of observations for smoothing the metrics. We use the following smooth update
rule:
.. math::
n\ &\leftarrow\ \min(\text{smoothing}, n + 1) \\
x_\text{avg}\ &\leftarrow\ x_\text{avg}
+ \frac{x_\text{obs} - x_\text{avg}}{n}
\*\*logger_kwargs
Keyword arguments to pass on to :func:`coax.utils.enable_logging`.
Attributes
----------
T : positive int
Global step counter. This is not reset by ``env.reset()``, use ``env.reset_global()``
instead.
ep : positive int
Global episode counter. This is not reset by ``env.reset()``, use ``env.reset_global()``
instead.
t : positive int
Step counter within an episode.
G : float
The return, i.e. amount of reward accumulated from the start of the current episode.
avg_G : float
        The return G, smoothed over (roughly) the last ``smoothing`` episodes.
dt_ms : float
The average wall time of a single step, in milliseconds.
"""
_COUNTER_ATTRS = (
'T', 'ep', 't', 'G', 'avg_G', '_n_avg_G', '_ep_starttime', '_ep_metrics', '_ep_actions',
'_tensorboard_dir', '_period')
def __init__(
self, env,
tensorboard_dir=None,
tensorboard_write_all=False,
log_all_metrics=False,
smoothing=10,
**logger_kwargs):
super().__init__(env)
self.log_all_metrics = log_all_metrics
self.tensorboard_write_all = tensorboard_write_all
self.smoothing = float(smoothing)
self.reset_global()
enable_logging(**logger_kwargs)
self.logger.setLevel(logger_kwargs.get('level', logging.INFO))
self._init_tensorboard(tensorboard_dir)
def reset_global(self):
r""" Reset the global counters, not just the episodic ones. """
self.T = 0
self.ep = 0
self.t = 0
self.G = 0.0
self.avg_G = 0.0
self._n_avg_G = 0.0
self._ep_starttime = time.time()
self._ep_metrics = {}
self._ep_actions = StreamingSample(maxlen=1000)
self._period = {'T': {}, 'ep': {}}
def reset(self):
# write logs from previous episode:
if self.ep:
self._write_episode_logs()
# increment global counters:
self.T += 1
self.ep += 1
# reset episodic counters:
self.t = 0
self.G = 0.0
self._ep_starttime = time.time()
self._ep_metrics = {}
self._ep_actions.reset()
return self.env.reset()
@property
def dt_ms(self):
if self.t <= 0:
return np.nan
return 1000 * (time.time() - self._ep_starttime) / self.t
@property
def avg_r(self):
if self.t <= 0:
return np.nan
return self.G / self.t
def step(self, a):
self._ep_actions.append(a)
s_next, r, done, truncated, info = self.env.step(a)
if info is None:
info = {}
info['monitor'] = {'T': self.T, 'ep': self.ep}
self.t += 1
self.T += 1
self.G += r
if done or truncated:
if self._n_avg_G < self.smoothing:
self._n_avg_G += 1.
self.avg_G += (self.G - self.avg_G) / self._n_avg_G
return s_next, r, done, truncated, info
def record_metrics(self, metrics):
r"""
Record metrics during the training process.
These are used to print more diagnostics.
Parameters
----------
metrics : dict
A dict of metrics, of type ``{name <str>: value <float>}``.
"""
if not isinstance(metrics, Mapping):
raise TypeError("metrics must be a Mapping")
        # write metrics to tensorboard
if self.tensorboard is not None and self.tensorboard_write_all:
for name, metric in metrics.items():
self.tensorboard.add_scalar(
str(name), float(metric), global_step=self.T)
# compute episode averages
for k, v in metrics.items():
if k not in self._ep_metrics:
self._ep_metrics[k] = v, 1.
else:
x, n = self._ep_metrics[k]
self._ep_metrics[k] = x + v, n + 1
def get_metrics(self):
r"""
Return the current state of the metrics.
Returns
-------
metrics : dict
A dict of metrics, of type ``{name <str>: value <float>}``.
"""
return {k: float(x) / n for k, (x, n) in self._ep_metrics.items()}
def period(self, name, T_period=None, ep_period=None):
if T_period is not None:
T_period = int(T_period)
assert T_period > 0
if name not in self._period['T']:
self._period['T'][name] = 1
if self.T >= self._period['T'][name] * T_period:
self._period['T'][name] += 1
                return True
return self.period(name, None, ep_period)
if ep_period is not None:
ep_period = int(ep_period)
assert ep_period > 0
if name not in self._period['ep']:
self._period['ep'][name] = 1
if self.ep >= self._period['ep'][name] * ep_period:
self._period['ep'][name] += 1
return True
return False
@property
def tensorboard(self):
if not hasattr(self, '_tensorboard'):
assert self._tensorboard_dir is not None
self._tensorboard = SummaryWriter(self._tensorboard_dir)
return self._tensorboard
def _init_tensorboard(self, tensorboard_dir):
if tensorboard_dir is None:
self._tensorboard_dir = None
self._tensorboard = None
return
# append timestamp to disambiguate instances
if not re.match(r'.*/\d{8}_\d{6}$', tensorboard_dir):
tensorboard_dir = os.path.join(
tensorboard_dir,
datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
# only set/update if necessary
if tensorboard_dir != getattr(self, '_tensorboard_dir', None):
self._tensorboard_dir = tensorboard_dir
if hasattr(self, '_tensorboard'):
del self._tensorboard
def _write_episode_logs(self):
metrics = (
f'{k:s}: {float(x) / n:.3g}'
for k, (x, n) in self._ep_metrics.items() if (
self.log_all_metrics
or str(k).endswith('/loss')
or str(k).endswith('/entropy')
or str(k).endswith('/kl_div')
or str(k).startswith('throughput/')
)
)
self.logger.info(
',\t'.join((
f'ep: {self.ep:d}',
f'T: {self.T:,d}',
f'G: {self.G:.3g}',
f'avg_r: {self.avg_r:.3g}',
f'avg_G: {self.avg_G:.3g}',
f't: {self.t:d}',
f'dt: {self.dt_ms:.3f}ms',
*metrics)))
if self.tensorboard is not None:
metrics = {
'episode/episode': self.ep,
'episode/avg_reward': self.avg_r,
'episode/return': self.G,
'episode/steps': self.t,
'episode/avg_step_duration_ms': self.dt_ms}
for name, metric in metrics.items():
self.tensorboard.add_scalar(
str(name), float(metric), global_step=self.T)
if self._ep_actions:
if isinstance(self.action_space, Discrete):
bins = np.arange(self.action_space.n + 1)
else:
bins = 'auto' # see also: np.histogram_bin_edges.__doc__
self.tensorboard.add_histogram(
tag='actions', values=self._ep_actions.values, global_step=self.T, bins=bins)
if self._ep_metrics and not self.tensorboard_write_all:
for k, (x, n) in self._ep_metrics.items():
self.tensorboard.add_scalar(str(k), float(x) / n, global_step=self.T)
self.tensorboard.flush()
def __getstate__(self):
state = self.__dict__.copy() # shallow copy
if '_tensorboard' in state:
del state['_tensorboard'] # remove reference to non-pickleable attr
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._init_tensorboard(state['_tensorboard_dir'])
def get_counters(self):
r"""
Get the current state of all internal counters.
Returns
-------
counter : dict
The dict that contains the counters.
"""
return {k: getattr(self, k) for k in self._COUNTER_ATTRS}
def set_counters(self, counters):
r"""
Restore the state of all internal counters.
Parameters
----------
counter : dict
The dict that contains the counters.
"""
if not (isinstance(counters, dict) and set(counters) == set(self._COUNTER_ATTRS)):
raise TypeError(f"invalid counters dict: {counters}")
self.__setstate__(counters)
def save_counters(self, filepath):
r"""
Store the current state of all internal counters.
Parameters
----------
filepath : str
The checkpoint file path.
"""
counters = self.get_counters()
os.makedirs(os.path.dirname(filepath) or '.', exist_ok=True)
with lz4.frame.open(filepath, 'wb') as f:
f.write(pickle.dumps(counters))
def load_counters(self, filepath):
r"""
Restore the state of all internal counters.
Parameters
----------
filepath : str
The checkpoint file path.
"""
with lz4.frame.open(filepath, 'rb') as f:
counters = pickle.loads(f.read())
self.set_counters(counters)
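# Illustrative sketch (not part of the coax API): a minimal training loop showing where
# TrainMonitor hooks in. It counts steps and episodes, aggregates metrics recorded via
# record_metrics, and writes one log line per episode. 'CartPole-v1' and the zero-valued loss
# metric are arbitrary placeholders.
def _example_train_monitor():
    import gymnasium  # local import to keep this sketch self-contained
    env = TrainMonitor(gymnasium.make('CartPole-v1'), name='example')
    for _ in range(3):
        s, info = env.reset()
        while True:
            a = env.action_space.sample()
            s, r, done, truncated, info = env.step(a)
            env.record_metrics({'example/loss': 0.0})  # made-up metric, for illustration only
            if done or truncated:
                break
    print(env.T, env.ep, env.avg_G)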
| 12,563 | 28.84323 | 99 | py |
null | coax-main/doc/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup ------------------------------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import re
import os
import sys
import logging
logger = logging.getLogger(__file__)
RE_VERSION = re.compile(r'^__version__ \= \'(\d+\.\d+\.\d+(?:\w+\d+)?)\'$', re.MULTILINE)
PROJECTDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, PROJECTDIR)
def get_release():
with open(os.path.join(PROJECTDIR, 'coax', '__init__.py')) as f:
version = re.search(RE_VERSION, f.read())
assert version is not None, "can't parse __version__ from __init__.py"
return version.group(1)
# -- Project information ---------------------------------------------------------------------------
project = 'coax'
copyright = '2020, Microsoft Corporation; 2021, github.com/coax-dev'
author = 'Kristian Holsheimer'
release = get_release()
# -- General configuration -------------------------------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'nbsphinx', # requirement: nbsphinx
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx_tabs.tabs', # requirement: sphinx-tabs
'sphinx_copybutton', # requirement: sphinx-copybutton
'notfound.extension', # requirement: sphinx-notfound-page
]
# autodoc settings
autodoc_default_options = {
'members': True,
'inherited-members': True,
'special-members': '__call__',
}
autodoc_member_order = 'groupwise' # by member type, falling back to bysource
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'examples/sandbox.ipynb', '**.ipynb_checkpoints', '_notebooks',
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -----------------------------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# html_theme_path = ['_themes']
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/img/coax_logo.png'
# html_favicon = '_static/img/favicon.ico'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# options for
html_theme_options = {
'canonical_url': '',
'logo_only': False,
'display_version': True,
'prev_next_buttons_location': 'bottom',
'style_external_links': False,
# Toc options
'collapse_navigation': True,
'sticky_navigation': True,
'navigation_depth': 4,
'includehidden': True,
'titles_only': False
}
# get google analytics tracking id from: https://analytics.google.com
# update the tracking id here: https://readthedocs.org/dashboard/coax/advanced/
# add GA_TRACKING_ID env var to: https://readthedocs.org/dashboard/coax/environmentvariables/
# format: GA4_TRACKING_ID=G-XXXXXXXXXX (or legacy tracking id: GA_TRACKING_ID=UA-XXXXXXX-1)
if os.environ.get('GA4_TRACKING_ID', '').startswith('G-'):
html_theme_options['analytics_id'] = os.environ['GA4_TRACKING_ID']
logger.info("added Google Analytics tracking ID to html_theme_options")
elif os.environ.get('GA_TRACKING_ID', '').startswith('UA-'):
html_theme_options['analytics_id'] = os.environ['GA_TRACKING_ID']
logger.info("added Google Analytics tracking ID to html_theme_options")
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# some extra html files to include as-is
html_extra_path = []
# verify that base url is linked to google account
if os.environ.get('GOOGLE_VERIFICATION_HASH'):
fn = 'google{GOOGLE_VERIFICATION_HASH}.html'.format(**os.environ)
with open(fn, 'w') as f:
f.write(f"google-site-verification: {fn}")
html_extra_path.append(fn)
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
html_css_files = [
'css/custom.css',
]
html_js_files = [
# 'js/custom.js',
]
# -- Options for HTMLHelp output -------------------------------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'coaxdoc'
# -- Options for LaTeX output ----------------------------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'coax.tex', 'coax Documentation',
'Kristian Holsheimer', 'manual'),
]
# -- Options for manual page output ----------------------------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'coax', 'coax Documentation',
[author], 1)
]
# -- Options for Texinfo output --------------------------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'coax', 'coax Documentation',
author, 'coax', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -----------------------------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Options for nbsphinx extension ----------------------------------------------------------------
# don't evaluate any cells in ipython notebooks
nbsphinx_execute = 'never'
# -- Options for intersphinx extension -------------------------------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', ('_intersphinx/python3.inv',)),
'numpy': ('https://numpy.org/doc/stable/', ('_intersphinx/numpy.inv',)),
'sklearn': ('https://scikit-learn.org/stable/', ('_intersphinx/sklearn.inv',)),
'jax': ('https://jax.readthedocs.io/en/latest/', ('_intersphinx/jax.inv',)),
'haiku': ('https://dm-haiku.readthedocs.io/en/latest/', ('_intersphinx/haiku.inv',)),
'spinup': ('https://spinningup.openai.com/en/latest/', ('_intersphinx/spinup.inv',)),
}
# -- Options for todo extension --------------------------------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for napolean extension ----------------------------------------------------------------
# Defaults:
# napoleon_google_docstring = True
# napoleon_numpy_docstring = True
# napoleon_include_init_with_doc = False
# napoleon_include_private_with_doc = False
# napoleon_include_special_with_doc = True
# napoleon_use_admonition_for_examples = False
# napoleon_use_admonition_for_notes = False
# napoleon_use_admonition_for_references = False
# napoleon_use_ivar = False
# napoleon_use_param = True
# napoleon_use_rtype = True
# Overrides:
napoleon_use_rtype = False
napoleon_use_ivar = True
# -- Options for napolean extension ----------------------------------------------------------------
# this strips common prefixes from the code before it's copied
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
# -- Automatically update jaxlib version number ----------------------------------------------------
try:
from jaxlib import __version__
with open('versions.html') as f:
filecontent = f.read()
filecontent_new = re.sub(
r'var jaxlibVersion = \'\d+\.\d+\.\d+\';',
f"var jaxlibVersion = '{__version__}';",
filecontent)
if filecontent_new != filecontent:
with open('versions.html', 'w') as f:
f.write(filecontent_new)
except ImportError:
pass
| 10,762 | 32.321981 | 100 | py |
null | coax-main/doc/create_notebooks.py | #!/usr/bin/env python3
import os
import json
import shutil
from glob import glob
from copy import deepcopy
PACKAGEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
nb_template = {
"cells": [
{
"cell_type": "code",
"execution_count": None,
"metadata": {},
"outputs": [],
"source": [
"%pip install git+https://github.com/coax-dev/coax.git@main --quiet",
]
},
{
"cell_type": "code",
"execution_count": None,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.2"
}
},
"colab": {
"name": None,
"provenance": []
},
"nbformat": 4,
"nbformat_minor": 2
}
sdl_videodriver_cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {},
"outputs": [],
"source": [
"# Run this cell to fix rendering errors.\n",
"import os\n",
"os.environ['SDL_VIDEODRIVER'] = 'dummy'",
]
}
tensorboard_cell = {
"cell_type": "code",
"execution_count": None,
"metadata": {},
"outputs": [],
"source": [
"%load_ext tensorboard\n",
"%tensorboard --logdir ./data/tensorboard",
]
}
for d_in in glob(os.path.join(PACKAGEDIR, 'doc', 'examples', '*')):
if not os.path.isdir(d_in) or 'search' in d_in:
continue
d_out = d_in.replace('examples', '_notebooks')
if os.path.exists(d_out):
shutil.rmtree(d_out)
os.makedirs(d_out)
for f_in in glob(f'{d_in}/*.py'):
f_out = f_in.replace('examples', '_notebooks').replace('.py', '.ipynb')
nb = deepcopy(nb_template)
with open(f_in) as r, open(f'{f_out}', 'w') as w:
lines = list(r)
nb['colab']['name'] = os.path.split(f_out)[1]
nb['cells'][-1]['source'] = lines
if any(("CartPole-v" in line or "Pendulum-v" in line) for line in lines):
nb['cells'].insert(1, sdl_videodriver_cell)
if any("tensorboard_dir=" in line for line in lines):
nb['cells'].insert(1, tensorboard_cell)
if 'atari' in f_in:
nb['accelerator'] = 'GPU'
json.dump(nb, w, indent=1)
print(f"converted: {f_in} --> {f_out}")
| 2,859 | 25.481481 | 85 | py |
null | coax-main/doc/versions.html | <style>
.version.matrix {
width: 100%;
}
.version.options {
column-fill: balance;
column-gap: 0;
text-align: center;
}
.version.title {
width: 25%;
padding: 10px 10px 10px 0px;
}
.version.option {
background: #E3E3E3;
padding: 10px 5px 10px 5px;
margin: 0px 1px;
}
.version.option:hover {
color: #FFF;
background: #A9A9A9;
}
.version.option.selected {
color: #FFF;
background: #676767;
}
.version.option.unavailable {
color: #D6D6D6;
background: #E3E3E3;
}
</style>
<p>
<table class="version matrix">
<tbody>
<tr>
<td class="version title os">OS:</td>
<td class="version options os" style="column-count: 2">
<div id="macosx_10_9_x86_64" class="version option os">Mac</div>
<div id="manylinux2010_x86_64" class="version option os selected">Linux</div>
<!-- if you add a row here, make sure to update 'column-count' as well -->
</td>
</tr>
<tr>
<td class="version title cuda">CUDA version:</td>
<td class="version options cuda" style="column-count: 5">
<div id="nocuda" class="version option cuda selected">none</div>
<div id="cuda101" class="version option cuda">10.1</div>
<div id="cuda102" class="version option cuda">10.2</div>
<div id="cuda110" class="version option cuda">11.0</div>
<div id="cuda111" class="version option cuda">11.1</div>
<!-- if you add a row here, make sure to update 'column-count' as well -->
</td>
</tr>
</tbody>
</table>
</p>
Command to run:
<div class="highlight-bash notranslate">
<div class="highlight">
<pre id="codecell0">
<span></span>$ pip install --upgrade jaxlib jax coax
</pre>
</div>
</div>
<script>
// removes from global namespace
(function() {
// init form
var osVersions = document.getElementsByClassName('version option os')
var pythonVersions = document.getElementsByClassName('version option python')
var cudaVersions = document.getElementsByClassName('version option cuda')
function selectOption(e) {
if (e.target.classList.contains('selected')) {
return;
}
// update selection
var options = document.getElementsByClassName(e.target.className);
for (var i=0, len=options.length; i<len; i++) {
options[i].classList.remove('selected')
}
e.target.classList.add('selected');
// select 'nocuda' if 'macos' is selected
if (document.getElementById('macosx_10_9_x86_64').classList.contains('selected') ) {
for (var i=0, len=cudaVersions.length; i<len; i++) {
if (cudaVersions[i].id === 'nocuda') {
cudaVersions[i].classList.add('selected');
} else {
cudaVersions[i].classList.remove('selected');
cudaVersions[i].classList.add('unavailable');
}
}
} else {
// make cuda versions available again if 'linux' is selected
for (var i=0, len=cudaVersions.length; i<len; i++) {
cudaVersions[i].classList.remove('unavailable');
}
}
// update the codecell with the installation command
updateCommand();
}
function updateCommand() {
var codecellName = 'codecell0';
var jaxlibVersion = '0.3.25'; // this is automatically updated from conf.py
// get the selected os version
var osVersion = null;
for (var i=0, len=osVersions.length; i<len; i++) {
if (osVersions[i].classList.contains('selected')) {
osVersion = osVersions[i].id;
break;
}
}
// get the selected cuda version
var cudaVersion = null;
for (var i=0, len=cudaVersions.length; i<len; i++) {
if (cudaVersions[i].classList.contains('selected')) {
cudaVersion = cudaVersions[i].id;
break;
}
}
var command = document.getElementById(codecellName);
if (cudaVersion === 'nocuda') {
command.innerHTML = "$ pip install --upgrade coax jax jaxlib";
} else {
command.innerHTML = `$ pip install --upgrade coax jax jaxlib==${jaxlibVersion}+${cudaVersion} -f https://storage.googleapis.com/jax-releases/jax_releases.html
`
}
}
// init
for (var i=0, len=osVersions.length; i<len; i++) {
osVersions[i].onclick = selectOption;
}
for (var i=0, len=pythonVersions.length; i<len; i++) {
pythonVersions[i].onclick = selectOption;
}
for (var i=0, len=cudaVersions.length; i<len; i++) {
cudaVersions[i].onclick = selectOption;
}
updateCommand();
}());
</script>
| 4,720 | 29.458065 | 170 | html |
null | coax-main/doc/_static/css/custom.css | body a {
font-weight: bold;
}
.math {
text-align: left;
}
.eqno {
float: right;
}
.keep-us-sustainable {
/* hide ads */
display: none !important;
}
.wy-nav-top {
max-width: 900px;
}
.wy-nav-content {
background: #fff;
max-width: 900px;
}
.rst-content dl:not(.docutils) dl dt strong {
padding-left: 6pt;
padding-right: 6pt;
}
.rst-content dl:not(.docutils) dl dt span {
color: #777;
}
.rst-content dl:not(.docutils) dl dt span.classifier {
padding-left: 6pt;
padding-right: 6pt;
}
.rst-content dl:not(.docutils) dl dt a.reference.internal span.xref.std.std-term {
color: #2980B9;
}
| 622 | 13.159091 | 82 | css |
null | coax-main/doc/examples/README.md | # Example Notebooks
To interact with these examples, please go to:
* https://coax.readthedocs.io
From there you can view them easily and open the scripts as Jupyter notebooks in Google Colab.
| 195 | 23.5 | 94 | md |
null | coax-main/doc/examples/sandbox.py | 0 | 0 | 0 | py |
|
null | coax-main/doc/examples/atari/apex_dqn.py | import os
os.environ['JAX_PLATFORM_NAME'] = 'cpu'
# os.environ['JAX_PLATFORM_NAME'] = 'gpu'
# os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.1' # don't use all gpu mem
import gymnasium
import ray
import jax
import jax.numpy as jnp
import coax
import haiku as hk
import optax
# name of this script
name, _ = os.path.splitext(os.path.basename(__file__))
@ray.remote(num_cpus=1, num_gpus=0)
class ApexWorker(coax.Worker):
def __init__(self, name, param_store=None, tensorboard_dir=None):
env = make_env(name, tensorboard_dir)
# function approximator
self.q = coax.Q(forward_pass, env)
self.q_targ = self.q.copy()
# tracer and updater
self.q_updater = coax.td_learning.QLearning(
self.q, q_targ=self.q_targ, optimizer=optax.adam(3e-4))
# schedule for beta parameter used in PrioritizedReplayBuffer
self.buffer_beta = coax.utils.StepwiseLinearFunction((0, 0.4), (1000000, 1))
super().__init__(
env=env,
param_store=param_store,
pi=coax.BoltzmannPolicy(self.q, temperature=0.015),
tracer=coax.reward_tracing.NStep(n=1, gamma=0.99),
buffer=(
coax.experience_replay.PrioritizedReplayBuffer(capacity=1000000, alpha=0.6)
if param_store is None else None),
buffer_warmup=50000,
name=name)
def get_state(self):
return self.q.params, self.q.function_state, self.q_targ.params, self.q_targ.function_state
def set_state(self, state):
self.q.params, self.q.function_state, self.q_targ.params, self.q_targ.function_state = state
def trace(self, s, a, r, done_or_truncated, logp):
self.tracer.add(s, a, r, done_or_truncated, logp)
if done_or_truncated:
transition_batch = self.tracer.flush()
for chunk in coax.utils.chunks_pow2(transition_batch):
td_error = self.q_updater.td_error(chunk)
self.buffer_add(chunk, td_error)
def learn(self, transition_batch):
metrics, td_error = self.q_updater.update(transition_batch, return_td_error=True)
self.buffer_update(transition_batch.idx, td_error)
self.q_targ.soft_update(self.q, tau=0.001)
self.push_setattr('buffer.beta', self.buffer_beta(self.env.T))
return metrics
def make_env(name=None, tensorboard_dir=None):
env = gymnasium.make('PongNoFrameskip-v4', render_mode='rgb_array') # AtariPreprocessing will do frame skipping
env = gymnasium.wrappers.AtariPreprocessing(env)
env = coax.wrappers.FrameStacking(env, num_frames=3)
env = gymnasium.wrappers.TimeLimit(env, max_episode_steps=108000 // 3)
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=tensorboard_dir)
env.spec.reward_threshold = 19.
return env
def forward_pass(S, is_training):
seq = hk.Sequential((
coax.utils.diff_transform,
hk.Conv2D(16, kernel_shape=8, stride=4), jax.nn.relu,
hk.Conv2D(32, kernel_shape=4, stride=2), jax.nn.relu,
hk.Flatten(),
hk.Linear(256), jax.nn.relu,
hk.Linear(make_env().action_space.n, w_init=jnp.zeros),
))
X = jnp.stack(S, axis=-1) / 255. # stack frames
return seq(X)
# settings
num_actors = 6
# start ray cluster
ray.init(num_cpus=(2 + num_actors), num_gpus=0)
# the central parameter store
param_store = ApexWorker.remote('param_store')
# concurrent rollout workers
actors = [
ApexWorker.remote(f'actor_{i}', param_store, f'data/tensorboard/apex_dqn/actor_{i}')
for i in range(num_actors)]
# one learner
learner = ApexWorker.remote('learner', param_store)
# block until one of the remote processes terminates
ray.wait([
learner.learn_loop.remote(max_total_steps=3000000),
*(actor.rollout_loop.remote(max_total_steps=3000000) for actor in actors)
])
| 3,880 | 31.613445 | 116 | py |
null | coax-main/doc/examples/atari/ddpg.py | import os
# set some env vars
os.environ.setdefault('JAX_PLATFORM_NAME', 'gpu') # tell JAX to use GPU
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.1' # don't use all gpu mem
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tell XLA to be quiet
import gymnasium
import jax
import coax
import haiku as hk
import jax.numpy as jnp
from optax import adam
# the name of this script
name = 'ddpg'
# env with preprocessing
env = gymnasium.make('PongNoFrameskip-v4', render_mode='rgb_array')
env = gymnasium.wrappers.AtariPreprocessing(env)
env = coax.wrappers.FrameStacking(env, num_frames=3)
env = gymnasium.wrappers.TimeLimit(env, max_episode_steps=108000 // 3)
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def shared(S, is_training):
seq = hk.Sequential([
coax.utils.diff_transform,
hk.Conv2D(16, kernel_shape=8, stride=4), jax.nn.relu,
hk.Conv2D(32, kernel_shape=4, stride=2), jax.nn.relu,
hk.Flatten(),
])
X = jnp.stack(S, axis=-1) / 255. # stack frames
return seq(X)
def func_pi(S, is_training):
logits = hk.Sequential((
hk.Linear(256), jax.nn.relu,
hk.Linear(env.action_space.n, w_init=jnp.zeros),
))
X = shared(S, is_training)
return {'logits': logits(X)}
def func_q(S, A, is_training):
value = hk.Sequential((
hk.Linear(256), jax.nn.relu,
hk.Linear(1, w_init=jnp.zeros), jnp.ravel
))
X = shared(S, is_training)
assert A.ndim == 2 and A.shape[1] == env.action_space.n, "actions must be one-hot encoded"
return value(jax.vmap(jnp.kron)(X, A))
# function approximators
pi = coax.Policy(func_pi, env)
q = coax.Q(func_q, env)
# target networks
pi_targ = pi.copy()
q_targ = q.copy()
# policy regularizer (avoid premature exploitation)
kl_div = coax.regularizers.KLDivRegularizer(pi, beta=0.001)
# updaters
qlearning = coax.td_learning.QLearning(q, q_targ=q_targ, optimizer=adam(3e-4))
determ_pg = coax.policy_objectives.DeterministicPG(pi, q, regularizer=kl_div, optimizer=adam(3e-4))
# reward tracer and replay buffer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.99)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=1000000)
while env.T < 3000000:
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a, logp = pi(s, return_logp=True)
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done, logp)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) > 50000: # buffer warm-up
transition_batch = buffer.sample(batch_size=32)
env.record_metrics(determ_pg.update(transition_batch))
env.record_metrics(qlearning.update(transition_batch))
if env.period('target_model_sync', T_period=10000):
pi_targ.soft_update(pi, tau=1)
q_targ.soft_update(q, tau=1)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 50000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, resize_to=(320, 420),
filepath=f"./data/gifs/{name}/T{T:08d}.gif")
| 3,394 | 29.863636 | 99 | py |
null | coax-main/doc/examples/atari/dqn.py | import os
# set some env vars
os.environ.setdefault('JAX_PLATFORM_NAME', 'gpu') # tell JAX to use GPU
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.1' # don't use all gpu mem
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tell XLA to be quiet
import gymnasium
import jax
import coax
import haiku as hk
import jax.numpy as jnp
from optax import adam
# the name of this script
name = 'dqn'
# env with preprocessing
env = gymnasium.make('PongNoFrameskip-v4', render_mode='rgb_array') # AtariPreprocessing will do frame skipping
env = gymnasium.wrappers.AtariPreprocessing(env)
env = coax.wrappers.FrameStacking(env, num_frames=3)
env = gymnasium.wrappers.TimeLimit(env, max_episode_steps=108000 // 3)
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func(S, is_training):
""" type-2 q-function: s -> q(s,.) """
seq = hk.Sequential((
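        # diff_transform maps the stacked grayscale frames to finite differences,
        # so the network can pick up on motion between frames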
coax.utils.diff_transform,
hk.Conv2D(16, kernel_shape=8, stride=4), jax.nn.relu,
hk.Conv2D(32, kernel_shape=4, stride=2), jax.nn.relu,
hk.Flatten(),
hk.Linear(256), jax.nn.relu,
hk.Linear(env.action_space.n, w_init=jnp.zeros),
))
X = jnp.stack(S, axis=-1) / 255. # stack frames
return seq(X)
# function approximator
q = coax.Q(func, env)
pi = coax.EpsilonGreedy(q, epsilon=1.)
# target network
q_targ = q.copy()
# updater
qlearning = coax.td_learning.QLearning(q, q_targ=q_targ, optimizer=adam(3e-4))
# reward tracer and replay buffer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.99)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=1000000)
# DQN exploration schedule (stepwise linear annealing)
epsilon = coax.utils.StepwiseLinearFunction((0, 1), (1000000, 0.1), (2000000, 0.01))
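# (epsilon is annealed linearly from 1.0 to 0.1 over the first 1M steps, then down to 0.01 by 2M steps)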
while env.T < 3000000:
s, info = env.reset()
pi.epsilon = epsilon(env.T)
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done or truncated)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) > 50000: # buffer warm-up
metrics = qlearning.update(buffer.sample(batch_size=32))
env.record_metrics(metrics)
if env.T % 10000 == 0:
q_targ.soft_update(q, tau=1)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 50000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, resize_to=(320, 420),
filepath=f"./data/gifs/{name}/T{T:08d}.gif")
| 2,794 | 29.380435 | 112 | py |
null | coax-main/doc/examples/atari/dqn_boltzmann.py | import os
# set some env vars
os.environ.setdefault('JAX_PLATFORM_NAME', 'gpu') # tell JAX to use GPU
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.1' # don't use all gpu mem
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tell XLA to be quiet
import gymnasium
import jax
import coax
import haiku as hk
import jax.numpy as jnp
from optax import adam
# the name of this script
name = 'dqn_boltzmann'
# env with preprocessing
env = gymnasium.make('PongNoFrameskip-v4', render_mode='rgb_array') # AtariPreprocessing will do frame skipping
env = gymnasium.wrappers.AtariPreprocessing(env)
env = coax.wrappers.FrameStacking(env, num_frames=3)
env = gymnasium.wrappers.TimeLimit(env, max_episode_steps=108000 // 3)
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func(S, is_training):
""" type-2 q-function: s -> q(s,.) """
seq = hk.Sequential((
coax.utils.diff_transform,
hk.Conv2D(16, kernel_shape=8, stride=4), jax.nn.relu,
hk.Conv2D(32, kernel_shape=4, stride=2), jax.nn.relu,
hk.Flatten(),
hk.Linear(256), jax.nn.relu,
hk.Linear(env.action_space.n, w_init=jnp.zeros),
))
X = jnp.stack(S, axis=-1) / 255. # stack frames
return seq(X)
# function approximator
q = coax.Q(func, env)
pi = coax.BoltzmannPolicy(q, temperature=0.015) # <--- different from standard DQN (ε-greedy)
# target network
q_targ = q.copy()
# updater
qlearning = coax.td_learning.QLearning(q, q_targ=q_targ, optimizer=adam(3e-4))
# reward tracer and replay buffer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.99)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=1000000)
while env.T < 3000000:
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done or truncated)
while tracer:
buffer.add(tracer.pop())
# learn
if env.T % 4 == 0 and len(buffer) > 50000: # buffer warm-up
metrics = qlearning.update(buffer.sample(batch_size=32))
env.record_metrics(metrics)
if env.T % 10000 == 0:
q_targ.soft_update(q, tau=1)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 50000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, resize_to=(320, 420),
filepath=f"./data/gifs/{name}/T{T:08d}.gif")
| 2,705 | 30.103448 | 112 | py |
null | coax-main/doc/examples/atari/dqn_per.py | import os
# set some env vars
os.environ.setdefault('JAX_PLATFORM_NAME', 'gpu') # tell JAX to use GPU
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.1' # don't use all gpu mem
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tell XLA to be quiet
import gymnasium
import jax
import coax
import haiku as hk
import jax.numpy as jnp
from optax import adam
# the name of this script
name = 'dqn_boltzmann_per'
# env with preprocessing
env = gymnasium.make('PongNoFrameskip-v4', render_mode='rgb_array') # AtariPreprocessing will do frame skipping
env = gymnasium.wrappers.AtariPreprocessing(env)
env = coax.wrappers.FrameStacking(env, num_frames=3)
env = gymnasium.wrappers.TimeLimit(env, max_episode_steps=108000 // 3)
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func(S, is_training):
""" type-2 q-function: s -> q(s,.) """
seq = hk.Sequential((
coax.utils.diff_transform,
hk.Conv2D(16, kernel_shape=8, stride=4), jax.nn.relu,
hk.Conv2D(32, kernel_shape=4, stride=2), jax.nn.relu,
hk.Flatten(),
hk.Linear(256), jax.nn.relu,
hk.Linear(env.action_space.n, w_init=jnp.zeros),
))
X = jnp.stack(S, axis=-1) / 255. # stack frames
return seq(X)
# function approximator
q = coax.Q(func, env)
pi = coax.BoltzmannPolicy(q, temperature=0.015) # <--- different from standard DQN (ε-greedy)
# target network
q_targ = q.copy()
# updater
qlearning = coax.td_learning.QLearning(q, q_targ=q_targ, optimizer=adam(3e-4))
# reward tracer and replay buffer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.99)
buffer = coax.experience_replay.PrioritizedReplayBuffer(capacity=1000000, alpha=0.6)
# schedule for the PER beta hyperparameter
beta = coax.utils.StepwiseLinearFunction((0, 0.4), (1000000, 1))
while env.T < 3000000:
s, info = env.reset()
buffer.beta = beta(env.T)
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done or truncated)
while tracer:
transition = tracer.pop()
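            # the current TD error sets the initial priority of the new transition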
buffer.add(transition, qlearning.td_error(transition))
# learn
if env.T % 4 == 0 and len(buffer) > 50000: # buffer warm-up
transition_batch = buffer.sample(batch_size=32)
metrics, td_error = qlearning.update(transition_batch, return_td_error=True)
buffer.update(transition_batch.idx, td_error)
env.record_metrics(metrics)
if env.T % 10000 == 0:
q_targ.soft_update(q, tau=1)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 50000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, resize_to=(320, 420),
filepath=f"./data/gifs/{name}/T{T:08d}.gif")
| 3,070 | 31.670213 | 112 | py |
null | coax-main/doc/examples/atari/dqn_soft.py | import os
# set some env vars
os.environ.setdefault('JAX_PLATFORM_NAME', 'gpu') # tell JAX to use GPU
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.1' # don't use all gpu mem
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tell XLA to be quiet
import gymnasium
import jax
import coax
import haiku as hk
import jax.numpy as jnp
from optax import adam
# the name of this script
name = 'dqn_soft'
# env with preprocessing
env = gymnasium.make('PongNoFrameskip-v4', render_mode='rgb_array') # AtariPreprocessing will do frame skipping
env = gymnasium.wrappers.AtariPreprocessing(env)
env = coax.wrappers.FrameStacking(env, num_frames=3)
env = gymnasium.wrappers.TimeLimit(env, max_episode_steps=108000 // 3)
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func(S, is_training):
""" type-2 q-function: s -> q(s,.) """
seq = hk.Sequential((
coax.utils.diff_transform,
hk.Conv2D(16, kernel_shape=8, stride=4), jax.nn.relu,
hk.Conv2D(32, kernel_shape=4, stride=2), jax.nn.relu,
hk.Flatten(),
hk.Linear(256), jax.nn.relu,
hk.Linear(env.action_space.n, w_init=jnp.zeros),
))
X = jnp.stack(S, axis=-1) / 255. # stack frames
return seq(X)
# function approximator
q = coax.Q(func, env)
pi = coax.BoltzmannPolicy(q, temperature=0.015)
# target network
q_targ = q.copy()
# updater
soft_qlearning = coax.td_learning.SoftQLearning(
q, q_targ=q_targ, optimizer=adam(3e-4), temperature=pi.temperature)
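# (the shared temperature ties the soft Bellman backup to the Boltzmann policy: pi is the
#  soft-greedy policy of this soft q-function)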
# reward tracer and replay buffer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.99)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=1000000)
while env.T < 3000000:
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done or truncated)
while tracer:
buffer.add(tracer.pop())
# learn
if env.T % 4 == 0 and len(buffer) > 50000: # buffer warm-up
metrics = soft_qlearning.update(buffer.sample(batch_size=32))
env.record_metrics(metrics)
if env.T % 10000 == 0:
q_targ.soft_update(q, tau=1)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 50000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, resize_to=(320, 420),
filepath=f"./data/gifs/{name}/T{T:08d}.gif")
| 2,700 | 29.693182 | 112 | py |
null | coax-main/doc/examples/atari/dqn_type1.py | import os
# set some env vars
os.environ.setdefault('JAX_PLATFORM_NAME', 'gpu') # tell JAX to use GPU
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.1' # don't use all gpu mem
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tell XLA to be quiet
import gymnasium
import jax
import coax
import haiku as hk
import jax.numpy as jnp
from optax import adam
# the name of this script
name = 'dqn_type1'
# env with preprocessing
env = gymnasium.make('PongNoFrameskip-v4', render_mode='rgb_array') # AtariPreprocessing will do frame skipping
env = gymnasium.wrappers.AtariPreprocessing(env)
env = coax.wrappers.FrameStacking(env, num_frames=3)
env = gymnasium.wrappers.TimeLimit(env, max_episode_steps=108000 // 3)
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func(S, A, is_training):
""" type-1 q-function: (s,a) -> q(s,a) """
body = hk.Sequential((
coax.utils.diff_transform,
hk.Conv2D(16, kernel_shape=8, stride=4), jax.nn.relu,
hk.Conv2D(32, kernel_shape=4, stride=2), jax.nn.relu,
hk.Flatten(),
))
head = hk.Sequential((
hk.Linear(256), jax.nn.relu,
hk.Linear(1, w_init=jnp.zeros), jnp.ravel
))
X = jnp.stack(S, axis=-1) / 255. # stack frames
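    # kron combines the state features with the one-hot encoded action (one outer product per sample)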
return head(jax.vmap(jnp.kron)(body(X), A))
# function approximator
q = coax.Q(func, env)
pi = coax.BoltzmannPolicy(q, temperature=0.015) # <--- different from standard DQN
# target network
q_targ = q.copy()
# updater
qlearning = coax.td_learning.QLearning(q, q_targ=q_targ, optimizer=adam(3e-4))
# reward tracer and replay buffer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.99)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=1000000)
while env.T < 3000000:
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done or truncated)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) > 50000: # buffer warm-up
metrics = qlearning.update(buffer.sample(batch_size=32))
env.record_metrics(metrics)
if env.T % 10000 == 0:
q_targ.soft_update(q, tau=1)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 50000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, resize_to=(320, 420),
filepath=f"./data/gifs/{name}/T{T:08d}.gif")
| 2,736 | 29.752809 | 112 | py |
null | coax-main/doc/examples/atari/ppo.py | import os
# set some env vars
os.environ.setdefault('JAX_PLATFORM_NAME', 'gpu') # tell JAX to use GPU
os.environ['XLA_PYTHON_CLIENT_MEM_FRACTION'] = '0.1' # don't use all gpu mem
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # tell XLA to be quiet
import gymnasium
import jax
import coax
import haiku as hk
import jax.numpy as jnp
from optax import adam
# the name of this script
name = 'ppo'
# env with preprocessing
env = gymnasium.make('PongNoFrameskip-v4', render_mode='rgb_array')
env = gymnasium.wrappers.AtariPreprocessing(env)
env = coax.wrappers.FrameStacking(env, num_frames=3)
env = gymnasium.wrappers.TimeLimit(env, max_episode_steps=108000 // 3)
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def shared(S, is_training):
seq = hk.Sequential([
coax.utils.diff_transform,
hk.Conv2D(16, kernel_shape=8, stride=4), jax.nn.relu,
hk.Conv2D(32, kernel_shape=4, stride=2), jax.nn.relu,
hk.Flatten(),
])
X = jnp.stack(S, axis=-1) / 255. # stack frames
return seq(X)
def func_pi(S, is_training):
logits = hk.Sequential((
hk.Linear(256), jax.nn.relu,
hk.Linear(env.action_space.n, w_init=jnp.zeros),
))
X = shared(S, is_training)
return {'logits': logits(X)}
def func_v(S, is_training):
value = hk.Sequential((
hk.Linear(256), jax.nn.relu,
hk.Linear(1, w_init=jnp.zeros), jnp.ravel
))
X = shared(S, is_training)
return value(X)
# function approximators
pi = coax.Policy(func_pi, env)
v = coax.V(func_v, env)
# target networks
pi_behavior = pi.copy()
v_targ = v.copy()
# policy regularizer (avoid premature exploitation)
entropy = coax.regularizers.EntropyRegularizer(pi, beta=0.001)
# updaters
simpletd = coax.td_learning.SimpleTD(v, v_targ, optimizer=adam(3e-4))
ppo_clip = coax.policy_objectives.PPOClip(pi, regularizer=entropy, optimizer=adam(3e-4))
# reward tracer and replay buffer
tracer = coax.reward_tracing.NStep(n=5, gamma=0.99)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=256)
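# (small on-policy buffer: it is refilled with fresh rollouts and cleared after every round of updates)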
# run episodes
while env.T < 3000000:
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a, logp = pi_behavior(s, return_logp=True)
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done, logp)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) >= buffer.capacity:
num_batches = int(4 * buffer.capacity / 32) # 4 epochs per round
for _ in range(num_batches):
transition_batch = buffer.sample(32)
metrics_v, td_error = simpletd.update(transition_batch, return_td_error=True)
metrics_pi = ppo_clip.update(transition_batch, td_error)
env.record_metrics(metrics_v)
env.record_metrics(metrics_pi)
buffer.clear()
# sync target networks
pi_behavior.soft_update(pi, tau=0.1)
v_targ.soft_update(v, tau=0.1)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 50000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, resize_to=(320, 420),
filepath=f"./data/gifs/{name}/T{T:08d}.gif")
| 3,521 | 29.362069 | 94 | py |
null | coax-main/doc/examples/atari/run_all.sh | #!/bin/bash
trap "kill 0" EXIT
gio trash -f ./data
for f in $(ls ./*.py); do
python3 $f &
done
wait
| 107 | 8.818182 | 25 | sh |
null | coax-main/doc/examples/atari/experiment/dqn_sqil.py | 0 | 0 | 0 | py |
|
null | coax-main/doc/examples/cartpole/a2c.py | import coax
import gymnasium
import haiku as hk
import jax
import jax.numpy as jnp
import optax
from coax.value_losses import mse
# the name of this script
name = 'a2c'
# the cart-pole MDP
env = gymnasium.make('CartPole-v0', render_mode='rgb_array')
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func_pi(S, is_training):
logits = hk.Sequential((
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(env.action_space.n, w_init=jnp.zeros)
))
return {'logits': logits(S)}
def func_v(S, is_training):
value = hk.Sequential((
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(1, w_init=jnp.zeros), jnp.ravel
))
return value(S)
# these optimizers collect batches of grads before applying updates
optimizer_v = optax.chain(optax.apply_every(k=32), optax.adam(0.002))
optimizer_pi = optax.chain(optax.apply_every(k=32), optax.adam(0.001))
# value function and its derived policy
v = coax.V(func_v, env)
pi = coax.Policy(func_pi, env)
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updaters
vanilla_pg = coax.policy_objectives.VanillaPG(pi, optimizer=optimizer_pi)
simple_td = coax.td_learning.SimpleTD(v, loss_function=mse, optimizer=optimizer_v)
# train
for ep in range(1000):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# extend last reward as asymptotic best-case return
if truncated:
            r = 1 / (1 - tracer.gamma)  # 1 + gamma + gamma^2 + gamma^3 + ... = 1 / (1 - gamma)
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
metrics_v, td_error = simple_td.update(transition_batch, return_td_error=True)
metrics_pi = vanilla_pg.update(transition_batch, td_error)
env.record_metrics(metrics_v)
env.record_metrics(metrics_pi)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
coax.utils.generate_gif(env, policy=pi, filepath=f"./data/{name}.gif", duration=25)
| 2,386 | 26.436782 | 94 | py |
null | coax-main/doc/examples/cartpole/dqn.py | import coax
import gymnasium
import haiku as hk
import jax
import jax.numpy as jnp
from coax.value_losses import mse
from optax import adam
# the name of this script
name = 'dqn'
# the cart-pole MDP
env = gymnasium.make('CartPole-v0', render_mode='rgb_array')
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func(S, is_training):
""" type-2 q-function: s -> q(s,.) """
seq = hk.Sequential((
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(env.action_space.n, w_init=jnp.zeros)
))
return seq(S)
# value function and its derived policy
q = coax.Q(func, env)
pi = coax.BoltzmannPolicy(q, temperature=0.1)
# target network
q_targ = q.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=100000)
# updater
qlearning = coax.td_learning.QLearning(q, q_targ=q_targ, loss_function=mse, optimizer=adam(0.001))
# train
for ep in range(1000):
s, info = env.reset()
    # (with an EpsilonGreedy policy you could anneal exploration here,
    #  e.g. pi.epsilon = max(0.01, pi.epsilon * 0.95); not applicable to BoltzmannPolicy)
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# extend last reward as asymptotic best-case return
if truncated:
            r = 1 / (1 - tracer.gamma)  # 1 + gamma + gamma^2 + gamma^3 + ... = 1 / (1 - gamma)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done or truncated)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) >= 100:
transition_batch = buffer.sample(batch_size=32)
metrics = qlearning.update(transition_batch)
env.record_metrics(metrics)
# sync target network
q_targ.soft_update(q, tau=0.01)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
coax.utils.generate_gif(env, policy=pi, filepath=f"./data/{name}.gif", duration=25)
| 2,219 | 25.428571 | 98 | py |
null | coax-main/doc/examples/cartpole/iqn.py | import coax
import gymnasium
import haiku as hk
import jax
import jax.numpy as jnp
from optax import adam
# the name of this script
name = 'iqn'
# the cart-pole MDP
env = gymnasium.make('CartPole-v0', render_mode='rgb_array')
env = coax.wrappers.TrainMonitor(
env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
quantile_embedding_dim = 64
layer_size = 256
num_quantiles = 32
def quantile_net(x, quantile_fractions):
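    # IQN-style quantile network: embed the sampled quantile fractions with cosine features
    # and merge them multiplicatively with the state features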
quantiles_emb = coax.utils.quantile_cos_embedding(
quantile_fractions, quantile_embedding_dim)
quantiles_emb = hk.Linear(x.shape[-1])(quantiles_emb)
quantiles_emb = jax.nn.relu(quantiles_emb)
x = x[:, None, :] * quantiles_emb
x = hk.Linear(layer_size)(x)
x = jax.nn.relu(x)
return x
def func(S, A, is_training):
""" type-1 q-function: (s,a) -> q(s,a) """
encoder = hk.Sequential((
hk.Flatten(), hk.Linear(layer_size), jax.nn.relu
))
quantile_fractions = coax.utils.quantiles_uniform(rng=hk.next_rng_key(),
batch_size=S.shape[0],
num_quantiles=num_quantiles)
X = jnp.concatenate((S, A), axis=-1)
x = encoder(X)
quantile_x = quantile_net(x, quantile_fractions=quantile_fractions)
quantile_values = hk.Linear(1, w_init=jnp.zeros)(quantile_x)
return {'values': quantile_values.squeeze(axis=-1),
'quantile_fractions': quantile_fractions}
# quantile value function and its derived policy
q = coax.StochasticQ(func, env, num_bins=num_quantiles, value_range=None)
pi = coax.BoltzmannPolicy(q)
# target network
q_targ = q.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=100000)
# updater
qlearning = coax.td_learning.QLearning(q, q_targ=q_targ, optimizer=adam(1e-3))
# train
for ep in range(1000):
s, info = env.reset()
    # (with an EpsilonGreedy policy you could anneal exploration here,
    #  e.g. pi.epsilon = max(0.01, pi.epsilon * 0.95); not applicable to BoltzmannPolicy)
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# extend last reward as asymptotic best-case return
if truncated:
            r = 1 / (1 - tracer.gamma)  # 1 + gamma + gamma^2 + gamma^3 + ... = 1 / (1 - gamma)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done or truncated)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) >= 100:
transition_batch = buffer.sample(batch_size=32)
metrics = qlearning.update(transition_batch)
env.record_metrics(metrics)
# sync target network
q_targ.soft_update(q, tau=0.01)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
coax.utils.generate_gif(env, policy=pi, filepath=f"./data/{name}.gif", duration=25)
| 3,092 | 29.029126 | 91 | py |
null | coax-main/doc/examples/cartpole/model_based.py | import coax
import gymnasium
import jax.numpy as jnp
import haiku as hk
import optax
from coax.value_losses import mse
# the name of this script
name = 'model_based'
# the cart-pole MDP
env = gymnasium.make('CartPole-v0', render_mode='rgb_array')
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func_v(S, is_training):
potential = hk.Sequential((jnp.square, hk.Linear(1, w_init=jnp.zeros), jnp.ravel))
return -jnp.square(S[:, 3]) + potential(S[:, :3]) # kinetic term is angular velocity squared
def func_p(S, A, is_training):
dS = hk.Linear(4, w_init=jnp.zeros)
return S + dS(A)
def func_r(S, A, is_training):
return jnp.ones(S.shape[0]) # CartPole yields r=1 at every time step (no need to learn)
# function approximators
p = coax.TransitionModel(func_p, env)
v = coax.V(func_v, env, observation_preprocessor=p.observation_preprocessor)
r = coax.RewardFunction(func_r, env, observation_preprocessor=p.observation_preprocessor)
# composite objects
q = coax.SuccessorStateQ(v, p, r, gamma=0.9)
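# q(s,a) = r(s,a) + gamma * v(p(s,a)), i.e. the value of the model-predicted successor state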
pi = coax.EpsilonGreedy(q, epsilon=0.) # no exploration
# reward tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=q.gamma)
# updaters
adam = optax.chain(optax.apply_every(k=16), optax.adam(1e-4))
simple_td = coax.td_learning.SimpleTD(v, loss_function=mse, optimizer=adam)
sgd = optax.sgd(1e-3, momentum=0.9, nesterov=True)
model_updater = coax.model_updaters.ModelUpdater(p, optimizer=sgd)
while env.T < 100000:
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
env.render()
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
env.record_metrics(simple_td.update(transition_batch))
env.record_metrics(model_updater.update(transition_batch))
if done or truncated:
break
s = s_next
# early stopping
if env.ep >= 5 and env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
coax.utils.generate_gif(env, policy=pi, filepath=f"./data/{name}.gif", duration=25)
| 2,228 | 26.518519 | 97 | py |
null | coax-main/doc/examples/cartpole/run_all.sh | #!/bin/bash
trap "kill 0" EXIT
gio trash -f ./data
for f in $(ls ./*.py); do
JAX_PLATFORM_NAME=cpu python3 $f &
done
wait
| 129 | 10.818182 | 38 | sh |
null | coax-main/doc/examples/dmc/run_all.sh | #!/bin/bash
trap "kill 0" EXIT
gio trash -f ./data
for f in $(ls ./*.py); do
JAX_PLATFORM_NAME=cpu python3 $f &
done
wait
| 129 | 10.818182 | 38 | sh |
null | coax-main/doc/examples/dmc/sac.py | import os
os.environ["MUJOCO_GL"] = "egl"
import coax
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as onp
import optax
from coax.utils import make_dmc
# the name of this script
name = 'sac'
# the dm_control MDP
env = make_dmc("walker", "walk")
env = coax.wrappers.TrainMonitor(env, name=name)
def func_pi(S, is_training):
seq = hk.Sequential((
hk.Linear(1024), hk.LayerNorm(-1, create_scale=True, create_offset=True), jax.nn.tanh,
hk.Linear(1024), jax.nn.relu,
hk.Linear(onp.prod(env.action_space.shape) * 2),
hk.Reshape((*env.action_space.shape, 2)),
))
x = seq(S)
mu, logvar = x[..., 0], x[..., 1]
return {'mu': mu, 'logvar': logvar}
def func_q(S, A, is_training):
seq = hk.Sequential((
hk.Linear(1024), hk.LayerNorm(-1, create_scale=True, create_offset=True), jax.nn.tanh,
hk.Linear(1024), jax.nn.relu,
hk.Linear(1), jnp.ravel
))
X = jnp.concatenate((S, A), axis=-1)
return seq(X)
# main function approximators
pi = coax.Policy(func_pi, env, proba_dist=coax.proba_dists.SquashedNormalDist(
env.action_space,
clip_logvar=(-10.0, 4.0),
))
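# (the squashed-normal distribution squashes samples onto the Box action bounds via a tanh-like transform)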
q1 = coax.Q(func_q, env, action_preprocessor=pi.proba_dist.preprocess_variate)
q2 = coax.Q(func_q, env, action_preprocessor=pi.proba_dist.preprocess_variate)
# target network
q1_targ = q1.copy()
q2_targ = q2.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=5, gamma=0.99, record_extra_info=True)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=1000000)
policy_regularizer = coax.regularizers.NStepEntropyRegularizer(pi,
beta=0.2,
gamma=tracer.gamma,
n=[tracer.n])
# updaters (the q-function targets use actions sampled from the current policy pi, in contrast to TD3's target policy)
qlearning1 = coax.td_learning.SoftClippedDoubleQLearning(
q1, pi_targ_list=[pi], q_targ_list=[q1_targ, q2_targ],
loss_function=coax.value_losses.mse, optimizer=optax.adam(1e-3),
policy_regularizer=policy_regularizer)
qlearning2 = coax.td_learning.SoftClippedDoubleQLearning(
q2, pi_targ_list=[pi], q_targ_list=[q1_targ, q2_targ],
loss_function=coax.value_losses.mse, optimizer=optax.adam(1e-3),
policy_regularizer=policy_regularizer)
soft_pg = coax.policy_objectives.SoftPG(pi, [q1_targ, q2_targ], optimizer=optax.adam(
1e-4), regularizer=policy_regularizer)
# train
while env.T < 1000000:
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) >= 5000:
transition_batch = buffer.sample(batch_size=256)
# init metrics dict
metrics = {}
# flip a coin to decide which of the q-functions to update
qlearning = qlearning1 if jax.random.bernoulli(q1.rng) else qlearning2
metrics.update(qlearning.update(transition_batch))
# delayed policy updates
if env.T >= 7500 and env.T % 4 == 0:
metrics.update(soft_pg.update(transition_batch))
env.record_metrics(metrics)
# sync target networks
q1_targ.soft_update(q1, tau=0.005)
q2_targ.soft_update(q2, tau=0.005)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 5000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, filepath=f"./data/gifs/{name}/T{T:08d}.gif")
| 3,943 | 31.595041 | 95 | py |
null | coax-main/doc/examples/frozen_lake/a2c.py | import coax
import jax
import jax.numpy as jnp
import gymnasium
import haiku as hk
import optax
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func_v(S, is_training):
value = hk.Sequential((hk.Linear(1, w_init=jnp.zeros), jnp.ravel))
return value(S)
def func_pi(S, is_training):
logits = hk.Linear(env.action_space.n, w_init=jnp.zeros)
return {'logits': logits(S)}
# function approximators
pi = coax.Policy(func_pi, env)
v = coax.V(func_v, env)
# target network
v_targ = v.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updaters
simple_td = coax.td_learning.SimpleTD(v, v_targ, optimizer=optax.adam(0.02))
vanillapg = coax.policy_objectives.VanillaPG(pi, optimizer=optax.adam(0.01))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a, logp = pi(s, return_logp=True)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done, logp)
while tracer:
transition_batch = tracer.pop()
_, td_error = simple_td.update(transition_batch, return_td_error=True)
vanillapg.update(transition_batch, td_error)
# sync target network
v_targ.soft_update(v, tau=0.01)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# estimated state value
print(" v(s) = {:.3f}".format(v(s)))
# print individual action probabilities
params = pi.dist_params(s)
propensities = jax.nn.softmax(params['logits'])
for i, p in enumerate(propensities):
print(" π({:s}|s) = {:.3f}".format('LDRU'[i], p))
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 2,313 | 21.910891 | 82 | py |
null | coax-main/doc/examples/frozen_lake/ddpg.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func_pi(S, is_training):
logits = hk.Linear(env.action_space.n, w_init=jnp.zeros)
return {'logits': logits(S)}
def func_q(S, A, is_training):
value = hk.Sequential((hk.Flatten(), hk.Linear(1, w_init=jnp.zeros), jnp.ravel))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return value(X)
# function approximators
pi = coax.Policy(func_pi, env)
q = coax.Q(func_q, env)
# target networks
q_targ = q.copy()
pi_targ = pi.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=128)
# updaters
qlearning = coax.td_learning.QLearning(q, pi_targ, q_targ, optimizer=optax.adam(0.02))
determ_pg = coax.policy_objectives.DeterministicPG(pi, q, optimizer=optax.adam(0.01))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
buffer.add(tracer.pop())
if len(buffer) == buffer.capacity:
transition_batch = buffer.sample(batch_size=16)
determ_pg.update(transition_batch)
qlearning.update(transition_batch)
# sync copies
q_targ.soft_update(q, tau=0.01)
pi_targ.soft_update(pi, tau=0.01)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# print individual action probabilities
params = pi.dist_params(s)
propensities = jax.nn.softmax(params['logits'])
for i, p in enumerate(propensities):
print(" π({:s}|s) = {:.3f}".format('LDRU'[i], p))
for i, q_ in enumerate(q(s)):
print(" q(s,{:s}) = {:.3f}".format('LDRU'[i], q_))
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 2,594 | 23.252336 | 86 | py |
null | coax-main/doc/examples/frozen_lake/double_qlearning.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func(S, A, is_training):
value = hk.Sequential((hk.Flatten(), hk.Linear(1, w_init=jnp.zeros), jnp.ravel))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return value(X)
# function approximator
q = coax.Q(func, env)
pi = coax.BoltzmannPolicy(q, temperature=0.1)
# target network
q_targ = q.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updater
qlearning = coax.td_learning.DoubleQLearning(q, q_targ=q_targ, optimizer=optax.adam(0.02))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
qlearning.update(transition_batch)
# sync target network
q_targ.soft_update(q, tau=0.1)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# print individual state-action values
for i, q_ in enumerate(q(s)):
print(" q(s,{:s}) = {:.3f}".format('LDRU'[i], q_))
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 1,943 | 20.6 | 90 | py |
null | coax-main/doc/examples/frozen_lake/expected_sarsa.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func(S, A, is_training):
value = hk.Sequential((hk.Flatten(), hk.Linear(1, w_init=jnp.zeros), jnp.ravel))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return value(X)
# function approximator
q = coax.Q(func, env)
pi = coax.BoltzmannPolicy(q, temperature=0.1)
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updater
esarsa = coax.td_learning.ExpectedSarsa(q, pi, optimizer=optax.adam(0.02))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
esarsa.update(transition_batch)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# print individual state-action values
for i, q_ in enumerate(q(s)):
print(" q(s,{:s}) = {:.3f}".format('LDRU'[i], q_))
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 1,809 | 20.807229 | 84 | py |
null | coax-main/doc/examples/frozen_lake/ppo.py | import coax
import jax
import jax.numpy as jnp
import gymnasium
import haiku as hk
import optax
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func_v(S, is_training):
value = hk.Sequential((hk.Linear(1, w_init=jnp.zeros), jnp.ravel))
return value(S)
def func_pi(S, is_training):
logits = hk.Linear(env.action_space.n, w_init=jnp.zeros)
return {'logits': logits(S)}
# function approximators
pi = coax.Policy(func_pi, env)
v = coax.V(func_v, env)
# create copies
pi_old = pi.copy() # behavior policy
v_targ = v.copy() # target network
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updaters
simple_td = coax.td_learning.SimpleTD(v, v_targ, optimizer=optax.adam(0.02))
ppo_clip = coax.policy_objectives.PPOClip(pi, optimizer=optax.adam(0.01))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a, logp = pi_old(s, return_logp=True)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done, logp)
while tracer:
transition_batch = tracer.pop()
_, td_error = simple_td.update(transition_batch, return_td_error=True)
ppo_clip.update(transition_batch, td_error)
# sync target networks
v_targ.soft_update(v, tau=0.01)
pi_old.soft_update(pi, tau=0.01)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# estimated state value
print(" v(s) = {:.3f}".format(v(s)))
# print individual action probabilities
params = pi.dist_params(s)
propensities = jax.nn.softmax(params['logits'])
for i, p in enumerate(propensities):
print(" π({:s}|s) = {:.3f}".format('LDRU'[i], p))
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 2,415 | 22.456311 | 82 | py |
null | coax-main/doc/examples/frozen_lake/qlearning.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func(S, A, is_training):
value = hk.Sequential((hk.Flatten(), hk.Linear(1, w_init=jnp.zeros), jnp.ravel))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return value(X)
# function approximator
q = coax.Q(func, env)
pi = coax.BoltzmannPolicy(q, temperature=0.1)
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updater
qlearning = coax.td_learning.QLearning(q, optimizer=optax.adam(0.02))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
qlearning.update(transition_batch)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# print individual state-action values
for i, q_ in enumerate(q(s)):
print(" q(s,{:s}) = {:.3f}".format('LDRU'[i], q_))
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 1,807 | 20.783133 | 84 | py |
null | coax-main/doc/examples/frozen_lake/reinforce.py | import coax
import jax
import jax.numpy as jnp
import gymnasium
import haiku as hk
import optax
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func_pi(S, is_training):
logits = hk.Linear(env.action_space.n, w_init=jnp.zeros)
return {'logits': logits(S)}
# function approximators
pi = coax.Policy(func_pi, env)
# experience tracer
tracer = coax.reward_tracing.MonteCarlo(gamma=0.9)
# updater
vanilla_pg = coax.policy_objectives.VanillaPG(pi, optimizer=optax.adam(0.01))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
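            # REINFORCE: the Monte Carlo return Rn is used directly as the advantage estimate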
vanilla_pg.update(transition_batch, Adv=transition_batch.Rn)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# print individual action probabilities
params = pi.dist_params(s)
propensities = jax.nn.softmax(params['logits'])
for i, p in enumerate(propensities):
print(" π({:s}|s) = {:.3f}".format('LDRU'[i], p))
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 1,820 | 20.939759 | 77 | py |
null | coax-main/doc/examples/frozen_lake/run_all.sh | #!/bin/bash
trap "kill 0" EXIT
gio trash -f ./data
for f in $(ls ./*.py); do
JAX_PLATFORM_NAME=cpu python3 $f &
done
wait
| 129 | 10.818182 | 38 | sh |
null | coax-main/doc/examples/frozen_lake/sarsa.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func(S, A, is_training):
value = hk.Sequential((hk.Flatten(), hk.Linear(1, w_init=jnp.zeros), jnp.ravel))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return value(X)
# function approximator
q = coax.Q(func, env)
pi = coax.BoltzmannPolicy(q, temperature=0.1)
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updater
sarsa = coax.td_learning.Sarsa(q, optimizer=optax.adam(0.02))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
sarsa.update(transition_batch)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# print individual state-action values
for i, q_ in enumerate(q(s)):
print(" q(s,{:s}) = {:.3f}".format('LDRU'[i], q_))
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 1,795 | 20.638554 | 84 | py |
null | coax-main/doc/examples/frozen_lake/stochastic_double_qlearning.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
from matplotlib import pyplot as plt
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func(S, A, is_training):
logits = hk.Sequential((hk.Flatten(), hk.Linear(20, w_init=jnp.zeros)))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return {'logits': logits(X)}
# function approximator
q = coax.StochasticQ(func, env, value_range=(-1, 2), num_bins=20)
pi = coax.BoltzmannPolicy(q, temperature=0.1)
# target network
q_targ = q.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updater
qlearning = coax.td_learning.DoubleQLearning(q, q_targ=q_targ, optimizer=optax.adam(0.02))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
qlearning.update(transition_batch)
# sync target network
q_targ.soft_update(q, tau=0.1)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# create sub-plots, one for each action
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16, 2))
action_names = ('Left', 'Down', 'Right', 'Up')
for action_name, ax, dist_params in zip(action_names, axes, q.dist_params(s)):
p = jax.nn.softmax(dist_params['logits'])
z = q.proba_dist.atoms
# plot histogram for this specific state-action pair
ax.bar(z, p, width=(z[1] - z[0]) * 0.9)
ax.set_title(f"a = {action_name}")
ax.set_ylim(0, 1)
ax.set_xlabel('Q(s, a)')
ax.set_yticks([])
plt.show()
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 2,469 | 22.75 | 90 | py |
null | coax-main/doc/examples/frozen_lake/stochastic_expected_sarsa.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
from matplotlib import pyplot as plt
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func(S, A, is_training):
logits = hk.Sequential((hk.Flatten(), hk.Linear(20, w_init=jnp.zeros)))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return {'logits': logits(X)}
# function approximator
q = coax.StochasticQ(func, env, value_range=(-1, 2), num_bins=20)
pi = coax.BoltzmannPolicy(q, temperature=0.1)
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updater
esarsa = coax.td_learning.ExpectedSarsa(q, pi, optimizer=optax.adam(0.02))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
esarsa.update(transition_batch)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# create sub-plots, one for each action
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16, 2))
action_names = ('Left', 'Down', 'Right', 'Up')
for action_name, ax, dist_params in zip(action_names, axes, q.dist_params(s)):
p = jax.nn.softmax(dist_params['logits'])
z = q.proba_dist.atoms
# plot histogram for this specific state-action pair
ax.bar(z, p, width=(z[1] - z[0]) * 0.9)
ax.set_title(f"a = {action_name}")
ax.set_ylim(0, 1)
ax.set_xlabel('Q(s, a)')
ax.set_yticks([])
plt.show()
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 2,335 | 23.082474 | 82 | py |
null | coax-main/doc/examples/frozen_lake/stochastic_qlearning.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
from matplotlib import pyplot as plt
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func(S, A, is_training):
logits = hk.Sequential((hk.Flatten(), hk.Linear(20, w_init=jnp.zeros)))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return {'logits': logits(X)}
# function approximator
q = coax.StochasticQ(func, env, value_range=(-1, 2), num_bins=20)
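# StochasticQ models a categorical distribution over returns, using 20 bins on the interval [-1, 2]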
pi = coax.BoltzmannPolicy(q, temperature=0.1)
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updater
qlearning = coax.td_learning.QLearning(q, optimizer=optax.adam(0.02))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
            qlearning.update(transition_batch)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# create sub-plots, one for each action
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16, 2))
action_names = ('Left', 'Down', 'Right', 'Up')
for action_name, ax, dist_params in zip(action_names, axes, q.dist_params(s)):
p = jax.nn.softmax(dist_params['logits'])
z = q.proba_dist.atoms
# plot histogram for this specific state-action pair
ax.bar(z, p, width=(z[1] - z[0]) * 0.9)
ax.set_title(f"a = {action_name}")
ax.set_ylim(0, 1)
ax.set_xlabel('Q(s, a)')
ax.set_yticks([])
plt.show()
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 2,325 | 22.979381 | 82 | py |
null | coax-main/doc/examples/frozen_lake/stochastic_sarsa.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
from matplotlib import pyplot as plt
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func(S, A, is_training):
logits = hk.Sequential((hk.Flatten(), hk.Linear(20, w_init=jnp.zeros)))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return {'logits': logits(X)}
# function approximator
q = coax.StochasticQ(func, env, value_range=(-1, 2), num_bins=20)
pi = coax.BoltzmannPolicy(q, temperature=0.1)
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
# updater
sarsa = coax.td_learning.Sarsa(q, optimizer=optax.adam(0.02))
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
transition_batch = tracer.pop()
sarsa.update(transition_batch)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# create sub-plots, one for each action
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(16, 2))
action_names = ('Left', 'Down', 'Right', 'Up')
for action_name, ax, dist_params in zip(action_names, axes, q.dist_params(s)):
p = jax.nn.softmax(dist_params['logits'])
z = q.proba_dist.atoms
# plot histogram for this specific state-action pair
ax.bar(z, p, width=(z[1] - z[0]) * 0.9)
ax.set_title(f"a = {action_name}")
ax.set_ylim(0, 1)
ax.set_xlabel('Q(s, a)')
ax.set_yticks([])
plt.show()
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 2,321 | 22.938144 | 82 | py |
null | coax-main/doc/examples/frozen_lake/td3.py | import coax
import gymnasium
import jax
import jax.numpy as jnp
import haiku as hk
import optax
# the MDP
env = gymnasium.make('FrozenLakeNonSlippery-v0')
env = coax.wrappers.TrainMonitor(env)
def func_pi(S, is_training):
logits = hk.Linear(env.action_space.n, w_init=jnp.zeros)
return {'logits': logits(S)}
def func_q(S, A, is_training):
value = hk.Sequential((hk.Flatten(), hk.Linear(1, w_init=jnp.zeros), jnp.ravel))
X = jax.vmap(jnp.kron)(S, A) # S and A are one-hot encoded
return value(X)
# function approximators
pi = coax.Policy(func_pi, env)
q1 = coax.Q(func_q, env)
q2 = coax.Q(func_q, env)
# target networks
q1_targ = q1.copy()
q2_targ = q2.copy()
pi_targ = pi.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=1, gamma=0.9)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=128)
# updaters
determ_pg = coax.policy_objectives.DeterministicPG(pi, q1_targ, optimizer=optax.adam(0.02))
qlearning1 = coax.td_learning.ClippedDoubleQLearning(
q1, q_targ_list=[q1_targ, q2_targ], optimizer=optax.adam(0.02))
qlearning2 = coax.td_learning.ClippedDoubleQLearning(
q2, q_targ_list=[q1_targ, q2_targ], optimizer=optax.adam(0.02))
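# (clipped double q-learning: targets bootstrap from the minimum over the two target q-functions)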
# train
for ep in range(500):
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# small incentive to keep moving
if jnp.array_equal(s_next, s):
r = -0.01
# update
tracer.add(s, a, r, done or truncated)
while tracer:
buffer.add(tracer.pop())
if len(buffer) == buffer.capacity:
transition_batch = buffer.sample(batch_size=16)
# flip a coin to decide which of the q-functions to update
qlearning = qlearning1 if jax.random.bernoulli(q1.rng) else qlearning2
qlearning.update(transition_batch)
# delayed policy updates
if env.T % 2 == 0:
determ_pg.update(transition_batch)
# sync copies
q1_targ.soft_update(q1, tau=0.1)
q2_targ.soft_update(q2, tau=0.1)
pi_targ.soft_update(pi, tau=0.1)
if done or truncated:
break
s = s_next
# early stopping
if env.avg_G > env.spec.reward_threshold:
break
# run env one more time to render
s, info = env.reset()
env.render()
for t in range(env.spec.max_episode_steps):
# print individual action probabilities
params = pi.dist_params(s)
propensities = jax.nn.softmax(params['logits'])
for i, p in enumerate(propensities):
print(" π({:s}|s) = {:.3f}".format('LDRU'[i], p))
for i, q_ in enumerate(q1(s)):
print(" q1(s,{:s}) = {:.3f}".format('LDRU'[i], q_))
for i, q_ in enumerate(q2(s)):
print(" q2(s,{:s}) = {:.3f}".format('LDRU'[i], q_))
a = pi.mode(s)
s, r, done, truncated, info = env.step(a)
env.render()
if done or truncated:
break
if env.avg_G < env.spec.reward_threshold:
name = globals().get('__file__', 'this script')
raise RuntimeError(f"{name} failed to reach env.spec.reward_threshold")
| 3,176 | 25.256198 | 91 | py |
null | coax-main/doc/examples/linear_regression/haiku.py | import jax
import jax.numpy as jnp
import haiku as hk
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
# create our dataset
X, y = make_regression(n_features=3)
X, X_test, y, y_test = train_test_split(X, y)
# params are defined *implicitly* in haiku
def forward(X):
lin = hk.Linear(1)
return lin(X).ravel()
# a transformed haiku function consists of an 'init' and an 'apply' function
forward = hk.without_apply_rng(hk.transform(forward))
# initialize parameters
rng = jax.random.PRNGKey(seed=13)
params = forward.init(rng, X)
# redefine 'forward' as the 'apply' function
forward = forward.apply
def loss_fn(params, X, y):
err = forward(params, X) - y
return jnp.mean(jnp.square(err)) # mse
grad_fn = jax.grad(loss_fn)
def update(params, grads):
    return jax.tree_util.tree_map(lambda p, g: p - 0.05 * g, params, grads)
# the main training loop
for _ in range(50):
loss = loss_fn(params, X_test, y_test)
print(loss)
grads = grad_fn(params, X, y)
params = update(params, grads)
# file: coax-main/doc/examples/linear_regression/jax.py
import jax
import jax.numpy as jnp
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
# create our dataset
X, y = make_regression(n_features=3)
X, X_test, y, y_test = train_test_split(X, y)
# model weights
params = {
'w': jnp.zeros(X.shape[1:]),
'b': 0.
}
def forward(params, X):
return jnp.dot(X, params['w']) + params['b']
def loss_fn(params, X, y):
err = forward(params, X) - y
return jnp.mean(jnp.square(err)) # mse
grad_fn = jax.grad(loss_fn)
def update(params, grads):
    return jax.tree_util.tree_map(lambda p, g: p - 0.05 * g, params, grads)
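# note: jax.grad differentiates loss_fn with respect to its first argument
# (params), and the update step is plain gradient descent with a fixed
# learning rate of 0.05, applied leaf by leaf over the params pytree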
# the main training loop
for _ in range(50):
loss = loss_fn(params, X_test, y_test)
print(loss)
grads = grad_fn(params, X, y)
params = update(params, grads)
# file: coax-main/doc/examples/pendulum/ddpg.py
import gymnasium
import jax
import coax
import haiku as hk
import jax.numpy as jnp
from numpy import prod
import optax
# the name of this script
name = 'ddpg'
# the Pendulum MDP
env = gymnasium.make('Pendulum-v1', render_mode='rgb_array')
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func_pi(S, is_training):
seq = hk.Sequential((
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(prod(env.action_space.shape), w_init=jnp.zeros),
hk.Reshape(env.action_space.shape),
))
mu = seq(S)
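    # note: the log-variance is pinned to a large negative constant, so samples
    # from this Gaussian policy are effectively deterministic, which is how the
    # script realizes DDPG's deterministic actor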
return {'mu': mu, 'logvar': jnp.full_like(mu, -10)} # (almost) deterministic
def func_q(S, A, is_training):
seq = hk.Sequential((
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(1, w_init=jnp.zeros), jnp.ravel
))
X = jnp.concatenate((S, A), axis=-1)
return seq(X)
# main function approximators
pi = coax.Policy(func_pi, env)
q = coax.Q(func_q, env, action_preprocessor=pi.proba_dist.preprocess_variate)
# target network
q_targ = q.copy()
pi_targ = pi.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=5, gamma=0.9)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=25000)
# updaters
qlearning = coax.td_learning.QLearning(
q, pi_targ, q_targ, loss_function=coax.value_losses.mse, optimizer=optax.adam(1e-3))
determ_pg = coax.policy_objectives.DeterministicPG(pi, q_targ, optimizer=optax.adam(1e-4))
# action noise
noise = coax.utils.OrnsteinUhlenbeckNoise(mu=0., sigma=0.2, theta=0.15)
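# note: Ornstein-Uhlenbeck noise produces temporally correlated perturbations,
# the exploration scheme used in the original DDPG paper; it is added to the
# policy's action at acting time (see the training loop below)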
# train
while env.T < 1000000:
s, info = env.reset()
noise.reset()
noise.sigma *= 0.99 # slowly decrease noise scale
for t in range(env.spec.max_episode_steps):
a = noise(pi(s))
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) >= 5000:
transition_batch = buffer.sample(batch_size=128)
metrics = {'OrnsteinUhlenbeckNoise/sigma': noise.sigma}
metrics.update(determ_pg.update(transition_batch))
metrics.update(qlearning.update(transition_batch))
env.record_metrics(metrics)
# sync target networks
q_targ.soft_update(q, tau=0.001)
pi_targ.soft_update(pi, tau=0.001)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 5000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, filepath=f"./data/gifs/{name}/T{T:08d}.gif")
# file: coax-main/doc/examples/pendulum/dsac.py
import gymnasium
import jax
import coax
import haiku as hk
import jax.numpy as jnp
from numpy import prod
import optax
# the name of this script
name = 'dsac'
# the Pendulum MDP
env = gymnasium.make('Pendulum-v1', render_mode='rgb_array')
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
quantile_embedding_dim = 64
layer_size = 256
num_quantiles = 32
def func_pi(S, is_training):
seq = hk.Sequential((
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(prod(env.action_space.shape) * 2, w_init=jnp.zeros),
hk.Reshape((*env.action_space.shape, 2)),
))
x = seq(S)
mu, logvar = x[..., 0], x[..., 1]
return {'mu': mu, 'logvar': logvar}
def quantile_net(x, quantile_fractions):
quantiles_emb = coax.utils.quantile_cos_embedding(
quantile_fractions, quantile_embedding_dim)
quantiles_emb = hk.Linear(x.shape[-1])(quantiles_emb)
quantiles_emb = jax.nn.relu(quantiles_emb)
x = x[:, None, :] * quantiles_emb
x = hk.Linear(layer_size)(x)
x = jax.nn.relu(x)
return x
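# note: quantile_net follows an IQN-style recipe: each sampled quantile
# fraction is embedded with cosine features, projected by a linear layer and
# multiplied elementwise with the state-action embedding, so the value estimate
# is conditioned on the quantile being queried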
def func_q(S, A, is_training):
encoder = hk.Sequential((
hk.Flatten(),
hk.Linear(layer_size),
jax.nn.relu
))
quantile_fractions = coax.utils.quantiles_uniform(rng=hk.next_rng_key(),
batch_size=S.shape[0],
num_quantiles=num_quantiles)
X = jnp.concatenate((S, A), axis=-1)
x = encoder(X)
quantile_x = quantile_net(x, quantile_fractions=quantile_fractions)
quantile_values = hk.Linear(1)(quantile_x)
return {'values': quantile_values.squeeze(axis=-1),
'quantile_fractions': quantile_fractions}
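# note: instead of a single expected value, func_q returns one value per
# sampled quantile fraction together with the fractions themselves, so the
# critic models a full distribution over returns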
# main function approximators
pi = coax.Policy(func_pi, env)
q1 = coax.StochasticQ(func_q, env, action_preprocessor=pi.proba_dist.preprocess_variate,
value_range=None, num_bins=num_quantiles)
q2 = coax.StochasticQ(func_q, env, action_preprocessor=pi.proba_dist.preprocess_variate,
value_range=None, num_bins=num_quantiles)
# target network
q1_targ = q1.copy()
q2_targ = q2.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=5, gamma=0.9, record_extra_info=True)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=50000)
alpha = 0.2
policy_regularizer = coax.regularizers.NStepEntropyRegularizer(pi,
beta=alpha / tracer.n,
gamma=tracer.gamma,
n=[tracer.n])
# updaters (unlike TD3, the q-functions are updated using actions sampled from the current pi)
qlearning1 = coax.td_learning.SoftClippedDoubleQLearning(
q1, pi_targ_list=[pi], q_targ_list=[q1_targ, q2_targ],
loss_function=coax.value_losses.mse, optimizer=optax.adam(3e-4),
policy_regularizer=policy_regularizer)
qlearning2 = coax.td_learning.SoftClippedDoubleQLearning(
q2, pi_targ_list=[pi], q_targ_list=[q1_targ, q2_targ],
loss_function=coax.value_losses.mse, optimizer=optax.adam(3e-4),
policy_regularizer=policy_regularizer)
soft_pg = coax.policy_objectives.SoftPG(
    pi, [q1_targ, q2_targ], optimizer=optax.adam(1e-3),
    regularizer=coax.regularizers.NStepEntropyRegularizer(
        pi, beta=alpha / tracer.n, gamma=tracer.gamma, n=jnp.arange(tracer.n)))
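# note: the entropy regularizers add an entropy bonus (scaled by alpha) to both
# the critic targets and the policy objective, which together with the soft
# policy gradient gives this distributional-SAC example its maximum-entropy
# objective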
# train
while env.T < 1000000:
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a = pi(s)
s_next, r, done, truncated, info = env.step(a)
# trace rewards and add transition to replay buffer
tracer.add(s, a, r, done)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) >= 5000:
transition_batch = buffer.sample(batch_size=256)
# init metrics dict
metrics = {}
# flip a coin to decide which of the q-functions to update
qlearning = qlearning1 if jax.random.bernoulli(q1.rng) else qlearning2
metrics.update(qlearning.update(transition_batch))
# delayed policy updates
if env.T >= 7500 and env.T % 4 == 0:
metrics.update(soft_pg.update(transition_batch))
env.record_metrics(metrics)
# sync target networks
q1_targ.soft_update(q1, tau=0.005)
q2_targ.soft_update(q2, tau=0.005)
if done or truncated:
break
s = s_next
    # generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 5000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, filepath=f"./data/gifs/{name}/T{T:08d}.gif")
# file: coax-main/doc/examples/pendulum/ppo.py
import gymnasium
import jax
import jax.numpy as jnp
import coax
import haiku as hk
from numpy import prod
import optax
# the name of this script
name = 'ppo'
# the Pendulum MDP
env = gymnasium.make('Pendulum-v1', render_mode='rgb_array')
env = coax.wrappers.TrainMonitor(env, name=name, tensorboard_dir=f"./data/tensorboard/{name}")
def func_pi(S, is_training):
shared = hk.Sequential((
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
))
mu = hk.Sequential((
shared,
hk.Linear(8), jax.nn.relu,
hk.Linear(prod(env.action_space.shape), w_init=jnp.zeros),
hk.Reshape(env.action_space.shape),
))
logvar = hk.Sequential((
shared,
hk.Linear(8), jax.nn.relu,
hk.Linear(prod(env.action_space.shape), w_init=jnp.zeros),
hk.Reshape(env.action_space.shape),
))
return {'mu': mu(S), 'logvar': logvar(S)}
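# note: the policy head outputs both a mean and a log-variance per action
# dimension, so unlike the DDPG example the Gaussian policy stays genuinely
# stochastic; PPO relies on that stochasticity for exploration and for the
# probability ratios in its objective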
def func_v(S, is_training):
seq = hk.Sequential((
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(8), jax.nn.relu,
hk.Linear(1, w_init=jnp.zeros), jnp.ravel
))
return seq(S)
# define function approximators
pi = coax.Policy(func_pi, env)
v = coax.V(func_v, env)
# target network
pi_targ = pi.copy()
# experience tracer
tracer = coax.reward_tracing.NStep(n=5, gamma=0.9)
buffer = coax.experience_replay.SimpleReplayBuffer(capacity=512)
# policy regularizer (avoid premature exploitation)
policy_reg = coax.regularizers.EntropyRegularizer(pi, beta=0.01)
# updaters
simpletd = coax.td_learning.SimpleTD(v, optimizer=optax.adam(1e-3))
ppo_clip = coax.policy_objectives.PPOClip(pi, regularizer=policy_reg, optimizer=optax.adam(1e-4))
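# note: PPOClip maximizes the clipped surrogate objective; the log-propensities
# recorded by the behavior policy pi_targ during rollouts supply the
# importance-sampling ratios, while the td errors from SimpleTD serve as the
# advantage estimates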
# train
while env.T < 1000000:
s, info = env.reset()
for t in range(env.spec.max_episode_steps):
a, logp = pi_targ(s, return_logp=True)
s_next, r, done, truncated, info = env.step(a)
# trace rewards
tracer.add(s, a, r, done or truncated, logp)
while tracer:
buffer.add(tracer.pop())
# learn
if len(buffer) >= buffer.capacity:
for _ in range(int(4 * buffer.capacity / 32)): # 4 passes per round
transition_batch = buffer.sample(batch_size=32)
metrics_v, td_error = simpletd.update(transition_batch, return_td_error=True)
metrics_pi = ppo_clip.update(transition_batch, td_error)
env.record_metrics(metrics_v)
env.record_metrics(metrics_pi)
buffer.clear()
pi_targ.soft_update(pi, tau=0.1)
if done or truncated:
break
s = s_next
# generate an animated GIF to see what's going on
if env.period(name='generate_gif', T_period=10000) and env.T > 5000:
T = env.T - env.T % 10000 # round to 10000s
coax.utils.generate_gif(
env=env, policy=pi, filepath=f"./data/gifs/{name}/T{T:08d}.gif")