response | instruction |
---|---|
Less-than function for backends that do not override <. | def lt(*args, **kwargs):
"""Less-than function for backends that do not override <."""
return backend()['lt'](*args, **kwargs) |
Identity on the forward pass but 0 (no gradient) on the backward pass. | def stop_gradient(*args, **kwargs):
"""Identity on the forward pass but 0 (no gradient) on the backward pass."""
return backend()['stop_gradient'](*args, **kwargs) |
Just-In-Time compiles the given function for use on accelerators. | def jit(*args, **kwargs):
"""Just-In-Time compiles the given function for use on accelerators."""
global _disable_jit
if _disable_jit:
return args[0] # jit(f, **unused_now_jit_kwargs) = f
return backend()['jit'](*args, **kwargs) |
Disables JIT-compilation; helpful for debugging. | def disable_jit():
"""Disables JIT-compilation; helpful for debugging."""
global _disable_jit
_disable_jit = True |
Vectorizes the specified function (returns a function). | def vmap(*args, **kwargs):
"""Vectorizes the specified function (returns a function)."""
return backend()['vmap'](*args, **kwargs) |
Computes the gradient of the specified function (returns a function). | def grad(*args, **kwargs):
"""Computes the gradient of the specified function (returns a function)."""
return backend()['grad'](*args, **kwargs) |
Computes the gradient of the specified function together with the value. | def value_and_grad(*args, **kwargs):
"""Computes the gradient of the specified function together with the value."""
if 'value_and_grad' in backend():
return backend()['value_and_grad'](*args, **kwargs)
grad_fn = grad(*args, **kwargs)
fn = args[0]
has_aux = False
if 'has_aux' in kwargs:
has_aux = kwargs['has_aux']
if not has_aux:
def val_and_grad(*fn_args, **fn_kwargs):
return fn(*fn_args, **fn_kwargs), grad_fn(*fn_args, **fn_kwargs)
return val_and_grad
def val_and_grad_aux(*fn_args, **fn_kwargs):
g, aux = grad_fn(*fn_args, **fn_kwargs)
res, _ = fn(*fn_args, **fn_kwargs)
return (res, aux), g
return val_and_grad_aux |
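A minimal usage sketch of the fallback above, assuming the default JAX backend and the standard `trax.fastmath` import path; the quadratic loss is purely illustrative:

import jax.numpy as jnp
from trax import fastmath

def loss(w):
  return jnp.sum((w - 3.0) ** 2)   # gradient is 2 * (w - 3)

# Returns a function computing both the value and the gradient.
value, gradient = fastmath.value_and_grad(loss)(jnp.array([1.0, 2.0]))
# value -> 5.0, gradient -> [-4. -2.]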
Computes the vector-Jacobian product for the specified function. | def vjp(*args, **kwargs):
"""Computes the vector-Jacobian product for the specified function."""
return backend()['vjp'](*args, **kwargs) |
Set a custom gradient computation (override the default) for a function. | def custom_grad(*args, **kwargs):
"""Set a custom gradient computation (override the default) for a function."""
return backend()['custom_grad'](*args, **kwargs) |
Set a custom vjp computation (override the default) for a function. | def custom_vjp(f, f_fwd, f_bwd, nondiff_argnums=()):
"""Set a custom vjp computation (override the default) for a function."""
# Call backend custom_vjp if it exists.
# TODO(lukaszkaiser): unify the APIs and remove nondiff_argnums altogether.
if 'custom_vjp' in backend():
return backend()['custom_vjp'](f, f_fwd, f_bwd)
# Check that nondiff_argnums is (0, 1, ..., N) for some N.
# Currently we only support nondiff_argnums at the front.
counter = -1
for i in nondiff_argnums:
counter += 1
if i != counter:
raise ValueError('Currently we only support custom_vjps with all nondiff'
'_argnums up front, like (0,) or (0, 1) but not (1,) or'
' (1, 2). Found: %s' % str(nondiff_argnums))
# Use custom_grad.
if counter == -1: # no non-diff args
def f_vjp(*args):
out, residual = f_fwd(*args)
def vjpfn(g):
return f_bwd(residual, g)
return out, vjpfn
return backend()['custom_grad'](f_vjp, f)
# Handle non-diff args by closure.
def f_joint(*args):
"""This function takes all args, first counter+1 are non-diff ones."""
nondiff_args = list(args[:counter+1])
def f_diff(*diff_args): # Takes only diff args, will define custom grad.
args = nondiff_args + list(diff_args)
return f(*args)
def f_vjp(*diff_args): # Custom VJP for diff args.
args = nondiff_args + list(diff_args)
out, residual = f_fwd(*args)
def vjpfn(g):
bwd_args = [residual, g]
res = f_bwd(*bwd_args)
return res[counter+1:]
return out, vjpfn
# This is the function taking only diff args with custom vjp.
f_diff_vjp = backend()['custom_grad'](f_vjp, f_diff)
# Call it on the diff args.
return f_diff_vjp(*args[counter+1:])
return f_joint |
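A hedged sketch of using `custom_vjp`, assuming the active backend wires it to native custom-VJP machinery (e.g. `jax.custom_vjp`); the squaring function and its hand-written backward rule are illustrative only:

import jax.numpy as jnp
from trax import fastmath

def f(x):
  return x * x

def f_fwd(x):
  return f(x), x                  # save x as the residual for the backward pass

def f_bwd(residual, g):
  return (2.0 * residual * g,)    # d/dx (x**2) = 2x

square = fastmath.custom_vjp(f, f_fwd, f_bwd)
grads = fastmath.grad(lambda x: jnp.sum(square(x)))(jnp.array([3.0]))
# grads -> [6.]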
Parallel-map to apply a function on multiple accelerators in parallel. | def pmap(*args, **kwargs):
"""Parallel-map to apply a function on multiple accelerators in parallel."""
return backend()['pmap'](*args, **kwargs) |
Parallel-sum to use within a pmap'd function for aggregation. | def psum(*args, **kwargs):
"""Parallel-sum to use within a pmap'd function for aggregation."""
return backend()['psum'](*args, **kwargs) |
Evaluates a function on parameter signatures only, returning output signatures. | def abstract_eval(*args, **kwargs):
"""Evaluates a function on parameter signatures only, returning output signatures."""
return backend()['abstract_eval'](*args, **kwargs) |
Convert a tf.data.Dataset to a stream of numpy arrays. | def dataset_as_numpy(*args, **kwargs):
"""Convert a tf.data.Dataset to a stream of numpy arrays."""
if 'dataset_as_numpy' in backend():
return backend()['dataset_as_numpy'](*args, **kwargs)
return JAX_BACKEND['dataset_as_numpy'](*args, **kwargs) |
Return the number of accelerators (GPUs or TPUs) in all hosts. | def global_device_count(*args, **kwargs):
"""Return the number of accelerators (GPUs or TPUs) in all hosts."""
return backend()['global_device_count'](*args, **kwargs) |
Return the number of accelerators (GPUs or TPUs) available on this host. | def local_device_count(*args, **kwargs):
"""Return the number of accelerators (GPUs or TPUs) available on this host."""
return backend()['local_device_count'](*args, **kwargs) |
Sets the default backend to use in Trax. | def set_backend(name):
"""Sets the default backend to use in Trax."""
if name:
_assert_valid_backend_name(name)
global default_backend
default_backend = name |
Returns the backend used to provide fastmath ops ('tf' or 'jax'). | def backend(name='jax'):
"""Returns the backend used to provide fastmath ops ('tf' or 'jax')."""
if override_backend:
return _get_backend_from_string(override_backend)
if default_backend:
return _get_backend_from_string(default_backend)
if isinstance(name, Backend):
return _backend_dict[name]
# name is a string.
return _get_backend_from_string(name) |
Call fastmath functions with a specified backend. | def use_backend(name):
"""Call fastmath functions with a specified backend."""
if isinstance(name, Backend):
name = name.value
_assert_valid_backend_name(name)
global override_backend
prev_name_or_backend = override_backend
override_backend = name
# Run the decorated function in try-finally in case it throws, e.g. for tests.
try:
yield
finally:
override_backend = prev_name_or_backend |
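`use_backend` is intended to be used as a context manager (the `yield` above is wrapped by a `contextlib.contextmanager`-style decorator in the library); a small sketch, assuming the 'tensorflow-numpy' backend is registered:

from trax import fastmath

with fastmath.use_backend('tensorflow-numpy'):
  print(fastmath.backend_name())   # -> 'tensorflow-numpy'
print(fastmath.backend_name())     # back to the previous/default backend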
Returns the name of the backend currently in use ('tf' or 'jax'). | def backend_name():
"""Returns the name of the backend currently in use ('tf' or 'jax')."""
return backend()['name'] |
Returns a function that evaluates `f` given input shapes and dtypes.
It transforms function `f` to a function that performs the same computation as
`f` but only on shapes and dtypes (a.k.a. shape inference).
Args:
f: the function to be transformed.
Returns:
A function whose input arguments can be either the same as `f`'s or only
their shapes/dtypes represented by `ShapeDtype`, and whose return values are
`ShapeDtype`s with the same nested structure as `f`'s return values. | def tf_abstract_eval(f):
"""Returns a function that evaluates `f` given input shapes and dtypes.
It transforms function `f` to a function that performs the same computation as
`f` but only on shapes and dtypes (a.k.a. shape inference).
Args:
f: the function to be transformed.
Returns:
A function whose input arguments can be either the same as `f`'s or only
their shapes/dtypes represented by `ShapeDtype`, and whose return values are
`ShapeDtype`s with the same nested structure as `f`'s return values.
"""
f_shape = tf_np_extensions.eval_on_shapes(f)
def from_shape_type(x):
if isinstance(x, ShapeDtype):
return tf.TensorSpec(x.shape, x.dtype)
else:
return x
def to_shape_type(x): # pylint: disable=missing-docstring
# TODO(wangpeng): handle partial output shapes using `tf.shape`.
def to_numpy_shape(s):
if s.is_fully_defined():
return tuple(s.as_list())
else:
raise ValueError("The output shapes (%s) of the dry-run'ed function are"
' not fully defined.' % s)
def to_numpy_dtype(t):
return np.dtype(t.as_numpy_dtype)
if isinstance(x, tf.TensorSpec):
return ShapeDtype(to_numpy_shape(x.shape), to_numpy_dtype(x.dtype))
else:
return x
def f_return(*args):
args = tf.nest.map_structure(from_shape_type, args)
res = f_shape(*args)
return tf.nest.map_structure(to_shape_type, res)
return f_return |
Sample uniform random values in [minval, maxval) with given shape/dtype.
Args:
key: a PRNGKey used as the random key.
shape: a tuple of nonnegative integers representing the shape.
minval: int or array of ints broadcast-compatible with ``shape``, a minimum
(inclusive) value for the range.
maxval: int or array of ints broadcast-compatible with ``shape``, a maximum
(exclusive) value for the range.
dtype: optional, an int dtype for the returned values (default int32).
Returns:
A random array with the specified shape and dtype. | def tf_randint(key, shape, minval, maxval, dtype=np.int32):
"""Sample uniform random values in [minval, maxval) with given shape/dtype.
Args:
key: a PRNGKey used as the random key.
shape: a tuple of nonnegative integers representing the shape.
minval: int or array of ints broadcast-compatible with ``shape``, a minimum
(inclusive) value for the range.
maxval: int or array of ints broadcast-compatible with ``shape``, a maximum
(exclusive) value for the range.
dtype: optional, an int dtype for the returned values (default int32).
Returns:
A random array with the specified shape and dtype.
"""
return tf_np_extensions.uniform(key, shape, minval=minval, maxval=maxval,
dtype=dtype) |
Grad with support for argnums. | def _tf_grad(f, **kwargs):
"""Grad with support for argnums."""
argnums = kwargs.pop('argnums', 0)
if argnums != 0:
def g(*args, **kwargs):
args = list(args)
args[0], args[argnums] = args[argnums], args[0]
return f(*args, **kwargs)
else:
g = f
grad_g = tf_np_extensions.grad(g, **kwargs)
if argnums == 0:
return grad_g
def grad_f(*args, **kwargs):
args = list(args)
args[0], args[argnums] = args[argnums], args[0]
return grad_g(*args, **kwargs)
return grad_f |
Equivalent of jax.random.fold_in. | def _fold_in(rng, d):
"""Equivalent of jax.random.fold_in."""
# TODO(lukaszkaiser): verify that this function has good randomness
# properties or switch to an implementation equivalent to JAX.
_, rng = tf_np_extensions.split(rng + tf_np.sum(d).astype(tf_np.int64), 2)
return rng |
Computes the mean of a distributed value ``x``.
Args:
n_devices: Number of devices.
x: Distributed array.
axis: Axis along which to compute means; can only be ``0`` or ``None``.
Returns:
A local array. | def mean_or_pmean(n_devices, x, axis=None):
"""Computes the mean of a distributed value ``x``.
Args:
n_devices: Number of devices.
x: Distributed array.
axis: Axis along which to compute means; can only be ``0`` or ``None``.
Returns:
A local array.
"""
if fastmath.backend_name() == 'tensorflow-numpy' and n_devices > 1:
if axis not in (None, 0):
raise ValueError('axis can only be None or 0')
x = fastmath.pmap(fastmath.psum)(x)[0] / n_devices
if axis is None:
x = jnp.mean(x)
return x
else:
return jnp.mean(x, axis=axis) |
Returns a JIT-compiled forward function running on ``n_devices``. | def jit_forward(forward, n_devices, do_mean=True):
"""Returns a JIT-compiled forward function running on ``n_devices``."""
model_predict = _accelerate(forward, n_devices)
# n_devices == 0 => CPU
if n_devices < 2:
return model_predict
def predict(x, weights, state, rng):
"""Predict function JIT-compiled and parallelized as requested."""
res, state = model_predict(
reshape_by_device(x, n_devices),
weights,
state,
jnp.stack(fastmath.random.split(rng, n_devices)))
res = _combine_devices(res)
if do_mean:
return fastmath.nested_map(
lambda y: mean_or_pmean(n_devices, y, axis=0), res), state
else:
return res, state
return predict |
Combines multi-device tensors into a single batch. | def _combine_devices(x_tuple):
"""Combines multi-device tensors into a single batch."""
def f(x):
if len(x.shape) < 2:
return x # No extra batch dimension: use devices as batch, so return.
batch_size = x.shape[0] * x.shape[1]
return jnp.reshape(x, [batch_size] + list(x.shape[2:]))
return fastmath.nested_map(f, x_tuple) |
Returns an accelerated version of ``f`` running on ``n_devices``. | def _accelerate(f, n_devices):
"""Returns an accelerated version of ``f`` running on ``n_devices``."""
if n_devices == 0: # no accelerators - run on CPU
return fastmath.jit(f, device=jax.local_devices(backend='cpu')[0])
if n_devices == 1:
return fastmath.jit(f)
return fastmath.pmap(f, axis_name='batch') |
Reshapes possibly nested ``x`` into a shape ``(n_devices, ...)``. | def reshape_by_device(x, n_devices, pure_np=False):
"""Reshapes possibly nested ``x`` into a shape ``(n_devices, ...)``."""
def f(x):
x_shape = list(x.shape)
batch_size = x_shape[0]
batch_size_per_device = batch_size // n_devices
if batch_size_per_device * n_devices != batch_size:
raise ValueError(f'Number of devices ({n_devices}) does not evenly '
f'divide batch size ({batch_size}).')
new_shape_prefix = [n_devices, batch_size_per_device]
if pure_np:
return np.reshape(x, new_shape_prefix + x_shape[1:])
else:
return jnp.reshape(x, new_shape_prefix + x_shape[1:])
return fastmath.nested_map(f, x) |
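A quick shape check of the helper above, assuming it is importable from where it is defined; with `pure_np=True` only NumPy is needed:

import numpy as np

x = np.arange(24).reshape(8, 3)                      # batch of 8 examples
y = reshape_by_device(x, n_devices=2, pure_np=True)
print(y.shape)                                        # (2, 4, 3): 4 examples per device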
Replicates/broadcasts ``x`` for ``n_devices``. | def for_n_devices(x, n_devices):
"""Replicates/broadcasts ``x`` for ``n_devices``."""
def f(x):
if n_devices > 1 and fastmath.is_backend(fastmath.Backend.JAX):
return jax.device_put_replicated(x, jax.local_devices())
elif n_devices > 1:
return jnp.broadcast_to(x, (n_devices,) + jnp.asarray(x).shape)
else:
return x
return fastmath.nested_map(f, x) |
Puts ``x`` in CPU memory in JAX. | def on_cpu(x):
"""Puts ``x`` in CPU memory in JAX."""
if fastmath.is_backend(fastmath.Backend.JAX):
return jax.device_put(x, jax.local_devices(backend='cpu')[0])
else:
return x |
Puts ``x`` in (single) accelerator memory in JAX. | def on_accelerator(x):
"""Puts ``x`` in (single) accelerator memory in JAX."""
try:
accelerator_devices = jax.devices('gpu')
except RuntimeError:
try:
accelerator_devices = jax.devices('tpu')
except RuntimeError:
accelerator_devices = []
if not accelerator_devices:
return x
if len(accelerator_devices) != 1:
return x
return jax.device_put(x, accelerator_devices[0]) |
Returns a layer that computes the Rectified Linear Unit (ReLU) function.
.. math::
f(x) = \left\{ \begin{array}{cl}
0 & \text{if}\ x \leq 0, \\
x & \text{otherwise}.
\end{array} \right. | def Relu():
r"""Returns a layer that computes the Rectified Linear Unit (ReLU) function.
.. math::
f(x) = \left\{ \begin{array}{cl}
0 & \text{if}\ x \leq 0, \\
x & \text{otherwise}.
\end{array} \right.
"""
return Fn('Relu', lambda x: jnp.where(x <= 0, jnp.zeros_like(x), x)) |
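Layers built with `Fn` carry no weights and can be called directly on arrays; a small sketch, assuming these activation layers are importable (in Trax they are exposed under `trax.layers`):

import numpy as np

relu = Relu()
print(relu(np.array([-2.0, 0.0, 3.0])))   # [0. 0. 3.]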
Returns a layer that computes a ReLU function with the given slope.
.. math::
f(x) = \left\{ \begin{array}{cl}
0 & \text{if}\ x \leq 0, \\
ax & \text{otherwise}.
\end{array} \right.
Args:
a: Slope of line for positive inputs. | def ParametricRelu(a=1.):
r"""Returns a layer that computes a ReLU function with the given slope.
.. math::
f(x) = \left\{ \begin{array}{cl}
0 & \text{if}\ x \leq 0, \\
ax & \text{otherwise}.
\end{array} \right.
Args:
a: Slope of line for positive inputs.
"""
return Fn('ParametricRelu', lambda x: jnp.maximum(a * x, jnp.zeros_like(x))) |
Returns a ReLU-like layer with linear nonzero outputs for negative inputs.
.. math::
f(x) = \left\{ \begin{array}{cl}
ax & \text{if}\ x \leq 0, \\
x & \text{otherwise}.
\end{array} \right.
Args:
a: Slope of line for negative inputs. | def LeakyRelu(a=0.01):
r"""Returns a ReLU-like layer with linear nonzero outputs for negative inputs.
.. math::
f(x) = \left\{ \begin{array}{cl}
ax & \text{if}\ x \leq 0, \\
x & \text{otherwise}.
\end{array} \right.
Args:
a: Slope of line for negative inputs.
"""
return Fn('LeakyRelu', lambda x: jnp.where(x >= 0, x, a * x)) |
Returns a ReLU-like layer with exponential outputs for negative inputs.
.. math::
f(x) = \left\{ \begin{array}{cl}
a \cdot (e^x - 1) & \text{if}\ x \leq 0, \\
x & \text{otherwise}.
\end{array} \right.
(Asymptotically, :math:`f(x)\rightarrow -a` as :math:`x\rightarrow - \infty`.)
Args:
a: Coefficient multiplying the exponential, for negative inputs. | def Elu(a=1.):
r"""Returns a ReLU-like layer with exponential outputs for negative inputs.
.. math::
f(x) = \left\{ \begin{array}{cl}
a \cdot (e^x - 1) & \text{if}\ x \leq 0, \\
x & \text{otherwise}.
\end{array} \right.
(Asymptotically, :math:`f(x)\rightarrow -a` as :math:`x\rightarrow - \infty`.)
Args:
a: Coefficient multiplying the exponential, for negative inputs.
"""
return Fn('Elu', lambda x: jnp.where(x > 0, x, a * jnp.expm1(x))) |
Returns an `Elu`-like layer with an additional scaling/slope parameter.
.. math::
f(x) = \left\{ \begin{array}{cl}
\lambda \cdot \alpha \cdot (e^x - 1) & \text{if}\ x \leq 0, \\
\lambda \cdot x & \text{otherwise}.
\end{array} \right.
Args:
alpha: Coefficient multiplying the exponential, for negative inputs.
lmbda: Coefficient scaling the whole function. | def Selu(alpha=1.6732632423543772848170429916717,
lmbda=1.0507009873554804934193349852946):
r"""Returns an `Elu`-like layer with an additional scaling/slope parameter.
.. math::
f(x) = \left\{ \begin{array}{cl}
\lambda \cdot \alpha \cdot (e^x - 1) & \text{if}\ x \leq 0, \\
\lambda \cdot x & \text{otherwise}.
\end{array} \right.
Args:
alpha: Coefficient multiplying the exponential, for negative inputs.
lmbda: Coefficient scaling the whole function.
"""
return Fn('Selu', lambda x: lmbda * jnp.where(x > 0, x, alpha * jnp.expm1(x))) |
Returns a layer that computes the Gaussian Error Linear Unit function.
.. math::
f(x) = \frac{x}{2} \cdot (1 + \hbox{erf}(\frac{x}{\sqrt{2}})) | def Gelu():
r"""Returns a layer that computes the Gaussian Error Linear Unit function.
.. math::
f(x) = \frac{x}{2} \cdot (1 + \hbox{erf}(\frac{x}{\sqrt{2}}))
"""
return Fn('Gelu', lambda x: x * 0.5 * (1.0 + fastmath.erf(x / jnp.sqrt(2.0)))) |
Returns a layer that computes a fast approximation to `Gelu`.
.. math::
f(x) = \frac{x}{2} \cdot (1 + \tanh(ax + abx^3))
where :math:`a = 0.7978845608` and :math:`b = 0.044715`. | def FastGelu():
r"""Returns a layer that computes a fast approximation to `Gelu`.
.. math::
f(x) = \frac{x}{2} \cdot (1 + \tanh(ax + abx^3))
where :math:`a = 0.7978845608` and :math:`b = 0.044715`.
"""
def f(x): # pylint: disable=invalid-name
return 0.5 * x * (1 + jnp.tanh(x * 0.7978845608 * (1 + 0.044715 * x * x)))
return Fn('FastGelu', f) |
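A plain-Python check (no Trax needed) comparing the exact GELU from the previous layer with the tanh approximation used here, at a single point:

import math

x = 1.0
exact = 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))
approx = 0.5 * x * (1.0 + math.tanh(x * 0.7978845608 * (1 + 0.044715 * x * x)))
print(exact, approx)   # both approximately 0.841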
Returns a layer that computes the sigmoid function.
.. math::
f(x) = \frac{1}{1 + e^{-x}} | def Sigmoid():
r"""Returns a layer that computes the sigmoid function.
.. math::
f(x) = \frac{1}{1 + e^{-x}}
"""
return Fn('Sigmoid', lambda x: fastmath.expit(x)) |
Returns a layer that computes the hyperbolic tangent function.
.. math::
f(x) = \frac{e^x - e^{-x}}{e^x + e^{-x}} | def Tanh():
r"""Returns a layer that computes the hyperbolic tangent function.
.. math::
f(x) = \frac{e^x - e^{-x}}{e^x + e^{-x}}
"""
return Fn('Tanh', lambda x: jnp.tanh(x)) |
Returns a layer that computes a linear approximation to `Sigmoid`.
.. math::
f(x) = \left\{ \begin{array}{cl}
0 & \text{if}\ x \leq 0, \\
x & \text{if}\ 0 < x < 1, \\
1 & \text{otherwise}.
\end{array} \right. | def HardSigmoid():
r"""Returns a layer that computes a linear approximation to `Sigmoid`.
.. math::
f(x) = \left\{ \begin{array}{cl}
0 & \text{if}\ x \leq 0, \\
x & \text{if}\ 0 < x < 1, \\
1 & \text{otherwise}.
\end{array} \right.
"""
return Fn('HardSigmoid', lambda x: jnp.maximum(0, jnp.minimum(1, (1 + x)))) |
Returns a layer that computes a linear approximation to `Tanh`.
.. math::
f(x) = \left\{ \begin{array}{cl}
-1 & \text{if}\ x \leq -1, \\
x & \text{if}\ -1 < x < 1, \\
1 & \text{otherwise}.
\end{array} \right. | def HardTanh():
r"""Returns a layer that computes a linear approximation to `Tanh`.
.. math::
f(x) = \left\{ \begin{array}{cl}
-1 & \text{if}\ x \leq -1, \\
x & \text{if}\ -1 < x < 1, \\
1 & \text{otherwise}.
\end{array} \right.
"""
return Fn('HardTanh', lambda x: jnp.maximum(-1, jnp.minimum(1, x))) |
Returns a layer that computes the softplus function.
.. math::
f(x) = \ln(e^x + 1) | def Softplus():
r"""Returns a layer that computes the softplus function.
.. math::
f(x) = \ln(e^x + 1)
"""
return Fn('Softplus', lambda x: jnp.logaddexp(x, 0.)) |
Returns a layer that computes the element-wise exponential of a tensor. | def Exp():
"""Returns a layer that computes the element-wise exponential of a tensor."""
return Fn('Exp', lambda x: jnp.exp(x)) |
Returns a layer that computes the element-wise logarithm of a tensor. | def Log():
"""Returns a layer that computes the element-wise logarithm of a tensor."""
return Fn('Log', lambda x: jnp.log(x)) |
Returns a layer that computes the Swish function.
.. math::
f(x) = x \cdot \text{sigmoid}(x) | def Swish():
r"""Returns a layer that computes the Swish function.
.. math::
f(x) = x \cdot \text{sigmoid}(x)
"""
return Fn('Swish', lambda x: x * fastmath.expit(x)) |
Returns a layer that computes the Gated Linear Unit function.
.. math::
f(x) = a \cdot \text{sigmoid}(b)
where a and b are formed by splitting the input in half along the last axis. | def Glu():
r"""Returns a layer that computes the Gated Linear Unit function.
.. math::
f(x) = a \cdot \text{sigmoid}(b)
where a and b are formed by splitting the input in half along the last axis.
"""
def _f(x, axis=-1): # pylint: disable=invalid-name
size = x.shape[axis]
assert size % 2 == 0, f'axis {axis} of size {size} is not divisible by 2'
a, b = jnp.split(x, 2, axis)
return a * fastmath.expit(b)
return Fn('Glu', _f) |
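The gating can be checked with plain NumPy: an input whose last axis has size 4 is split into halves `a` and `b`, and the output has last-axis size 2:

import numpy as np

def sigmoid(z):
  return 1.0 / (1.0 + np.exp(-z))

x = np.array([[1.0, 2.0, 0.0, 100.0]])
a, b = np.split(x, 2, axis=-1)
print(a * sigmoid(b))   # [[0.5 2. ]] since sigmoid(0) = 0.5 and sigmoid(100) ~ 1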
Decorator for checking the input and output shapes of Layer.
The decorator can be applied to a trax.base.Layer class, or to a function returning
a trax.base.Layer class. It uses notation similar to einsum (Einstein
summation convention), achieving concise and simple representation of tensors.
For example 'ij,jh->ih' is a valid representation of a function taking two
2D matrices as input, and returning a single output, also a 2D matrix.
It improves readability and puts three levels of asserts on the function:
first level is the number of input tensors and output tensors; second level is
the rank of each tensor; third level is the size of each dimension of each
tensor. The decorator inserts those asserts right before and right after
'forward' call.
First level, assert on number of inputs and outputs. In the representation
input tensors are separated from output tensors by an arrow '->'. For layers
taking multiple input tensors or returning multiple output tensors, those
tensors will be separated by a comma ','.
For example, specification 'bsd,df->bsf' asserts that there will be two
input tensors, with shapes represented by 'bsd' and 'df' respectively; and
a single output tensor with shape represented by 'bsf'.
Second level, asserts on possible rank of each tensor. Most commonly,
each letter represents a single dimension. For example, the tensor with shape
represented by 'bsd' has rank three; with 'df' it has rank two. The special
case is an ellipsis ('...'), which expands to an arbitrary number of dimensions,
including zero. For example, the tensor with specification '...sf' has at
least two dimensions. Each tensor may have one ellipsis in its representation.
Third level, asserts the size of each dimension. If two dimensions in any
of input or output tensors have the same letter in the representation then
they must have the same size. For example, with a tensor A represented by 'df'
and a tensor B represented by 'bsf', the size of the second dimension of A
must equal the size of the third dimension of B. Another example: with a
tensor C represented by '...dv' and a tensor D represented by 'd', the size of
the first and only dimension of D must be equal to the size of the second to
last dimension of tensor C.
If two distinct tensors have an ellipsis in their representation then all of
dimensions covered by those ellipses must match. For example, with a tensor E
represented by '...d' and tensor F represented by '...x' then E and F must
have the same rank, and the sizes of all but the last dimensions must match.
Examples:
# In Dense layer there is a single input and single output; the last dimension
# may change in size, while the sizes of all previous dimensions, marked by
# an ellipsis, will stay the same.
@assert_shape('...a->...b')
class Dense(base.Layer):
(...)
# DotProductCausalAttention takes three tensors as input: Queries, Keys, and
# Values, and outputs a single tensor. Sizes of the first two dimensions in
# all those tensors must match, while the last dimension must match only
# between Queries and Keys, and separately between Values and output tensor.
@assert_shape('blk,blk,bld->bld')
class DotProductCausalAttention(base.Layer):
(...)
# assert_shape can also be placed before the function returning base.Layer.
@assert_shape('...d->...')
def ReduceSum():
return Fn('ReduceSum', lambda x: jnp.sum(x, axis=-1, keepdims=False))
Args:
specification: A text specification for the input/output tensors.
Returns:
The decorator changing the class or function. | def assert_shape(specification):
"""Decorator for checking the input and output shapes of Layer.
The decorator can be applied to a trax.base.Layer class, or to a function returning
a trax.base.Layer class. It uses notation similar to einsum (Einstein
summation convention), achieving concise and simple representation of tensors.
For example 'ij,jh->ih' is a valid representation of a function taking two
2D matrices as input, and returning a single output, also a 2D matrix.
It improves readability and puts three levels of asserts on the function:
first level is the number of input tensors and output tensors; second level is
the rank of each tensor; third level is the size of each dimension of each
tensor. The decorator inserts those asserts right before and right after
'forward' call.
First level, assert on number of inputs and outputs. In the representation
input tensors are separated from output tensors by an arrow '->'. For layers
taking multiple input tensors or returning multiple output tensors, those
tensors will be separated by a comma ','.
For example, specification 'bsd,df->bsf' asserts that there will be two
input tensors, with shapes represented by 'bsd' and 'df' respectively; and
a single output tensor with shape represented by 'bsf'.
Second level, asserts on possible rank of each tensor. Most commonly,
each letter represents a single dimension. For example, the tensor with shape
represented by 'bsd' has rank three; with 'df' it has rank two. The special
case is an ellipsis ('...'), which expands to an arbitrary number of dimensions,
including zero. For example, the tensor with specification '...sf' has at
least two dimensions. Each tensor may have one ellipsis in its representation.
Third level, asserts the size of each dimension. If two dimensions in any
of input or output tensors have the same letter in the representation then
they must have the same size. For example, with a tensor A represented by 'df'
and a tensor B represented by 'bsf', the size of the second dimension of A
must equal the size of the third dimension of B. Another example: with a
tensor C represented by '...dv' and a tensor D represented by 'd', the size of
the first and only dimension of D must be equal to the size of the second to
last dimension of tensor C.
If two distinct tensors have an ellipsis in their representation then all of
dimensions covered by those ellipses must match. For example, with a tensor E
represented by '...d' and tensor F represented by '...x' then E and F must
have the same rank, and the sizes of all but the last dimensions must match.
Examples:
# In Dense layer there is a single input and single output; the last dimension
# may change in size, while the sizes of all previous dimensions, marked by
# an ellipsis, will stay the same.
@assert_shape('...a->...b')
class Dense(base.Layer):
(...)
# DotProductCausalAttention takes three tensors as input: Queries, Keys, and
# Values, and outputs a single tensor. Sizes of the first two dimensions in
# all those tensors must match, while the last dimension must match only
# between Queries and Keys, and separately between Values and output tensor.
@assert_shape('blk,blk,bld->bld')
class DotProductCausalAttention(base.Layer):
(...)
# assert_shape can also be placed before the function returning base.Layer.
@assert_shape('...d->...')
def ReduceSum():
return Fn('ReduceSum', lambda x: jnp.sum(x, axis=-1, keepdims=False))
Args:
specification: A text specification for the input/output tensors.
Returns:
The decorator changing the class or function.
"""
caller = inspect.getframeinfo(inspect.stack()[1][0])
message = f'Defined at {caller.filename}:{caller.lineno}'
def wrap_cls(cls):
forward = getattr(cls, 'forward')
init = getattr(cls, '__init__')
before_spec, after_spec = specification.split('->')
@functools.wraps(init)
def init_wrapper(self, *args, **kwargs):
before_assert = AssertShape(before_spec,
message=message + ' function input')
after_assert = AssertShape(after_spec,
message=message + ' function output')
after_assert._create_link(before_assert) # pylint: disable=protected-access
out = init(self, *args, **kwargs)
self._before_assert_fun = before_assert # pylint: disable=protected-access
self._after_assert_fun = after_assert # pylint: disable=protected-access
return out
@functools.wraps(forward)
def forward_wrapper(self, x, *args, **kwargs):
x = self._before_assert_fun.forward(x) # pylint: disable=protected-access
y = forward(self, x, *args, **kwargs)
y = self._after_assert_fun.forward(y) # pylint: disable=protected-access
return y
setattr(cls, 'forward', forward_wrapper)
setattr(cls, '__init__', init_wrapper)
return cls
# TODO(jaszczur): replace this with forward/init override.
def wrap_fun(fun):
@functools.wraps(fun)
def fun_wrapper(*args, **kwargs):
layer = fun(*args, **kwargs)
return AssertFunction(specification, layer, message)
return fun_wrapper
def wrap_fun_or_cls(fun_or_cls):
return (wrap_cls(fun_or_cls) if inspect.isclass(fun_or_cls) else
wrap_fun(fun_or_cls))
return wrap_fun_or_cls |
AssertFunction asserts shapes on the input/output tensors of a layer.
It passes all inputs to the layer, and returns all outputs of the layer
unchanged.
Args:
specification: A specification. See assert_shape decorator for a full
documentation.
layer: A base.Layer to wrap around.
message: An optional message to print if an assert fails. By default it will
print the filename and the line number where AssertFunction was called.
Returns:
The given layer wrapped in asserts on its inputs and outputs. | def AssertFunction(specification, layer, message=None): # pylint: disable=invalid-name
"""AssertFunction asserts shapes on the input/output tensors of a layer.
It passes all inputs to the layer, and returns all outputs of the layer
unchanged.
Args:
specification: A specification. See assert_shape decorator for a full
documentation.
layer: A base.Layer to wrap around.
message: An optional message to print if an assert fails. By default it will
print the filename and the line number where AssertFunction was called.
Returns:
The given layer wrapped in asserts on its inputs and outputs.
"""
if message is None:
caller = inspect.getframeinfo(inspect.stack()[1][0])
message = f'Defined at {caller.filename}:{caller.lineno}'
before_spec, after_spec = specification.split('->')
before_assert = AssertShape(before_spec, message=message + ' function input')
after_assert = AssertShape(after_spec, message=message + ' function output')
after_assert._create_link(before_assert) # pylint: disable=protected-access
return combinators.Serial(
before_assert, layer, after_assert) |
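A minimal sketch of wrapping an existing layer in shape asserts; the '...a->...a' specification promises that `Relu` preserves the shape of its single input tensor:

# Wrap Relu (defined earlier in this context) in input/output shape checks.
checked_relu = AssertFunction('...a->...a', Relu())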
Returns a layer that maps `(vectors, mask)` to `(new_vectors, mask)`.
This layer type represents one pass of multi-head self-attention, from vector
set to vector set, using masks to represent out-of-bound (e.g., padding)
positions. It:
- makes three copies of incoming activations and maps these to multi-head
query (Q) vectors, key (K) vectors, and value (V) vectors, respectively;
- for each head, computes the scaled dot product of each Q-K pair;
- applies mask to screen out positions that come from padding tokens
(indicated by 0 value);
- [in ``'train'`` mode] applies dropout to Q-K dot products;
- for each head, computes Q-K attention strengths using a per-query softmax
of the Q-K dot products;
- for each head, for each query position, combines V vectors according
to the Q-K attention strengths; and
- concatenates and fuses resulting per-head vectors into outgoing
activations matching original input activation shapes.
Args:
d_feature: Last/innermost dimension of activations in the input to and
output from this layer.
n_heads: Number of attention heads. Attention heads effectively split
activation vectors into ``n_heads`` subvectors, of size
``d_feature / n_heads``.
dropout: Probabilistic rate for attention dropout, which overrides
(sets to zero) some attention strengths derived from query-key
matching. As a result, on a given forward pass, some value vectors
don't contribute to the output, analogous to how regular dropout can
cause some node activations to be ignored. Applies only if layer is
created in ``'train'`` mode.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``. | def Attention(d_feature, n_heads=1, dropout=0.0, mode='train'):
"""Returns a layer that maps `(vectors, mask)` to `(new_vectors, mask)`.
This layer type represents one pass of multi-head self-attention, from vector
set to vector set, using masks to represent out-of-bound (e.g., padding)
positions. It:
- makes three copies of incoming activations and maps these to multi-head
query (Q) vectors, key (K) vectors, and value (V) vectors, respectively;
- for each head, computes the scaled dot product of each Q-K pair;
- applies mask to screen out positions that come from padding tokens
(indicated by 0 value);
- [in ``'train'`` mode] applies dropout to Q-K dot products;
- for each head, computes Q-K attention strengths using a per-query softmax
of the Q-K dot products;
- for each head, for each query position, combines V vectors according
to the Q-K attention strengths; and
- concatenates and fuses resulting per-head vectors into outgoing
activations matching original input activation shapes.
Args:
d_feature: Last/innermost dimension of activations in the input to and
output from this layer.
n_heads: Number of attention heads. Attention heads effectively split
activation vectors into ``n_heads`` subvectors, of size
``d_feature / n_heads``.
dropout: Probabilistic rate for attention dropout, which overrides
(sets to zero) some attention strengths derived from query-key
matching. As a result, on a given forward pass, some value vectors
don't contribute to the output, analogous to how regular dropout can
cause some node activations to be ignored. Applies only if layer is
created in ``'train'`` mode.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``.
"""
return cb.Serial(
cb.Select([0, 0, 0]),
AttentionQKV(d_feature, n_heads=n_heads, dropout=dropout, mode=mode),
) |
Returns a layer that maps `(AQ, AK, AV, mask)` to `(new-A, mask)`.
Unlike :py:class:`Attention` above, :py:class:`AttentionQKV` allows the
incoming activations (`AQ`, `AK`, and `AV`) to come from different sources.
This is used, for instance, in encoder-decoder attention (Q-related
activations `AQ` from the decoder, K- and V-related activations -- `AK` and
`AV` -- from the encoder). Otherwise, see the :py:class:`Attention`
description for further context/details.
Args:
d_feature: Last/innermost dimension of activations in the input to and
output from this layer.
n_heads: Number of attention heads. Attention heads effectively split
activation vectors into ``n_heads`` subvectors, of size
``d_feature / n_heads``.
dropout: Probabilistic rate for attention dropout, which overrides
(sets to zero) some attention strengths derived from query-key
matching. As a result, on a given forward pass, some value vectors
don't contribute to the output, analogous to how regular dropout can
cause some node activations to be ignored. Applies only if layer is
created in ``'train'`` mode.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``.
cache_KV_in_predict: Whether to cache K/V arrays in ``'predict'`` mode.
q_sparsity: Sparsity with which to process queries. If ``None``,
:py:class:`Dense` is used; if ``'noop'``, no processing is used.
result_sparsity: Sparsity with which to process result of the attention.
If ``None``, :py:class:`Dense` is used; if ``'noop'``, no processing is
used. | def AttentionQKV(d_feature, n_heads=1, dropout=0.0, mode='train',
cache_KV_in_predict=False, q_sparsity=None,
result_sparsity=None):
"""Returns a layer that maps `(AQ, AK, AV, mask)` to `(new-A, mask)`.
Unlike :py:class:`Attention` above, :py:class:`AttentionQKV` allows the
incoming activations (`AQ`, `AK`, and `AV`) to come from different sources.
This is used, for instance, in encoder-decoder attention (Q-related
activations `AQ` from the decoder, K- and V-related activations -- `AK` and
`AV` -- from the encoder). Otherwise, see the :py:class:`Attention`
description for further context/details.
Args:
d_feature: Last/innermost dimension of activations in the input to and
output from this layer.
n_heads: Number of attention heads. Attention heads effectively split
activation vectors into ``n_heads`` subvectors, of size
``d_feature / n_heads``.
dropout: Probabilistic rate for attention dropout, which overrides
(sets to zero) some attention strengths derived from query-key
matching. As a result, on a given forward pass, some value vectors
don't contribute to the output, analogous to how regular dropout can
cause some node activations to be ignored. Applies only if layer is
created in ``'train'`` mode.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``.
cache_KV_in_predict: Whether to cache K/V arrays in ``'predict'`` mode.
q_sparsity: Sparsity with which to process queries. If ``None``,
:py:class:`Dense` is used; if ``'noop'``, no processing is used.
result_sparsity: Sparsity with which to process result of the attention.
If ``None``, :py:class:`Dense` is used; if ``'noop'``, no processing is
used.
"""
def _SparsifiableDense(layer_sparsity):
if layer_sparsity is None:
return core.Dense(d_feature)
elif layer_sparsity == 'noop':
return cb.Serial() # No-op layer.
else:
d_module = d_feature // layer_sparsity
return cb.Serial(
sparsity.FactoredDense(layer_sparsity, d_feature, d_feature),
sparsity.LocallyConvDense(layer_sparsity, d_module, mode=mode,
kernel_size=3, length_kernel_size=3)
)
def _CacheableDense():
if cache_KV_in_predict and mode == 'predict':
return cb.Cache(core.Dense(d_feature))
else:
return core.Dense(d_feature)
def _PureAttention():
return PureAttention(n_heads=n_heads, dropout=dropout, mode=mode)
return cb.Serial(
cb.Parallel(_SparsifiableDense(q_sparsity),
_CacheableDense(),
_CacheableDense()),
_PureAttention(),
_SparsifiableDense(result_sparsity),
) |
Computes new per-head activations via scaled dot-product attention.
This function is the core of the attention mechanism. Given per-head
``queries`` (Q), ``keys`` (K), ``values`` (V), and ``mask``, it:
- computes the scaled dot product of each Q-K pair;
- applies ``mask`` to screen out positions that come from padding tokens
(indicated by 0 value);
- [in ``'train'`` mode] applies dropout to Q-K dot products;
- computes Q-K attention strengths using a per-query softmax of the Q-K dot
products; and
- for each query position, combines V vectors according to the Q-K
attention strengths.
Args:
queries: Per-head activations representing attention queries.
keys: Per-head activations representing attention keys.
values: Per-head activations to be combined by computed attention strengths.
mask: Mask that distinguishes positions with real content vs. padding.
dropout: Probabilistic rate for attention dropout, which overrides
(sets to zero) some attention strengths derived from query-key
matching. As a result, on a given forward pass, some value vectors
don't contribute to the output, analogous to how regular dropout can
cause some node activations to be ignored. Applies only in ``'train'``
mode.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``.
rng: Single-use random number generator (JAX PRNG key).
Returns:
Tuple of (activations, attn_strengths), where activations are new per-head
activation vectors and attn_strengths is a matrix of per-head attention
strengths. | def _per_head_attention(queries, keys, values, mask, dropout, mode, rng):
"""Computes new per-head activations via scaled dot-product attention.
This function is the core of the attention mechanism. Given per-head
``queries`` (Q), ``keys`` (K), ``values`` (V), and ``mask``, it:
- computes the scaled dot product of each Q-K pair;
- applies ``mask`` to screen out positions that come from padding tokens
(indicated by 0 value);
- [in ``'train'`` mode] applies dropout to Q-K dot products;
- computes Q-K attention strengths using a per-query softmax of the Q-K dot
products; and
- for each query position, combines V vectors according to the Q-K
attention strengths.
Args:
queries: Per-head activations representing attention queries.
keys: Per-head activations representing attention keys.
values: Per-head activations to be combined by computed attention strengths.
mask: Mask that distinguishes positions with real content vs. padding.
dropout: Probabilistic rate for attention dropout, which overrides
(sets to zero) some attention strengths derived from query-key
matching. As a result, on a given forward pass, some value vectors
don't contribute to the output, analogous to how regular dropout can
cause some node activations to be ignored. Applies only in ``'train'``
mode.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``.
rng: Single-use random number generator (JAX PRNG key).
Returns:
Tuple of (activations, attn_strengths), where activations are new per-head
activation vectors and attn_strengths is a matrix of per-head attention
strengths.
"""
if dropout >= 1.0:
raise ValueError(f'Dropout rate ({dropout}) must be lower than 1.')
d_feature = queries.shape[-1]
dots = jnp.matmul(queries, jnp.swapaxes(keys, -1, -2)) / jnp.sqrt(d_feature)
if mask is not None:
dots = jnp.where(mask,
dots,
jnp.full_like(dots, -1e9))
attn_strengths = (
jnp.exp(dots - fastmath.logsumexp(dots, axis=-1, keepdims=True)))
if dropout is not None and dropout > 0.0 and mode == 'train':
keep = fastmath.random.bernoulli(rng, 1.0 - dropout, attn_strengths.shape)
attn_strengths = jnp.where(keep,
attn_strengths / (1.0 - dropout),
jnp.zeros_like(attn_strengths))
activations = jnp.matmul(attn_strengths, values).astype(jnp.float32)
attn_strengths = attn_strengths.astype(jnp.float32)
return activations, attn_strengths |
Returns a layer that reshapes an array for multi-head computation. | def SplitIntoHeads(n_heads, merged_batch_and_head=True):
"""Returns a layer that reshapes an array for multi-head computation."""
def f(x):
batch_size, seq_len, d_feature = x.shape
if d_feature % n_heads != 0:
raise ValueError(
f'Feature embedding dimensionality ({d_feature}) is not a multiple'
f' of the requested number of attention heads ({n_heads}).')
d_head = d_feature // n_heads
# (b_size, seq_len, d_feature) --> (b_size*n_heads, seq_len, d_head)
x = x.reshape((batch_size, seq_len, n_heads, d_head))
x = x.transpose((0, 2, 1, 3))
if merged_batch_and_head:
x = x.reshape((batch_size * n_heads, seq_len, d_head))
return x
return Fn('SplitIntoHeads', f) |
Returns a layer that rejoins heads, after multi-head computation. | def MergeHeads(n_heads, merged_batch_and_head=True):
"""Returns a layer that rejoins heads, after multi-head computation."""
def f(x):
if merged_batch_and_head:
dim_0, seq_len, d_head = x.shape
if dim_0 % n_heads != 0:
raise ValueError(
f"Array's leading dimension ({dim_0}) is not a multiple of the"
f" number of attention heads ({n_heads}).")
batch_size = dim_0 // n_heads
x = x.reshape((batch_size, n_heads, seq_len, d_head))
else:
batch_size, _, seq_len, d_head = x.shape
# (b_size, n_heads, seq_len, d_head) --> (b_size, seq_len, d_feature)
x = x.transpose((0, 2, 1, 3))
x = x.reshape((batch_size, seq_len, n_heads * d_head))
return x
return Fn('MergeHeads', f) |
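A shape round trip through the two layers above (both are weightless `Fn` layers, so they can be called directly); with `d_feature=8` and 4 heads, `d_head` is 2:

import numpy as np

x = np.zeros((2, 5, 8), dtype=np.float32)     # (batch, seq_len, d_feature)
y = SplitIntoHeads(n_heads=4)(x)
print(y.shape)                                 # (8, 5, 2) = (batch*n_heads, seq_len, d_head)
print(MergeHeads(n_heads=4)(y).shape)          # (2, 5, 8)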
Returns a configured multi-head self-attention layer.
A :py:class:`ConfigurableAttention` layer acts similarly to
:py:class:`Attention` layers, but with configurable components. It
- makes three copies of incoming activations and uses ``q_layer``,
``k_layer``, and ``v_layer`` to map activations to multi-head query (Q)
vectors, key (K) vectors, and value (V) vectors, respectively;
- uses ``qkv_attention_layer`` to compute per-head attention, similar to
:py:class:`DotProductAttention` or :py:class:`DotProductCausalAttention`;
- concatenates and fuses resulting per-head vectors into activations
matching original input activation shapes; and
- applies a final layer, ``final_layer``, mapping activations to
activations (with shape matching the original input activations).
Args:
q_layer: Layer that maps input activations to per-head query activations.
k_layer: Layer that maps input activations to per-head key activations.
v_layer: Layer that maps input activations to per-head value activations.
final_layer: After main multi-head computation and rejoining of heads,
layer that maps activations to activations (with shape matching the
original input activations).
qkv_attention_layer: Layer that does the core multi-head self-attention
computation.
n_heads: Number of attention heads. Attention heads effectively split
activation vectors into ``n_heads`` subvectors, of size
``d_feature / n_heads``. | def ConfigurableAttention(q_layer, k_layer, v_layer, final_layer, # pylint: disable=invalid-name
qkv_attention_layer, n_heads=1):
"""Returns a configured multi-head self-attention layer.
A :py:class:`ConfigurableAttention` layer acts similarly to
:py:class:`Attention` layers, but with configurable components. It
- makes three copies of incoming activations and uses ``q_layer``,
``k_layer``, and ``v_layer`` to map activations to multi-head query (Q)
vectors, key (K) vectors, and value (V) vectors, respectively;
- uses ``qkv_attention_layer`` to compute per-head attention, similar to
:py:class:`DotProductAttention` or :py:class:`DotProductCausalAttention`;
- concatenates and fuses resulting per-head vectors into activations
matching original input activation shapes; and
- applies a final layer, ``final_layer``, mapping activations to
activations (with shape matching the original input activations).
Args:
q_layer: Layer that maps input activations to per-head query activations.
k_layer: Layer that maps input activations to per-head key activations.
v_layer: Layer that maps input activations to per-head value activations.
final_layer: After main multi-head computation and rejoining of heads,
layer that maps activations to activations (with shape matching the
original input activations).
qkv_attention_layer: Layer that does the core multi-head self-attention
computation.
n_heads: Number of attention heads. Attention heads effectively split
activation vectors into ``n_heads`` subvectors, of size
``d_feature / n_heads``.
"""
return cb.Serial(
cb.Branch(
[q_layer, SplitIntoHeads(n_heads)],
[k_layer, SplitIntoHeads(n_heads)],
[v_layer, SplitIntoHeads(n_heads)],
),
qkv_attention_layer,
MergeHeads(n_heads),
final_layer
) |
Returns a layer that maps activations to activations, with causal masking.
Like :py:class:`Attention`, this layer type represents one pass of multi-head
self-attention, but with causal masking rather than padding-based masking.
Args:
d_feature: Last/innermost dimension of activations in the input to and
output from this layer.
n_heads: Number of attention heads. Attention heads effectively split
activation vectors into ``n_heads`` subvectors, of size
``d_feature / n_heads``.
dropout: Probabilistic rate for attention dropout, which overrides
(sets to zero) some attention strengths derived from query-key
matching. As a result, on a given forward pass, some value vectors
don't contribute to the output, analogous to how regular dropout can
cause some node activations to be ignored. Applies only if layer is
created in ``'train'`` mode.
max_inference_length: Maximum sequence length allowed in non-training
modes.
use_dconv: if True, use depthwise convolutions on top of dense layers
for Q, K and V.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``. | def CausalAttention(d_feature,
n_heads=1,
dropout=0.0,
max_inference_length=2048,
use_dconv=False,
mode='train'):
"""Returns a layer that maps activations to activations, with causal masking.
Like :py:class:`Attention`, this layer type represents one pass of multi-head
self-attention, but with causal masking rather than padding-based masking.
Args:
d_feature: Last/innermost dimension of activations in the input to and
output from this layer.
n_heads: Number of attention heads. Attention heads effectively split
activation vectors into ``n_heads`` subvectors, of size
``d_feature / n_heads``.
dropout: Probabilistic rate for attention dropout, which overrides
(sets to zero) some attention strengths derived from query-key
matching. As a result, on a given forward pass, some value vectors
don't contribute to the output, analogous to how regular dropout can
cause some node activations to be ignored. Applies only if layer is
created in ``'train'`` mode.
max_inference_length: Maximum sequence length allowed in non-training
modes.
use_dconv: if True, use depthwise convolutions on top of dense layers
for Q, K and V.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``.
"""
if d_feature % n_heads != 0:
raise ValueError(
f'Dimensionality of feature embedding ({d_feature}) is not a multiple '
f'of the requested number of attention heads ({n_heads}).')
def QKVLayer():
"""Function returning the Q, K and V layer."""
if use_dconv:
return cb.Serial(core.Dense(d_feature), convolution.CausalDepthwiseConv())
else:
return core.Dense(d_feature)
return ConfigurableAttention(
QKVLayer(),
QKVLayer(),
QKVLayer(),
core.Dense(d_feature),
n_heads=n_heads,
qkv_attention_layer=DotProductCausalAttention(
dropout=dropout, max_inference_length=max_inference_length,
mode=mode)) |
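A usage sketch, assuming the standard `trax.layers` and `trax.shapes` import paths; the layer maps activations to activations of the same shape:

import numpy as np
import trax.layers as tl
from trax import shapes

layer = tl.CausalAttention(d_feature=16, n_heads=4, mode='eval')
x = np.zeros((2, 10, 16), dtype=np.float32)   # (batch, seq_len, d_feature)
layer.init(shapes.signature(x))               # initialize weights for this input shape
print(layer(x).shape)                         # (2, 10, 16)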
Returns a layer that can insert padding to shift the input sequence.
Args:
n_positions: Number of positions to shift the input sequence rightward;
initial positions freed by the shift get padded with zeros. Applies
only if layer is created in a non-``'predict'`` mode.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``. | def ShiftRight(n_positions=1, mode='train'):
"""Returns a layer that can insert padding to shift the input sequence.
Args:
n_positions: Number of positions to shift the input sequence rightward;
initial positions freed by the shift get padded with zeros. Applies
only if layer is created in a non-``'predict'`` mode.
mode: One of ``'train'``, ``'eval'``, or ``'predict'``.
"""
# TODO(jonni): Include pad arg, like PaddingMask, to allow non-default pads?
def f(x):
if mode == 'predict':
return x
padded = _zero_pad(x, (n_positions, 0), 1)
return padded[:, :-n_positions]
return Fn(f'ShiftRight({n_positions})', f) |
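In 'train' mode the layer pads a zero on the left of the time axis and drops the last position; a small check with the layer defined above:

import numpy as np

print(ShiftRight(n_positions=1)(np.array([[1, 2, 3]])))   # [[0 1 2]]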
Returns a layer that maps integer sequences to padding masks.
The layer expects as input a batch of integer sequences. The layer output is
an N-D array that marks for each sequence position whether the integer (e.g.,
a token ID) in that position represents padding -- value ``pad`` -- versus
text/content -- all other values. The padding mask shape is
(batch_size, 1, 1, encoder_sequence_length), such that axis 1 will broadcast
to cover any number of attention heads and axis 2 will broadcast to cover
decoder sequence positions.
Args:
pad: Integer that represents padding rather than a token/content ID. | def PaddingMask(pad=0):
"""Returns a layer that maps integer sequences to padding masks.
The layer expects as input a batch of integer sequences. The layer output is
an N-D array that marks for each sequence position whether the integer (e.g.,
a token ID) in that position represents padding -- value ``pad`` -- versus
text/content -- all other values. The padding mask shape is
(batch_size, 1, 1, encoder_sequence_length), such that axis 1 will broadcast
to cover any number of attention heads and axis 2 will broadcast to cover
decoder sequence positions.
Args:
pad: Integer that represents padding rather than a token/content ID.
"""
def f(x):
if len(x.shape) != 2:
raise ValueError(
f'Input to PaddingMask must be a 2-D array with shape '
f'(batch_size, sequence_length); instead got shape {x.shape}.')
batch_size = x.shape[0]
sequence_length = x.shape[1]
content_positions = (x != pad)
return content_positions.reshape((batch_size, 1, 1, sequence_length))
return Fn(f'PaddingMask({pad})', f) |
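A quick check of the mask shape and contents, with 0 as the padding ID:

import numpy as np

ids = np.array([[5, 7, 0, 0]])    # (batch_size, sequence_length)
mask = PaddingMask()(ids)
print(mask.shape)                  # (1, 1, 1, 4)
print(mask[0, 0, 0])               # [ True  True False False]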
Returns a layer that creates a mask for encoder-decoder cross attention.
The layer expects two inputs:
- decoder_input: batch of integer (e.g., token ID) sequences
- mask: padding mask from the encoder
The layer output is a mask that marks for each sequence position (for both
encoder and decoder) whether that position can be attended to or not. The
encoder-decoder mask shape is (batch_size, 1, decoder_sequence_length,
encoder_sequence_length), such that axis 1 will automatically broadcast to
cover any number of attention heads. | def EncoderDecoderMask():
"""Returns a layer that creates a mask for encoder-decoder cross attention.
The layer expects two inputs:
- decoder_input: batch of integer (e.g., token ID) sequences
- mask: padding mask from the encoder
The layer output is a mask that marks for each sequence position (for both
encoder and decoder) whether that position can be attended to or not. The
encoder-decoder mask shape is (batch_size, 1, decoder_sequence_length,
encoder_sequence_length), such that axis 1 will automatically broadcast to
cover any number of attention heads.
"""
def f(decoder_input, mask):
if len(decoder_input.shape) != 3:
raise ValueError(
f'Decoder input to EncoderDecoderMask must be a 3-D array with '
f'shape (batch_size, decoder_sequence_length, d_model); instead got '
f'shape {decoder_input.shape}.')
batch_size = mask.shape[0]
encoder_sequence_length = mask.shape[-1]
decoder_sequence_length = decoder_input.shape[1]
mask = mask.reshape((batch_size, 1, 1, encoder_sequence_length))
return mask + jnp.zeros((1, 1, decoder_sequence_length, 1))
return Fn('EncoderDecoderMask', f) |
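The layer takes two inputs, so it is called on a pair; a small shape check (a sketch, assuming the usual Trax convention of passing multiple inputs as a tuple) combining a length-3 decoder input with a length-4 encoder padding mask:

import numpy as np

dec = np.zeros((1, 3, 8), dtype=np.float32)          # (batch, dec_len, d_model)
enc_mask = np.array([[True, True, False, False]])    # (batch, enc_len)
print(EncoderDecoderMask()((dec, enc_mask)).shape)   # (1, 1, 3, 4)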
Helper for jnp.pad with 0s for single-axis case. | def _zero_pad(x, pad, axis):
"""Helper for jnp.pad with 0s for single-axis case."""
pad_widths = [(0, 0)] * len(x.shape)
pad_widths[axis] = pad # Padding on axis.
return jnp.pad(x, pad_widths, mode='constant') |
Returns an initial state for causal attention layer fast inference. | def _fast_inference_init_state(input_signature, buffer_length,
predict_mask=None):
"""Returns an initial state for causal attention layer fast inference."""
def zeros_for(batch_size, shape_dtype):
shape, dtype = shape_dtype.as_tuple()
d_feature = shape[-1]
return jnp.zeros((batch_size, buffer_length, d_feature), dtype=dtype)
batch_size = input_signature[0].shape[0]
k = zeros_for(batch_size, input_signature[1])
v = zeros_for(batch_size, input_signature[2])
if predict_mask is not None:
mask_for_predict = jnp.zeros((buffer_length,)) != 0
return (mask_for_predict, k, v, jnp.array(0))
else:
return (k, v, jnp.array(0)) |
Updates state of a causal attention layer for fast inference.
The layer state stores arrays with cached values of keys and values,
as well as an index. To keep shapes static, the cached keys and values are
allocated at the full buffer length, and the index indicates where the new keys
and values from the inputs need to be appended.
During an update, we append new_keys and new_values to the cached keys and
values at the position given by the index, and increment the index by the
length of the new keys. We also create a causal mask that is 1 at the
appropriate positions.
Args:
inputs: a triple (new_queries, new_keys, new_values)
state: layer state with (keys, values, index)
mask_for_predict: mask used for predict mode. This is used only in
Terraformer.
Returns:
Updated state and mask to be used. | def _fast_inference_update_state(inputs, state, mask_for_predict=None):
"""Updates state of a causal attention layer for fast inference.
The layer state stores arrays with cached values of keys and values,
as well as an index. To keep shapes static, the cached keys and values are
allocated at the full buffer length, and the index indicates where the new keys
and values from the inputs need to be appended.
During an update, we append new_keys and new_values to the cached keys and
values at the position given by the index, and increment the index by the
length of the new keys. We also create a causal mask that is 1 at the
appropriate positions.
Args:
inputs: a triple (new_queries, new_keys, new_values)
state: layer state with (keys, values, index)
mask_for_predict: mask used for predict mode. This is used only in
Terraformer.
Returns:
Updated state and mask to be used.
"""
# Fast inference: run step-by-step, storing the sequence
# of keys and values calculated so far in state.
(_, new_k, new_v) = inputs
if mask_for_predict is not None:
(state_mask_for_predict, ks, vs, idx) = state
else:
(ks, vs, idx) = state
length = new_k.shape[1]
# TODO(lukaszkaiser): benchmark speed and decide if using a separate code path
# with index_update when length == 1 is worth it.
# Keys and values are of shape [batch_size, length, d_kv].
ks = fastmath.dynamic_update_slice_in_dim(ks, new_k, idx, axis=1)
vs = fastmath.dynamic_update_slice_in_dim(vs, new_v, idx, axis=1)
k_length = ks.shape[1]
# Mask is of shape [1, q_length, k_length].
# Mask should be true for every pair of (query_token, key_token) such that
# index of query_token is equal or larger to index of key_token.
mask = (jnp.reshape(jnp.arange(k_length), (1, 1, k_length))
<= jnp.reshape(jnp.arange(length) + idx, (1, length, 1)))
if mask_for_predict is None:
return (ks, vs, idx + length), mask
else:
state_mask_for_predict = fastmath.dynamic_update_slice_in_dim(
state_mask_for_predict != 0, mask_for_predict.reshape((-1)) != 0, 0,
axis=0)
state_mask_for_predict = fastmath.dynamic_update_slice_in_dim(
state_mask_for_predict != 0, jnp.ones((1,)) != 0,
jnp.sum(mask_for_predict, dtype=jnp.int32), axis=0)
state_mask_for_predict = fastmath.dynamic_update_slice_in_dim(
state_mask_for_predict != 0, jnp.ones((1,)) != 0, idx, axis=0)
placeholder = jnp.reshape(state_mask_for_predict != 0,
(1, 1, mask.shape[2],))
mask = mask * placeholder
return (state_mask_for_predict, ks, vs, idx + length), mask |
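To make the cache mechanics concrete, here is a small self-contained sketch of one decoding step, using `jax.lax.dynamic_update_slice_in_dim` directly (the call the `fastmath` wrapper above is assumed to delegate to); sizes are illustrative:
import jax.numpy as jnp
from jax import lax
batch, buffer_len, d_kv = 1, 8, 4
ks = jnp.zeros((batch, buffer_len, d_kv))   # pre-allocated key cache
new_k = jnp.ones((batch, 1, d_kv))          # keys for one new token
idx = 3                                     # three tokens already cached
ks = lax.dynamic_update_slice_in_dim(ks, new_k, idx, axis=1)
# Causal mask: the new query (position idx) may attend to keys 0..idx.
mask = (jnp.arange(buffer_len)[None, None, :]
        <= (jnp.arange(1) + idx)[None, :, None])
print(mask[0, 0])  # [ True  True  True  True False False False False]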
Returns a layer with no weights that applies the function `f`.
`f` can take and return any number of arguments, and takes only positional
arguments -- no default or keyword arguments. It often uses JAX-numpy (`jnp`).
The following, for example, would create a layer that takes two inputs and
returns two outputs -- element-wise sums and maxima:
`Fn('SumAndMax', lambda x0, x1: (x0 + x1, jnp.maximum(x0, x1)), n_out=2)`
The layer's number of inputs (`n_in`) is automatically set to the number of
positional arguments in `f`, but you must explicitly set the number of
outputs (`n_out`) whenever it's not the default value 1.
Args:
name: Class-like name for the resulting layer; for use in debugging.
f: Pure function from input tensors to output tensors, where each input
tensor is a separate positional arg, e.g., `f(x0, x1) --> x0 + x1`.
Output tensors must be packaged as specified in the `Layer` class
docstring.
n_out: Number of outputs promised by the layer; default value 1.
Returns:
Layer executing the function `f`. | def Fn(name, f, n_out=1): # pylint: disable=invalid-name
"""Returns a layer with no weights that applies the function `f`.
`f` can take and return any number of arguments, and takes only positional
arguments -- no default or keyword arguments. It often uses JAX-numpy (`jnp`).
The following, for example, would create a layer that takes two inputs and
returns two outputs -- element-wise sums and maxima:
`Fn('SumAndMax', lambda x0, x1: (x0 + x1, jnp.maximum(x0, x1)), n_out=2)`
The layer's number of inputs (`n_in`) is automatically set to the number of
positional arguments in `f`, but you must explicitly set the number of
outputs (`n_out`) whenever it's not the default value 1.
Args:
name: Class-like name for the resulting layer; for use in debugging.
f: Pure function from input tensors to output tensors, where each input
tensor is a separate positional arg, e.g., `f(x0, x1) --> x0 + x1`.
Output tensors must be packaged as specified in the `Layer` class
docstring.
n_out: Number of outputs promised by the layer; default value 1.
Returns:
Layer executing the function `f`.
"""
argspec = inspect.getfullargspec(f)
if argspec.defaults is not None:
raise ValueError('Function has default arguments (not allowed).')
if argspec.varkw is not None:
raise ValueError('Function has keyword arguments (not allowed).')
if argspec.varargs is not None:
raise ValueError('Function has variable args (not allowed).')
def _forward(xs): # pylint: disable=invalid-name
if not isinstance(xs, (tuple, list)):
xs = (xs,)
return f(*xs)
n_in = len(argspec.args)
name = name or 'Fn'
return PureLayer(_forward, n_in=n_in, n_out=n_out, name=name) |
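As a quick check, the two-output example from the docstring behaves as follows (a sketch assuming `trax.layers` as `tl` and `trax.fastmath.numpy` as `jnp`):
import numpy as np
from trax import layers as tl
from trax.fastmath import numpy as jnp
sum_and_max = tl.Fn('SumAndMax',
                    lambda x0, x1: (x0 + x1, jnp.maximum(x0, x1)), n_out=2)
x0 = np.array([1, 2, 3])
x1 = np.array([4, 1, 3])
sums, maxima = sum_and_max((x0, x1))  # n_in inferred as 2 from f's signature
print(sums)    # [5 3 6]
print(maxima)  # [4 2 3]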
Flatten weights and state into lists, excluding empty and cached ones. | def flatten_weights_and_state(weights, state):
"""Flatten weights and state into lists, excluding empty and cached ones."""
def _is_empty_weight(x):
return (x is EMPTY_WEIGHTS or
(isinstance(x, dict) and x == GET_WEIGHTS_FROM_CACHE))
flat_weights = [w for w in fastmath.tree_flatten(weights)
if not _is_empty_weight(w)]
def _is_empty_state(x):
return (x is EMPTY_STATE or
(isinstance(x, dict) and x == GET_STATE_FROM_CACHE))
flat_state = [s for s in fastmath.tree_flatten(state)
if not _is_empty_state(s)]
return flat_weights, flat_state |
Unflatten weights and state given their signatures. | def unflatten_weights_and_state(
flat_weights, flat_state, weights_and_state_signature, weights_only=False):
"""Unflatten weights and state given their signatures."""
weights_tree, state_tree = weights_and_state_signature
weights_to_copy = [EMPTY_WEIGHTS, GET_WEIGHTS_FROM_CACHE]
weights, _ = fastmath.tree_unflatten(flat_weights, weights_tree,
copy_from_tree=weights_to_copy)
state = None
if not weights_only:
states_to_copy = [EMPTY_STATE, GET_STATE_FROM_CACHE]
state, _ = fastmath.tree_unflatten(flat_state, state_tree,
copy_from_tree=states_to_copy)
return weights, state |
Save numpy arrays to file_path with gzipping and failure protection. | def np_to_file(list_of_nparrays, file_path, compresslevel):
"""Save numpy arrays to file_path with gzipping and failure protection."""
# Pickle to tmp file and overwrite to prevent writing partial files.
tmp_file_path = file_path + '._tmp_'
with tf.io.gfile.GFile(tmp_file_path, 'wb') as f:
with gzip.GzipFile(fileobj=f, compresslevel=compresslevel) as gzipf:
for x in list_of_nparrays:
np.save(gzipf, x, allow_pickle=False)
# Moving a file is much less error-prone than pickling large files.
tf.io.gfile.rename(tmp_file_path, file_path, overwrite=True) |
Load numpy arrays from file_path with gzipping. | def np_from_file(file_path, compresslevel):
"""Load numpy arrays from file_path with gzipping."""
if not tf.io.gfile.exists(file_path):
raise FileNotFoundError(file_path)
res = []
with tf.io.gfile.GFile(file_path, 'rb') as f:
with gzip.GzipFile(fileobj=f, compresslevel=compresslevel) as gzipf:
while True:
try:
res.append(np.load(gzipf, allow_pickle=False))
except Exception: # pylint: disable=broad-except
break
return res |
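A round-trip sketch with the two helpers above (assuming they are importable from `trax.layers.base` and that TensorFlow's gfile can write to the local path used here):
import numpy as np
from trax.layers import base  # assumed location of np_to_file / np_from_file
arrays = [np.arange(6).reshape(2, 3), np.ones((4,), dtype=np.float32)]
base.np_to_file(arrays, '/tmp/arrays.npy.gz', compresslevel=2)
restored = base.np_from_file('/tmp/arrays.npy.gz', compresslevel=2)
assert len(restored) == 2
assert np.array_equal(restored[0], arrays[0])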
Converts layer outputs to a nested list, for easier equality testing.
Args:
outputs: A tensor or tuple/list of tensors coming from the forward
application of a layer. Each tensor is NumPy ndarray-like, which
complicates simple equality testing (e.g., via `assertEquals`):
such tensors require equality testing to use either `all` (all
elements match) or `any` (at least one element matches), which is not
directly supported in `absltest`.
Returns:
A nested list structure containing all the output values, but now directly
testable using `assertEquals`. | def to_list(outputs):
"""Converts layer outputs to a nested list, for easier equality testing.
Args:
outputs: A tensor or tuple/list of tensors coming from the forward
application of a layer. Each tensor is NumPy ndarray-like, which
complicates simple equality testing (e.g., via `assertEquals`):
such tensors require equality testing to use either `all` (all
elements match) or `any` (at least one element matches), which is not
directly supported in `absltest`.
Returns:
A nested list structure containing all the output values, but now directly
testable using `assertEquals`.
"""
if isinstance(outputs, (list, tuple)):
return [y.tolist() for y in outputs]
else:
return outputs.tolist() |
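For instance (a small sketch, assuming `to_list` is importable from `trax.layers.base`), a tuple of arrays becomes a plain nested list that `assertEqual` can compare directly:
import numpy as np
from trax.layers import base  # assumed location of to_list
outputs = (np.array([[1, 2]]), np.array([3, 4]))
assert base.to_list(outputs) == [[[1, 2]], [3, 4]]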
Find the frame with the caller on the stack. | def _find_frame(frame):
"""Find the frame with the caller on the stack."""
def _dirname_is_trax_layers_or_gin(frame):
"""Skip frames coming from trax/layers or .../gin."""
try:
dirname1 = frame.f_code.co_filename.split('/')[-3]
dirname2 = frame.f_code.co_filename.split('/')[-2]
return (dirname1 == 'trax' and dirname2 == 'layers') or dirname2 == 'gin'
except IndexError:
return False
while _dirname_is_trax_layers_or_gin(frame):
frame = frame.f_back
return frame |
Shorten file path in error lines for more readable tracebacks. | def _shorten_file_path(line):
"""Shorten file path in error lines for more readable tracebacks."""
start = line.lower().find('file')
if start < 0:
return line
first_quote = line.find('"', start)
if first_quote < 0:
return line
second_quote = line.find('"', first_quote + 1)
if second_quote < 0:
return line
path = line[first_quote + 1:second_quote]
new_path = '/'.join(path.split('/')[-3:])
return line[:first_quote] + '[...]/' + new_path + line[second_quote + 1:] |
Cleaned-up form of traceback. | def _short_traceback(skip=3):
"""Cleaned-up form of traceback."""
counter, res = 0, []
# Skipping 3 lines by default: the top (useless) and self-call.
# In python 3, we need to set chain to False (it doesn't exist in python 2).
lines = traceback.format_exc(chain=False).splitlines()[skip:] # pylint: disable=unexpected-keyword-arg
for l in lines:
if l.startswith('trax.layers.base.LayerError'):
l = l[len('trax.layers.base.'):] # Remove the trax.layers.base prefix.
res.append(_shorten_file_path(l))
if counter % 2 == 1:
res.append('')
counter += 1
# If we see a LayerError, the traceback has already been processed.
if l.startswith('LayerError'):
# Skip 4 back except last as these are internal base-layer calls.
res = res[:-4] + [res[-1]]
res += lines[counter:]
break
return '\n'.join(res) |
Creates random floats or ints of the given shape.
Args:
input_signature: A `ShapeDtype` instance (if `layer_obj` takes one input)
or a list/tuple of ShapeDtype instances.
rng: Single-use random number generator (JAX PRNG key).
Returns:
Random values with the shape and type specified. | def _random_values(input_signature, rng):
"""Creates random floats or ints of the given shape.
Args:
input_signature: A `ShapeDtype` instance (if `layer_obj` takes one input)
or a list/tuple of ShapeDtype instances.
rng: Single-use random number generator (JAX PRNG key).
Returns:
Random values with the shape and type specified.
"""
if isinstance(input_signature, ShapeDtype):
shape, dtype = input_signature.shape, input_signature.dtype
if np.issubdtype(dtype, np.integer):
return fastmath.random.bernoulli(rng, 0.5, shape).astype(np.int32)
else:
return fastmath.random.uniform(rng, shape, minval=-1.0, maxval=1.0)
elif isinstance(input_signature, (list, tuple)):
return tuple(_random_values(x, rng) for x in input_signature)
else:
raise TypeError(type(input_signature)) |
Gets a structure of shapes for a structure of nested arrays. | def _shapes(x):
"""Gets a structure of shapes for a structure of nested arrays."""
def shape(x):
try:
return tuple([int(i) for i in x.shape])
except Exception: # pylint: disable=broad-except
return ()
return tuple(nested_map(shape, x)) |
Return the axis indices. | def _axis_index(unused_x):
"""Return the axis indices."""
return jax.lax.axis_index('batch') |
Chooses an axis to shard on - a simple heuristic to be revisited. | def _axis_to_shard_heuristic(shape):
"""Chooses an axis to shard on - a simple heuristic to be revisited."""
axis = 0 if len(shape) < 3 else -1
return axis |
Shard tensors across n_shards. | def shard(tensors, n_shards=None):
"""Shard tensors across n_shards."""
n_shards = N_WEIGHTS_SHARDS if n_shards is None else n_shards
indices = _axis_index(np.zeros(fastmath.local_device_count()))
def _shard_fn(x):
axis = _axis_to_shard_heuristic(x.shape)
if int(x.shape[axis]) % n_shards != 0:
raise ValueError(f'Cannot split x with shape {x.shape} into {n_shards}.')
split_x = jnp.split(x, n_shards, axis=axis)
split_x = [split_x[i % n_shards] for i in indices]
return np.stack(split_x, axis=0)
return fastmath.nested_map(_shard_fn, tensors) |
Unshard tensors that were sharded into n_shards (call inside pmap). | def unshard_in_pmap(tensors, n_shards):
"""Unshard tensors that were sharded into n_shards (call inside pmap)."""
groups = [[n_shards * i + d for d in range(n_shards)]
for i in range(fastmath.global_device_count() // n_shards)]
def _unshard_fn(x):
y = jax.lax.all_gather(x, 'batch', axis_index_groups=groups)
split_y = jnp.split(y, n_shards, axis=0)
split_y = [jnp.squeeze(sy, axis=0) for sy in split_y]
axis = _axis_to_shard_heuristic(split_y[0].shape)
return jnp.concatenate(split_y, axis=axis)
try:
jax.lax.axis_index('batch') # will throw if not in pmap, e.g., on init
res = fastmath.nested_map(_unshard_fn, tensors)
return res, True
except NameError: # thrown from axis_index above
return tensors, False |
Unshard tensors that were sharded into n_shards (outside of pmap). | def unshard(tensors, n_shards=None):
"""Unshard tensors that were sharded into n_shards (outside of pmap)."""
n_shards = N_WEIGHTS_SHARDS if n_shards is None else n_shards
def _unshard_fn(x):
# We use numpy here to put the large un-sharded arrays in CPU memory.
# For unsharding on accelerators use unshard_in_pmap above and pmap it.
split_y = np.split(np.asarray(x), n_shards, axis=0)
split_y = [np.squeeze(sy, axis=0) for sy in split_y]
axis = _axis_to_shard_heuristic(split_y[0].shape)
return np.concatenate(split_y, axis=axis)
return fastmath.nested_map(_unshard_fn, tensors) |
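Outside of any accelerator setup, the per-tensor step can be pictured with plain NumPy; this sketch mirrors `_unshard_fn` for a 3-D activation split across 2 shards along its last axis (the heuristic's choice for rank >= 3):
import numpy as np
n_shards = 2
x = np.arange(2 * 3 * 8).reshape(2, 3, 8)           # original (batch, time, d)
sharded = np.stack(np.split(x, n_shards, axis=-1))  # (n_shards, 2, 3, 4)
# Undo: split along the leading shard axis, then concatenate on the feature axis.
pieces = [np.squeeze(p, axis=0) for p in np.split(sharded, n_shards, axis=0)]
restored = np.concatenate(pieces, axis=-1)
assert np.array_equal(restored, x)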
Scans the f over the given axis of xs.
In pseudo-python, the scan function would look as follows:
def scan(f, xs, init_value, axis):
xs = [xs[..., i, ...] for i in range(xs.shape[axis])]
cur_value = init_value
ys = []
for x in xs:
y, cur_value = f(x, cur_value)
ys.append(y)
return np.stack(ys, axis), cur_value
Args:
f: function (x, carry) -> (y, new_carry)
xs: tensor, x will be xs slices on axis
init_value: tensor, initial value of the carry-over
axis: int, the axis on which to slice xs
remat: whether to re-materialize f
Returns:
A pair (ys, last_value) as described above. | def _scan(f, xs, init_value, axis=0, remat=False):
"""Scans the f over the given axis of xs.
In pseudo-python, the scan function would look as follows:
def scan(f, xs, init_value, axis):
xs = [xs[..., i, ...] for i in range(xs.shape[axis])]
cur_value = init_value
ys = []
for x in xs:
y, cur_value = f(x, cur_value)
ys.append(y)
return np.stack(ys, axis), cur_value
Args:
f: function (x, carry) -> (y, new_carry)
xs: tensor, x will be xs slices on axis
init_value: tensor, initial value of the carry-over
axis: int, the axis on which to slice xs
remat: whether to re-materialize f
Returns:
A pair (ys, last_value) as described above.
"""
def swapaxes(x):
transposed_axes = list(range(len(x.shape)))
transposed_axes[axis] = 0
transposed_axes[0] = axis
return jnp.transpose(x, axes=transposed_axes)
if axis != 0:
xs = fastmath.nested_map(swapaxes, xs)
def transposed_f(c, x):
y, d = f(x, c)
return d, y
if remat:
transposed_f = fastmath.remat(transposed_f)
last_value, ys = fastmath.scan(transposed_f, init_value, xs)
if axis != 0:
ys = fastmath.nested_map(swapaxes, ys)
return ys, last_value |
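The pseudo-python in the docstring can be run directly as a NumPy reference, independent of the fastmath backend; for example, with a running-sum step function:
import numpy as np
def reference_scan(f, xs, init_value, axis=0):
  slices = [np.take(xs, i, axis=axis) for i in range(xs.shape[axis])]
  carry, ys = init_value, []
  for x in slices:
    y, carry = f(x, carry)
    ys.append(y)
  return np.stack(ys, axis), carry
f = lambda x, c: (x + c, x + c)  # running sum as both output and carry
xs = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
ys, last = reference_scan(f, xs, np.zeros(2), axis=0)
print(ys)    # rows: [1, 2], [4, 6], [9, 12]
print(last)  # [ 9. 12.]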
Executes `layer` using batch chunks of size `chunk_size` to save memory. | def Chunk(layer, chunk_size, pass_unchunkable=True):
"""Executes `layer` using batch chunks of size `chunk_size` to save memory."""
if chunk_size < 1:
return layer
def reshape_to_chunks(x):
chunk_batch = x.shape[0]
size = chunk_size
n_chunks = chunk_batch // size
if chunk_batch % size != 0:
if pass_unchunkable:
n_chunks = 1
size = chunk_batch
else:
raise ValueError(f'Chunk size {size} must divide batch '
f'size {chunk_batch}')
return jnp.reshape(x, [n_chunks, size] + list(x.shape[1:]))
reshape_to_chunks_layer = base.PureLayer(
lambda xs: fastmath.nested_map(reshape_to_chunks, xs),
n_in=layer.n_in, n_out=layer.n_in, name='ReshapeToChunks')
def reshape_from_chunks(x):
batch_size = x.shape[0] * x.shape[1]
return jnp.reshape(x, [batch_size] + list(x.shape[2:]))
reshape_from_chunks_layer = base.PureLayer(
lambda xs: fastmath.nested_map(reshape_from_chunks, xs),
n_in=layer.n_out, n_out=layer.n_out, name='ReshapeFromChunks')
return Serial(
reshape_to_chunks_layer,
Scan(layer, axis=0, n_carry=0, remat=True),
reshape_from_chunks_layer,
) |
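A usage sketch (assuming `trax.layers` as `tl` and `trax.shapes` for signatures): wrapping a layer in `Chunk` leaves its outputs unchanged while processing the batch in smaller slices, which bounds peak memory:
import numpy as np
from trax import layers as tl
from trax import shapes
x = np.random.uniform(size=(8, 5)).astype(np.float32)
layer = tl.Chunk(tl.Relu(), chunk_size=2)  # process the batch 2 rows at a time
layer.init(shapes.signature(x))
y = layer(x)
print(y.shape)  # (8, 5), identical to running tl.Relu() on the full batch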
Combinator that applies a list of layers in parallel to copies of inputs.
Each layer in the input list is applied to as many inputs from the stack
as it needs, and their outputs are successively combined on stack.
For example, suppose one has three layers:
- F: 1 input, 1 output
- G: 3 inputs, 1 output
- H: 2 inputs, 2 outputs (h1, h2)
Then Branch(F, G, H) will take 3 inputs and give 4 outputs:
- inputs: a, b, c
- outputs: F(a), G(a, b, c), h1, h2 where h1, h2 = H(a, b)
As an important special case, a None argument to Branch acts as if it takes
one argument, which it leaves unchanged. (It acts as a one-arg no-op.)
Args:
*layers: List of layers.
name: Descriptive name for this layer.
Returns:
A branch layer built from the given sublayers. | def Branch(*layers, name='Branch'):
"""Combinator that applies a list of layers in parallel to copies of inputs.
Each layer in the input list is applied to as many inputs from the stack
as it needs, and their outputs are successively combined on stack.
For example, suppose one has three layers:
- F: 1 input, 1 output
- G: 3 inputs, 1 output
- H: 2 inputs, 2 outputs (h1, h2)
Then Branch(F, G, H) will take 3 inputs and give 4 outputs:
- inputs: a, b, c
- outputs: F(a), G(a, b, c), h1, h2 where h1, h2 = H(a, b)
As an important special case, a None argument to Branch acts as if it takes
one argument, which it leaves unchanged. (It acts as a one-arg no-op.)
Args:
*layers: List of layers.
name: Descriptive name for this layer.
Returns:
A branch layer built from the given sublayers.
"""
if len(layers) == 1:
return layers[0]
parallel_layer = Parallel(*layers)
indices = [list(range(layer.n_in)) for layer in parallel_layer.sublayers]
return Serial(Select(_deep_flatten(indices)), parallel_layer,
name=name, sublayers_to_print=layers) |
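A concrete sketch of the docstring's bookkeeping, using two simple weightless layers (assuming `trax.layers` as `tl`):
import numpy as np
from trax import layers as tl
from trax import shapes
double = tl.Fn('Double', lambda x: 2 * x)
add = tl.Fn('Add2', lambda x0, x1: x0 + x1)
branch = tl.Branch(double, add)  # n_in = 2 (max of sublayer needs), n_out = 2
a, b = np.array([1, 2]), np.array([10, 20])
branch.init(shapes.signature((a, b)))
doubled, summed = branch((a, b))
print(doubled)  # [2 4]    = Double(a)
print(summed)   # [11 22]  = Add2(a, b)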
Wraps a series of layers with a residual connection.
Args:
*layers: One or more layers, to be applied in series.
shortcut: If None (the usual case), the Residual layer computes the
element-wise sum of the stack-top input with the output of the layer
series. If specified, the `shortcut` layer applies to a copy of the
inputs and (elementwise) adds its output to the output from the main
layer series.
Returns:
A layer representing a residual connection paired with a layer series. | def Residual(*layers, shortcut=None):
"""Wraps a series of layers with a residual connection.
Args:
*layers: One or more layers, to be applied in series.
shortcut: If None (the usual case), the Residual layer computes the
element-wise sum of the stack-top input with the output of the layer
series. If specified, the `shortcut` layer applies to a copy of the
inputs and (elementwise) adds its output to the output from the main
layer series.
Returns:
A layer representing a residual connection paired with a layer series.
"""
layers = _ensure_flat(layers)
layer = layers[0] if len(layers) == 1 else Serial(layers)
# TODO(jonni): Should we require layer.n_out = 1 and shortcut.n_out = 1?
return Serial(
Branch(shortcut, layer),
Add(), # pylint: disable=no-value-for-parameter
) |
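A minimal check (a sketch assuming `trax.layers` as `tl`): with a weightless negation layer the residual sum cancels to zero:
import numpy as np
from trax import layers as tl
from trax import shapes
neg = tl.Fn('Negate', lambda x: -x)
res = tl.Residual(neg)  # computes x + Negate(x)
x = np.array([1.0, 2.0, 3.0], dtype=np.float32)
res.init(shapes.signature(x))
print(res(x))  # [0. 0. 0.]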
Copies, reorders, or deletes stack elements according to `indices`.
Args:
indices: A list or tuple of 0-based indices to select elements relative to
the top of the stack.
n_in: Number of input elements to pop from the stack, and replace with
those specified by `indices`. If not specified, its value will be
calculated as `max(indices) + 1`.
name: Descriptive name for this layer.
Returns:
Tensors, matching the number selected (`n_out = len(indices)`).
Specifically:
- n_out = 0: an empty tuple
- n_out = 1: one tensor (NOT wrapped in a tuple)
- n_out > 1: a tuple of tensors, with n_out items | def Select(indices, n_in=None, name=None):
"""Copies, reorders, or deletes stack elements according to `indices`.
Args:
indices: A list or tuple of 0-based indices to select elements relative to
the top of the stack.
n_in: Number of input elements to pop from the stack, and replace with
those specified by `indices`. If not specified, its value will be
calculated as `max(indices) + 1`.
name: Descriptive name for this layer.
Returns:
Tensors, matching the number selected (`n_out = len(indices)`).
Specifically:
- n_out = 0: an empty tuple
- n_out = 1: one tensor (NOT wrapped in a tuple)
- n_out > 1: a tuple of tensors, with n_out items
"""
if n_in is None:
n_in = max(indices) + 1
if name is None:
name = f'Select{indices}'.replace(' ', '')
def select(xs): # pylint: disable=invalid-name
if not isinstance(xs, (tuple, list)):
xs = (xs,)
selected = tuple(xs[i] for i in indices)
return selected[0] if len(selected) == 1 else selected
return base.PureLayer(select, n_in=n_in, n_out=len(indices), name=name) |
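Two common idioms as a sketch (assuming `trax.layers` as `tl`): swapping the top two stack items and copying the top item:
import numpy as np
from trax import layers as tl
a, b = np.array([1]), np.array([2])
swap = tl.Select([1, 0])  # n_in defaults to max(indices) + 1 = 2
print(swap((a, b)))       # (array([2]), array([1]))
dup = tl.Select([0, 0], n_in=1)
print(dup(a))             # (array([1]), array([1]))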
Drops the top stack element. | def Drop():
"""Drops the top stack element."""
return Fn('Drop', lambda x: (), n_out=0) |
Duplicates (copies) the top element on the data stack. | def Dup():
"""Duplicates (copies) the top element on the data stack."""
return Fn('Dup', lambda x: (x, x), n_out=2) |
Swaps the top two stack elements. | def Swap():
"""Swaps the top two stack elements."""
return Fn('Swap', lambda x0, x1: (x1, x0), n_out=2) |
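A quick sketch of the three stack manipulators in isolation (assuming `trax.layers` as `tl`):
import numpy as np
from trax import layers as tl
x0, x1 = np.array([1]), np.array([2])
print(tl.Dup()(x0))         # (array([1]), array([1]))
print(tl.Swap()((x0, x1)))  # (array([2]), array([1]))
print(tl.Drop()(x0))        # () -- the element is removed from the stack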
Serial layer with side outputs.
This layer makes it easier to manage the stack when layers have side outputs.
In the simplest case of layers with n_in=1, n_out=2 and with
n_side_outputs=1, this layer runs the following computation on x::
side_outputs = []
for i in range(len(layers)):
x, side_output = layers[i](x)
side_outputs.append(side_output)
return [x] + side_outputs
In the general case of layers with variable n_in and n_out and
n_side_outputs being a list of N integers, it does the following::
side_outputs = []
for i in range(N):
res = layer[i](cur_stack) # remove n_in from stack
cur_stack.extend(res[:-n_side_outputs[i]])  # main outputs go back on the stack
side_outputs.extend(res[-n_side_outputs[i]:])  # last outputs are set aside
return cur_stack + side_outputs
Args:
layers: a list of layers to execute
n_side_outputs: an int or a list of ints, how many outputs of each layer
to put aside
Returns:
A layer that performs the above computation. | def SerialWithSideOutputs(layers, n_side_outputs=1):
"""Serial layer with side outputs.
This layer makes it easier to manage the stack when layers have side outputs.
In the simplest case of layers with n_in=1, n_out=2 and with
n_side_outputs=1, this layer runs the following computation on x::
side_outputs = []
for i in range(len(layers)):
x, side_output = layers[i](x)
side_outputs.append(side_output)
return [x] + side_outputs
In the general case of layers with variable n_in and n_out and
n_side_outputs being a list of N integers, it does the following::
side_outputs = []
for i in range(N):
res = layer[i](cur_stack) # remove n_in from stack
cur_stack.extend(res[:-n_side_outputs[i]])  # main outputs go back on the stack
side_outputs.extend(res[-n_side_outputs[i]:])  # last outputs are set aside
return cur_stack + side_outputs
Args:
layers: a list of layers to execute
n_side_outputs: an int or a list of ints, how many outputs of each layer
to put aside
Returns:
A layer that performs the above computation.
"""
if isinstance(n_side_outputs, int):
n_side_outputs = [n_side_outputs] * len(layers)
# Calculate the n_in for this layer.
running_max = 0
running_total = 0
for layer, n_side_output in zip(layers, n_side_outputs):
running_total += layer.n_in
running_max = max(running_max, running_total)
running_total -= layer.n_out - n_side_output
n_in = running_max
# Create the list of layers to run serially.
cur_stack_size = n_in
serial_layers = []
for layer, n_side_output in zip(layers, n_side_outputs):
serial_layers.append(layer)
cur_stack_size += layer.n_out - layer.n_in
# Indices to move n_side_outputs to the back of the stack.
# Don't touch first n_out - n_side_outputs.
move_back_indices = list(range(layer.n_out - n_side_output))
# Then comes the rest of the stack that we're not moving.
move_back_indices += [i + layer.n_out
for i in range(cur_stack_size - layer.n_out)]
# Finally the indices we move.
move_back_indices += [i + layer.n_out - n_side_output
for i in range(n_side_output)]
# Swap them on stack.
serial_layers.append(Select(move_back_indices))
return Serial(serial_layers) |
Flatten lists. | def FlattenList():
"""Flatten lists."""
# TODO(jonni): Consider renaming layer to DeepFlatten.
return Fn('FlattenList', lambda x: tuple(_deep_flatten(x))) |
Adds two tensors. | def Add():
"""Adds two tensors."""
return Fn('Add', lambda x0, x1: x0 + x1) |
Subtracts the first tensor from the second. | def SubtractTop():
"""Subtracts the first tensor from the second."""
return Fn('SubtractTop', lambda x0, x1: x1 - x0) |
Multiplies two tensors. | def Multiply():
"""Multiplies two tensors."""
return Fn('Multiply', lambda x0, x1: x0 * x1) |
Returns a gating layer on a (memory, gate, candidate) tuple.
Final update is memory * gate + (1 - gate) * candidate
This gating equation may also be referred to as Highway Network.
Highway Networks: https://arxiv.org/abs/1505.00387 | def Gate():
"""Returns a gating layer on a (memory, gate, candidate) tuple.
Final update is memory * gate + (1 - gate) * candidate
This gating equation may also be referred to as Highway Network.
Highway Networks: https://arxiv.org/abs/1505.00387
"""
return Fn('Gate', lambda m, g, c: g * m + (1.0 - g) * c) |
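Numerically (a sketch assuming `trax.layers` as `tl`), gate values of 1 pass the memory through untouched and gate values of 0 replace it with the candidate:
import numpy as np
from trax import layers as tl
memory = np.array([1.0, 1.0])
gate = np.array([1.0, 0.0])
candidate = np.array([5.0, 5.0])
print(tl.Gate()((memory, gate, candidate)))  # [1. 5.]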
Bidirectional combinator for RNNs.
Args:
forward_layer: A layer, such as `trax.layers.LSTM` or `trax.layers.GRU`.
axis: a time axis of the inputs. Default value is `1`.
merge_layer: A combinator used to combine outputs of the forward
and backward RNNs. Default value is 'trax.layers.Concatenate'.
Example:
Bidirectional(RNN(n_units=8))
Returns:
The Bidirectional combinator for RNNs. | def Bidirectional(forward_layer, axis=1, merge_layer=Concatenate()):
"""Bidirectional combinator for RNNs.
Args:
forward_layer: A layer, such as `trax.layers.LSTM` or `trax.layers.GRU`.
axis: a time axis of the inputs. Default value is `1`.
merge_layer: A combinator used to combine outputs of the forward
and backward RNNs. Default value is 'trax.layers.Concatenate'.
Example:
Bidirectional(RNN(n_units=8))
Returns:
The Bidirectional combinator for RNNs.
"""
backward_layer = copy.deepcopy(forward_layer)
flip = base.Fn('_FlipAlongTimeAxis', lambda x: jnp.flip(x, axis=axis))
backward = Serial(
flip,
backward_layer,
flip,
)
return Serial(
Branch(forward_layer, backward),
merge_layer,
) |
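Shape-wise (a sketch assuming `trax.layers` as `tl`), wrapping an 8-unit LSTM doubles the feature dimension because the forward and backward outputs are concatenated on the last axis:
import numpy as np
from trax import layers as tl
from trax import shapes
x = np.random.uniform(size=(2, 7, 8)).astype(np.float32)  # (batch, time, features)
birnn = tl.Bidirectional(tl.LSTM(n_units=8))
birnn.init(shapes.signature(x))
y = birnn(x)
print(y.shape)  # (2, 7, 16): forward and backward outputs concatenated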
Returns a list of objects, flattening sublists/subtuples along the way.
Example: _deep_flatten([1, (2, 3, (4, 5), [6, 7]), [[[8]]]]) would return
the list [1, 2, 3, 4, 5, 6, 7, 8].
Args:
items: An iterable. If elements of this iterable are lists or tuples, they
will be (recursively) flattened until non-list non-tuple objects are
reached.
Returns:
A list of non-list, non-tuple objects. | def _deep_flatten(items):
"""Returns a list of objects, flattening sublists/subtuples along the way.
Example: _deep_flatten([1, (2, 3, (4, 5), [6, 7]), [[[8]]]]) would return
the list [1, 2, 3, 4, 5, 6, 7, 8].
Args:
items: An iterable. If elements of this iterable are lists or tuples, they
will be (recursively) flattened until non-list non-tuple objects are
reached.
Returns:
A list of non-list, non-tuple objects.
"""
def _flat_gen(xs):
for x in xs:
if isinstance(x, (list, tuple)):
for y in _flat_gen(x):
yield y
else:
yield x
return list(_flat_gen(items)) |
Ensures that elements in a layer list are layers.
Args:
layers: A tuple or list whose elements can each be a layer, tuple, or list,
and so on recursively.
Returns:
An analogous collection of layers in which embedded layer lists are
wrapped in Serial layer instances. | def _ensure_sublayers(layers):
"""Ensures that elements in a layer list are layers.
Args:
layers: A tuple or list whose elements can each be a layer, tuple, or list,
and so on recursively.
Returns:
An analogous collection of layers in which embedded layer lists are
wrapped in Serial layer instances.
"""
if not layers: # None or an empty list can signal a no-op.
return Serial(None) # no-op, but still handles shapes and initialization
elif isinstance(layers, (list, tuple)):
sublayers_not_lists = []
for layer in layers:
sublayers_not_lists.append(
Serial(layer) if isinstance(layer, (list, tuple)) else layer)
return sublayers_not_lists
else:
raise TypeError(type(layers)) |
Returns n inputs from stack. | def inputs_from_stack(stack, n):
"""Returns n inputs from stack."""
stack = _make_tuple(stack)
return _make_singleitem_or_original(stack[:n]) |
"Returns the new stack after removing n items and pushing outputs there. | def outputs_onto_stack(outputs, stack, n):
""""Returns the new stack after removing n items and pushing outputs there."""
outputs = _make_tuple(outputs)
stack = _make_tuple(stack)
return _make_singleitem_or_original(outputs + stack[n:]) |
Returns a tuple from a list, a tuple, or a single element. | def _make_tuple(xs):
"""Returns a tuple from a list, a tuple, or a single element."""
if isinstance(xs, (list, tuple)):
return tuple(xs)
else:
return (xs,) |
Returns a single element if possible, or the original list/tuple if not. | def _make_singleitem_or_original(xs):
"""Returns a single element if possible, or the original list/tuple if not."""
if isinstance(xs, (list, tuple)) and len(xs) == 1:
return xs[0]
else:
return xs |
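A small sketch of how these two helpers cooperate when running a layer over a stack (assuming they are importable from `trax.layers.base`):
from trax.layers import base  # assumed location of the stack helpers
stack = ('a', 'b', 'c')
taken = base.inputs_from_stack(stack, 2)  # ('a', 'b') feed a 2-input layer
outputs = ('sum_ab',)                     # pretend the layer returned one item
new_stack = base.outputs_onto_stack(outputs, stack, 2)
print(new_stack)                          # ('sum_ab', 'c')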