def _ensure_flat(layers):
  """Ensures that layers is a single flat list of Layer instances."""
  if len(layers) == 1 and layers[0] is None:
    layers = ()
  else:
    layers = _deep_flatten(layers)
  for obj in layers:
    if not isinstance(obj, base.Layer):
      raise ValueError(
          f'Found nonlayer object ({obj}) in layers: {layers}')
  return layers
def DivideBy(val):  # pylint: disable=invalid-name
  """Returns a simple division layer with n_in == 1 and n_out == 1."""
  return tl.Fn('DivideBy', lambda x: x / val)
def ReturnConst(val):  # pylint: disable=invalid-name
  """Returns a simple const layer with n_in == 0 and n_out == 1."""
  return tl.Fn('ReturnConst', lambda: val)
def SmallerThan(val):  # pylint: disable=invalid-name
  """Checks if the input is smaller than a given value."""
  return tl.Fn('SmallerThan', lambda x: x < val)
def as_list(outputs):
  """Converts layer outputs to a nested list, for easier equality testing.

  Args:
    outputs: A tensor or tuple/list of tensors coming from the forward
        application of a layer. Each tensor is NumPy ndarray-like, which
        complicates simple equality testing (e.g., via `assertEquals`): such
        tensors require equality testing to use either `all` (all elements
        match) or `any` (at least one element matches), which is not directly
        supported in absltest.

  Returns:
    A nested list structure containing all the output values, but now
    directly testable using `assertEquals`.
  """
  if isinstance(outputs, (list, tuple)):
    return [as_list(y) for y in outputs]
  else:
    return outputs.tolist()
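A quick illustration of the conversion, using the `as_list` defined above with plain NumPy arrays (a minimal sketch, not from the library's tests):

import numpy as np

# Nested tuples/lists of ndarray-like outputs become plain nested lists.
outputs = (np.array([1, 2]), [np.array([[3]]), np.array(4)])
assert as_list(outputs) == [[1, 2], [[3]], 4]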
def _zero_pad(x, pad, axis):  # pylint: disable=invalid-name
  """Helper for jnp.pad with 0s for single-axis case."""
  pad_widths = [(0, 0)] * len(x.shape)
  pad_widths[axis] = pad  # Padding on axis.
  return jnp.pad(x, pad_widths, mode='constant')
def PrintShape(n_in=1, msg=''):
  """Prints the shapes of `n_in` inputs and returns them unchanged."""
  def Fwd(xs):
    def format_shape(x):  # pylint: disable=invalid-name
      return str(x.shape) + f'[{x.dtype}]'
    if n_in > 1:
      shapes_and_dtypes = ', '.join([format_shape(x) for x in xs])
    else:
      shapes_and_dtypes = format_shape(xs)
    info = f'PrintShape: {msg}: [{shapes_and_dtypes}]'
    print(info)
    logging.info(info)
    return xs

  return base.PureLayer(Fwd, n_in=n_in, n_out=n_in, name=f'PrintShape_{n_in}')
def Flatten(n_axes_to_keep=1):
  """Returns a layer that combines one or more trailing axes of a tensor.

  Flattening keeps all the values of the input tensor, but reshapes it by
  collapsing one or more trailing axes into a single axis. For example, a
  `Flatten(n_axes_to_keep=2)` layer would map a tensor with shape
  `(2, 3, 5, 7, 11)` to the same values with shape `(2, 3, 385)`.

  Args:
    n_axes_to_keep: Number of leading axes to leave unchanged when reshaping;
        collapse only the axes after these.
  """
  layer_name = f'Flatten_keep{n_axes_to_keep}'
  def f(x):  # pylint: disable=invalid-name
    in_rank = len(x.shape)
    if in_rank <= n_axes_to_keep:
      raise ValueError(f'Input rank ({in_rank}) must exceed the number of '
                       f'axes to keep ({n_axes_to_keep}) after flattening.')
    shape = x.shape
    if isinstance(shape, tf.TensorShape):
      shape = tuple(shape.as_list())
    return jnp.reshape(x, (shape[:n_axes_to_keep] + (-1,)))
  return Fn(layer_name, f)
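The shape arithmetic is a plain reshape that preserves the leading axes. The following NumPy sketch (independent of the layer machinery above) mirrors that logic and checks the docstring's example, where 5 * 7 * 11 = 385:

import numpy as np

def flatten_array(x, n_axes_to_keep=1):
  # Keep the first n leading axes; collapse everything after into one axis.
  return np.reshape(x, x.shape[:n_axes_to_keep] + (-1,))

x = np.zeros((2, 3, 5, 7, 11))
assert flatten_array(x, n_axes_to_keep=2).shape == (2, 3, 385)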
def LogSoftmax(axis=-1):
  """Returns a layer that applies log softmax along one tensor axis.

  Note that the implementation actually computes x - LogSumExp(x), which is
  mathematically equal to LogSoftmax(x).

  `LogSoftmax` acts on a group of values and normalizes them to look like a
  set of log probability values. (Probability values must be non-negative,
  and as a set must sum to 1. A group of log probability values can be seen
  as the natural logarithm function applied to a set of probability values.)

  Args:
    axis: Axis along which values are grouped for computing log softmax.
  """
  return Fn('LogSoftmax', lambda x: log_softmax(x, axis=axis))
def LogSumExp(axis=-1):
  """Returns a layer that computes log(sum(exp(x))) along one tensor axis.

  Args:
    axis: Axis along which values are grouped for computing log-sum-exp.
  """
  return Fn('LogSumExp',
            lambda x: fastmath.logsumexp(x, axis=axis, keepdims=True))
def Softmax(axis=-1):
  """Returns a layer that applies softmax along one tensor axis.

  `Softmax` acts on a group of values and normalizes them to look like a set
  of probability values. (Probability values must be non-negative, and as a
  set must sum to 1.)

  Args:
    axis: Axis along which values are grouped for computing softmax.
  """
  return Fn('Softmax', lambda x: jnp.exp(log_softmax(x, axis=axis)))
def ToFloat():
  """Returns a layer that changes the dtype of a tensor to `float32`."""
  return Fn('ToFloat', lambda x: x.astype(np.float32))
def Mean(axis=-1, keepdims=False):
  """Returns a layer that computes mean values using one tensor axis.

  `Mean` uses one tensor axis to form groups of values and replaces each group
  with the mean value of that group. The resulting values can either remain
  in their own size 1 axis (`keepdims=True`), or that axis can be removed from
  the overall tensor (default `keepdims=False`), lowering the rank of the
  tensor by one.

  Args:
    axis: Axis along which values are grouped for computing a mean.
    keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
        axis; else, remove that axis.
  """
  return Fn('Mean', lambda x: jnp.mean(x, axis=axis, keepdims=keepdims))
def Min(axis=-1, keepdims=False):
  """Returns a layer that applies min along one tensor axis.

  Args:
    axis: Axis along which values are grouped for computing minimum.
    keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
        axis; else, remove that axis.
  """
  return Fn('Min', lambda x: jnp.min(x, axis, keepdims=keepdims))
def Max(axis=-1, keepdims=False):
  """Returns a layer that applies max along one tensor axis.

  Args:
    axis: Axis along which values are grouped for computing maximum.
    keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
        axis; else, remove that axis.
  """
  return Fn('Max', lambda x: jnp.max(x, axis, keepdims=keepdims))
def Sum(axis=None, keepdims=False):
  """Returns a layer that computes sums using one tensor axis.

  `Sum` uses one tensor axis to form groups of values and replaces each group
  with the sum of that group. The resulting sum values can either remain in
  their own size 1 axis (`keepdims=True`), or that axis can be removed from
  the overall tensor (default `keepdims=False`), lowering the rank of the
  tensor by one.

  Args:
    axis: Axis along which values are grouped for computing a sum; if None,
        compute sum over all elements in tensor.
    keepdims: If `True`, keep the resulting size 1 axis as a separate tensor
        axis; else, remove that axis.
  """
  return Fn('Sum', lambda x: jnp.sum(x, axis=axis, keepdims=keepdims))
def ThresholdToBinary(threshold=.5):
  """Returns a layer that thresholds inputs to yield outputs in {0, 1}."""
  def f(model_output):  # pylint: disable=invalid-name
    return (model_output > threshold).astype(jnp.int32)
  return Fn('ThresholdToBinary', f)
def ArgMax(axis=-1):
  """Returns a layer that calculates argmax along the given axis."""
  def f(model_output):  # pylint: disable=invalid-name
    return jnp.argmax(model_output, axis=axis)
  return Fn('ArgMax', f)
def Negate():
  """Returns a layer that computes the element-wise negation of a tensor."""
  return Fn('Negate', lambda x: -x)
def StopGradient():
  """Returns an identity layer with a stop gradient."""
  return Fn('StopGradient', lambda x: fastmath.stop_gradient(x))
def one_hot(x, n_categories, dtype=jnp.float32):  # pylint: disable=invalid-name
  """Makes a one-hot array (n+1 dims) from an int-categorical array (n dims)."""
  indices_less_than_n = jnp.arange(n_categories)
  return jnp.array(x[..., jnp.newaxis] == indices_less_than_n, dtype)
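The broadcast comparison against `jnp.arange` is what builds the extra trailing axis; here is the same trick in plain NumPy (a minimal sketch, not library code):

import numpy as np

x = np.array([0, 2, 1])  # int-categorical, shape (3,)
oh = (x[..., np.newaxis] == np.arange(3)).astype(np.float32)
assert oh.tolist() == [[1, 0, 0], [0, 0, 1], [0, 1, 0]]  # shape (3, 3)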
def log_softmax(x, axis=-1):  # pylint: disable=invalid-name
  """Transforms activation vectors to log-probability vectors.

  Log probability vectors are derived by, in effect, applying softmax to raw
  activation vectors and then applying log element-wise. The actual
  implementation uses a mathematically valid simplification of this.

  Args:
    x: An ndarray with activation vectors along the given axis.
    axis: Axis along which values are grouped for computing log softmax.

  Returns:
    An ndarray containing log-probability vectors derived from the raw
    activation vectors in `x`.
  """
  return x - fastmath.logsumexp(x, axis=axis, keepdims=True)
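A quick numeric check of this simplification in plain NumPy (not library code): exponentiating `x - logsumexp(x)` recovers softmax probabilities that sum to 1.

import numpy as np

x = np.array([1.0, 2.0, 3.0])
log_probs = x - np.log(np.sum(np.exp(x)))  # log_softmax via logsumexp
assert np.isclose(np.exp(log_probs).sum(), 1.0)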
def log_gaussian_pdf(x, mu, sigma):  # pylint: disable=invalid-name
  """Returns `log N(x | mu, sigma)`.

  Args:
    x: Point (or batch of points) at which to evaluate the log density.
    mu: Mean vector of the Gaussian.
    sigma: Full covariance matrix of the Gaussian.
  """
  a = mu.shape[-1] * jnp.log(2 * jnp.pi)
  _, b = jnp.linalg.slogdet(sigma)
  y = jnp.linalg.solve(sigma, x - mu)
  y = jnp.expand_dims(y, axis=-1)
  xm = jnp.expand_dims(x - mu, axis=-2)
  c = jnp.matmul(xm, y)
  c = jnp.squeeze(jnp.squeeze(c, axis=-1), axis=-1)
  return -0.5 * (a + b + c)
def log_gaussian_diag_pdf(x, mu, diag_sigma):  # pylint: disable=invalid-name
  """Returns `log N(x | mu, diag(diag_sigma))`.

  Args:
    x: Point (or batch of points) at which to evaluate the log density.
    mu: Mean vector of the Gaussian.
    diag_sigma: Diagonal of the covariance matrix of the Gaussian.
  """
  a = mu.shape[-1] * jnp.log(2 * jnp.pi)
  b = jnp.sum(jnp.log(diag_sigma), axis=-1)
  y = (x - mu) / diag_sigma  # Normalize the residual per dimension.
  y = jnp.expand_dims(y, axis=-1)
  xm = jnp.expand_dims(x - mu, axis=-2)
  c = jnp.matmul(xm, y)
  c = jnp.squeeze(jnp.squeeze(c, axis=-1), axis=-1)
  return -0.5 * (a + b + c)
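As a sanity check on the diagonal-covariance special case, the following plain-NumPy sketch (not library code) verifies that the diagonal log pdf matches the dense log pdf evaluated at `sigma = diag(diag_sigma)`:

import numpy as np

x = np.array([0.5, -1.0])
mu = np.zeros(2)
diag_sigma = np.array([2.0, 0.5])

a = len(mu) * np.log(2 * np.pi)
log_pdf_diag = -0.5 * (a + np.sum(np.log(diag_sigma))
                       + np.sum((x - mu) ** 2 / diag_sigma))

sigma = np.diag(diag_sigma)
_, logdet = np.linalg.slogdet(sigma)
quad = (x - mu) @ np.linalg.solve(sigma, x - mu)
log_pdf_dense = -0.5 * (a + logdet + quad)
assert np.isclose(log_pdf_diag, log_pdf_dense)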
def multigaussian_loss(preds, targets, ngauss=1):  # pylint: disable=invalid-name
  """Returns a mixture-of-Gaussians loss.

  Args:
    preds: Mixture parameters for each batch item: `ngauss` mixture logits,
        followed by `ngauss * ndims` means and `ngauss * ndims` scales.
    targets: Target values with trailing dimension `ndims`.
    ngauss: Number of Gaussians in the mixture.
  """
  ndims = targets.shape[-1]
  logits = preds[:, :ngauss]
  mus = preds[:, ngauss:ngauss * (ndims + 1)]
  sigmas = preds[:, ngauss * (ndims + 1):]
  sigmas = sigmas * sigmas + 1e-6  # Make positive.
  loglogits = logits - fastmath.logsumexp(logits, axis=-1, keepdims=True)
  mus = jnp.reshape(mus, [-1, ngauss, ndims])
  sigmas = jnp.reshape(sigmas, [-1, ngauss, ndims])
  targets = jnp.reshape(targets, [-1, 1, ndims])
  glogprobs = log_gaussian_diag_pdf(targets, mus, sigmas)
  return fastmath.logsumexp(loglogits + glogprobs, axis=-1)
def logsoftmax_sample(log_probs, temperature=1.0):  # pylint: disable=invalid-name
  """Returns a sample from a log-softmax output, with temperature.

  Args:
    log_probs: Logarithms of probabilities (often coming from LogSoftmax)
    temperature: For scaling before sampling (1.0 = default, 0.0 = pick argmax)
  """
  # This is equivalent to sampling from a softmax with temperature.
  u = np.random.uniform(low=1e-6, high=1.0 - 1e-6, size=log_probs.shape)
  g = -np.log(-np.log(u))
  return np.argmax(log_probs + g * temperature, axis=-1)
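The `g` values above are Gumbel(0, 1) noise, so this is the Gumbel-max trick: at temperature 1.0, `argmax(log_probs + g)` selects category `i` with probability `exp(log_probs[i])`. A plain-NumPy empirical check (not library code):

import numpy as np

log_probs = np.log(np.array([0.2, 0.5, 0.3]))
rng = np.random.default_rng(0)
u = rng.uniform(low=1e-6, high=1.0 - 1e-6, size=(100_000, 3))
g = -np.log(-np.log(u))  # Gumbel(0, 1) noise
samples = np.argmax(log_probs + g, axis=-1)
freqs = np.bincount(samples, minlength=3) / samples.size
assert np.allclose(freqs, [0.2, 0.5, 0.3], atol=0.01)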
def _GetFans(shape, out_dim=-1, in_dim=-2, nonreceptive_dims=None):
  """Get the fan-in and fan-out sizes for the given shape and dims."""
  # Temporary fix until numpy.delete supports negative indices.
  if out_dim < 0:
    out_dim += len(shape)
  if in_dim < 0:
    in_dim += len(shape)

  if nonreceptive_dims is None:
    nonreceptive_dims = []
  if not isinstance(nonreceptive_dims, (list, tuple)):
    nonreceptive_dims = [nonreceptive_dims]

  receptive_field = jnp.prod(
      np.delete(shape, [in_dim, out_dim, *nonreceptive_dims]))
  if len(shape) >= 2:
    fan_in, fan_out = shape[in_dim], shape[out_dim]
  elif len(shape) == 1:
    fan_in = shape[0]
    fan_out = shape[0]
  else:
    fan_in = 1.
    fan_out = 1.
  fan_in *= receptive_field
  fan_out *= receptive_field
  return fan_in, fan_out
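For intuition, a hypothetical call (assuming the module's own imports are in scope) for a conv kernel stored as (height, width, in_channels, out_channels): the two non-fan axes form a 3x3 receptive field, so both fans are multiplied by 9.

fan_in, fan_out = _GetFans((3, 3, 64, 128))  # defaults: in_dim=-2, out_dim=-1
assert fan_in == 64 * 9 and fan_out == 128 * 9  # receptive field 3 * 3 = 9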
def InitializerFromFile(path):
  """Loads parameters from .npy file."""
  def Initializer(shape, rng):
    del rng
    logging.info('Loading pretrained embeddings from %s', path)
    with tf.io.gfile.GFile(path, 'rb') as f:
      parameters = jnp.load(f)
    assert jnp.shape(parameters) == shape, (
        'Expected shape %s, got %s' % (shape, jnp.shape(parameters)))
    return parameters
  return Initializer
def _PureShape(shape):
  """Make sure shape does not contain int tensors by calling int()."""
  return [int(x) for x in shape]
def RandomNormalInitializer(stddev=1e-2):
  """Returns an initializer for random normal coefficients."""
  return lambda shape, rng: (stddev * random.normal(  # pylint: disable=g-long-lambda
      rng, _PureShape(shape)).astype('float32'))
def RandomUniformInitializer(lim=1.0):
  """Returns an initializer for random uniform coefficients."""
  # Make sure shape does not contain int tensors by calling int() below.
  return lambda shape, rng: random.uniform(  # pylint: disable=g-long-lambda
      rng, _PureShape(shape), jnp.float32, -lim, lim)
def ScaledInitializer(out_dim, in_dim, scale, mode, distribution):
  """Returns an initializer that adjusts its scale based on weight shapes."""
  if scale <= 0.:
    raise ValueError('scale must be positive float, {} given'.format(scale))
  if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
    raise ValueError(
        'Invalid mode argument: {}; must be fan_in, fan_out or fan_avg'
        .format(mode))

  def Init(shape, rng, nonreceptive_dims=None):
    """Returns random values for initializing weights of the given `shape`."""
    shape = _PureShape(shape)
    fan_in, fan_out = _GetFans(shape, out_dim, in_dim, nonreceptive_dims)
    gain = scale
    if mode == 'fan_in':
      gain /= fan_in
    elif mode == 'fan_out':
      gain /= fan_out
    elif mode == 'fan_avg':
      gain /= (fan_in + fan_out) / 2
    if distribution == 'truncated_normal':
      # constant from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
      stddev = jnp.sqrt(gain) / .87962566103423978
      new_weights = random.truncated_normal(rng, -2, 2, shape) * stddev
      return new_weights.astype('float32')
    elif distribution == 'normal':
      new_weights = random.normal(rng, shape) * jnp.sqrt(gain)
      return new_weights.astype('float32')
    elif distribution == 'uniform':
      lim = jnp.sqrt(3. * gain)
      return random.uniform(rng, shape, jnp.float32, -lim, lim)
    else:
      raise ValueError('invalid distribution for ScaledInitializer')

  return Init
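For the 'fan_avg'/'uniform' combination (Glorot uniform), the limit `sqrt(3 * gain)` is chosen so that the weight variance equals `gain = scale / fan_avg`, because Var(U(-a, a)) = a^2 / 3. A plain-NumPy empirical check of that relationship (a sketch, not library code):

import numpy as np

fan_in, fan_out, scale = 512, 256, 1.0
gain = scale / ((fan_in + fan_out) / 2)  # 'fan_avg' mode
lim = np.sqrt(3.0 * gain)                # 'uniform' distribution
w = np.random.default_rng(0).uniform(-lim, lim, size=(fan_in, fan_out))
assert np.isclose(w.var(), gain, rtol=0.05)  # Var(U(-a, a)) = a**2 / 3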
def GlorotNormalInitializer(out_dim=-1, in_dim=-2, scale=1.):
  """Returns an initializer for random Glorot-scaled coefficients."""
  return ScaledInitializer(out_dim, in_dim, scale, 'fan_avg', 'normal')
def GlorotUniformInitializer(out_dim=-1, in_dim=-2, scale=1.):
  """Returns an initializer for random uniform Glorot-scaled coefficients."""
  return ScaledInitializer(out_dim, in_dim, scale, 'fan_avg', 'uniform')
def LeCunNormalInitializer(out_dim=-1, in_dim=-2, scale=1.):
  """Returns an initializer for random LeCun-scaled coefficients."""
  return ScaledInitializer(out_dim, in_dim, scale, 'fan_in', 'normal')
def LeCunUniformInitializer(out_dim=-1, in_dim=-2, scale=1.):
  """Returns an initializer for random uniform LeCun-scaled coefficients."""
  return ScaledInitializer(out_dim, in_dim, scale, 'fan_in', 'uniform')
def KaimingNormalInitializer(out_dim=-1, in_dim=-2, param=0.):
  """Returns an initializer for random Kaiming-scaled coefficients."""
  return ScaledInitializer(
      out_dim, in_dim, 2.0 / jnp.sqrt(1 + param**2), 'fan_in', 'normal')
def KaimingUniformInitializer(out_dim=-1, in_dim=-2, param=0.):
  """Returns an initializer for random uniform Kaiming-scaled coefficients."""
  return ScaledInitializer(
      out_dim, in_dim, 2.0 / jnp.sqrt(1 + param**2), 'fan_in', 'uniform')
def OrthogonalInitializer(stddev=1.0):
  """Returns an orthogonal initializer."""
  def Init(shape, rng):
    """Returns orthogonalized random normal values with the given `shape`."""
    # Have at least 2 elements in shape.
    cur_shape = list(shape)
    while len(cur_shape) < 2:
      cur_shape = [1] + cur_shape

    # Flatten the input shape with the last dimension remaining.
    n_rows = 1
    for dim in cur_shape[:-1]:
      n_rows *= dim
    n_cols = cur_shape[-1]
    flat_shape = (n_cols, n_rows) if n_rows < n_cols else (n_rows, n_cols)

    # Generate a random matrix.
    a = random.normal(rng, flat_shape, dtype=jnp.float32)

    # Compute the qr factorization.
    q, r = jnp.linalg.qr(a)

    # Make Q uniform.
    d = jnp.diag(r)
    q *= jnp.sign(d)

    # Transpose and reshape back q if needed.
    if n_rows < n_cols:
      q = jnp.transpose(q)
    q = jnp.reshape(q, shape)

    # Return scaled as requested.
    return stddev * q

  return Init
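The sign correction by `jnp.sign(d)` makes the QR factorization unique, which keeps the resulting distribution uniform over orthogonal matrices. A plain-NumPy sketch of the same steps (not library code), checking that the result has orthonormal columns:

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(8, 4))
q, r = np.linalg.qr(a)        # reduced QR: q is 8x4
q *= np.sign(np.diag(r))      # sign fix, as in `Init` above
assert np.allclose(q.T @ q, np.eye(4))  # columns are orthonormal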
def AtariConvInit(kernel_shape, rng, dtype=jnp.float32):
  """The standard init for Conv layers in Atari models."""
  filter_height, filter_width, fan_in, _ = kernel_shape
  std = 1 / jnp.sqrt(fan_in * filter_height * filter_width)
  return random.uniform(rng, kernel_shape, dtype, minval=-std, maxval=std)
def CategoryAccuracy():
  r"""Returns a layer that computes category prediction accuracy.

  The layer takes two inputs:

    - A batch of activation vectors. The components in a given vector should
      be mappable to a probability distribution in the following loose sense:
      within a vector, a higher component value corresponds to a higher
      probability, such that argmax within a vector (``axis=-1``) picks the
      index (category) having the highest probability.

    - A batch of target categories; each target is an integer in
      :math:`\{0, ..., N-1\}`.

  The predicted category from each vector is the index of the highest-valued
  vector component. The layer returns the accuracy of these predictions
  averaged over the batch.
  """
  def f(model_output, targets):  # pylint: disable=invalid-name
    predictions = jnp.argmax(model_output, axis=-1)
    shapes.assert_same_shape(predictions, targets)
    n_total = predictions.size
    n_correct = jnp.sum(jnp.equal(predictions, targets))
    return n_correct / n_total
  return base.Fn('CategoryAccuracy', f)
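A worked example of the computation in plain NumPy (not library code): two of the three argmax predictions match their targets, so the accuracy is 2/3.

import numpy as np

model_output = np.array([[0.2, 0.7, 0.1],    # argmax -> 1
                         [0.9, 0.05, 0.05],  # argmax -> 0
                         [0.3, 0.3, 0.4]])   # argmax -> 2
targets = np.array([1, 0, 1])
predictions = np.argmax(model_output, axis=-1)
assert np.isclose((predictions == targets).sum() / targets.size, 2 / 3)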
def _n_weights_per_core(weights):  # pylint: disable=invalid-name
  """Calculates the number of weights per core.

  In multi-device settings, gradients and losses are averaged over all devices.
  When loss is weighted and the number of weights can differ by device, e.g.,
  when the weights represent the number of tokens in a batch of sentences
  (which can differ from device to device), we want to make sure each token on
  each device is weighted in the same way. This function ensures that by
  reporting the number of weights per core in multi-core settings (and simply
  np.sum(weights) in a single-core setting).

  Args:
    weights: tensor with arbitrary shape

  Returns:
    a scalar equal to np.sum(weights) in 1-machine settings and to the sum of
    weights over all cores divided by the number of cores otherwise
  """
  weights_sum = jnp.sum(weights)
  if fastmath.global_device_count() < 2:
    return weights_sum
  else:
    try:
      n_devices_total = fastmath.psum(1, 'batch')
      return fastmath.psum(weights_sum, 'batch') / n_devices_total
    except (NameError, ValueError):  # running outside of pmap, e.g., on init
      return weights_sum
def _non_nan(x):  # pylint: disable=invalid-name
  """Replaces NaN values with zeros.

  A support function that replaces NaN values with zeros, to sidestep the
  undefined results of division by zero in the metrics below.

  Args:
    x: tensor with arbitrary shape.

  Returns:
    Array with NaNs replaced with 0.
  """
  return jnp.where(jnp.isnan(x), 0., x)
def _precision_recall(predictions, targets, k):  # pylint: disable=invalid-name
  """Returns precision, recall, and intermediate values for the category `k`.

  A support function for calculating precision, recall, and intermediate
  values for the single category `k` for future use in metric layers.

  Args:
    predictions: predicted categories.
    targets: target categories.
    k: a category number.

  Returns a tuple:
    n_correct: a number of correct (or true) examples.
    n_k_predictions: a number of predictions of the `k` category.
    n_k_targets: a number of targets for the `k` category.
    precision: a precision score.
    recall: a recall score.
  """
  n_correct = sum((predictions == k) & (targets == k))
  n_k_predictions = sum(predictions == k)
  precision = _non_nan(n_correct / n_k_predictions)
  n_k_targets = sum(targets == k)
  recall = _non_nan(n_correct / n_k_targets)
  return (n_correct, n_k_predictions, n_k_targets, precision, recall)
def _f_score(precision, recall, beta2):  # pylint: disable=invalid-name
  """Returns F-score.

  A support function to calculate the F-score for a single category.

  Args:
    precision: a precision score.
    recall: a recall score.
    beta2: a square of the parameter that determines the weight of recall.
  """
  return _non_nan(
      (beta2 + 1) * (precision * recall) / ((beta2 * precision) + recall))
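With `beta2 = 1` the formula reduces to the familiar F1, the harmonic mean of precision and recall; a one-line check:

precision, recall = 0.5, 1.0
f1 = (1 + 1) * (precision * recall) / ((1 * precision) + recall)  # beta2 = 1
assert f1 == 2 * precision * recall / (precision + recall)  # harmonic mean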
def WeightedCategoryAccuracy():
  r"""Returns a layer that computes a weighted category prediction accuracy.

  The layer takes three inputs:

    - A batch of activation vectors. The components in a given vector should
      be mappable to a probability distribution in the following loose sense:
      within a vector, a higher component value corresponds to a higher
      probability, such that argmax within a vector (``axis=-1``) picks the
      index (category) having the highest probability.

    - A batch of target categories; each target is an integer in
      :math:`\{0, ..., N-1\}`, where :math:`N` is the activation vector
      depth/dimensionality.

    - A batch of weights, which matches or can be broadcast to match the shape
      of the target ndarray. This arg can give uneven weighting to different
      items in the batch (depending, for instance, on the item's target
      category).

  The predicted category from each vector is the index of the highest-valued
  vector component. The layer returns a weighted average accuracy of these
  predictions.
  """
  def f(model_output, targets, weights):  # pylint: disable=invalid-name
    predictions = jnp.argmax(model_output, axis=-1)
    shapes.assert_same_shape(predictions, targets)
    ones_and_zeros = jnp.equal(predictions, targets)
    return jnp.sum(ones_and_zeros * weights) / _n_weights_per_core(weights)
  return base.Fn('WeightedCategoryAccuracy', f)
def CategoryCrossEntropy(label_smoothing=None):
  r"""Returns a layer that computes cross-entropy from activations and integers.

  The layer takes two inputs:

    - A batch of activation vectors. The components in a given vector should
      be pre-softmax activations (mappable to a probability distribution via
      softmax). For performance reasons, the softmax and cross-entropy
      computations are combined inside the layer.

    - A batch of target categories; each target is an integer in
      :math:`\{0, ..., N-1\}`, where :math:`N` is the activation vector
      depth/dimensionality.

  To compute cross-entropy per batch item, the layer derives probability
  distributions:

    - from model output (vectors): :math:`\ q = \text{softmax}(v)`

    - from target categories (integers): :math:`\ p = \text{one_hot}(n)` or
      :math:`p =
      (1-\varepsilon)\cdot\text{one_hot}(n) + \frac{\varepsilon}{N}`, where
      :math:`\varepsilon` is the label smoothing factor.

  (The conversion of integer category targets to one-hot vectors amounts to
  assigning all the probability mass to the target category.) Cross-entropy
  per batch item is computed between the resulting distributions:

  .. math::
      \text{cross_entropy} = - \sum_{i=0}^{N-1} p_i \log q_i

  The layer returns the average of these cross-entropy values over all items
  in the batch.

  Args:
    label_smoothing: Creates soft targets if provided. Must be between 0 and 1.
  """
  def f(model_output, targets):  # pylint: disable=invalid-name
    cross_entropies = _category_cross_entropy(
        model_output, targets, label_smoothing, 0.0)
    return jnp.average(cross_entropies)
  return base.Fn('CategoryCrossEntropy', f)
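A worked single-item example in plain NumPy (not library code), with label smoothing 0.1 over N = 3 categories and target category 2; the expected value (roughly 0.5076) follows by hand from the formula above:

import numpy as np

eps, n = 0.1, 3                    # label smoothing, number of categories
logits = np.array([1.0, 2.0, 3.0])
target_dist = np.full(n, eps / n)  # smoothed one-hot for target category 2
target_dist[2] += 1.0 - eps
log_q = logits - np.log(np.sum(np.exp(logits)))  # log softmax
cross_entropy = -np.sum(target_dist * log_q)
assert np.isclose(cross_entropy, 0.5076, atol=1e-3)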
def WeightedCategoryCrossEntropy(label_smoothing=None, cutoff=0.0):
  r"""Returns a layer like ``CategoryCrossEntropy``, with weights as third input.

  The layer takes three inputs:

    - A batch of activation vectors. The components in a given vector should
      be pre-softmax activations (mappable to a probability distribution via
      softmax). For performance reasons, the softmax and cross-entropy
      computations are combined inside the layer.

    - A batch of target categories; each target is an integer in
      :math:`\{0, ..., N-1\}`, where :math:`N` is the activation vector
      depth/dimensionality.

    - A batch of weights, which matches or can be broadcast to match the shape
      of the target ndarray. This arg can give uneven weighting to different
      items in the batch (depending, for instance, on the item's target
      category).

  The layer returns the weighted average of these cross-entropy values over
  all items in the batch.

  Args:
    label_smoothing: Creates soft targets if provided. Must be between 0 and 1.
    cutoff: Prevent loss lower than this cutoff (0.0 meaning none by default).
  """
  def f(model_output, targets, weights):  # pylint: disable=invalid-name
    cross_entropies = _category_cross_entropy(
        model_output, targets, label_smoothing, cutoff)
    return jnp.sum(cross_entropies * weights) / _n_weights_per_core(weights)
  return base.Fn('WeightedCategoryCrossEntropy', f)
def BinaryCrossEntropy():
  r"""Returns a layer that computes cross-entropy for binary classification.

  The layer takes two inputs:

    - A batch of activation values; each batch item :math:`x` is a float in
      :math:`(-\infty, \infty)`.

    - A batch of binary targets; each target :math:`t` is an integer in
      :math:`\{0, 1\}`.

  The layer maps each activation value into the range :math:`(0, 1)`,
  interpreted as the model-predicted probability that item's category is 1:

  .. math::
      q = \frac 1 {1 + e^{-x}} \ \ \text{[model-predicted probability]}

  and computes cross-entropy (per batch item) by treating the target category
  as having probability 1:

  .. math::
      \text{cross_entropy} = \left\{ \begin{array}{cl}
          - \log q       & \text{if}\ t = 1, \\
          - \log (1 - q) & \text{if}\ t = 0.
      \end{array} \right.

  The layer returns the average of these cross-entropy values over all items
  in the batch.
  """
  def f(model_output, targets):  # pylint: disable=invalid-name
    probabilities = fastmath.expit(model_output)
    binary_entropies = - (targets * jnp.log(probabilities) +
                          (1 - targets) * (jnp.log(1 - probabilities)))
    return jnp.average(binary_entropies)
  return base.Fn('BinaryCrossEntropy', f)
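A worked example in plain NumPy (not library code): activations are mapped through the sigmoid and scored against binary targets.

import numpy as np

x = np.array([2.0, -1.0])
targets = np.array([1, 0])
q = 1.0 / (1.0 + np.exp(-x))  # sigmoid
per_item = -(targets * np.log(q) + (1 - targets) * np.log(1 - q))
# For x = 2.0, t = 1: -log(sigmoid(2.0)) is about 0.1269.
# For x = -1.0, t = 0: -log(1 - sigmoid(-1.0)) is about 0.3133.
assert np.isclose(per_item.mean(), (0.1269 + 0.3133) / 2, atol=1e-3)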
def MaskedSequenceAccuracy():
  r"""Returns a layer that computes sequence prediction accuracy with masking.

  This layer type is intended for variable length sequences, especially text,
  represented as a batch of fixed-length sequences via padding for unused
  positions.

  The layer takes three inputs:

    - A batch of sequences of activation vectors. The components in a given
      vector should be mappable to a probability distribution in the following
      loose sense: within a vector, a higher component value corresponds to a
      higher probability, such that argmax within a vector (``axis=-1``) picks
      the index having the highest probability. In text modeling, the index
      represents a token id from a predetermined token vocabulary (or padding).

    - A batch of target integer sequences, with values in
      :math:`\{0, ..., N-1\}`, where :math:`N` is the activation vector
      depth/dimensionality. In text modeling, these sequences typically
      represent token ids from a predetermined token vocabulary (or padding).

    - A batch of weights/masks, which matches or can be broadcast to match the
      shape of the target ndarray. This arg is used to give weight 0 to padding
      positions, which masks those positions out of the calculation. Only the
      zero/non-zero distinction matters; all non-zero values are treated alike
      as signaling non-masked (i.e., valid/in-use) positions.

  The predicted integer value for each sequence position is the index of the
  highest-valued component of the position's vector. A predicted integer
  sequence is judged correct if it matches the target integer sequence in all
  non-zero-weighted positions. The layer returns the accuracy of predicted
  sequences averaged over the batch.
  """
  def f(model_output, targets, weights):  # pylint: disable=invalid-name
    predictions = jnp.argmax(model_output, axis=-1)
    shapes.assert_same_shape(predictions, targets)
    position_is_padding = jnp.equal(weights, 0)
    position_is_accurate = jnp.logical_or(jnp.equal(predictions, targets),
                                          position_is_padding)
    sequence_is_accurate = jnp.all(position_is_accurate, axis=-1)
    return jnp.average(sequence_is_accurate)
  return base.Fn('MaskedSequenceAccuracy', f)
def Accuracy(classifier=core.ArgMax()):
  """Returns a layer that computes mean category prediction accuracy.

  DEPRECATED; use ``WeightedCategoryAccuracy`` instead.

  Args:
    classifier: Layer that transforms activation vectors into category
        predictions.
  """
  return cb.Serial(classifier,
                   _Accuracy(),
                   _WeightedMean(),
                   name='Accuracy',
                   sublayers_to_print=[])
def SequenceAccuracy(classifier=core.ArgMax()):
  """Returns a layer that computes mean sequence prediction accuracy.

  DEPRECATED; use ``MaskedSequenceAccuracy`` instead.

  Args:
    classifier: Layer that transforms activation vectors into category
        predictions.
  """
  return cb.Serial(classifier,
                   _Accuracy(),
                   _WeightedSequenceMean(),
                   name='SequenceAccuracy',
                   sublayers_to_print=[])
def CrossEntropyLoss():
  """Returns a layer that outputs multiclass prediction-target cross-entropy.

  DEPRECATED; refactor to use ``WeightedCategoryCrossEntropy`` or
  ``CategoryCrossEntropy`` instead.

  (``CrossEntropyLoss`` by itself does not compute cross-entropy. In older
  code, this layer had to be preceded by ``LogSoftmax``, and the two layers
  together did the work of converting category information to probability
  distributions and computing the cross-entropy between those distributions.
  All this is now done by ``WeightedCategoryCrossEntropy``.)
  """
  return cb.Serial(_CrossEntropy(),
                   _WeightedMean(),
                   name='CrossEntropyLoss',
                   sublayers_to_print=[])
def CrossEntropyLossWithLogSoftmax():
  """Mean prediction-target cross-entropy for multiclass classification."""
  return cb.Serial(core.LogSoftmax(), _CrossEntropy(), _WeightedMean(),
                   name='CrossEntropyLossWithLogSoftmax',
                   sublayers_to_print=[])
def BinaryCrossEntropyLoss():
  """Returns a layer that outputs binary prediction-target cross-entropy.

  DEPRECATED; refactor to use ``BinaryCrossEntropy`` instead. (The newer
  ``BinaryCrossEntropy`` does not use weights, so refactor accordingly. Unless
  and until clear motivating use cases arise, the library will not include a
  binary cross-entropy function with weights.)
  """
  return cb.Serial(_BinaryCrossEntropy(),
                   _WeightedMean(),
                   name='BinaryCrossEntropyLoss',
                   sublayers_to_print=[])
def L2Loss():
  r"""Returns a layer that computes an L2-like loss for one batch.

  The layer takes three inputs:

    - Model output from one batch, an ndarray of float-valued elements.

    - A batch of element-wise target values, which matches the shape of the
      model output.

    - A batch of weights, which matches the shape of the model output.

  The layer returns a weighted average of element-wise squared error terms
  :math:`(y_i - t_i)^2`.
  """
  def f(model_output, targets, weights):  # pylint: disable=invalid-name
    shapes.assert_same_shape(model_output, targets)
    shapes.assert_same_shape(model_output, weights)
    weighted_sse = weights * (model_output - targets)**2
    return jnp.sum(weighted_sse) / jnp.sum(weights)
  return base.Fn('L2Loss', f)
def SmoothL1Loss():
  r"""Returns a layer that computes a weighted, smoothed L1 loss for one batch.

  The layer takes three inputs:

    - Model output from one batch, an ndarray of float-valued elements.

    - A batch of element-wise target values, which matches the shape of the
      model output.

    - A batch of weights, which matches the shape of the model output.

  The layer computes a "smooth" L1 loss (a.k.a. Huber loss), for model output
  float :math:`y_i` and target float :math:`t_i`:

  .. math::
      \text{output} = \left\{ \begin{array}{cl}
          \frac 1 2 (y_i - t_i)^2, & \text{if}\ |y_i - t_i| < 1, \\
          |y_i - t_i| - \frac 1 2, & \text{otherwise}.
      \end{array} \right.

  The layer returns a weighted average of these element-wise values.
  """
  def f(model_output, targets, weights):  # pylint: disable=invalid-name
    shapes.assert_same_shape(model_output, targets)
    shapes.assert_same_shape(model_output, weights)
    l1_dist = jnp.abs(model_output - targets)
    smooth_dist = jnp.where(l1_dist < 1, 0.5 * l1_dist**2, l1_dist - 0.5)
    weighted_smooth_dist = weights * smooth_dist
    return jnp.sum(weighted_smooth_dist) / jnp.sum(weights)
  return base.Fn('SmoothL1Loss', f)
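A worked example in plain NumPy (not library code): the loss is quadratic for errors below 1 and linear beyond, here with equal implicit weights.

import numpy as np

model_output = np.array([0.5, 3.0])
targets = np.zeros(2)
l1 = np.abs(model_output - targets)               # [0.5, 3.0]
smooth = np.where(l1 < 1, 0.5 * l1**2, l1 - 0.5)  # [0.125, 2.5]
assert np.isclose(smooth.mean(), 1.3125)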
def MacroAveragedFScore(beta=1., initial_category_index=0):
  r"""Returns a layer that computes a macro-averaged F-score.

  The macro-averaged F-score summarizes how well the classifier's `k`
  predictions align with the observed/gold instances of `k`. It additionally
  treats all the classes equally regardless of their size.

  Args:
    beta: a parameter that determines the weight of recall in the F-score.
    initial_category_index: an index of the initial category.

  The layer takes two inputs:

    - Model output from one batch, an ndarray of float-valued elements.

    - A batch of element-wise target values, which matches the shape of the
      model output.

  The layer returns a macro-averaged F-score across all the classes.
  """
  def f(model_output, targets):  # pylint: disable=invalid-name
    beta2 = beta ** 2
    predictions = jnp.argmax(model_output, axis=-1)
    n_categories = model_output.shape[-1]
    f_scores = jnp.empty(0)
    for k in range(initial_category_index, n_categories):
      _, _, _, precision, recall = _precision_recall(predictions, targets, k)
      f_scores = jnp.append(f_scores, _f_score(precision, recall, beta2))
    return jnp.mean(f_scores)
  return base.Fn('MacroAveragedFScore', f)
def WeightedFScore(beta=1., initial_category_index=0):
  """Returns a layer that computes a weighted F-score.

  The weighted F-score summarizes how well the classifier's `k` predictions
  align with the observed/gold instances of `k`. It additionally weights the
  summary by the number of observed/gold and predicted examples in each class.

  Args:
    beta: a parameter that determines the weight of recall in the F-score.
    initial_category_index: an index of the initial category.

  The layer takes two inputs:

    - Model output from one batch, an ndarray of float-valued elements.

    - A batch of element-wise target values, which matches the shape of the
      model output.

  The layer returns a weighted F-score across all the classes.
  """
  def f(model_output, targets):  # pylint: disable=invalid-name
    beta2 = beta ** 2
    predictions = jnp.argmax(model_output, axis=-1)
    n_categories = model_output.shape[-1]
    f_scores = jnp.empty(0)
    weights = jnp.empty(0)
    for k in range(initial_category_index, n_categories):
      _, _, n_k_targets, precision, recall = _precision_recall(
          predictions, targets, k)
      f_scores = jnp.append(f_scores, _f_score(precision, recall, beta2))
      weights = jnp.append(weights, n_k_targets)
    return jnp.average(f_scores, weights=weights)
  return base.Fn('WeightedFScore', f)
def WeightedSum():
  """Returns a layer that computes a weighted sum of the given values."""
  def f(values, weights):  # pylint: disable=invalid-name
    return jnp.sum(values * weights)
  return base.Fn('WeightedSum', f)
def _Accuracy():
  """Returns a layer that scores predicted versus target category."""
  def f(predicted_category, target_category):  # pylint: disable=invalid-name
    # TODO(pkozakowski): This assertion breaks some tests. Fix and uncomment.
    # shapes.assert_same_shape(predicted_category, target_category)
    return jnp.equal(predicted_category, target_category).astype(jnp.float32)
  return base.Fn('_Accuracy', f)
def _CrossEntropy():
  """Returns a layer that computes prediction-target cross entropies."""
  def f(model_output, target_category):  # pylint: disable=invalid-name
    # TODO(pkozakowski): This assertion breaks some tests. Fix and uncomment.
    # shapes.assert_shape_equals(target_category, model_output.shape[:-1])
    target_distribution = core.one_hot(target_category, model_output.shape[-1])
    return -1.0 * jnp.sum(model_output * target_distribution, axis=-1)
  return base.Fn('_CrossEntropy', f)
def _BinaryCrossEntropy():
  """Returns a layer that computes prediction-target cross entropies."""
  def f(model_output, target_category):  # pylint: disable=invalid-name
    shapes.assert_same_shape(model_output, target_category)
    batch_size = model_output.shape[0]
    j = jnp.dot(jnp.transpose(target_category), jnp.log(model_output))
    j += jnp.dot(jnp.transpose(1 - target_category), jnp.log(1 - model_output))
    j = -1.0 / batch_size * jnp.squeeze(j)
    return j
  return base.Fn('_BinaryCrossEntropy', f)
def CrossEntropySum():
  """Sum of prediction-target cross entropies for multiclass classification."""
  return cb.Serial(_CrossEntropy(),
                   WeightedSum(),
                   name='CrossEntropySum',
                   sublayers_to_print=[])
def BinaryCrossEntropySum():
  """Sum of prediction-target cross entropies for binary classification."""
  return cb.Serial(_BinaryCrossEntropy(),
                   WeightedSum(),
                   name='BinaryCrossEntropySum',
                   sublayers_to_print=[])
def _WeightedMean():
  """Returns a layer that computes a weighted mean of the given values."""
  def f(values, weights):  # pylint: disable=invalid-name
    return jnp.sum(values * weights) / _n_weights_per_core(weights)
  return base.Fn('_WeightedMean', f)
def _WeightedSequenceMean():
  """Returns a layer that computes a weighted sequence accuracy mean."""
  def f(values, weights):  # pylint: disable=invalid-name
    # This function assumes weights are 0 or 1.
    # Then compute 1: not-correct, 0: correct or masked.
    not_correct = (1.0 - values) * weights
    axis_to_sum = list(range(1, len(not_correct.shape)))
    # Summing not-correct on all axes but batch. We're summing 0s and 1s,
    # so the sum is 0 if it's all 0 and >=1 in all other cases.
    not_correct_seq = jnp.sum(not_correct, axis=axis_to_sum)
    # Sequence is correct if not_correct_seq is 0, reverting here.
    correct_seq = 1.0 - jnp.minimum(1.0, not_correct_seq)
    return jnp.mean(correct_seq)  # Mean over batch.
  return base.Fn('_WeightedSequenceMean', f)
def _category_cross_entropy(  # pylint: disable=invalid-name
    model_output, targets, label_smoothing, cutoff):
  """Computes category cross entropy with label smoothing."""
  n_categories = model_output.shape[-1]
  target_distributions = core.one_hot(targets, n_categories)
  if label_smoothing:
    if label_smoothing < 0. or label_smoothing > 1.:
      raise ValueError(
          f'Arg label_smoothing ({label_smoothing}) must be between 0 and 1.')
    target_distributions *= (1. - label_smoothing)
    target_distributions += label_smoothing / n_categories
  model_log_distributions = core.log_softmax(model_output)
  cross_ent = -jnp.sum(target_distributions * model_log_distributions, axis=-1)
  if cutoff > 0.0:
    return jnp.maximum(cross_ent, cutoff) - cutoff
  else:
    return cross_ent
def MaxPool(pool_size=(2, 2), strides=None, padding='VALID'):
  """Reduces each multi-dimensional window to the max of the window's values.

  Windows, as specified by `pool_size` and `strides`, involve all axes of an
  n-dimensional array except the first and last: :math:`(d_1, ..., d_{n-2})`
  from shape :math:`(d_0, d_1, ..., d_{n-2}, d_{n-1})`.

  Args:
    pool_size: Shape of window that gets reduced to a single vector value.
        If the layer inputs are :math:`n`-dimensional arrays, then `pool_size`
        must be a tuple of length :math:`n-2`.
    strides: Offsets from the location of one window to the locations of
        neighboring windows along each axis. If specified, must be a tuple of
        the same length as `pool_size`. If None, then offsets of 1 along each
        window axis, :math:`(1, ..., 1)`, will be used.
    padding: 'VALID' or 'SAME'. If 'VALID', no padding is done, and only full
        windows get reduced; partial windows are discarded. If 'SAME', padding
        is added at array edges as needed to avoid partial windows but does not
        otherwise affect the selection of max values.

  Returns:
    N-dimensional array in which each valid (or padded-valid) window position
    in the input is reduced to / replaced by the max value from that window.
    An output array has the same number of dimensions as its input, but has
    fewer elements.
  """
  layer_name = f'MaxPool{pool_size}'.replace(' ', '')
  def f(x):
    return fastmath.max_pool(
        x, pool_size=pool_size, strides=strides, padding=padding)
  return Fn(layer_name, f)
def SumPool(pool_size=(2, 2), strides=None, padding='VALID'): """Reduces each multi-dimensional window to the sum of the window's values. Windows, as specified by `pool_size` and `strides`, involve all axes of an n-dimensional array except the first and last: :math:`(d_1, ..., d_{n-2})` from shape :math:`(d_0, d_1, ..., d_{n-2}, d_{n-1})`. Args: pool_size: Shape of window that gets reduced to a single vector value. If the layer inputs are :math:`n`-dimensional arrays, then `pool_size` must be a tuple of length :math:`n-2`. strides: Offsets from the location of one window to the locations of neighboring windows along each axis. If specified, must be a tuple of the same length as `pool_size`. If None, then offsets of 1 along each window axis, :math:`(1, ..., 1)`, will be used. padding: 'VALID' or 'SAME'. If 'VALID', no padding is done, and only full windows get reduced; partial windows are discarded. If 'SAME', padding is added at array edges as needed to avoid partial windows but does not otherwise affect the computation of sums. Returns: N-dimensional array in which each valid (or padded-valid) window position in the input is reduced to / replaced by the sum of values in that window. An output array has the same number of dimensions as its input, but has fewer elements. """ layer_name = f'SumPool{pool_size}'.replace(' ', '') def f(x): return fastmath.sum_pool( x, pool_size=pool_size, strides=strides, padding=padding) return Fn(layer_name, f)
def AvgPool(pool_size=(2, 2), strides=None, padding='VALID'): """Reduces each multi-dimensional window to the mean of the window's values. Windows, as specified by `pool_size` and `strides`, involve all axes of an n-dimensional array except the first and last: :math:`(d_1, ..., d_{n-2})` from shape :math:`(d_0, d_1, ..., d_{n-2}, d_{n-1})`. Args: pool_size: Shape of window that gets reduced to a single vector value. If the layer inputs are :math:`n`-dimensional arrays, then `pool_size` must be a tuple of length :math:`n-2`. strides: Offsets from the location of one window to the locations of neighboring windows along each axis. If specified, must be a tuple of the same length as `pool_size`. If None, then offsets of 1 along each window axis, :math:`(1, ..., 1)`, will be used. padding: 'VALID' or 'SAME'. If 'VALID', no padding is done, and only full windows get reduced; partial windows are discarded. If 'SAME', padding is added at array edges as needed but is not counted in the computation of averages. Returns: N-dimensional array in which each valid (or padded-valid) window position in the input is reduced to / replaced by the mean of values in that window. An output array has the same number of dimensions as its input, but has fewer elements. """ layer_name = f'AvgPool{pool_size}'.replace(' ', '') def f(x): return fastmath.avg_pool( x, pool_size=pool_size, strides=strides, padding=padding) return Fn(layer_name, f)
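A minimal shape sketch for the three pooling layers above, assuming the usual (batch, height, width, channels) layout; with a (2, 2) window, (2, 2) strides, and 'VALID' padding, each spatial dimension halves:

import numpy as np
from trax import layers as tl

x = np.random.uniform(size=(2, 8, 8, 3)).astype(np.float32)
y = tl.MaxPool(pool_size=(2, 2), strides=(2, 2))(x)  # weightless, call directly
print(y.shape)  # (2, 4, 4, 3)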
def _forward_and_or_backward(layer): """Create forward_and_or_backward for layers that don't define it.""" def forward_and_or_backward(inputs, weights, state, rng, output_grad=None, compute_output=True, update_state=True): """Performs batched forward and/or backward passes. Args: inputs: inputs to the attention layer weights: weights for the attention layer state: state of the attention layer rng: PRNG key for the layer (shared across all examples and heads) output_grad: gradient of the loss wrt the output of the layer, or None. This function performs the backward pass iff `output_grad` is not None. compute_output: bool: whether to return the output of the forward pass (for example, a pure backwards pass does not need to return the output). update_state: bool: whether to return an updated layer state. Returns: A tuple (output, new_state, inputs_grad, weights_grad). - output is not None iff compute_output is True - new_state is not None iff update_state is True - inputs_grad & weights_grad are not None iff output_grad is not None """ # Calculate the vector-Jacobian product of the layer pure_fn. output, vjp_fn, new_state = fastmath.vjp( layer.pure_fn, inputs, weights, state, rng, has_aux=True) output = output if compute_output else None new_state = new_state if update_state else None # The vjp function returns gradients with respect to inputs and weights. if output_grad is not None: grads_inputs, grads_weights, _, _ = vjp_fn(output_grad) else: grads_inputs, grads_weights = None, None return (output, new_state, grads_inputs, grads_weights) return forward_and_or_backward
def MakeZeroState(depth_multiplier=1): """Makes zeros of shape like x but removing the length (axis 1).""" def f(x): # pylint: disable=invalid-name if len(x.shape) != 3: raise ValueError(f'Layer input should be a rank 3 tensor representing' f' (batch_size, sequence_length, feature_depth); ' f'instead got shape {x.shape}.') return jnp.zeros((x.shape[0], depth_multiplier * x.shape[-1]), dtype=jnp.float32) return base.Fn('MakeZeroState', f)
def LSTM(n_units, mode='train', return_state=False, initial_state=False):
  """LSTM running on axis 1.

  Args:
    n_units: `n_units` for the `LSTMCell`.
    mode: if 'predict' then we save the previous state for one-by-one inference.
    return_state: Boolean. Whether to return the latest state in addition to
      the output. Default: False.
    initial_state: Boolean. If True, the initial RNN state (c, h) is taken
      from the stack instead of being initialized to zeros. Default: False.

  Returns:
    An LSTM layer.
  """

  if not initial_state:
    zero_state = MakeZeroState(depth_multiplier=2)  # pylint: disable=no-value-for-parameter
    if return_state:
      return cb.Serial(
          cb.Branch([], zero_state),
          cb.Scan(LSTMCell(n_units=n_units), axis=1, mode=mode),
          name=f'LSTM_{n_units}', sublayers_to_print=[])
    else:
      return cb.Serial(
          cb.Branch([], zero_state),  # fill state RNN with zero.
          cb.Scan(LSTMCell(n_units=n_units), axis=1, mode=mode),
          cb.Select([0], n_in=2),  # Drop RNN state.
          # Set the name to LSTM and don't print sublayers.
          name=f'LSTM_{n_units}', sublayers_to_print=[])
  else:
    if return_state:
      return cb.Serial(
          cb.Scan(LSTMCell(n_units=n_units), axis=1, mode=mode),
          name=f'LSTM_{n_units}', sublayers_to_print=[])
    else:
      return cb.Serial(
          cb.Scan(LSTMCell(n_units=n_units), axis=1, mode=mode),
          cb.Select([0], n_in=2),  # Drop RNN state.
          name=f'LSTM_{n_units}', sublayers_to_print=[])
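A minimal usage sketch (shapes are illustrative). Note that, as constructed above, the zero state is built from the input shape, which ties `n_units` to the input feature depth:

import numpy as np
from trax import layers as tl
from trax import shapes

x = np.random.uniform(size=(4, 10, 8)).astype(np.float32)  # (batch, len, depth)
lstm = tl.LSTM(n_units=8)
lstm.init(shapes.signature(x))
print(lstm(x).shape)  # (4, 10, 8)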
def GRU(n_units, mode='train'): """GRU running on axis 1.""" zero_state = MakeZeroState(depth_multiplier=1) # pylint: disable=no-value-for-parameter return cb.Serial( cb.Branch([], zero_state), cb.Scan(GRUCell(n_units=n_units), axis=1, mode=mode), cb.Select([0], n_in=2), # Drop RNN state. # Set the name to GRU and don't print sublayers. name=f'GRU_{n_units}', sublayers_to_print=[] )
def ConvGRUCell(n_units, kernel_size=(3, 3)):
  """Builds a convolutional GRU.

  Paper: https://arxiv.org/abs/1511.06432.

  Args:
    n_units: Number of hidden units
    kernel_size: Kernel size for convolution

  Returns:
    A Trax model representing a GRU cell with convolution transforms.
  """

  def BuildConv():
    return convolution.Conv(
        filters=n_units, kernel_size=kernel_size, padding='SAME')

  return GeneralGRUCell(
      candidate_transform=BuildConv,
      memory_transform_fn=None,
      gate_nonlinearity=activation_fns.Sigmoid,
      candidate_nonlinearity=activation_fns.Tanh)
def GeneralGRUCell(candidate_transform, memory_transform_fn=None, gate_nonlinearity=activation_fns.Sigmoid, candidate_nonlinearity=activation_fns.Tanh, dropout_rate_c=0.1, sigmoid_bias=0.5): r"""Parametrized Gated Recurrent Unit (GRU) cell construction. GRU update equations for update gate, reset gate, candidate memory, and new state: .. math:: u_t &= \sigma(U' \times s_{t-1} + B') \\ r_t &= \sigma(U'' \times s_{t-1} + B'') \\ c_t &= \tanh(U \times (r_t \odot s_{t-1}) + B) \\ s_t &= u_t \odot s_{t-1} + (1 - u_t) \odot c_t See `combinators.Gate` for details on the gating function. Args: candidate_transform: Transform to apply inside the Candidate branch. Applied before nonlinearities. memory_transform_fn: Optional transformation on the memory before gating. gate_nonlinearity: Function to use as gate activation; allows trying alternatives to `Sigmoid`, such as `HardSigmoid`. candidate_nonlinearity: Nonlinearity to apply after candidate branch; allows trying alternatives to traditional `Tanh`, such as `HardTanh`. dropout_rate_c: Amount of dropout on the transform (c) gate. Dropout works best in a GRU when applied exclusively to this branch. sigmoid_bias: Constant to add before sigmoid gates. Generally want to start off with a positive bias. Returns: A model representing a GRU cell with specified transforms. """ gate_block = [ # u_t candidate_transform(), _AddSigmoidBias(sigmoid_bias), gate_nonlinearity(), ] reset_block = [ # r_t candidate_transform(), _AddSigmoidBias(sigmoid_bias), # Want bias to start positive. gate_nonlinearity(), ] candidate_block = [ cb.Dup(), reset_block, cb.Multiply(), # Gate S{t-1} with sigmoid(candidate_transform(S{t-1})) candidate_transform(), # Final projection + tanh to get Ct candidate_nonlinearity(), # Candidate gate # Only apply dropout on the C gate. Paper reports 0.1 as a good default. core.Dropout(rate=dropout_rate_c) ] memory_transform = memory_transform_fn() if memory_transform_fn else [] return cb.Serial( cb.Branch(memory_transform, gate_block, candidate_block), cb.Gate(), )
def InnerSRUCell(): """The inner (non-parallel) computation of an SRU.""" def f(cur_x_times_one_minus_f, cur_f, cur_state): # pylint: disable=invalid-name res = cur_f * cur_state + cur_x_times_one_minus_f return res, res return base.Fn('InnerSRUCell', f, n_out=2)
def ScanSRUCell(mode, monkey_patched_mask=None):
  """Scans the inner SRU cell over the time axis (axis 1)."""
  if monkey_patched_mask is None:
    return cb.Scan(InnerSRUCell(), axis=1, mode=mode)

  # This is necessary for the Terraformer model. See comments there.
  # The mask will only be used in Terraformer in predict mode.
  assert mode == 'predict'

  def update_mask(mask, x_times_one_minus_f):  # pylint: disable=invalid-name
    initial = jnp.ones(x_times_one_minus_f.shape[:2], dtype=jnp.float32)
    if initial.shape[1] > 1:
      updated_mask = fastmath.dynamic_update_slice_in_dim(
          initial != 0, mask != 0, 1, axis=1)
    else:
      updated_mask = initial
    return updated_mask, x_times_one_minus_f

  def masked_inner_sru_cell(cur_mask, cur_x_times_one_minus_f, cur_f,  # pylint: disable=invalid-name
                            cur_state):
    res = ((cur_f * cur_state + cur_x_times_one_minus_f) * cur_mask
           + (1 - cur_mask) * cur_state)
    return res, res

  return cb.Serial(
      monkey_patched_mask.get_layer(),
      base.Fn('update_mask', update_mask, n_out=2),
      cb.Scan(base.Fn('MaskedInnerSRUCell', masked_inner_sru_cell, n_out=2),
              axis=1, mode=mode),
  )
def SRU(n_units, activation=None, mode='train'): r"""SRU (Simple Recurrent Unit) layer as in https://arxiv.org/abs/1709.02755. As defined in the paper: .. math:: y_t &= W x_t + B \quad \hbox{(include $B$ optionally)} \\ f_t &= \sigma(Wf x_t + bf) \\ r_t &= \sigma(Wr x_t + br) \\ c_t &= f_t \times c_{t-1} + (1 - f_t) \times y_t \\ h_t &= r_t \times \hbox{activation}(c_t) + (1 - r_t) \times x_t We assume the input is of shape [batch, length, depth] and recurrence happens on the length dimension. This returns a single layer. It's best to use at least 2, they say in the paper, except inside a Transformer. Args: n_units: output depth of the SRU layer. activation: Optional activation function. mode: if 'predict' then we save the previous state for one-by-one inference Returns: The SRU layer. """ sigmoid_activation = activation_fns.Sigmoid() return cb.Serial( # x cb.Branch(core.Dense(3 * n_units), []), # r_f_y, x cb.Split(n_items=3), # r, f, y, x cb.Parallel(sigmoid_activation, sigmoid_activation), # r, f, y, x base.Fn('', lambda r, f, y: (y * (1.0 - f), f, r), # y * (1 - f), f, r, x n_out=3), cb.Parallel([], [], cb.Branch(MakeZeroState(), [])), ScanSRUCell(mode=mode), cb.Select([0], n_in=2), # act(c), r, x activation if activation is not None else [], base.Fn('FinalSRUGate', lambda c, r, x: c * r + x * (1 - r) * (3**0.5)), # Set the name to SRU and don't print sublayers. name=f'SRU_{n_units}', sublayers_to_print=[] )
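A minimal usage sketch (illustrative shapes). The final gate combines the cell output with the raw input `x`, so `n_units` must equal the input depth:

import numpy as np
from trax import layers as tl
from trax import shapes

x = np.random.uniform(size=(2, 7, 16)).astype(np.float32)
sru = tl.SRU(n_units=16)
sru.init(shapes.signature(x))
print(sru(x).shape)  # (2, 7, 16)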
def test_eval_is_deterministic(inp, model_fn, message=''): """Utility method for testing if eval mode is deterministic. Args: inp: input fed to the model. It can be a tensor, or a tuple of tensors. model_fn: function creating a model after calling with `mode` argument. message: Optional message to show when outputs of eval/predict mode don't match. """ with fastmath.use_backend(fastmath.Backend.JAX): model_eval1 = model_fn(mode='eval') model_eval2 = model_fn(mode='eval') input_signature = shapes.signature(inp) model_eval1.init(input_signature) model_eval2.init(input_signature) model_eval1.save_to_file('/tmp/unique_weights') model_eval2.init_from_file('/tmp/unique_weights', weights_only=True, input_signature=input_signature) rng = fastmath.random.get_prng(0) output_eval1 = model_eval1(inp, rng=rng) if not isinstance(output_eval1, (tuple, list)): # We will automatically check each and every tensor returned. output_eval1 = [output_eval1] output_eval2 = model_eval2(inp, rng=rng) if not isinstance(output_eval2, (tuple, list)): # We will automatically check each and every tensor returned. output_eval2 = [output_eval2] np.testing.assert_equal(len(output_eval1), len(output_eval2)) for out1, out2 in zip(output_eval1, output_eval2): np.testing.assert_array_almost_equal( out1, out2, decimal=5, err_msg='Non-deterministic.{}'.format(message))
def test_eval_equals_predict(inp, model_fn, seq_axis=1, seq_tensor=None, init_tokens=3, message=''): """Utility method for testing equivalence of predict and eval modes. Args: inp: input fed to the model. It can be a tensor, or a tuple of tensors. model_fn: function creating a model after calling with `mode` argument. seq_axis: axis of sequence_length. In predict mode we iterate over this axis. By default `1`, which is 2nd dimension. seq_tensor: if `inp` is a tuple, `seq_tensor` is an index of an input tensor in this tuple on which we iterate the sequence. init_tokens: how many tokens should be passed to the first `predict` call. message: Optional message to show when outputs of eval/predict mode don't match. """ with fastmath.use_backend(fastmath.Backend.JAX): model_eval = model_fn(mode='eval') model_predict = model_fn(mode='predict') input_signature = shapes.signature(inp) model_eval.init(input_signature) model_predict.init(input_signature) model_eval.save_to_file('/tmp/unique_weights') model_predict.init_from_file('/tmp/unique_weights', weights_only=True, input_signature=input_signature) rng = fastmath.random.get_prng(0) output_eval = model_eval(inp, rng=rng) if not isinstance(output_eval, (tuple, list)): # We will automatically check each and every tensor returned. output_eval = [output_eval] if seq_tensor is None: length = inp.shape[seq_axis] else: length = inp[seq_tensor].shape[seq_axis] assert length >= init_tokens + 2 # Required to properly test predict mode. indices_list = [(0, init_tokens)] + [(i, i+1) for i in range(init_tokens, length)] for indices in indices_list: start, end = indices if seq_tensor is None: new_inp = inp.take(indices=np.arange(start, end), axis=seq_axis) else: new_inp = list(inp) new_inp[seq_tensor] = new_inp[seq_tensor].take( indices=np.arange(start, end), axis=seq_axis) output_predict = model_predict(new_inp, rng=rng) if not isinstance(output_predict, (tuple, list)): # We will automatically check each and every tensor returned. output_predict = [output_predict] np.testing.assert_equal(len(output_predict), len(output_eval)) for outp, oute in zip(output_predict, output_eval): np.testing.assert_array_almost_equal( oute.take(indices=np.arange(start, end), axis=seq_axis), outp.take(indices=np.arange(0, end-start), axis=seq_axis), decimal=5, err_msg='Error on element {} out of {}.{}'.format(indices, length, message))
def test_eval_equals_predict_configs(inp,
                                     model_fn,
                                     configs,
                                     seq_axis=1,
                                     seq_tensor=None,
                                     message=''):
  """Utility method for testing equivalence of predict and eval modes.

  This function iterates over a list of dictionaries `configs`, and runs the
  test on models with each configuration.

  Args:
    inp: input fed to the model. It can be a tensor, or a tuple of tensors.
    model_fn: function creating a model after calling with `mode` argument.
    configs: List of dictionaries, which contain configs to be fed into
      `model_fn`.
    seq_axis: axis of sequence_length. In predict mode we iterate over this
      axis. By default `1`, which is 2nd dimension.
    seq_tensor: if `inp` is a tuple, `seq_tensor` is an index of an input
      tensor in this tuple on which we iterate the sequence.
    message: Optional message to show when outputs of eval/predict mode don't
      match.
  """
  for config in configs:
    model_fn_configured = functools.partial(model_fn, **config)
    test_eval_equals_predict(
        inp,
        model_fn_configured,
        seq_axis=seq_axis,
        seq_tensor=seq_tensor,
        message=' Config: {}.{}'.format(config, message))
def test_eval_equals_predict_discrete( model_fn, vocab_size=10, length=5, batch_size=3 ): """Tests the equivalence of eval and predict modes for discrete models.""" with fastmath.use_backend(fastmath.Backend.JAX): model_slow = model_fn(mode='eval', vocab_size=vocab_size) model_fast = model_fn(mode='predict', vocab_size=vocab_size) rng = fastmath.random.get_prng(0) input_signature = shapes.ShapeDtype((batch_size, 1), np.int32) # Given the same rng, both models initialize with the same parameters. model_slow.init(input_signature, rng) model_fast.init(input_signature, rng) buf = np.zeros((batch_size, length), dtype=np.int32) next_sym = np.zeros((batch_size, 1), dtype=np.int32) for index in range(length): logits_slow = model_slow(buf, rng=rng) logits_fast = model_fast(next_sym, rng=rng) np.testing.assert_array_almost_equal( logits_slow[:, index, :], logits_fast[:, 0, :], decimal=5, ) next_sym = np.random.randint(vocab_size, size=(batch_size, 1)) buf[:, index] = next_sym[:, 0]
def hash_vecs(vecs, n_buckets_in, n_hashes, rng): """Hash vectors into buckets. Args: vecs: vectors to hash, a tensor of shape [batch_size, depth] n_buckets_in: an int or a list of ints, number of hash buckets; if it is a list, we do hierarchical hashing as specified by the list n_hashes: number of hashes rng: random generator to use for hashing Returns: A pair (buckets, n_buckets) where buckets is a tensor of shape [n_hashes, batch_size] of integers -- the hash bucket IDs, and n_buckets is an int, the total number of hash buckets, equal to the product of all items in n_buckets_in. """ # See https://arxiv.org/pdf/1509.02897.pdf # We sample a different random rotation for each round of hashing to # decrease the probability of hash misses. if isinstance(n_buckets_in, int): assert n_buckets_in % 2 == 0 rot_size = n_buckets_in n_buckets = n_buckets_in else: # Factorize the hash if n_buckets_in is a list or tuple rot_size, n_buckets = 0, 1 for factor in n_buckets_in: assert factor % 2 == 0 rot_size += factor n_buckets *= factor rotations_shape = (vecs.shape[-1], n_hashes, rot_size // 2) random_rotations = fastmath.random.normal(rng, rotations_shape).astype( np.float32) if fastmath.is_backend(fastmath.Backend.JAX): rotated_vecs = np.einsum('tf,fhb->htb', vecs, random_rotations) else: random_rotations = np.reshape(random_rotations, [-1, n_hashes * (rot_size // 2)]) rotated_vecs = np.dot(vecs, random_rotations) rotated_vecs = np.reshape(rotated_vecs, [-1, n_hashes, rot_size//2]) rotated_vecs = np.transpose(rotated_vecs, (1, 0, 2)) if isinstance(n_buckets_in, int) or len(n_buckets_in) == 1: rotated_vecs = np.concatenate([rotated_vecs, -rotated_vecs], axis=-1) buckets = np.argmax(rotated_vecs, axis=-1).astype(np.int32) else: # Get the buckets for them and combine. buckets, cur_sum, cur_product = None, 0, 1 for factor in n_buckets_in: rv = rotated_vecs[..., cur_sum:cur_sum + (factor // 2)] cur_sum += factor // 2 rv = np.concatenate([rv, -rv], axis=-1) if buckets is None: buckets = np.argmax(rv, axis=-1).astype(np.int32) else: buckets += cur_product * np.argmax(rv, axis=-1).astype(np.int32) cur_product *= factor return buckets, n_buckets
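A toy NumPy illustration of the random-rotation hashing idea (standalone, not the function above): project vectors through a random matrix, append the negated projections, and take the argmax. Nearby vectors tend to share buckets.

import numpy as np

rng = np.random.default_rng(0)
vecs = rng.normal(size=(4, 16)).astype(np.float32)
n_hashes, n_buckets = 2, 8
rotations = rng.normal(size=(16, n_hashes, n_buckets // 2))
rotated = np.einsum('tf,fhb->htb', vecs, rotations)
rotated = np.concatenate([rotated, -rotated], axis=-1)
buckets = np.argmax(rotated, axis=-1)
print(buckets.shape)  # (2, 4): one bucket id per hash round per vector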
def look_adjacent(x, n_chunks_before, n_chunks_after): """Used to implement attention between consecutive chunks. Args: x: array of shape [n_chunks, chunk_len, ...] n_chunks_before: Number of previous chunks to attend to. n_chunks_after: Number of subsequent chunks to attend to. Returns: array of shape [n_chunks, N * chunk_len, ...], where N = (1 + n_chunks_before + n_chunks_after). """ if n_chunks_before == 0 and n_chunks_after == 0: return x slices = [] for i in range(-n_chunks_before, n_chunks_after + 1): if i == 0: slices.append(x) else: slices.append(np.concatenate([x[i:, ...], x[:i, ...]], axis=0)) return np.concatenate(slices, axis=1)
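A quick NumPy check of the slicing above with `n_chunks_before=1, n_chunks_after=0`: each chunk is concatenated with its (wrapped) previous chunk along the chunk-length axis.

import numpy as np

x = np.arange(6).reshape(3, 2)                   # 3 chunks of length 2
prev = np.concatenate([x[-1:], x[:-1]], axis=0)  # the i = -1 slice
print(np.concatenate([prev, x], axis=1))
# [[4 5 0 1]
#  [0 1 2 3]
#  [2 3 4 5]]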
def mask_self_attention( dots, q_info, kv_info, causal=True, exclude_self=True, masked=False): """Performs masking for self-attention.""" q_info = q_info.astype(np.float32) kv_info = kv_info.astype(np.float32) if causal: mask = fastmath.lt(q_info, kv_info) dots = dots - 1e9 * mask if exclude_self: mask = np.equal(q_info, kv_info) dots = dots - 1e5 * mask if masked: zeros_like_kv_info = np.zeros_like(kv_info) mask = fastmath.lt(kv_info, zeros_like_kv_info).astype(np.float32) dots = dots - 1e9 * mask return dots
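The causal branch in plain NumPy terms (illustrative): any position where the query index is below the key index gets a large negative penalty before the softmax.

import numpy as np

q_info = np.arange(1, 4, dtype=np.float32)[:, None]   # query positions 1..3
kv_info = np.arange(1, 4, dtype=np.float32)[None, :]  # key positions 1..3
dots = np.zeros((3, 3)) - 1e9 * (q_info < kv_info)
print(dots[0])  # [0, -1e9, -1e9]: token 1 attends only to itself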
def attend( q, k=None, v=None, q_chunk_len=None, kv_chunk_len=None, n_chunks_before=0, n_chunks_after=0, mask_fn=None, q_info=None, kv_info=None, dropout=0.0, rng=None, ): """Dot-product attention, with optional chunking and/or masking. Args: q: Query vectors, shape [q_len, d_qk] k: Key vectors, shape [kv_len, d_qk]; or None v: Value vectors, shape [kv_len, d_v] q_chunk_len: Set to non-zero to enable chunking for query vectors kv_chunk_len: Set to non-zero to enable chunking for key/value vectors n_chunks_before: Number of adjacent previous chunks to attend to n_chunks_after: Number of adjacent subsequent chunks to attend to mask_fn: TODO(kitaev) doc q_info: Query-associated metadata for masking kv_info: Key-associated metadata for masking dropout: Dropout rate rng: RNG for dropout Returns: A tuple (output, dots_logsumexp). The output has shape [q_len, d_v], and dots_logsumexp has shape [q_len]. The logsumexp of the attention probabilities is useful for combining multiple rounds of attention (as in LSH attention). """ assert v is not None share_qk = (k is None) # `q_info` and `kv_info` if supplied are 0 indexed, we want them to be 1 # indexed instead so that we can mask position 0 as well - see Github #820 if q_info is None: q_info = np.arange(1, q.shape[-2] + 1, dtype=np.int32) else: q_info += 1 if kv_info is None and not share_qk: kv_info = np.arange(1, v.shape[-2] + 1, dtype=np.int32) elif kv_info is not None: kv_info += 1 # Split q/k/v into chunks along the time axis, if desired. if q_chunk_len is not None: q = np.reshape(q, (-1, q_chunk_len, q.shape[-1])) q_info = np.reshape(q_info, (-1, q_chunk_len)) if share_qk: assert kv_chunk_len is None or kv_chunk_len == q_chunk_len k = q kv_chunk_len = q_chunk_len if kv_info is None: kv_info = q_info elif kv_chunk_len is not None: # kv_info is not None, but reshape as required. kv_info = np.reshape(kv_info, (-1, kv_chunk_len)) elif kv_chunk_len is not None: k = np.reshape(k, (-1, kv_chunk_len, k.shape[-1])) kv_info = np.reshape(kv_info, (-1, kv_chunk_len)) if kv_chunk_len is not None: v = np.reshape(v, (-1, kv_chunk_len, v.shape[-1])) if share_qk: k = length_normalized(k) k = k / np.sqrt(k.shape[-1]) # Optionally include adjacent chunks. if q_chunk_len is not None or kv_chunk_len is not None: assert q_chunk_len is not None and kv_chunk_len is not None else: assert n_chunks_before == 0 and n_chunks_after == 0 k = look_adjacent(k, n_chunks_before, n_chunks_after) v = look_adjacent(v, n_chunks_before, n_chunks_after) kv_info = look_adjacent(kv_info, n_chunks_before, n_chunks_after) # Dot-product attention. dots = np.matmul(q, np.swapaxes(k, -1, -2)) # Masking if mask_fn is not None: dots = mask_fn(dots, q_info[..., :, None], kv_info[..., None, :]) # Softmax. dots_logsumexp = fastmath.logsumexp(dots, axis=-1, keepdims=True) dots = np.exp(dots - dots_logsumexp) if dropout > 0.0: assert rng is not None # Dropout is broadcast across the bin dimension dropout_shape = (dots.shape[-2], dots.shape[-1]) # TODO(kitaev): verify that tie-in is safe to remove (in light of jax fix) keep_prob = 1.0 - dropout keep = fastmath.random.bernoulli(rng, keep_prob, dropout_shape) multiplier = keep.astype(dots.dtype) / keep_prob dots = dots * multiplier # The softmax normalizer (dots_logsumexp) is used by multi-round LSH attn. out = np.matmul(dots, v) out = np.reshape(out, (-1, out.shape[-1])) dots_logsumexp = np.reshape(dots_logsumexp, (-1,)) return out, dots_logsumexp
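Why return `dots_logsumexp`? It lets multiple attention rounds be merged exactly: weight each round's output by its softmax normalizer. A tiny NumPy sketch of that combination, with made-up values:

import numpy as np

o1, lse1 = np.array([1.0, 0.0]), 0.7  # output and logsumexp of round 1
o2, lse2 = np.array([0.0, 1.0]), 1.3  # output and logsumexp of round 2
total = np.logaddexp(lse1, lse2)
combined = np.exp(lse1 - total) * o1 + np.exp(lse2 - total) * o2
print(combined)  # round 2 dominates because its normalizer is larger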
def apply_broadcasted_dropout(vecs, dropout_rate, rng): """Apply dropout, broadcasted across all but the last dimension of `vecs`.""" if dropout_rate > 0.0: assert rng is not None keep_prob = 1.0 - dropout_rate keep = fastmath.random.bernoulli(rng, keep_prob, (vecs.shape[-1],)) multiplier = keep.astype(vecs.dtype) / keep_prob return vecs * multiplier else: return vecs
def permute_via_gather(val, permutation, inverse_permutation, axis=0): """Permutation helper for LSH attention.""" def permute_impl(p, unused_ip, val): return np.take(val, p, axis=axis) def permute_fwd(p, ip, val): return np.take(val, p, axis=axis), ip def permute_bwd(ip, permuted_grad): # JAX autodiff would synthesize a scatter operation because it doesn't # know that the indices are a permutation. However on TPU, gathers are # faster than scatters (at least in the regime the LSH attention uses). return (None, None, np.take(permuted_grad, ip, axis=axis)) permute = fastmath.custom_vjp(permute_impl, permute_fwd, permute_bwd) return permute(permutation, inverse_permutation, val)
def permute_via_sort(val, keys, inverse_keys, axis=0): """Permutation helper for LSH attention.""" def permute_impl(k, unused_ik, val): # On TPU, sorting scalars by key is faster than a gather. _, permuted = fastmath.sort_key_val(k, val, dimension=axis) return permuted def permute_fwd(k, ik, val): # On TPU, sorting scalars by key is faster than a gather. _, permuted = fastmath.sort_key_val(k, val, dimension=axis) return permuted, ik def permute_bwd(ik, permuted_grad): _, val_grad = fastmath.sort_key_val( ik, permuted_grad, dimension=axis) return (None, None, val_grad) permute = fastmath.custom_vjp(permute_impl, permute_fwd, permute_bwd) return permute(keys, inverse_keys, val)
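A round-trip check of the permutation semantics in plain NumPy (a stand-in for the helpers above): applying a permutation and then its inverse restores the original order.

import numpy as np

perm = np.array([2, 0, 1])
inv = np.argsort(perm)           # the inverse permutation
val = np.array([10., 20., 30.])
permuted = np.take(val, perm)    # what permute_via_gather computes
print(np.take(permuted, inv))    # [10. 20. 30.]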
def _ProjectAndSplitHeads(  # pylint: disable=invalid-name
    d_model,
    n_heads,
    use_bias,
    num_weights=2,
    sparsity=16,
    length_kernel_size=3,
    weights_format='sparse',
    rotary_position_emb=False,
    mode='train'):
  """Creates the QK and V activations from input."""
  # There can be either two or three weights:
  # two - qk and v or three - q, k, v
  # If there are three, we want to average q and k and use that.

  # Weights can also be in 'heads' major format - (n_heads, d_model, d_head)
  # this is used by efficient_attention.LSHSelfAttention and
  # efficient_attention.SelfAttention

  # Or they can be in 'model' major format - (d_model, d_model), which is what
  # tl._attention/CausalAttention etc. use -- so use this format if we
  # pretrained a model with those layers and are finetuning it with
  # PureLSHSelfAttention.

  assert weights_format in ('heads', 'model', 'sparse')

  # When an earlier model was trained with 3 separate weights for Q, K, V
  # projections with tl._attention/tl._causalAttention etc.
  if weights_format == 'model' and num_weights == 3:
    return cb.Serial(
        # Create the raw Q, K, V projections.
        cb.Branch(
            core.Dense(d_model, use_bias=use_bias),
            core.Dense(d_model, use_bias=use_bias),
            core.Dense(d_model, use_bias=use_bias)),  # q, k, v
        # Optionally, rotate Q and K vectors if rotary embeddings are used.
        cb.Parallel(rotary_pe.Rotate(), rotary_pe.Rotate(), None)
        if rotary_position_emb else [],
        # Average Q and K into one single QK tensor.
        core.Fn('QKAvg', lambda x, y: (x + y) / 2.0, n_out=1),  # qk, v
        # Split heads and combine with batch dimension to get two tensors of
        # (batch * n_heads, seq_len, d_head) shape.
        cb.Parallel(
            attention.SplitIntoHeads(n_heads),
            attention.SplitIntoHeads(n_heads))  # qk, v
    )

  if weights_format == 'sparse' and num_weights == 3:
    d_module = d_model // sparsity
    # This layer matches sparsity.MultiplicativeConvCausalAttention,
    # see there for more explanation.
    # TODO(lukaszkaiser): unify code so that we don't duplicate so much.
    return cb.Serial(
        cb.Select([0, 0]),  # duplicate activations
        sp.FactoredDense(sparsity, d_model, d_model),
        cb.Select([0, 0, 0]),  # use for q, k, v
        cb.Parallel(
            [sp.LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=3,
                                 length_kernel_size=length_kernel_size),
             attention.SplitIntoHeads(n_heads)],
            [sp.LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=3,
                                 length_kernel_size=length_kernel_size),
             attention.SplitIntoHeads(n_heads)],
            [cb.Select([0], n_in=2),
             sp.LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=1,
                                 length_kernel_size=length_kernel_size),
             attention.SplitIntoHeads(n_heads)],
        ),
        core.Fn('QKAvg', lambda x, y: (x + y) / 2.0, n_out=1),
    )

  if weights_format == 'sparse' and num_weights == 2:
    d_module = d_model // sparsity
    # This layer matches sparsity.MultiplicativeConvCausalAttention,
    # see there for more explanation.
    # TODO(lukaszkaiser): unify code so that we don't duplicate so much.
    return cb.Serial(
        cb.Select([0, 0]),  # pre-qkv, pre-v-for-concat
        sp.FactoredDense(sparsity, d_model, d_model),  # shared q k
        cb.Select([0, 0]),  # pre-qk, pre-v, pre-v-for-concat
        sp.LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=3,
                            length_kernel_size=length_kernel_size),
        attention.SplitIntoHeads(n_heads),
        cb.Parallel(
            [],
            [cb.Select([0], n_in=2),
             sp.LocallyConvDense(sparsity, d_module, mode=mode, kernel_size=1,
                                 length_kernel_size=length_kernel_size),
             attention.SplitIntoHeads(n_heads)],
        )
    )

  # We want to train from scratch and have only two weights, w_qk and w_v.
  if weights_format == 'model' and num_weights == 2:
    return cb.Branch(
        [
            core.Dense(d_model, use_bias=use_bias),
            rotary_pe.Rotate() if rotary_position_emb else [],
            attention.SplitIntoHeads(n_heads)
        ],
        [
            core.Dense(d_model, use_bias=use_bias),
            attention.SplitIntoHeads(n_heads)
        ],
    )

  assert weights_format == 'heads'
  raise NotImplementedError('TODO(afrozm): Implement this when we want to use '
                            'checkpoints trained with LSHSelfAttention or '
                            'SelfAttention')
def threefry_2x32_prf(key, x: jnp.ndarray) -> jnp.ndarray: """Apply the threefry PRF to an array of inputs. This function is vectorized over x. For threefry_2x32: K = X = uint32[2] Args: key: uint32[2] the key of the PRF x: uint32[..., 2] the inputs Returns: y: uint32[..., 2] the outputs """ if not (key.shape == (2,) and key.dtype == jnp.uint32): raise TypeError('key must be uint32[2]', key) if not (x.shape[-1:] == (2,) and x.dtype == jnp.uint32): raise TypeError('x must be uint32[..., 2]', x) # Threefry-2x32 expects this weird format: x_3f = jnp.moveaxis(x, source=-1, destination=0).flatten() y_3f = jex.random.threefry_2x32(key, x_3f) y = jnp.moveaxis( jnp.reshape(y_3f, (2,) + x.shape[:-1]), source=0, destination=-1) return y
def threefry_2x32_prange(key, lo: int = 0, hi: int = 2): """Splits a key into a stream of random keys. This uses the little-endian counter mode. Args: key: uint32[2] the key to split lo: the range to start extracting from hi: the range to stop extracting from Returns: keys: uint32[hi - lo, 2] the split keys """ if not (key.shape == (2,) and key.dtype == jnp.uint32): raise ValueError('key must be uint32[2]') if not hi < 2**32: # You shouldn't really be using more than half the key size anyways. raise NotImplementedError('only 32-bit sizes are supported') # Create a 64-bit counter: i_lo = jnp.arange(lo, hi, dtype=jnp.uint32) i_hi = jnp.zeros_like(i_lo) i = jnp.stack([i_lo, i_hi], axis=-1) return threefry_2x32_prf(key, i)
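A usage sketch for the helper above (assumes a JAX uint32[2] key; values are illustrative):

import jax.numpy as jnp

key = jnp.array([0, 42], dtype=jnp.uint32)
subkeys = threefry_2x32_prange(key, lo=0, hi=4)
print(subkeys.shape)  # (4, 2): four derived uint32[2] keys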
def RelativeAttentionWrapper(d_feature,
                             n_heads=1,
                             dropout=0.0,
                             max_inference_length=2048,
                             mode='train',
                             context_bias_layer=None,
                             location_bias_layer=None,
                             total_pooling=None):
  """Relative attention wrapper.

  Wraps relative attention for compatibility with configurable attention,
  so that it can be called by `ApplyAttentionLayer`.

  Args:
    d_feature: Last/innermost dimension of activations in the input to and
      output from this layer.
    n_heads: Number of attention heads. Attention heads effectively split
      activation vectors into ``n_heads`` subvectors, of size ``d_feature /
      n_heads``.
    dropout: dropout rate.
    max_inference_length: max inference length.
    mode: One of ``'train'``, ``'eval'``, or ``'predict'``.
    context_bias_layer: context bias layer.
    location_bias_layer: location bias layer.
    total_pooling: total pooling.

  Returns:
    relative attention layer.
  """
  del max_inference_length
  attention = RelativeAttentionLMLayer(
      d_feature,
      context_bias_layer,
      location_bias_layer,
      total_pooling,
      n_heads=n_heads,
      dropout=dropout,
      mode=mode)

  return cb.Serial(cb.Select([0, 0, 0]), attention)
def get_rel_att_inputs(d_model, n_heads): """Global relative attentions bias initialization shared across layers.""" assert d_model % n_heads == 0 and d_model % 2 == 0 d_head = d_model // n_heads bias_initializer = init.RandomNormalInitializer(1e-6) context_bias_layer = core.Weights( bias_initializer, shape=(1, n_heads, 1, d_head)) location_bias_layer = core.Weights( bias_initializer, shape=(1, n_heads, 1, d_head)) return context_bias_layer, location_bias_layer
def RelativeAttentionLayer(d_feature,
                           context_bias_layer,
                           location_bias_layer,
                           total_kv_pooling,
                           separate_cls,
                           n_heads=1,
                           dropout=0.0,
                           mode='train'):
  """Returns a layer that maps (q, k, v, masks) to (activations, masks).

  When the number of keys is smaller than the number of queries, the layer
  works in O(q^2 * d); otherwise it is O(q * k * d). That is because we need
  to shift relative distances by the current pooling, and when we upsample
  the current pooling is a fraction < 1.

  Visual explanation:
  [01][23][45][67] -> [0][1][2][3][4][5][6][7]
  For token [0] we calculate relative distances as follows:
  * 0 2 4 6
  However for token [1] we need relative distances changed by 1, specifically:
  * -1 1 3 5
  So we not only need to calculate the distances that correspond to the
  spacing between the keys but also the ones in between, because there is more
  than one query token (at different positions, which means different relative
  distances) for a single key token.

  Args:
    d_feature: Depth/dimensionality of feature embedding.
    context_bias_layer: Global context bias from Transformer XL's attention.
      There should be one such layer shared for all relative attention layers.
    location_bias_layer: Global location bias from Transformer XL's attention.
      There should be one such layer shared for all relative attention layers.
    total_kv_pooling: Accumulated pool size of keys/values used at this layer.
    separate_cls: True/False if we separate_cls in calculations.
    n_heads: Number of attention heads.
    dropout: Probabilistic rate for internal dropout applied to attention
      activations (based on query-key pairs) before dotting them with values.
    mode: One of `'train'`, `'eval'`, or `'predict'`.
  """

  return cb.Serial(
      cb.Branch(
          PositionalEmbeddings(d_feature, separate_cls, total_kv_pooling),
          cb.Select([0]), cb.Select([1])),
      cb.Parallel(
          core.Dense(d_feature),
          core.Dense(d_feature),
          core.Dense(d_feature),
          core.Dense(d_feature),
      ),
      context_bias_layer,
      location_bias_layer,
      RelativeAttention(  # pylint: disable=no-value-for-parameter
          separate_cls=separate_cls,
          n_heads=n_heads,
          dropout=dropout,
          mode=mode),
      core.Dense(d_feature),
  )
def RelativeAttentionLMLayer(d_feature,
                             context_bias_layer,
                             location_bias_layer,
                             total_kv_pooling,
                             separate_cls=False,
                             n_heads=1,
                             dropout=0.0,
                             mode='train'):
  """Returns a layer that maps (q, k, v) to (activations).

  Same as the standard relative attention layer, but additionally prepares,
  based on the sizes of queries and keys, a mask that masks out the future.
  Masking the future is primarily used for language modelling.

  Args:
    d_feature: Depth/dimensionality of feature embedding.
    context_bias_layer: Global context bias from Transformer XL's attention.
      There should be one such layer shared for all relative attention layers.
    location_bias_layer: Global location bias from Transformer XL's attention.
      There should be one such layer shared for all relative attention layers.
    total_kv_pooling: Accumulated pool size of keys/values used at this layer.
    separate_cls: True/False if we separate_cls in calculations.
    n_heads: Number of attention heads.
    dropout: Probabilistic rate for internal dropout applied to attention
      activations (based on query-key pairs) before dotting them with values.
    mode: One of `'train'`, `'eval'`, or `'predict'`.
  """

  attention = RelativeAttentionLayer(
      d_feature,
      context_bias_layer,
      location_bias_layer,
      total_kv_pooling,
      separate_cls,
      n_heads=n_heads,
      dropout=dropout,
      mode=mode)

  return cb.Serial(
      CreateAttentionMaskLayer(),  # q, k, v, mask
      attention,  # vecs, mask
      cb.Select([0], n_in=2),  # vecs
  )
def DotProductAttention(queries, keys, values, pos_emb, context_bias, location_bias, mask, separate_cls, dropout, mode, rng): """Computes new activations via masked attention-weighted sum of values. Args: queries: Per-head activations representing attention queries. keys: Per-head activations representing attention keys. values: Per-head activations to be combined by computed attention weights. pos_emb: Per-head activations representing positional embeddings. context_bias: Global context bias from Transformer XL's attention. location_bias: Global location bias from Transformer XL's attention. mask: Mask that distinguishes positions with real content vs. padding. separate_cls: True/False if we separate_cls in calculations. dropout: Probabilistic rate for dropout applied to attention strengths (based on query-key pairs) before applying them to values. mode: One of `'train'`, `'eval'`, or `'predict'`. rng: Single-use random number generator (JAX PRNG key). Returns: Per-head activations resulting from masked per-head attention-weighted sum of per-head values. This function is the core of the attention mechanism. It: - computes per-head attention weights from per-head `queries` and `keys`, - applies `mask` to screen out positions that come from padding tokens, - optionally applies dropout to attention weights, and - uses attention weights to combine per-head `values` vectors. """ d_feature = queries.shape[-1] keys_len, queries_len = keys.shape[-2], queries.shape[-2] funnel_factor, is_upsampling = calc_funnel_ratio(keys_len, queries_len) ac = jnp.einsum('bnid,bnjd->bnij', queries + context_bias, keys) bd = jnp.einsum('bnid,jnd->bnij', queries + location_bias, pos_emb) bd = _fast_matrix_shift(bd, funnel_factor, is_upsampling) if separate_cls: # Masking out location part of attention for cls token bd = bd.at[:, :, :, 0].set(0) bd = bd.at[:, :, 0, :].set(0) dots = (ac + bd) / jnp.sqrt(d_feature) if mask is not None: dots = jnp.where(mask, dots, jnp.full_like(dots, -1e9)) # Softmax. dots = jnp.exp(dots - fastmath.logsumexp(dots, axis=-1, keepdims=True)) if dropout >= 1.0: raise ValueError('Dropout rates must be lower than 1.') if dropout is not None and dropout > 0.0 and mode == 'train': keep = fastmath.random.bernoulli(rng, 1.0 - dropout, dots.shape) dots = jnp.where(keep, dots / (1.0 - dropout), jnp.zeros_like(dots)) out = jnp.matmul(dots, values) out = out.astype(jnp.float32) dots = dots.astype(jnp.float32) return out, dots
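A shape sketch for the two einsums above (illustrative sizes: batch 2, 3 heads, length 5, head depth 4). In `bd` the last axis is indexed by relative position rather than absolute key position (2L - 1 relative positions are assumed here for illustration; the exact count depends on the funnel configuration), which is why `bd` is realigned by `_fast_matrix_shift` before being added to `ac`.

import numpy as np

q = np.zeros((2, 3, 5, 4))
k = np.zeros((2, 3, 5, 4))
pos_emb = np.zeros((9, 3, 4))  # 2 * 5 - 1 relative positions
ac = np.einsum('bnid,bnjd->bnij', q, k)
bd = np.einsum('bnid,jnd->bnij', q, pos_emb)
print(ac.shape, bd.shape)  # (2, 3, 5, 5) (2, 3, 5, 9)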
def PositionalEmbeddings(d_feature, separate_cls, total_kv_pooling):
  """Positional embeddings.

  Args:
    d_feature: Depth/dimensionality of feature embedding.
    separate_cls: True/False if we separate_cls in calculations.
    total_kv_pooling: Accumulated pool size of keys/values until this layer.

  Returns:
    A layer that, based on queries, keys, and the accumulated pool size of
    keys/values up to this layer, computes sinusoidal positional embeddings
    for relative attention calculations.
  """

  def PositionsVectors(queries, keys):
    assert not separate_cls

    keys_len, queries_len = keys.shape[-2], queries.shape[-2]
    funnel_factor, is_upsampling = calc_funnel_ratio(keys_len, queries_len)

    if funnel_factor == 1:
      offset = keys_len - 1
      positions = (jnp.arange(keys_len) - offset) * total_kv_pooling
    else:
      if is_upsampling:
        positions = jnp.arange(-queries_len + 1, queries_len, 1.0)
      else:
        positions = jnp.arange(-keys_len + 1, keys_len, 1.0) * total_kv_pooling

    return positions

  def Sinusoidal_Embeddings(positions):
    inv_freq = 1 / (10000**(jnp.arange(0.0, d_feature, 2.0) / d_feature))
    sinusoid_freq = jnp.einsum('i,j->ij', positions, inv_freq)
    pos_emb = jnp.concatenate(
        [jnp.sin(sinusoid_freq), jnp.cos(sinusoid_freq)], axis=1)
    return pos_emb

  return cb.Serial(
      cb.Fn('Generate positions vectors', PositionsVectors, n_out=1),
      cb.Fn(
          'Transform to sinusoidal encodings', Sinusoidal_Embeddings, n_out=1))
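A standalone NumPy sketch of the sinusoidal encoding above for a few relative positions (d_feature assumed even, as the sin/cos concatenation requires):

import numpy as np

d_feature = 8
positions = np.arange(-3.0, 4.0)  # 7 relative positions
inv_freq = 1.0 / (10000 ** (np.arange(0.0, d_feature, 2.0) / d_feature))
freqs = np.einsum('i,j->ij', positions, inv_freq)
pos_emb = np.concatenate([np.sin(freqs), np.cos(freqs)], axis=1)
print(pos_emb.shape)  # (7, 8)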